Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | RSS feed

  1. // -*- C++ -*- Allocate exception objects.
  2. // Copyright (C) 2001-2015 Free Software Foundation, Inc.
  3. //
  4. // This file is part of GCC.
  5. //
  6. // GCC is free software; you can redistribute it and/or modify
  7. // it under the terms of the GNU General Public License as published by
  8. // the Free Software Foundation; either version 3, or (at your option)
  9. // any later version.
  10. //
  11. // GCC is distributed in the hope that it will be useful,
  12. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14. // GNU General Public License for more details.
  15. //
  16. // Under Section 7 of GPL version 3, you are granted additional
  17. // permissions described in the GCC Runtime Library Exception, version
  18. // 3.1, as published by the Free Software Foundation.
  19.  
  20. // You should have received a copy of the GNU General Public License and
  21. // a copy of the GCC Runtime Library Exception along with this program;
  22. // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
  23. // <http://www.gnu.org/licenses/>.
  24.  
// This is derived from the C++ ABI for IA-64.  Points where we diverge
// for cross-architecture compatibility are noted with "@@@".
  27.  
  28. #include <bits/c++config.h>
  29. #include <cstdlib>
  30. #if _GLIBCXX_HOSTED
  31. #include <cstring>
  32. #endif
  33. #include <climits>
  34. #include <exception>
  35. #include "unwind-cxx.h"
  36. #include <ext/concurrence.h>
  37. #include <new>
  38.  
  39. #if _GLIBCXX_HOSTED
  40. using std::free;
  41. using std::malloc;
  42. using std::memset;
  43. #else
  44. // In a freestanding environment, these functions may not be available
  45. // -- but for now, we assume that they are.
  46. extern "C" void *malloc (std::size_t);
  47. extern "C" void free(void *);
  48. extern "C" void *memset (void *, int, std::size_t);
  49. #endif
  50.  
  51. using namespace __cxxabiv1;
  52.  
  53. // ??? How to control these parameters.
  54.  
  55. // Guess from the size of basic types how large a buffer is reasonable.
  56. // Note that the basic c++ exception header has 13 pointers and 2 ints,
  57. // so on a system with PSImode pointers we're talking about 56 bytes
  58. // just for overhead.
  59.  
  60. #if INT_MAX == 32767
  61. # define EMERGENCY_OBJ_SIZE     128
  62. # define EMERGENCY_OBJ_COUNT    16
  63. #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
  64. # define EMERGENCY_OBJ_SIZE     512
  65. # define EMERGENCY_OBJ_COUNT    32
  66. #else
  67. # define EMERGENCY_OBJ_SIZE     1024
  68. # define EMERGENCY_OBJ_COUNT    64
  69. #endif
  70.  
  71. #ifndef __GTHREADS
  72. # undef EMERGENCY_OBJ_COUNT
  73. # define EMERGENCY_OBJ_COUNT    4
  74. #endif
  75.  
  76.  
  77. namespace
  78. {
  // A fixed-size heap, variable size object allocator.  The arena is
  // carved out once at startup and used only when malloc has already
  // failed, so that an exception object (e.g. for std::bad_alloc) can
  // still be allocated under out-of-memory conditions.
  class pool
    {
    public:
      pool();

      // Hand out a block of at least `size' bytes from the arena;
      // returns NULL when no free chunk is large enough.
      void *allocate (std::size_t);
      // Return a block previously obtained from allocate().
      void free (void *);

      // Whether `ptr' points into the arena — i.e. whether the block
      // must be released via pool::free rather than the heap's free.
      bool in_pool (void *);

    private:
      // Header of a chunk on the free-list.  The list is kept sorted
      // by address so neighbouring chunks can be merged on free.
      struct free_entry {
        std::size_t size;   // size of this free chunk, header included
        free_entry *next;   // next free chunk, or NULL
      };
      // Header of a handed-out block; `data' is what allocate() returns.
      struct allocated_entry {
        std::size_t size;   // full block size, header included
        // Flexible array member, max-aligned for any exception object.
        char data[] __attribute__((aligned));
      };

      // A single mutex controlling emergency allocations.
      __gnu_cxx::__mutex emergency_mutex;

      // The free-list.
      free_entry *first_free_entry;
      // The arena itself - we need to keep track of these only
      // to implement in_pool.
      char *arena;
      std::size_t arena_size;
    };
  110.  
  111.   pool::pool()
  112.     {
  113.       // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
  114.       // to make this tunable.
  115.       arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
  116.                     + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
  117.       arena = (char *)malloc (arena_size);
  118.       if (!arena)
  119.         {
  120.           // If the allocation failed go without an emergency pool.
  121.           arena_size = 0;
  122.           first_free_entry = NULL;
  123.           return;
  124.         }
  125.  
  126.       // Populate the free-list with a single entry covering the whole arena
  127.       first_free_entry = reinterpret_cast <free_entry *> (arena);
  128.       new (first_free_entry) free_entry;
  129.       first_free_entry->size = arena_size;
  130.       first_free_entry->next = NULL;
  131.     }
  132.  
  // First-fit allocation from the emergency arena: returns a pointer to
  // at least `size' usable bytes, or NULL when no free chunk fits.
  void *pool::allocate (std::size_t size)
    {
      __gnu_cxx::__scoped_lock sentry(emergency_mutex);
      // We need an additional size_t member plus the padding to
      // ensure proper alignment of data.
      size += offsetof (allocated_entry, data);
      // And we need to at least hand out objects of the size of
      // a freelist entry, so the block can rejoin the free-list later.
      if (size < sizeof (free_entry))
        size = sizeof (free_entry);
      // And we need to align objects we hand out to the maximum
      // alignment required on the target (this really aligns the
      // tail which will become a new freelist entry).
      size = ((size + __alignof__ (allocated_entry::data) - 1)
              & ~(__alignof__ (allocated_entry::data) - 1));
      // Search for an entry of proper size on the freelist (first fit).
      free_entry **e;
      for (e = &first_free_entry;
           *e && (*e)->size < size;
           e = &(*e)->next)
        ;
      // Nothing large enough: the caller must cope with NULL.
      if (!*e)
        return NULL;
      allocated_entry *x;
      if ((*e)->size - size >= sizeof (free_entry))
        {
          // Split block if it is too large.  The tail becomes a new
          // free entry occupying the found entry's slot in the list.
          free_entry *f = reinterpret_cast <free_entry *>
              (reinterpret_cast <char *> (*e) + size);
          // Save size/next before the placement-new below reuses the
          // storage that currently holds the free_entry header.
          std::size_t sz = (*e)->size;
          free_entry *next = (*e)->next;
          new (f) free_entry;
          f->next = next;
          f->size = sz - size;
          x = reinterpret_cast <allocated_entry *> (*e);
          new (x) allocated_entry;
          x->size = size;
          *e = f;
        }
      else
        {
          // Exact size match or too small overhead for a free entry:
          // hand out the whole chunk and unlink it.
          std::size_t sz = (*e)->size;
          free_entry *next = (*e)->next;
          x = reinterpret_cast <allocated_entry *> (*e);
          new (x) allocated_entry;
          x->size = sz;
          *e = next;
        }
      // Return the payload, not the header.
      return &x->data;
    }
  184.  
  185.   void pool::free (void *data)
  186.     {
  187.       __gnu_cxx::__scoped_lock sentry(emergency_mutex);
  188.       allocated_entry *e = reinterpret_cast <allocated_entry *>
  189.         (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
  190.       std::size_t sz = e->size;
  191.       if (!first_free_entry)
  192.         {
  193.           // If the free list is empty just put the entry there.
  194.           free_entry *f = reinterpret_cast <free_entry *> (e);
  195.           new (f) free_entry;
  196.           f->size = sz;
  197.           f->next = NULL;
  198.           first_free_entry = f;
  199.         }
  200.       else if (reinterpret_cast <char *> (e) + sz
  201.                == reinterpret_cast <char *> (first_free_entry))
  202.         {
  203.           // Check if we can merge with the first free entry being right
  204.           // after us.
  205.           free_entry *f = reinterpret_cast <free_entry *> (e);
  206.           new (f) free_entry;
  207.           f->size = sz + first_free_entry->size;
  208.           f->next = first_free_entry->next;
  209.           first_free_entry = f;
  210.         }
  211.       else
  212.         {
  213.           // Else search for a free item we can merge with at its end.
  214.           free_entry **fe;
  215.           for (fe = &first_free_entry;
  216.                (*fe)->next
  217.                && (reinterpret_cast <char *> ((*fe)->next)
  218.                    > reinterpret_cast <char *> (e) + sz);
  219.                fe = &(*fe)->next)
  220.             ;
  221.           if (reinterpret_cast <char *> (*fe) + (*fe)->size
  222.               == reinterpret_cast <char *> (e))
  223.             /* Merge with the freelist entry.  */
  224.             (*fe)->size += sz;
  225.           else
  226.             {
  227.               // Else put it after it which keeps the freelist sorted.
  228.               free_entry *f = reinterpret_cast <free_entry *> (e);
  229.               new (f) free_entry;
  230.               f->size = sz;
  231.               f->next = (*fe)->next;
  232.               (*fe)->next = f;
  233.             }
  234.         }
  235.     }
  236.  
  237.   bool pool::in_pool (void *ptr)
  238.     {
  239.       char *p = reinterpret_cast <char *> (ptr);
  240.       return (p > arena
  241.               && p < arena + arena_size);
  242.     }
  243.  
  // The single process-wide emergency pool shared by all exception
  // allocations below.
  pool emergency_pool;
  245. }
  246.  
  247. extern "C" void *
  248. __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
  249. {
  250.   void *ret;
  251.  
  252.   thrown_size += sizeof (__cxa_refcounted_exception);
  253.   ret = malloc (thrown_size);
  254.  
  255.   if (!ret)
  256.     ret = emergency_pool.allocate (thrown_size);
  257.  
  258.       if (!ret)
  259.         std::terminate ();
  260.  
  261.   memset (ret, 0, sizeof (__cxa_refcounted_exception));
  262.  
  263.   return (void *)((char *)ret + sizeof (__cxa_refcounted_exception));
  264. }
  265.  
  266.  
  267. extern "C" void
  268. __cxxabiv1::__cxa_free_exception(void *vptr) _GLIBCXX_NOTHROW
  269. {
  270.   char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
  271.   if (emergency_pool.in_pool (ptr))
  272.     emergency_pool.free (ptr);
  273.   else
  274.     free (ptr);
  275. }
  276.  
  277.  
  278. extern "C" __cxa_dependent_exception*
  279. __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
  280. {
  281.   __cxa_dependent_exception *ret;
  282.  
  283.   ret = static_cast<__cxa_dependent_exception*>
  284.     (malloc (sizeof (__cxa_dependent_exception)));
  285.  
  286.   if (!ret)
  287.     ret = static_cast <__cxa_dependent_exception*>
  288.       (emergency_pool.allocate (sizeof (__cxa_dependent_exception)));
  289.  
  290.       if (!ret)
  291.         std::terminate ();
  292.  
  293.   memset (ret, 0, sizeof (__cxa_dependent_exception));
  294.  
  295.   return ret;
  296. }
  297.  
  298.  
  299. extern "C" void
  300. __cxxabiv1::__cxa_free_dependent_exception
  301.   (__cxa_dependent_exception *vptr) _GLIBCXX_NOTHROW
  302. {
  303.   if (emergency_pool.in_pool (vptr))
  304.     emergency_pool.free (vptr);
  305.   else
  306.     free (vptr);
  307. }
  308.