
// Allocator details.

// Copyright (C) 2004-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <cstdlib>
#include <ext/pool_allocator.h>

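// The pool's global mutex is wrapped in an accessor function below (the
// usual construct-on-first-use idiom): the lock object is created the
// first time it is requested rather than depending on the order of
// static initialization.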
namespace
{
  __gnu_cxx::__mutex&
  get_palloc_mutex()
  {
    static __gnu_cxx::__mutex palloc_mutex;
    return palloc_mutex;
  }
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  // Definitions for __pool_alloc_base.
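  // _M_get_free_list maps a request size onto its free-list slot.  With
  // the parameters defined in <ext/pool_allocator.h> (_S_align == 8,
  // _S_max_bytes == 128), requests of 1-8 bytes land in slot 0, 9-16 in
  // slot 1, and so on up to slot 15 for 121-128 bytes.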
  __pool_alloc_base::_Obj* volatile*
  __pool_alloc_base::_M_get_free_list(size_t __bytes) throw ()
  {
    size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
    return _S_free_list + __i;
  }

  __mutex&
  __pool_alloc_base::_M_get_mutex() throw ()
  { return get_palloc_mutex(); }

  // Allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  Assume that __n is properly aligned.  We hold the
  // allocation lock.
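  // Three cases follow: the pool's spare region can satisfy all __nobjs
  // requests; it can satisfy at least one (then __nobjs is trimmed to
  // what fits); or a new block must be obtained from ::operator new,
  // after salvaging any remainder onto a smaller free list.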
  char*
  __pool_alloc_base::_M_allocate_chunk(size_t __n, int& __nobjs)
  {
    char* __result;
    size_t __total_bytes = __n * __nobjs;
    size_t __bytes_left = _S_end_free - _S_start_free;

    if (__bytes_left >= __total_bytes)
      {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return __result;
      }
    else if (__bytes_left >= __n)
      {
        __nobjs = (int)(__bytes_left / __n);
        __total_bytes = __n * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return __result;
      }
    else
      {
        // Try to make use of the left-over piece.
        if (__bytes_left > 0)
          {
            _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
            ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
            *__free_list = (_Obj*)(void*)_S_start_free;
          }

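        // Growth heuristic: request twice the bytes needed now, plus a
        // sixteenth of the heap already handed out (rounded up to the
        // alignment), so chunk sizes grow with overall pool usage.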
        size_t __bytes_to_get = (2 * __total_bytes
                                 + _M_round_up(_S_heap_size >> 4));
        __try
          {
            _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
          }
        __catch(const std::bad_alloc&)
          {
            // Try to make do with what we have.  That can't hurt.  We
            // do not try smaller requests, since that tends to result
            // in disaster on multi-process machines.
            size_t __i = __n;
            for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
              {
                _Obj* volatile* __free_list = _M_get_free_list(__i);
                _Obj* __p = *__free_list;
                if (__p != 0)
                  {
                    *__free_list = __p->_M_free_list_link;
                    _S_start_free = (char*)__p;
                    _S_end_free = _S_start_free + __i;
                    return _M_allocate_chunk(__n, __nobjs);
                    // Any leftover piece will eventually make it to the
                    // right free list.
                  }
              }
            // What we have wasn't enough.  Rethrow.
            _S_start_free = _S_end_free = 0;   // We have no chunk.
            __throw_exception_again;
          }
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
        return _M_allocate_chunk(__n, __nobjs);
      }
  }

  // Returns an object of size __n, and optionally adds to "size
  // __n"'s free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  void*
  __pool_alloc_base::_M_refill(size_t __n)
  {
    int __nobjs = 20;
    char* __chunk = _M_allocate_chunk(__n, __nobjs);
    _Obj* volatile* __free_list;
    _Obj* __result;
    _Obj* __current_obj;
    _Obj* __next_obj;

    if (__nobjs == 1)
      return __chunk;
    __free_list = _M_get_free_list(__n);

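    // The chunk is carved into __nobjs pieces of __n bytes: the first is
    // handed back to the caller, and the remaining pieces are threaded
    // into a singly linked list hanging off this size's free-list slot.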
    // Build free list in chunk.
    __result = (_Obj*)(void*)__chunk;
    *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
    for (int __i = 1; ; __i++)
      {
        __current_obj = __next_obj;
        __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
        if (__nobjs - 1 == __i)
          {
            __current_obj->_M_free_list_link = 0;
            break;
          }
        else
          __current_obj->_M_free_list_link = __next_obj;
      }
    return __result;
  }

  __pool_alloc_base::_Obj* volatile __pool_alloc_base::_S_free_list[_S_free_list_size];

  char* __pool_alloc_base::_S_start_free = 0;

  char* __pool_alloc_base::_S_end_free = 0;

  size_t __pool_alloc_base::_S_heap_size = 0;

  // Instantiations.
  template class __pool_alloc<char>;
  template class __pool_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
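
// Usage sketch: __pool_alloc models the standard allocator interface, so
// it can be plugged in as a container's allocator argument.  Assuming
// <vector> and <ext/pool_allocator.h> are included, something along these
// lines routes the container's element storage through the pool
// implemented above:
//
//   std::vector<int, __gnu_cxx::__pool_alloc<int> > v;
//   v.push_back(42);   // small blocks come from the shared free lists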