
; System allocator.
; Based on dlmalloc 2.8.6.
; dlmalloc is written by Doug Lea and released to the public domain.

; Algorithms are the same as in dlmalloc, with the following differences:
; * segment management uses large segments,
;   since segments can never be merged;
; * top chunk is usually large, so the code tries mmap
;   for chunks with size >= mmap_threshold before allocating from top;
; * there is additional bookkeeping for releasing physical memory
;   instead of relying on unmapping entire segments:
;   tree chunks have an additional field at the end,
;   and all recently expanded tree chunks are linked in one list for sys_trim;
; * there is an additional list of all mmapped chunks,
;   so that destroy_mspace can free everything, including mmapped chunks;
; * realloc and memalign can give back the space preceding a free chunk
;   (extending that chunk) even if that space is smaller than the minimal chunk size.

; Statistics:
;   Alignment: 8 bytes
;   Minimum overhead per allocated chunk: 4 or 8 bytes,
;     depending on whether FOOTERS is defined.
;   Minimum allocated size: 16 bytes (including overhead)
; See details at http://gee.cs.oswego.edu/dl/html/malloc.html.
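
; Rough illustration of the figures above (with FOOTERS = 0, so 4 bytes of
; overhead and 8-byte alignment): a request for 20 bytes needs 20 + 4 = 24
; bytes, already a multiple of 8, so it occupies a 24-byte chunk; a request
; for 1 byte needs 1 + 4 = 5 bytes, which is padded up to the 16-byte
; minimum chunk size.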

; The KolibriOS kernel provides functions similar to mmap/mremap/munmap;
; they are used as the base for allocations.

FOOTERS = 0
;  If true, provide extra checking and dispatching by placing
;  information in the footers of allocated chunks. This adds
;  space and time overhead, but can be useful for debugging.

DEFAULT_MMAP_THRESHOLD = 256*1024
;  The request size threshold for using MMAP to directly service a
;  request. Requests of at least this size that cannot be allocated
;  using already-existing space will be serviced via mmap.  (If enough
;  normal freed space already exists, it is used instead.)  Using mmap
;  segregates relatively large chunks of memory so that they can be
;  individually obtained and released from the host system. A request
;  serviced through mmap is never reused by any other request (at least
;  not directly; the system may just so happen to remap successive
;  requests to the same locations).  Segregating space in this way has
;  the benefits that: mmapped space can always be individually released
;  back to the system, which helps keep the system-level memory demands
;  of a long-lived program low.  Also, mapped memory doesn't become
;  `locked' between other chunks, as can happen with normally allocated
;  chunks, which means that even trimming via malloc_trim would not
;  release them.  However, it has the disadvantage that the space
;  cannot be reclaimed, consolidated, and then used to service later
;  requests, as happens with normal chunks.  The advantages of mmap
;  nearly always outweigh the disadvantages for "large" chunks, but the
;  value of "large" may vary across systems.  The default is an
;  empirically derived value that works well in most systems. You can
;  disable mmap by setting this to 0xFFFFFFFF.
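
;  For example, with the default threshold a 300*1024-byte request that cannot
;  be satisfied from existing free space is serviced by mmap, while a
;  64*1024-byte request is served from the normal segments. An illustrative
;  (non-default) configuration that disables the mmap path entirely:
;       DEFAULT_MMAP_THRESHOLD = 0xFFFFFFFF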

RELEASE_CHECK_RATE = 64
;  The number of consolidated frees between checks to release
;  unused segments when freeing. When using non-contiguous segments,
;  especially with multiple mspaces, checking only for the topmost space
;  doesn't always suffice to trigger trimming. To compensate for this,
;  free() will, with a period of RELEASE_CHECK_RATE (or the
;  current number of segments, if greater), try to release unused
;  segments to the OS when freeing chunks that result in
;  consolidation. The best value for this parameter is a compromise
;  between slowing down frees with relatively costly checks that
;  rarely trigger versus holding on to unused memory. To effectively
;  disable it, set it to 0xFFFFFFFF. This may lead to a very slight speed
;  improvement at the expense of carrying around more memory.
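
;  As a rough illustration: with RELEASE_CHECK_RATE = 64, a program that frees
;  6400 chunks with consolidation performs on the order of 100 release scans
;  (fewer if the heap holds more than 64 segments), so the cost of scanning is
;  amortized over many frees.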

DEFAULT_MSPACE_SIZE = 1024*1024

include 'malloc_internal.inc'

prologue@proc equ fpo_prologue
epilogue@proc equ fpo_epilogue

;  void* create_mspace(size_t capacity, int locked)
;  create_mspace creates and returns a new independent space with the
;  given initial capacity, or, if 0, the default mspace size.  It
;  returns null if there is no system memory available to create the
;  space.  If the argument locked is non-zero, the space uses a separate
;  lock to control access. The capacity of the space will grow
;  dynamically as needed to service mspace_malloc requests.
proc create_mspace stdcall uses ebx, capacity, locked
        do_create_mspace
endp

;  void destroy_mspace(mspace msp)
;  destroy_mspace destroys the given space and attempts to return all
;  of its memory back to the system. After destruction, the results of
;  access to all memory used by the space become undefined.
proc destroy_mspace stdcall uses ebx, msp
        do_destroy_mspace
endp
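
; Usage sketch (illustrative only; my_mspace and my_buffer are hypothetical
; variables of the calling program):
;         stdcall create_mspace, 0, 1              ; default capacity, with locking
;         test    eax, eax
;         jz      .no_memory
;         mov     [my_mspace], eax                 ; keep the mspace handle
;         stdcall mspace_malloc, [my_mspace], 256  ; allocate 256 bytes from that space
;         mov     [my_buffer], eax
;         ...
;         stdcall mspace_free, [my_mspace], [my_buffer]
;         stdcall destroy_mspace, [my_mspace]      ; return the whole space to the system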


macro set_default_heap
{
; the default heap is created by malloc_init; its mspace pointer is kept
; in the process data area at offset 0x18
        mov     ebp, FS_PROCESS_DATA
        mov     ebp, [ebp+0x18]
.got_mspace:
}

macro set_explicit_heap
{
        mov     ebp, [msp]
}

macro mspace_adapter common_label
{
; adapter for the mspace_* entry points: rearrange the stack so that it looks
; as if the corresponding plain function (malloc, free, ...) had been entered
; through its usual 'uses ebp ebx esi' prologue, load ebp with the msp argument
; instead of the default heap, and continue at its .got_mspace label.
        mov     eax, [esp]      ; eax = return address
        mov     [esp], ebp      ; save the caller's ebp in its place
        mov     ebp, [esp+4]    ; ebp = msp argument
        mov     [esp+4], eax    ; the return address goes where msp was
        push    ebx
        push    esi
        jmp     common_label
}

;  void* malloc(size_t bytes)
;  Returns a pointer to a newly allocated chunk of at least bytes bytes, or
;  null if no space is available, in which case errno is set to ENOMEM
;  on ANSI C systems.
;
;  If bytes is zero, malloc returns a minimum-sized chunk. (The minimum
;  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
;  systems.)  Note that size_t is an unsigned type, so calls with
;  arguments that would be negative if signed are interpreted as
;  requests for huge amounts of space, which will often fail. The
;  maximum supported value of bytes differs across systems, but is in all
;  cases less than the maximum representable value of a size_t.
align 16
proc malloc stdcall uses ebp ebx esi, bytes
        set_default_heap
        do_malloc
endp

;  void free(void* mem)
;  Releases the chunk of memory pointed to by mem, which had been previously
;  allocated using malloc or a related routine such as realloc.
;  It has no effect if mem is null. If mem was not allocated by malloc or was
;  already freed, free(mem) will by default cause the current program to abort.
align 16
proc free stdcall uses ebp ebx esi, mem
        set_default_heap
        do_free
endp
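
; Usage sketch (illustrative only; .fail is a hypothetical local label):
;         stdcall malloc, 1024          ; request 1024 bytes from the default heap
;         test    eax, eax
;         jz      .fail                 ; null means no memory is available
;         mov     esi, eax              ; use the chunk...
;         ...
;         stdcall free, esi             ; ...then release it exactly once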

;  void* calloc(size_t n_elements, size_t elem_size);
;  Returns a pointer to n_elements * elem_size bytes, with all locations
;  set to zero.
align 16
proc calloc stdcall, n_elements, elem_size
        do_calloc <stdcall malloc,eax>
endp
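
; For example (illustrative), a zero-initialized array of 100 dwords:
;         stdcall calloc, 100, 4        ; 100 elements of 4 bytes each, all zeroed
;         test    eax, eax
;         jz      .fail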

;  void* realloc(void* oldmem, size_t bytes)
;  Returns a pointer to a chunk of size bytes that contains the same data
;  as does chunk oldmem up to the minimum of (bytes, oldmem's size) bytes, or null
;  if no space is available.
;
;  The returned pointer may or may not be the same as oldmem. The algorithm
;  prefers extending oldmem in most cases when possible, otherwise it
;  employs the equivalent of a malloc-copy-free sequence.
;
;  If oldmem is null, realloc is equivalent to malloc.
;
;  If space is not available, realloc returns null, errno is set (if on
;  ANSI) and oldmem is NOT freed.
;
;  If bytes is smaller than the size already held by oldmem, the newly unused
;  space is lopped off and freed if possible.  realloc with a size
;  argument of zero (re)allocates a minimum-sized chunk.
;
;  The old unix realloc convention of allowing the last-freed chunk
;  to be used as an argument to realloc is not supported.
align 16
proc realloc stdcall uses ebp ebx esi, oldmem, bytes
        set_default_heap
if used mspace_realloc
        do_realloc <stdcall mspace_malloc,ebp,>, <stdcall mspace_free,ebp,>
else
        do_realloc <stdcall malloc,>, <stdcall free,>
end if
endp
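
; Growth sketch (illustrative; my_buffer is a hypothetical variable). The old
; pointer is kept until realloc succeeds, since on failure oldmem is not freed:
;         stdcall realloc, [my_buffer], 4096
;         test    eax, eax
;         jz      .keep_old_buffer      ; [my_buffer] is still valid here
;         mov     [my_buffer], eax      ; only now replace the stored pointer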

;  void* realloc_in_place(void* oldmem, size_t bytes)
;  Resizes the space allocated for oldmem to size bytes, only if this can be
;  done without moving oldmem (i.e., only if there is adjacent space
;  available if bytes is greater than oldmem's current allocated size, or bytes is
;  less than or equal to oldmem's size). This may be used instead of plain
;  realloc if an alternative allocation strategy is needed upon failure
;  to expand space; for example, reallocation of a buffer that must be
;  memory-aligned or cleared. You can use realloc_in_place to trigger
;  these alternatives only when needed.
;
;  Returns oldmem if successful; otherwise null.
align 16
proc realloc_in_place stdcall uses ebp ebx esi, oldmem, bytes
        set_default_heap
        do_realloc_in_place
endp
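
; Fallback sketch (illustrative; labels and variables are hypothetical): try to
; grow in place first, and only fall back to an alternative strategy on failure,
; as described above:
;         stdcall realloc_in_place, [my_buffer], 8192
;         test    eax, eax
;         jnz     .grown_in_place       ; oldmem was extended, pointer unchanged
;         ; otherwise allocate a new buffer, copy the data, free the old one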

;  void* memalign(size_t alignment, size_t bytes);
;  Returns a pointer to a newly allocated chunk of at least bytes bytes,
;  aligned in accord with the alignment argument.
;
;  The alignment argument should be a power of two. If the argument is
;  not a power of two, the nearest greater power is used.
;  8-byte alignment is guaranteed by normal malloc calls, so don't
;  bother calling memalign with an argument of 8 or less.
;
;  Overreliance on memalign is a sure way to fragment space.
align 16
proc memalign stdcall uses ebp ebx esi, alignment, bytes
        set_default_heap
if used mspace_memalign
        do_memalign <stdcall mspace_malloc,ebp,>
else
        do_memalign <stdcall malloc,>
end if
endp
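
; For example (illustrative), a page-aligned 16 KiB buffer:
;         stdcall memalign, 4096, 16*1024
;         test    eax, eax
;         jz      .fail
;         ; on success eax is aligned to a 4096-byte boundary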

;  void* mspace_malloc(mspace msp, size_t bytes)
;  mspace_malloc behaves as malloc, but operates within
;  the given space.
align 16
proc mspace_malloc ;stdcall uses ebp ebx esi, msp, bytes
;       set_explicit_heap
;       do_malloc
        mspace_adapter malloc.got_mspace
endp

;  void mspace_free(mspace msp, void* mem)
;  mspace_free behaves as free, but operates within
;  the given space.
align 16
proc mspace_free ;stdcall uses ebp ebx esi, msp, mem
;       set_explicit_heap
;       do_free
        mspace_adapter free.got_mspace
endp

;  void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
;  mspace_calloc behaves as calloc, but operates within
;  the given space.
align 16
proc mspace_calloc stdcall, msp, n_elements, elem_size
        do_calloc <stdcall mspace_malloc,[msp+4],eax>
endp

;  void* mspace_realloc(mspace msp, void* oldmem, size_t bytes)
;  mspace_realloc behaves as realloc, but operates within
;  the given space.
align 16
proc mspace_realloc ;stdcall uses ebp ebx esi, msp, oldmem, bytes
;       set_explicit_heap
;       do_realloc <stdcall mspace_malloc,ebp,>, <stdcall mspace_free,ebp,>
        mspace_adapter realloc.got_mspace
endp

;  void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes)
align 16
proc mspace_realloc_in_place ;stdcall uses ebp ebx esi, msp, oldmem, bytes
;       set_explicit_heap
;       do_realloc_in_place
        mspace_adapter realloc_in_place.got_mspace
endp

;  void* mspace_memalign(mspace msp, size_t alignment, size_t bytes)
;  mspace_memalign behaves as memalign, but operates within
;  the given space.
align 16
proc mspace_memalign ;stdcall uses ebp ebx esi, msp, alignment, bytes
;       set_explicit_heap
;       do_memalign <stdcall mspace_malloc,ebp,>
        mspace_adapter memalign.got_mspace
endp

assert MALLOC_ALIGNMENT >= 8
assert MALLOC_ALIGNMENT and (MALLOC_ALIGNMENT - 1) = 0
assert MCHUNK_SIZE and (MCHUNK_SIZE - 1) = 0
; in: edx = initial size of the process heap
macro malloc_init
{
if FOOTERS
; sysfn 26.9 returns the tick count since boot; use it as entropy for the
; footer magic, forced to be nonzero and a multiple of 8
        mov     eax, 26
        mov     ebx, 9
        call    FS_SYSCALL_PTR
        xor     eax, 0x55555555
        or      eax, 8
        and     eax, not 7
        mov     [malloc_magic], eax
end if
; create the default heap and remember its mspace pointer in the process
; data area at offset 0x18, where set_default_heap reads it back
        stdcall create_mspace, edx, 1
        mov     ecx, FS_PROCESS_DATA
        mov     [ecx+0x18], eax
}
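
; Initialization sketch (illustrative): the macro expects the initial heap size
; in edx, e.g. at program startup:
;         mov     edx, DEFAULT_MSPACE_SIZE
;         malloc_init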

proc heap_corrupted
; print "[<process name>] Heap corrupted, aborting" to the debug board
; and terminate the current thread
        sub     esp, 400h               ; buffer for the thread information
        mov     eax, 9                  ; sysfn 9: get thread information
        mov     ebx, esp
        or      ecx, -1                 ; -1 = the current thread
        call    FS_SYSCALL_PTR
        lea     esi, [ebx+10]           ; +10: process name, at most 11 bytes
        lea     edx, [ebx+10+11]        ; end of the name field
        mov     eax, 63                 ; sysfn 63.1: write the byte in cl to the debug board
        mov     ebx, 1
        mov     cl, '['
        call    FS_SYSCALL_PTR
@@:
        mov     cl, [esi]
        test    cl, cl
        jz      @f
        call    FS_SYSCALL_PTR
        inc     esi
        cmp     esi, edx                ; stop at the end of the name field
        jb      @b
@@:
        mov     esi, heap_corrupted_msg
@@:
        mov     cl, [esi]
        inc     esi
        test    cl, cl
        jz      @f
        mov     eax, 63
        mov     ebx, 1
        call    FS_SYSCALL_PTR
        jmp     @b
@@:
        or      eax, -1                 ; sysfn -1: terminate the current thread
        or      ebx, -1
        call    FS_SYSCALL_PTR
endp

heap_corrupted_msg      db      '] Heap corrupted, aborting',13,10,0