/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.  Send questions,
  comments, complaints, performance data, etc to dl@cs.oswego.edu

* Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
          ftp://gee.cs.oswego.edu/pub/misc/malloc.c
        Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that you may already by default be using a C
  library containing a malloc that is based on some version of this
  malloc (for example in linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.

*/
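/*
  A minimal usage sketch (hypothetical file names; in this KolibriOS
  port the file is built into newlib, whose malloc/realloc/free
  wrappers route through the reentrant _malloc_r/_free_r entry points
  defined around this code):

      cc -O3 -c malloc.c
      cc -o app app.o malloc.o

      void* p = malloc(100);    // small request: served from smallbin or dv
      p = realloc(p, 1 << 20);  // >= mmap_threshold: direct-mapped path
      free(p);
*/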
/* The original header names were lost in extraction; the set below is
   inferred from the identifiers this file uses (newlib reentrancy and
   recursive locks, memcpy, and the KolibriOS _ksys_* heap calls). */
#include <reent.h>
#include <string.h>
#include <sys/lock.h>
#include <sys/ksys.h>


struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free). */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */



/* ------------------- size_t and alignment properties -------------------- */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T            (~(size_t)0)

#define user_alloc(s)         _ksys_alloc(s)
#define user_free             _ksys_free

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE           (sizeof(size_t))
#define SIZE_T_BITSIZE        (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO           ((size_t)0)
#define SIZE_T_ONE            ((size_t)1)
#define SIZE_T_TWO            ((size_t)2)
#define SIZE_T_FOUR           ((size_t)4)
#define TWO_SIZE_T_SIZES      (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES     (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES      (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T       (MAX_SIZE_T / 2U)

#define USE_LOCK_BIT          (2U)
#define USE_MMAP_BIT          (SIZE_T_ONE)
#define USE_NONCONTIGUOUS_BIT (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT            (8U)

#define HAVE_MMAP             1
#define CALL_MMAP(s)          MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s)     MUNMAP_DEFAULT((a), (s))
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL

#define calloc_must_clear(p)  (!is_mmapped(p))

#define MALLOC_FAILURE_ACTION

#define MAX_RELEASE_CHECK_RATE   4095
#define NO_SEGMENT_TRAVERSAL     1
#define MALLOC_ALIGNMENT         ((size_t)8U)
#define CHUNK_OVERHEAD           (SIZE_T_SIZE)
#define DEFAULT_GRANULARITY      ((size_t)512U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD   ((size_t)1024U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD   ((size_t)2048U * (size_t)1024U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK      (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)         (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
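/*
  Worked example (a sketch, with MALLOC_ALIGNMENT == 8): for A == 0x1004,
  ((size_t)A & CHUNK_ALIGN_MASK) == 4, so align_offset(A) == (8 - 4) & 7 == 4,
  and A + 4 == 0x1008 is aligned.  For an already-aligned A the offset is 0.
*/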


#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */

/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
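/*
  Worked value (a sketch, assuming 32-bit size_t): chunk2mem(0) == 8 is
  already aligned, so align_offset contributes 0; pad_request(16) == 24
  for the 16-byte malloc_segment record; plus MIN_CHUNK_SIZE == 16 gives
  TOP_FOOT_SIZE == 40.
*/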

/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
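/*
  Worked example (a sketch, assuming 32-bit size_t so CHUNK_OVERHEAD == 4
  and CHUNK_ALIGN_MASK == 7): request2size(13) == pad_request(13)
  == (13 + 4 + 7) & ~7 == 24, i.e. user bytes plus one word of overhead,
  rounded up to an 8-byte multiple.  request2size(1) == MIN_CHUNK_SIZE.
*/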

/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when previous
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
  use, unless mmapped, in which case both bits are cleared.

  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/

#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG4_BIT           (SIZE_T_FOUR)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)

#define chunksize(p)        ((p)->head & ~(FLAG_BITS))

#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)


struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])


struct malloc_segment {
  char*                  base;   /* base address */
  size_t                 size;   /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t                 sflags; /* mmap and extern flag */
};

#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/* ---------------------------- malloc_state ----------------------------- */

/*
   A malloc_state holds all of the bookkeeping for a space.
   The main fields are:

  Top
    The topmost chunk of the currently active segment. Its size is
    cached in topsize.  The actual size of topmost space is
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
    fenceposts and segment records if necessary when getting more
    space from the system.  The size at which to autotrim top is
    cached from mparams in trim_check, except that it is disabled if
    an autotrim fails.

  Designated victim (dv)
    This is the preferred chunk for servicing small requests that
    don't have exact fits.  It is normally the chunk split off most
    recently to service another small request.  Its size is cached in
    dvsize. The link fields of this chunk are not maintained since it
    is not kept in a bin.

  SmallBins
    An array of bin headers for free chunks.  These bins hold chunks
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
    chunks of all the same size, spaced 8 bytes apart.  To simplify
    use in double-linked lists, each bin header acts as a malloc_chunk
    pointing to the real first node, if it exists (else pointing to
    itself).  This avoids special-casing for headers.  But to avoid
    waste, we allocate only the fd/bk pointers of bins, and then use
    repositioning tricks to treat these as the fields of a chunk.

  TreeBins
    Treebins are pointers to the roots of trees holding a range of
    sizes. There are 2 equally spaced treebins for each power of two
    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
    larger.

  Bin maps
    There is one bit map for small bins ("smallmap") and one for
    treebins ("treemap").  Each bin sets its bit when non-empty, and
    clears the bit when empty.  Bit operations are then used to avoid
    bin-by-bin searching -- nearly all "search" is done without ever
    looking at bins that won't be selected.  The bit maps
    conservatively use 32 bits per map word, even if on 64bit system.
    For a good description of some of the bit-based techniques used
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
    supplement at http://hackersdelight.org/). Many of these are
    intended to reduce the branchiness of paths through malloc etc, as
    well as to reduce the number of memory locations read or written.

  Segments
    A list of segments headed by an embedded malloc_segment record
    representing the initial space.

  Address check support
    The least_addr field is the least address ever obtained from
    MORECORE or MMAP. Attempted frees and reallocs of any address less
    than this are trapped (unless INSECURE is defined).

  Magic tag
    A cross-check field that should always hold same value as mparams.magic.

  Flags
    Bits recording whether to use MMAP, locks, or contiguous MORECORE

  Statistics
    Each space keeps track of current and maximum system memory
    obtained via MORECORE or MMAP.

  Trim support
    Fields holding the amount of unused topmost memory that should trigger
    trimming, and a counter to force periodic scanning to release unused
    non-topmost segments.

  Locking
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
    around every public call using this mspace.

  Extension support
    A void* pointer and a size_t field that can be used to help implement
    extensions to this malloc.
*/

/* Bin types, widths and sizes */
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

struct malloc_state {
    binmap_t   smallmap;
    binmap_t   treemap;
    size_t     dvsize;
    size_t     topsize;
    char*      least_addr;
    mchunkptr  dv;
    mchunkptr  top;
    size_t     trim_check;
    size_t     release_checks;
    size_t     magic;
    mchunkptr  smallbins[(NSMALLBINS+1)*2];
    tbinptr    treebins[NTREEBINS];
    size_t     footprint;
    size_t     max_footprint;
    flag_t     mflags;
    _LOCK_RECURSIVE_T  lock;  /* locate lock among fields that rarely change */
    msegment   seg;
    void*      extp;          /* Unused but available for extensions */
    size_t     exts;
};

typedef struct malloc_state*    mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams. Note that the non-zeroness of "magic"
  also serves as an initialization flag.
*/

struct malloc_params
{
    volatile size_t magic;
    size_t page_size;
    size_t granularity;
    size_t mmap_threshold;
    size_t trim_threshold;
    flag_t default_mflags;
};

static struct malloc_params mparams;

/* Ensure mparams initialized */
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())

static struct malloc_state _gm_;
#define gm                 (&_gm_)
#define is_global(M)       ((M) == &_gm_)

#define is_initialized(M)  ((M)->top != 0)




__LOCK_INIT_RECURSIVE(static, malloc_global_mutex);

#define ACQUIRE_MALLOC_GLOBAL_LOCK()  __lock_acquire_recursive(malloc_global_mutex);
#define RELEASE_MALLOC_GLOBAL_LOCK()  __lock_release_recursive(malloc_global_mutex);

#define PREACTION(M)  ( __lock_acquire_recursive((M)->lock))
#define POSTACTION(M) { __lock_release_recursive((M)->lock); }

/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))
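/*
  The "repositioning trick" mentioned above, spelled out (on this
  32-bit port): smallbin_at(M,i) casts &smallbins[i<<1] to a chunk
  pointer, so that pointer's fd/bk fields land on smallbins[(i<<1)+2]
  and smallbins[(i<<1)+3].  Only the two link words per bin are really
  stored; the prev_foot/head "fields" of a bin header overlap the
  previous bin's links and are never touched.  Hence the array is
  sized (NSMALLBINS+1)*2.
*/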


#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
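/*
  Worked example (a sketch): for S == 0x1800 (6KB), X == S >> 8 == 0x18,
  and bsrl yields K == 4 (the highest set bit of X), so
  I == (4 << 1) + ((0x1800 >> 11) & 1) == 8 + 1 == 9.
  Each power-of-two range thus splits into two bins, selected by the
  bit just below the leading one.
*/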

/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
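/*
  Consistency check (a sketch): minsize_for_tree_index inverts the
  example above.  For i == 9, (1 << (4 + 8)) | (1 << (4 + 8 - 1))
  == 0x1000 | 0x800 == 0x1800, the smallest size that bin holds.
*/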


/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))


/* index corresponding to given bit. Use x86 asm if possible */

#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
  I = (bindex_t)J;\
}
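/*
  Worked example (a sketch) of the bin-map search idiom used below:
  with treemap == 0x24 (bins 2 and 5 non-empty), to find a non-empty
  bin strictly above index 2:

      left_bits(idx2bit(2))        == ~(binmap_t)0x7   (bits 3 and up)
      left_bits(idx2bit(2)) & 0x24 == 0x20
      least_bit(0x20)              == 0x20
      compute_bit2idx(0x20, I)     sets I = 5 via bsf

  This is exactly how tmalloc_large locates the next larger treebin.
*/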


#define mark_inuse_foot(M,p,s)

/* Get/set size at footer */
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

/* Macros for setting head/foot of non-mmapped chunks */

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))


#define assert(x)
#define RTCHECK(e)  __builtin_expect(e, 1)

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p)      is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#define CORRUPTION_ERROR_ACTION(m) \
        do { \
            /*printf("%s malloc heap corrupted\n",__FUNCTION__); */\
            __asm__("int3"); \
        }while(0) \


#define USAGE_ERROR_ACTION(m, p) \
        do { \
            /*printf("%s malloc heap corrupted\n",__FUNCTION__); */\
            __asm__("int3"); \
        }while(0) \

/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros.  Even
  the ones for trees, which are very long but have very short typical
  paths.  This is ugly but reduces reliance on inlining support of
  compilers.
*/

/* Link a free chunk into a smallbin  */
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin  */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert(is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}


/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}

/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks.  We use the rightmost descendant
     of x.  We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed.  This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F))) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}

/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }


/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))


/* For mmap, use granularity alignment  */
#define mmap_align(S) granularity_align(S)

/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

/* True if segment S holds address A */
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char* addr)
{
    msegmentptr sp = &m->seg;
    for (;;) {
        if (addr >= sp->base && addr < sp->base + sp->size)
            return sp;
        if ((sp = sp->next) == 0)
            return 0;
    }
}

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss)
{
    msegmentptr sp = &m->seg;
    for (;;) {
        if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
            return 1;
        if ((sp = sp->next) == 0)
            return 0;
    }
}

static inline void* os_mmap(size_t size)
{
    void* ptr = user_alloc(size);
    return (ptr != 0)? ptr: MFAIL;
}

static inline int os_munmap(void* ptr, size_t size)
{
    return (user_free(ptr) != 0) ? 0 : -1;
}

#define should_trim(M,s)  ((s) > (M)->trim_check)


#define MMAP_DEFAULT(s)        os_mmap(s)
#define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s)

#define internal_malloc(m, b) malloc(b)
#define internal_free(m, mem) free(mem)

/* ----------------------- Direct-mmapping chunks ----------------------- */

/*
  Directly mmapped chunks are set up with an offset to the start of
  the mmapped region stored in the prev_foot field of the chunk. This
  allows reconstruction of the required argument to MUNMAP when freed,
  and also allows adjustment of the returned chunk to meet alignment
  requirements (especially in memalign).
*/
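/*
  Layout sketch (derived from mmap_alloc/mmap_resize below): for a
  region mm of length mmsize returned by os_mmap,

      p            == mm + offset, where offset = align_offset(chunk2mem(mm))
      p->prev_foot == offset
      p->head      == psize == mmsize - offset - MMAP_FOOT_PAD
      p + psize    -> fencepost (head == FENCEPOST_HEAD), then a zero head

  so the region base can be recovered as (char*)p - p->prev_foot and
  its length as psize + offset + MMAP_FOOT_PAD, exactly as mmap_resize
  computes oldmmsize.
*/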

/* Malloc using mmap */
static void* mmap_alloc(mstate m, size_t nb)
{
    size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    if (mmsize > nb)     /* Check for wrap around 0 */
    {
        char* mm = (char*)(os_mmap(mmsize));
        if (mm != CMFAIL)
        {
            size_t offset = align_offset(chunk2mem(mm));
            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
            mchunkptr p = (mchunkptr)(mm + offset);
            p->prev_foot = offset;
            p->head = psize;
            mark_inuse_foot(m, p, psize);
            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

            if (m->least_addr == 0 || mm < m->least_addr)
                m->least_addr = mm;
            if ((m->footprint += mmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            assert(is_aligned(chunk2mem(p)));
            check_mmapped_chunk(m, p);
            return chunk2mem(p);
        }
    }
    return 0;
}

/* Realloc using mmap */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
{
    size_t oldsize = chunksize(oldp);
    if (is_small(nb)) /* Can't shrink mmap regions below small size */
        return 0;
  /* Keep old chunk if big enough but not too big */
    if (oldsize >= nb + SIZE_T_SIZE &&
       (oldsize - nb) <= (mparams.granularity << 1))
        return oldp;
    else
    {
        size_t offset = oldp->prev_foot;
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
        size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
        char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                      oldmmsize, newmmsize, 1);
        if (cp != CMFAIL)
        {
            mchunkptr newp = (mchunkptr)(cp + offset);
            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
            newp->head = psize;
            mark_inuse_foot(m, newp, psize);
            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

            if (cp < m->least_addr)
                m->least_addr = cp;
            if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            check_mmapped_chunk(m, newp);
            return newp;
        }
    }
    return 0;
}

/* ---------------------------- setting mparams -------------------------- */

/* Initialize mparams */
static int init_mparams(void) {

    ACQUIRE_MALLOC_GLOBAL_LOCK();

    if (mparams.magic == 0)
    {
        size_t magic;
        size_t psize;
        size_t gsize;

        psize = 4096;
        gsize = DEFAULT_GRANULARITY;

    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */

        mparams.granularity = gsize;
        mparams.page_size = psize;
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
        mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;

    /* Set up lock for main malloc area */
        gm->mflags = mparams.default_mflags;
        __lock_init_recursive(gm->lock);

        magic = (size_t)(0x12345678 ^ (size_t)0x55555555U);
        magic |= (size_t)8U;    /* ensure nonzero */
        magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
        mparams.magic = magic;
    }

    RELEASE_MALLOC_GLOBAL_LOCK();
    return 1;
}

/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize)
{
  /* Ensure alignment */
    size_t offset = align_offset(chunk2mem(p));
    p = (mchunkptr)((char*)p + offset);
    psize -= offset;

    m->top = p;
    m->topsize = psize;
    p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
    m->trim_check = mparams.trim_threshold; /* reset on each update */
}

/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m)
{
  /* Establish circular links for smallbins */
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i) {
        sbinptr bin = smallbin_at(m,i);
        bin->fd = bin->bk = bin;
    }
}

/* Allocate chunk and prepend remainder with chunk in successor base. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb)
{
    mchunkptr p = align_as_chunk(newbase);
    mchunkptr oldfirst = align_as_chunk(oldbase);
    size_t psize = (char*)oldfirst - (char*)p;
    mchunkptr q = chunk_plus_offset(p, nb);
    size_t qsize = psize - nb;
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);

    assert((char*)oldfirst > (char*)q);
    assert(pinuse(oldfirst));
    assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
    if (oldfirst == m->top) {
        size_t tsize = m->topsize += qsize;
        m->top = q;
        q->head = tsize | PINUSE_BIT;
        check_top_chunk(m, q);
    }
    else if (oldfirst == m->dv) {
        size_t dsize = m->dvsize += qsize;
        m->dv = q;
        set_size_and_pinuse_of_free_chunk(q, dsize);
    }
    else {
        if (!is_inuse(oldfirst)) {
            size_t nsize = chunksize(oldfirst);
            unlink_chunk(m, oldfirst, nsize);
            oldfirst = chunk_plus_offset(oldfirst, nsize);
            qsize += nsize;
        }
        set_free_with_pinuse(q, qsize, oldfirst);
        insert_chunk(m, q, qsize);
        check_free_chunk(m, q);
    }

    check_malloced_chunk(m, chunk2mem(p), nb);
    return chunk2mem(p);
}

/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped)
{
  /* Determine locations and sizes of segment, fenceposts, old top */
    char* old_top = (char*)m->top;
    msegmentptr oldsp = segment_holding(m, old_top);
    char* old_end = oldsp->base + oldsp->size;
    size_t ssize = pad_request(sizeof(struct malloc_segment));
    char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    size_t offset = align_offset(chunk2mem(rawsp));
    char* asp = rawsp + offset;
    char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
    mchunkptr sp = (mchunkptr)csp;
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
    mchunkptr tnext = chunk_plus_offset(sp, ssize);
    mchunkptr p = tnext;
    int nfences = 0;

  /* reset top to new space */
    init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
    assert(is_aligned(ss));
    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
    *ss = m->seg; /* Push current record */
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->seg.sflags = mmapped;
    m->seg.next = ss;

  /* Insert trailing fenceposts */
    for (;;) {
        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
        p->head = FENCEPOST_HEAD;
        ++nfences;
        if ((char*)(&(nextp->head)) < old_end)
            p = nextp;
        else
            break;
    }
    assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
    if (csp != old_top) {
        mchunkptr q = (mchunkptr)old_top;
        size_t psize = csp - old_top;
        mchunkptr tn = chunk_plus_offset(q, psize);
        set_free_with_pinuse(q, psize, tn);
        insert_chunk(m, q, psize);
    }

    check_top_chunk(m, m->top);
}

/* -------------------------- System allocation -------------------------- */

/* Get memory from system using MORECORE or MMAP */
static void* sys_alloc(mstate m, size_t nb)
{
    char* tbase = CMFAIL;
    size_t tsize = 0;
    flag_t mmap_flag = 0;

    ensure_initialization();

  /* Directly map large chunks, but only if already initialized */
    if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0)
    {
        void* mem = mmap_alloc(m, nb);
        if (mem != 0)
            return mem;
    }

  /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)

    In all cases, we need to request enough bytes from system to ensure
    we can malloc nb bytes upon success, so pad with enough space for
    top_foot, plus alignment-pad to make sure we don't lose bytes if
    not on boundary, and round this up to a granularity unit.
  */
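  /* In this port only strategy 2 applies: there is no MORECORE, and
     CALL_MREMAP is stubbed to MFAIL, so all system memory arrives via
     os_mmap (_ksys_alloc) below. */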
||
1147 | |||
1148 | if (HAVE_MMAP && tbase == CMFAIL) /* Try MMAP */ |
||
1149 | { |
||
1150 | size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING); |
||
1151 | if (rsize > nb) /* Fail if wraps around zero */ |
||
1152 | { |
||
1153 | char* mp = (char*)(CALL_MMAP(rsize)); |
||
1154 | if (mp != CMFAIL) |
||
1155 | { |
||
1156 | tbase = mp; |
||
1157 | tsize = rsize; |
||
1158 | mmap_flag = USE_MMAP_BIT; |
||
1159 | } |
||
1160 | } |
||
1161 | } |
||
1162 | |||
1163 | if (tbase != CMFAIL) |
||
1164 | { |
||
1165 | |||
1166 | if ((m->footprint += tsize) > m->max_footprint) |
||
1167 | m->max_footprint = m->footprint; |
||
1168 | |||
1169 | if (!is_initialized(m)) /* first-time initialization */ |
||
1170 | { |
||
1171 | if (m->least_addr == 0 || tbase < m->least_addr) |
||
1172 | m->least_addr = tbase; |
||
1173 | m->seg.base = tbase; |
||
1174 | m->seg.size = tsize; |
||
1175 | m->seg.sflags = mmap_flag; |
||
1176 | m->magic = mparams.magic; |
||
1177 | m->release_checks = MAX_RELEASE_CHECK_RATE; |
||
1178 | init_bins(m); |
||
1179 | |||
1180 | if (is_global(m)) |
||
1181 | init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); |
||
1182 | else |
||
1183 | { |
||
1184 | /* Offset top by embedded malloc_state */ |
||
1185 | mchunkptr mn = next_chunk(mem2chunk(m)); |
||
1186 | init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); |
||
1187 | } |
||
1188 | } |
||
1189 | else |
||
1190 | { |
||
1191 | /* Try to merge with an existing segment */ |
||
1192 | msegmentptr sp = &m->seg; |
||
1193 | /* Only consider most recent segment if traversal suppressed */ |
||
1194 | while (sp != 0 && tbase != sp->base + sp->size) |
||
1195 | sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
||
1196 | if (sp != 0 && !is_extern_segment(sp) && |
||
1197 | (sp->sflags & USE_MMAP_BIT) == mmap_flag && |
||
1198 | segment_holds(sp, m->top)) /* append */ |
||
1199 | { |
||
1200 | sp->size += tsize; |
||
1201 | init_top(m, m->top, m->topsize + tsize); |
||
1202 | } |
||
1203 | else |
||
1204 | { |
||
1205 | if (tbase < m->least_addr) |
||
1206 | m->least_addr = tbase; |
||
1207 | sp = &m->seg; |
||
1208 | while (sp != 0 && sp->base != tbase + tsize) |
||
1209 | sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
||
1210 | if (sp != 0 && !is_extern_segment(sp) && |
||
1211 | (sp->sflags & USE_MMAP_BIT) == mmap_flag) |
||
1212 | { |
||
1213 | char* oldbase = sp->base; |
||
1214 | sp->base = tbase; |
||
1215 | sp->size += tsize; |
||
1216 | return prepend_alloc(m, tbase, oldbase, nb); |
||
1217 | } |
||
1218 | else |
||
1219 | add_segment(m, tbase, tsize, mmap_flag); |
||
1220 | } |
||
1221 | } |
||
1222 | |||
1223 | if (nb < m->topsize) /* Allocate from new or extended top space */ |
||
1224 | { |
||
1225 | size_t rsize = m->topsize -= nb; |
||
1226 | mchunkptr p = m->top; |
||
1227 | mchunkptr r = m->top = chunk_plus_offset(p, nb); |
||
1228 | r->head = rsize | PINUSE_BIT; |
||
1229 | set_size_and_pinuse_of_inuse_chunk(m, p, nb); |
||
1230 | check_top_chunk(m, m->top); |
||
1231 | check_malloced_chunk(m, chunk2mem(p), nb); |
||
1232 | return chunk2mem(p); |
||
1233 | } |
||
1234 | } |
||
1235 | |||
1236 | MALLOC_FAILURE_ACTION; |
||
1237 | return 0; |
||
1238 | } |
||
1239 | |||
1240 | |||
1241 | /* ----------------------- system deallocation -------------------------- */ |
||
1242 | |||
1243 | /* Unmap and unlink any mmapped segments that don't contain used chunks */ |
||
1244 | static size_t release_unused_segments(mstate m) |
||
1245 | { |
||
1246 | size_t released = 0; |
||
1247 | int nsegs = 0; |
||
1248 | msegmentptr pred = &m->seg; |
||
1249 | msegmentptr sp = pred->next; |
||
1250 | while (sp != 0) |
||
1251 | { |
||
1252 | char* base = sp->base; |
||
1253 | size_t size = sp->size; |
||
1254 | msegmentptr next = sp->next; |
||
1255 | ++nsegs; |
||
1256 | if (is_mmapped_segment(sp) && !is_extern_segment(sp)) |
||
1257 | { |
||
1258 | mchunkptr p = align_as_chunk(base); |
||
1259 | size_t psize = chunksize(p); |
||
1260 | /* Can unmap if first chunk holds entire segment and not pinned */ |
||
1261 | if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) |
||
1262 | { |
||
1263 | tchunkptr tp = (tchunkptr)p; |
||
1264 | assert(segment_holds(sp, (char*)sp)); |
||
1265 | if (p == m->dv) { |
||
1266 | m->dv = 0; |
||
1267 | m->dvsize = 0; |
||
1268 | } |
||
1269 | else { |
||
1270 | unlink_large_chunk(m, tp); |
||
1271 | } |
||
1272 | if (CALL_MUNMAP(base, size) == 0) |
||
1273 | { |
||
1274 | released += size; |
||
1275 | m->footprint -= size; |
||
1276 | /* unlink obsoleted record */ |
||
1277 | sp = pred; |
||
1278 | sp->next = next; |
||
1279 | } |
||
1280 | else { /* back out if cannot unmap */ |
||
1281 | insert_large_chunk(m, tp, psize); |
||
1282 | } |
||
1283 | } |
||
1284 | } |
||
1285 | if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ |
||
1286 | break; |
||
1287 | pred = sp; |
||
1288 | sp = next; |
||
1289 | } |
||
1290 | /* Reset check counter */ |
||
1291 | m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)? |
||
1292 | nsegs : MAX_RELEASE_CHECK_RATE); |
||
1293 | return released; |
||
1294 | } |
||
1295 | |||
1296 | static int sys_trim(mstate m, size_t pad) |
||
1297 | { |
||
1298 | size_t released = 0; |
||
1299 | ensure_initialization(); |
||
1300 | if (pad < MAX_REQUEST && is_initialized(m)) |
||
1301 | { |
||
1302 | pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ |
||
1303 | |||
1304 | if (m->topsize > pad) |
||
1305 | { |
||
1306 | /* Shrink top space in granularity-size units, keeping at least one */ |
||
1307 | size_t unit = mparams.granularity; |
||
1308 | size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - |
||
1309 | SIZE_T_ONE) * unit; |
||
1310 | msegmentptr sp = segment_holding(m, (char*)m->top); |
||
1311 | |||
1312 | if (!is_extern_segment(sp)) |
||
1313 | { |
||
1314 | if (is_mmapped_segment(sp)) |
||
1315 | { |
||
1316 | if (HAVE_MMAP && |
||
1317 | sp->size >= extra && |
||
1318 | !has_segment_link(m, sp)) /* can't shrink if pinned */ |
||
1319 | { |
||
1320 | size_t newsize = sp->size - extra; |
||
1321 | /* Prefer mremap, fall back to munmap */ |
||
1322 | if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || |
||
1323 | (CALL_MUNMAP(sp->base + newsize, extra) == 0)) |
||
1324 | { |
||
1325 | released = extra; |
||
1326 | } |
||
1327 | } |
||
1328 | } |
||
1329 | } |
||
1330 | |||
1331 | if (released != 0) |
||
1332 | { |
||
1333 | sp->size -= released; |
||
1334 | m->footprint -= released; |
||
1335 | init_top(m, m->top, m->topsize - released); |
||
1336 | check_top_chunk(m, m->top); |
||
1337 | } |
||
1338 | } |
||
1339 | |||
1340 | /* Unmap any unused mmapped segments */ |
||
1341 | if (HAVE_MMAP) |
||
1342 | released += release_unused_segments(m); |
||
1343 | |||
1344 | /* On failure, disable autotrim to avoid repeated failed future calls */ |
||
1345 | if (released == 0 && m->topsize > m->trim_check) |
||
1346 | m->trim_check = MAX_SIZE_T; |
||
1347 | } |
||
1348 | |||
1349 | return (released != 0)? 1 : 0; |
||
1350 | } |
||
1351 | |||
1352 | |||
1353 | |||
1354 | /* ---------------------------- malloc support --------------------------- */ |
||
1355 | |||
1356 | /* allocate a large request from the best fitting chunk in a treebin */ |
||
1357 | static void* tmalloc_large(mstate m, size_t nb) { |
||
1358 | tchunkptr v = 0; |
||
1359 | size_t rsize = -nb; /* Unsigned negation */ |
||
1360 | tchunkptr t; |
||
1361 | bindex_t idx; |
||
1362 | compute_tree_index(nb, idx); |
||
1363 | if ((t = *treebin_at(m, idx)) != 0) { |
||
1364 | /* Traverse tree for this bin looking for node with size == nb */ |
||
1365 | size_t sizebits = nb << leftshift_for_tree_index(idx); |
||
1366 | tchunkptr rst = 0; /* The deepest untaken right subtree */ |
||
1367 | for (;;) { |
||
1368 | tchunkptr rt; |
||
1369 | size_t trem = chunksize(t) - nb; |
||
1370 | if (trem < rsize) { |
||
1371 | v = t; |
||
1372 | if ((rsize = trem) == 0) |
||
1373 | break; |
||
1374 | } |
||
1375 | rt = t->child[1]; |
||
1376 | t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; |
||
1377 | if (rt != 0 && rt != t) |
||
1378 | rst = rt; |
||
1379 | if (t == 0) { |
||
1380 | t = rst; /* set t to least subtree holding sizes > nb */ |
||
1381 | break; |
||
1382 | } |
||
1383 | sizebits <<= 1; |
||
1384 | } |
||
1385 | } |
||
1386 | if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ |
||
1387 | binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; |
||
1388 | if (leftbits != 0) { |
||
1389 | bindex_t i; |
||
1390 | binmap_t leastbit = least_bit(leftbits); |
||
1391 | compute_bit2idx(leastbit, i); |
||
1392 | t = *treebin_at(m, i); |
||
1393 | } |
||
1394 | } |
||
1395 | |||
1396 | while (t != 0) { /* find smallest of tree or subtree */ |
||
1397 | size_t trem = chunksize(t) - nb; |
||
1398 | if (trem < rsize) { |
||
1399 | rsize = trem; |
||
1400 | v = t; |
||
1401 | } |
||
1402 | t = leftmost_child(t); |
||
1403 | } |
||
1404 | |||
1405 | /* If dv is a better fit, return 0 so malloc will use it */ |
||
1406 | if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { |
||
1407 | if (RTCHECK(ok_address(m, v))) { /* split */ |
||
1408 | mchunkptr r = chunk_plus_offset(v, nb); |
||
1409 | assert(chunksize(v) == rsize + nb); |
||
1410 | if (RTCHECK(ok_next(v, r))) { |
||
1411 | unlink_large_chunk(m, v); |
||
1412 | if (rsize < MIN_CHUNK_SIZE) |
||
1413 | set_inuse_and_pinuse(m, v, (rsize + nb)); |
||
1414 | else { |
||
1415 | set_size_and_pinuse_of_inuse_chunk(m, v, nb); |
||
1416 | set_size_and_pinuse_of_free_chunk(r, rsize); |
||
1417 | insert_chunk(m, r, rsize); |
||
1418 | } |
||
1419 | return chunk2mem(v); |
||
1420 | } |
||
1421 | } |
||
1422 | CORRUPTION_ERROR_ACTION(m); |
||
1423 | } |
||
1424 | return 0; |
||
1425 | } |
||
1426 | |||
1427 | /* allocate a small request from the best fitting chunk in a treebin */ |
||
1428 | static void* tmalloc_small(mstate m, size_t nb) |
||
1429 | { |
||
1430 | tchunkptr t, v; |
||
1431 | size_t rsize; |
||
1432 | bindex_t i; |
||
1433 | binmap_t leastbit = least_bit(m->treemap); |
||
1434 | compute_bit2idx(leastbit, i); |
||
1435 | v = t = *treebin_at(m, i); |
||
1436 | rsize = chunksize(t) - nb; |
||
1437 | |||
1438 | while ((t = leftmost_child(t)) != 0) { |
||
1439 | size_t trem = chunksize(t) - nb; |
||
1440 | if (trem < rsize) { |
||
1441 | rsize = trem; |
||
1442 | v = t; |
||
1443 | } |
||
1444 | } |
||
1445 | |||
1446 | if (RTCHECK(ok_address(m, v))) { |
||
1447 | mchunkptr r = chunk_plus_offset(v, nb); |
||
1448 | assert(chunksize(v) == rsize + nb); |
||
1449 | if (RTCHECK(ok_next(v, r))) { |
||
1450 | unlink_large_chunk(m, v); |
||
1451 | if (rsize < MIN_CHUNK_SIZE) |
||
1452 | set_inuse_and_pinuse(m, v, (rsize + nb)); |
||
1453 | else { |
||
1454 | set_size_and_pinuse_of_inuse_chunk(m, v, nb); |
||
1455 | set_size_and_pinuse_of_free_chunk(r, rsize); |
||
1456 | replace_dv(m, r, rsize); |
||
1457 | } |
||
1458 | return chunk2mem(v); |
||
1459 | } |
||
1460 | } |
||
1461 | |||
1462 | CORRUPTION_ERROR_ACTION(m); |
||
1463 | return 0; |
||
1464 | } |
||
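
/*
  Illustrative note: tmalloc_small is only called when no smallbin can
  satisfy the request, so any tree chunk is large enough. least_bit
  isolates the lowest set bit of m->treemap (the smallest non-empty
  treebin), and the leftmost_child walk then finds that bin's smallest
  member; the split remainder is installed as the new designated
  victim via replace_dv, so the very next small request can reuse it.
*/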
1465 | |||
1466 | /* --------------------------- realloc support --------------------------- */ |
||
1467 | |||
1468 | static void* internal_realloc(struct _reent *reent_ptr, mstate m, void* oldmem, size_t bytes) |
||
1469 | { |
||
1470 | if (bytes >= MAX_REQUEST) |
||
1471 | { |
||
1472 | MALLOC_FAILURE_ACTION; |
||
1473 | return 0; |
||
1474 | } |
||
1475 | |||
1476 | PREACTION(m); |
||
1477 | { |
||
1478 | mchunkptr oldp = mem2chunk(oldmem); |
||
1479 | size_t oldsize = chunksize(oldp); |
||
1480 | mchunkptr next = chunk_plus_offset(oldp, oldsize); |
||
1481 | mchunkptr newp = 0; |
||
1482 | void* extra = 0; |
||
1483 | |||
1484 | /* Try to either shrink or extend into top. Else malloc-copy-free */ |
||
1485 | |||
1486 | if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) && |
||
1487 | ok_next(oldp, next) && ok_pinuse(next))) |
||
1488 | { |
||
1489 | size_t nb = request2size(bytes); |
||
1490 | if (is_mmapped(oldp)) |
||
1491 | newp = mmap_resize(m, oldp, nb); |
||
1492 | else if (oldsize >= nb) { /* already big enough */ |
||
1493 | size_t rsize = oldsize - nb; |
||
1494 | newp = oldp; |
||
1495 | if (rsize >= MIN_CHUNK_SIZE) |
||
1496 | { |
||
1497 | mchunkptr remainder = chunk_plus_offset(newp, nb); |
||
1498 | set_inuse(m, newp, nb); |
||
1499 | set_inuse_and_pinuse(m, remainder, rsize); |
||
1500 | extra = chunk2mem(remainder); |
||
1501 | } |
||
1502 | } |
||
1503 | else if (next == m->top && oldsize + m->topsize > nb) |
||
1504 | { |
||
1505 | /* Expand into top */ |
||
1506 | size_t newsize = oldsize + m->topsize; |
||
1507 | size_t newtopsize = newsize - nb; |
||
1508 | mchunkptr newtop = chunk_plus_offset(oldp, nb); |
||
1509 | set_inuse(m, oldp, nb); |
||
1510 | newtop->head = newtopsize |PINUSE_BIT; |
||
1511 | m->top = newtop; |
||
1512 | m->topsize = newtopsize; |
||
1513 | newp = oldp; |
||
1514 | } |
||
1515 | } |
||
1516 | else { |
||
1517 | USAGE_ERROR_ACTION(m, oldmem); |
||
1518 | POSTACTION(m); |
||
1519 | return 0; |
||
1520 | } |
||
1521 | #if DEBUG |
||
1522 | if (newp != 0) { |
||
1523 | check_inuse_chunk(m, newp); /* Check requires lock */ |
||
1524 | } |
||
1525 | #endif |
||
1526 | |||
1527 | POSTACTION(m); |
||
1528 | |||
1529 | if (newp != 0) |
||
1530 | { |
||
1531 | if (extra != 0) { |
||
1532 | _free_r(reent_ptr, extra); |
||
1533 | } |
||
1534 | return chunk2mem(newp); |
||
1535 | } |
||
1536 | else |
||
1537 | { |
||
1538 | void* newmem = _malloc_r(reent_ptr, bytes); |
||
1539 | if (newmem != 0) { |
||
1540 | size_t oc = oldsize - overhead_for(oldp); |
||
1541 | memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); |
||
1542 | _free_r(reent_ptr, oldmem); |
||
1543 | } |
||
1544 | return newmem; |
||
1545 | } |
||
1546 | } |
||
1547 | return 0; |
||
1548 | } |
||
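
/*
  Worked sketch (hypothetical sizes; assumes MIN_CHUNK_SIZE == 16):
  shrinking an in-use chunk with oldsize == 64 to a request where
  request2size(bytes) == 32 leaves rsize == 32 >= MIN_CHUNK_SIZE, so a
  32-byte remainder is split off, marked in use, and handed to _free_r
  as "extra" after the lock is released. Were rsize below
  MIN_CHUNK_SIZE, the slack would be too small to describe as a chunk
  and would simply stay attached to the returned block.
*/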
1549 | |||
1550 | /* --------------------------- memalign support -------------------------- */ |
||
1551 | |||
1552 | static void* internal_memalign(mstate m, size_t alignment, size_t bytes) |
||
1553 | { |
||
1554 | if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ |
||
1555 | return internal_malloc(m, bytes); |
||
1556 | if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ |
||
1557 | alignment = MIN_CHUNK_SIZE; |
||
1558 | if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ |
||
1559 | size_t a = MALLOC_ALIGNMENT << 1; |
||
1560 | while (a < alignment) a <<= 1; |
||
1561 | alignment = a; |
||
1562 | } |
||
1563 | |||
1564 | if (bytes >= MAX_REQUEST - alignment) { |
||
1565 | if (m != 0) { /* Test isn't needed but avoids compiler warning */ |
||
1566 | MALLOC_FAILURE_ACTION; |
||
1567 | } |
||
1568 | } |
||
1569 | else |
||
1570 | { |
||
1571 | size_t nb = request2size(bytes); |
||
1572 | size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; |
||
1573 | char* mem = (char*)internal_malloc(m, req); |
||
1574 | if (mem != 0) |
||
1575 | { |
||
1576 | void* leader = 0; |
||
1577 | void* trailer = 0; |
||
1578 | mchunkptr p = mem2chunk(mem); |
||
1579 | |||
1580 | PREACTION(m); |
||
1581 | |||
1582 | if ((((size_t)(mem)) % alignment) != 0) /* misaligned */ |
||
1583 | { |
||
1584 | /* |
||
1585 | Find an aligned spot inside chunk. Since we need to give |
||
1586 | back leading space in a chunk of at least MIN_CHUNK_SIZE, if |
||
1587 | the first calculation places us at a spot with less than |
||
1588 | MIN_CHUNK_SIZE leader, we can move to the next aligned spot. |
||
1589 | We've allocated enough total room so that this is always |
||
1590 | possible. |
||
1591 | */ |
||
1592 | char* br = (char*)mem2chunk((size_t)(((size_t)(mem + |
||
1593 | alignment - |
||
1594 | SIZE_T_ONE)) & |
||
1595 | -alignment)); |
||
1596 | char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? |
||
1597 | br : br+alignment; |
||
1598 | mchunkptr newp = (mchunkptr)pos; |
||
1599 | size_t leadsize = pos - (char*)(p); |
||
1600 | size_t newsize = chunksize(p) - leadsize; |
||
1601 | |||
1602 | if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ |
||
1603 | newp->prev_foot = p->prev_foot + leadsize; |
||
1604 | newp->head = newsize; |
||
1605 | } |
||
1606 | else { /* Otherwise, give back leader, use the rest */ |
||
1607 | set_inuse(m, newp, newsize); |
||
1608 | set_inuse(m, p, leadsize); |
||
1609 | leader = chunk2mem(p); |
||
1610 | } |
||
1611 | p = newp; |
||
1612 | } |
||
1613 | |||
1614 | /* Give back spare room at the end */ |
||
1615 | if (!is_mmapped(p)) |
||
1616 | { |
||
1617 | size_t size = chunksize(p); |
||
1618 | if (size > nb + MIN_CHUNK_SIZE) |
||
1619 | { |
||
1620 | size_t remainder_size = size - nb; |
||
1621 | mchunkptr remainder = chunk_plus_offset(p, nb); |
||
1622 | set_inuse(m, p, nb); |
||
1623 | set_inuse(m, remainder, remainder_size); |
||
1624 | trailer = chunk2mem(remainder); |
||
1625 | } |
||
1626 | } |
||
1627 | |||
1628 | assert (chunksize(p) >= nb); |
||
1629 | assert((((size_t)(chunk2mem(p))) % alignment) == 0); |
||
1630 | check_inuse_chunk(m, p); |
||
1631 | POSTACTION(m); |
||
1632 | if (leader != 0) { |
||
1633 | internal_free(m, leader); |
||
1634 | } |
||
1635 | if (trailer != 0) { |
||
1636 | internal_free(m, trailer); |
||
1637 | } |
||
1638 | return chunk2mem(p); |
||
1639 | } |
||
1640 | } |
||
1641 | return 0; |
||
1642 | } |
||
1643 | |||
1644 | void* memalign(size_t alignment, size_t bytes) |
||
1645 | { |
||
1646 | return internal_memalign(gm, alignment, bytes); |
||
1647 | } |
||
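
/*
  Usage sketch (illustrative only): a non-power-of-two alignment is
  rounded up to the next power of two, so

      void* p = memalign(48, 100);   /* treated as memalign(64, 100) */
      assert(((size_t)p % 64) == 0);

  The result is an ordinary chunk and may be released with the
  matching free routine.
*/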
1648 | |||
1649 | |||
1650 | |||
1651 | void* _malloc_r(struct _reent *reent_ptr, size_t bytes) { |
||
1652 | /* |
||
1653 | Basic algorithm: |
||
1654 | If a small request (< 256 bytes minus per-chunk overhead): |
||
1655 | 1. If one exists, use a remainderless chunk in associated smallbin. |
||
1656 | (Remainderless means that there are too few excess bytes to |
||
1657 | represent as a chunk.) |
||
1658 | 2. If it is big enough, use the dv chunk, which is normally the |
||
1659 | chunk adjacent to the one used for the most recent small request. |
||
1660 | 3. If one exists, split the smallest available chunk in a bin, |
||
1661 | saving remainder in dv. |
||
1662 | 4. If it is big enough, use the top chunk. |
||
1663 | 5. If available, get memory from system and use it |
||
1664 | Otherwise, for a large request: |
||
1665 | 1. Find the smallest available binned chunk that fits, and use it |
||
1666 | if it is better fitting than dv chunk, splitting if necessary. |
||
1667 | 2. If better fitting than any binned chunk, use the dv chunk. |
||
1668 | 3. If it is big enough, use the top chunk. |
||
1669 | 4. If request size >= mmap threshold, try to directly mmap this chunk. |
||
1670 | 5. If available, get memory from system and use it |
||
1671 | |||
1672 | The ugly goto's here ensure that postaction occurs along all paths. |
||
1673 | */ |
||
1674 | |||
1675 | ensure_initialization(); /* initialize in sys_alloc if not using locks */ |
||
1676 | |||
1677 | PREACTION(gm); |
||
1678 | { |
||
1679 | void* mem; |
||
1680 | size_t nb; |
||
1681 | |||
1682 | if (bytes <= MAX_SMALL_REQUEST) |
||
1683 | { |
||
1684 | bindex_t idx; |
||
1685 | binmap_t smallbits; |
||
1686 | nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); |
||
1687 | idx = small_index(nb); |
||
1688 | smallbits = gm->smallmap >> idx; |
||
1689 | |||
1690 | if ((smallbits & 0x3U) != 0) /* Remainderless fit to a smallbin. */ |
||
1691 | { |
||
1692 | mchunkptr b, p; |
||
1693 | idx += ~smallbits & 1; /* Uses next bin if idx empty */ |
||
1694 | b = smallbin_at(gm, idx); |
||
1695 | p = b->fd; |
||
1696 | assert(chunksize(p) == small_index2size(idx)); |
||
1697 | unlink_first_small_chunk(gm, b, p, idx); |
||
1698 | set_inuse_and_pinuse(gm, p, small_index2size(idx)); |
||
1699 | mem = chunk2mem(p); |
||
1700 | check_malloced_chunk(gm, mem, nb); |
||
1701 | goto postaction; |
||
1702 | } |
||
1703 | else if (nb > gm->dvsize) |
||
1704 | { |
||
1705 | if (smallbits != 0) /* Use chunk in next nonempty smallbin */ |
||
1706 | { |
||
1707 | mchunkptr b, p, r; |
||
1708 | size_t rsize; |
||
1709 | bindex_t i; |
||
1710 | binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); |
||
1711 | binmap_t leastbit = least_bit(leftbits); |
||
1712 | compute_bit2idx(leastbit, i); |
||
1713 | b = smallbin_at(gm, i); |
||
1714 | p = b->fd; |
||
1715 | assert(chunksize(p) == small_index2size(i)); |
||
1716 | unlink_first_small_chunk(gm, b, p, i); |
||
1717 | rsize = small_index2size(i) - nb; |
||
1718 | /* Fit here cannot be remainderless if 4byte sizes */ |
||
1719 | if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) |
||
1720 | set_inuse_and_pinuse(gm, p, small_index2size(i)); |
||
1721 | else |
||
1722 | { |
||
1723 | set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
||
1724 | r = chunk_plus_offset(p, nb); |
||
1725 | set_size_and_pinuse_of_free_chunk(r, rsize); |
||
1726 | replace_dv(gm, r, rsize); |
||
1727 | } |
||
1728 | mem = chunk2mem(p); |
||
1729 | check_malloced_chunk(gm, mem, nb); |
||
1730 | goto postaction; |
||
1731 | } |
||
1732 | else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) |
||
1733 | { |
||
1734 | check_malloced_chunk(gm, mem, nb); |
||
1735 | goto postaction; |
||
1736 | } |
||
1737 | } |
||
1738 | } |
||
1739 | else if (bytes >= MAX_REQUEST) |
||
1740 | nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ |
||
1741 | else |
||
1742 | { |
||
1743 | nb = pad_request(bytes); |
||
1744 | if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) |
||
1745 | { |
||
1746 | check_malloced_chunk(gm, mem, nb); |
||
1747 | goto postaction; |
||
1748 | } |
||
1749 | } |
||
1750 | |||
1751 | if (nb <= gm->dvsize) { |
||
1752 | size_t rsize = gm->dvsize - nb; |
||
1753 | mchunkptr p = gm->dv; |
||
1754 | if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ |
||
1755 | mchunkptr r = gm->dv = chunk_plus_offset(p, nb); |
||
1756 | gm->dvsize = rsize; |
||
1757 | set_size_and_pinuse_of_free_chunk(r, rsize); |
||
1758 | set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
||
1759 | } |
||
1760 | else { /* exhaust dv */ |
||
1761 | size_t dvs = gm->dvsize; |
||
1762 | gm->dvsize = 0; |
||
1763 | gm->dv = 0; |
||
1764 | set_inuse_and_pinuse(gm, p, dvs); |
||
1765 | } |
||
1766 | mem = chunk2mem(p); |
||
1767 | check_malloced_chunk(gm, mem, nb); |
||
1768 | goto postaction; |
||
1769 | } |
||
1770 | else if (nb < gm->topsize) { /* Split top */ |
||
1771 | size_t rsize = gm->topsize -= nb; |
||
1772 | mchunkptr p = gm->top; |
||
1773 | mchunkptr r = gm->top = chunk_plus_offset(p, nb); |
||
1774 | r->head = rsize | PINUSE_BIT; |
||
1775 | set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
||
1776 | mem = chunk2mem(p); |
||
1777 | check_top_chunk(gm, gm->top); |
||
1778 | check_malloced_chunk(gm, mem, nb); |
||
1779 | goto postaction; |
||
1780 | } |
||
1781 | |||
1782 | mem = sys_alloc(gm, nb); |
||
1783 | |||
1784 | postaction: |
||
1785 | POSTACTION(gm); |
||
1786 | return mem; |
||
1787 | } |
||
1788 | |||
1789 | return 0; |
||
1790 | } |
||
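
/*
  Illustrative note: in the smallbin fast path above, smallbits is
  gm->smallmap shifted so that bit 0 corresponds to bin idx. Testing
  (smallbits & 0x3U) probes bins idx and idx+1 together; adjacent
  smallbins differ by less than MIN_CHUNK_SIZE, so a chunk from either
  bin is a remainderless fit. "idx += ~smallbits & 1" then advances to
  bin idx+1 exactly when bin idx is empty but its neighbor is not.
*/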
1791 | |||
1792 | void _free_r(struct _reent *reent_ptr, void* mem) { |
||
1793 | /* |
||
1794 | Consolidate freed chunks with preceeding or succeeding bordering |
||
1795 | free chunks, if they exist, and then place in a bin. Intermixed |
||
1796 | with special cases for top, dv, mmapped chunks, and usage errors. |
||
1797 | */ |
||
1798 | |||
1799 | if (mem != 0) |
||
1800 | { |
||
1801 | mchunkptr p = mem2chunk(mem); |
||
1802 | |||
1803 | #define fm gm |
||
1804 | |||
1805 | PREACTION(fm); |
||
1806 | { |
||
1807 | check_inuse_chunk(fm, p); |
||
1808 | if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) |
||
1809 | { |
||
1810 | size_t psize = chunksize(p); |
||
1811 | mchunkptr next = chunk_plus_offset(p, psize); |
||
1812 | if (!pinuse(p)) |
||
1813 | { |
||
1814 | size_t prevsize = p->prev_foot; |
||
1815 | if (is_mmapped(p)) |
||
1816 | { |
||
1817 | psize += prevsize + MMAP_FOOT_PAD; |
||
1818 | if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
||
1819 | fm->footprint -= psize; |
||
1820 | goto postaction; |
||
1821 | } |
||
1822 | else |
||
1823 | { |
||
1824 | mchunkptr prev = chunk_minus_offset(p, prevsize); |
||
1825 | psize += prevsize; |
||
1826 | p = prev; |
||
1827 | if (RTCHECK(ok_address(fm, prev))) /* consolidate backward */ |
||
1828 | { |
||
1829 | if (p != fm->dv) |
||
1830 | { |
||
1831 | unlink_chunk(fm, p, prevsize); |
||
1832 | } |
||
1833 | else if ((next->head & INUSE_BITS) == INUSE_BITS) |
||
1834 | { |
||
1835 | fm->dvsize = psize; |
||
1836 | set_free_with_pinuse(p, psize, next); |
||
1837 | goto postaction; |
||
1838 | } |
||
1839 | } |
||
1840 | else |
||
1841 | goto erroraction; |
||
1842 | } |
||
1843 | } |
||
1844 | |||
1845 | if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) |
||
1846 | { |
||
1847 | if (!cinuse(next)) /* consolidate forward */ |
||
1848 | { |
||
1849 | if (next == fm->top) |
||
1850 | { |
||
1851 | size_t tsize = fm->topsize += psize; |
||
1852 | fm->top = p; |
||
1853 | p->head = tsize | PINUSE_BIT; |
||
1854 | if (p == fm->dv) |
||
1855 | { |
||
1856 | fm->dv = 0; |
||
1857 | fm->dvsize = 0; |
||
1858 | } |
||
1859 | if (should_trim(fm, tsize)) |
||
1860 | sys_trim(fm, 0); |
||
1861 | goto postaction; |
||
1862 | } |
||
1863 | else if (next == fm->dv) |
||
1864 | { |
||
1865 | size_t dsize = fm->dvsize += psize; |
||
1866 | fm->dv = p; |
||
1867 | set_size_and_pinuse_of_free_chunk(p, dsize); |
||
1868 | goto postaction; |
||
1869 | } |
||
1870 | else |
||
1871 | { |
||
1872 | size_t nsize = chunksize(next); |
||
1873 | psize += nsize; |
||
1874 | unlink_chunk(fm, next, nsize); |
||
1875 | set_size_and_pinuse_of_free_chunk(p, psize); |
||
1876 | if (p == fm->dv) |
||
1877 | { |
||
1878 | fm->dvsize = psize; |
||
1879 | goto postaction; |
||
1880 | } |
||
1881 | } |
||
1882 | } |
||
1883 | else |
||
1884 | set_free_with_pinuse(p, psize, next); |
||
1885 | |||
1886 | if (is_small(psize)) |
||
1887 | { |
||
1888 | insert_small_chunk(fm, p, psize); |
||
1889 | check_free_chunk(fm, p); |
||
1890 | } |
||
1891 | else |
||
1892 | { |
||
1893 | tchunkptr tp = (tchunkptr)p; |
||
1894 | insert_large_chunk(fm, tp, psize); |
||
1895 | check_free_chunk(fm, p); |
||
1896 | if (--fm->release_checks == 0) |
||
1897 | release_unused_segments(fm); |
||
1898 | } |
||
1899 | goto postaction; |
||
1900 | } |
||
1901 | } |
||
1902 | erroraction: |
||
1903 | USAGE_ERROR_ACTION(fm, p); |
||
1904 | postaction: |
||
1905 | POSTACTION(fm); |
||
1906 | } |
||
1907 | } |
||
1908 | #undef fm |
||
1909 | } |
||
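
/*
  Illustrative note: for a layout

      | A (free) | B (in use) | C (free) |

  freeing B first consolidates backward (unlink A, p becomes A), then
  forward (unlink C), and bins one chunk of size A+B+C -- unless C is
  the top or dv chunk, in which case the merged chunk is absorbed into
  top (possibly triggering sys_trim) or becomes the new dv instead.
*/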
1910 | |||
1911 | void* _calloc_r(struct _reent *reent_ptr, size_t n_elements, size_t elem_size) { |
||
1912 | void* mem; |
||
1913 | size_t req = 0; |
||
1914 | if (n_elements != 0) { |
||
1915 | req = n_elements * elem_size; |
||
1916 | if (((n_elements | elem_size) & ~(size_t)0xffff) && |
||
1917 | (req / n_elements != elem_size)) |
||
1918 | req = MAX_SIZE_T; /* force downstream failure on overflow */ |
||
1919 | } |
||
1920 | mem = _malloc_r(reent_ptr, req); |
||
1921 | if (mem != 0 && calloc_must_clear(mem2chunk(mem))) |
||
1922 | memset(mem, 0, req); |
||
1923 | return mem; |
||
1924 | } |
||
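
/*
  Worked example of the overflow guard (hypothetical 32-bit size_t):
  for n_elements == 0x10001 and elem_size == 0x10000 the true product
  0x100010000 wraps to 0x10000. Both operands have bits above 0xffff,
  so the cheap mask test applies, and req / n_elements == 0 !=
  elem_size exposes the wrap; req is forced to MAX_SIZE_T so the
  downstream _malloc_r call fails instead of returning a short block.
*/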
1925 | |||
1926 | void* _realloc_r (struct _reent *reent_ptr, void* oldmem, size_t bytes){ |
||
1927 | if (oldmem == 0) |
||
1928 | return _malloc_r(reent_ptr, bytes); |
||
1929 | #ifdef REALLOC_ZERO_BYTES_FREES |
||
1930 | if (bytes == 0) { |
||
1931 | _free_r(ptr, oldmem); |
||
1932 | return 0; |
||
1933 | } |
||
1934 | #endif /* REALLOC_ZERO_BYTES_FREES */ |
||
1935 | else { |
||
1936 | #if ! FOOTERS |
||
1937 | mstate m = gm; |
||
1938 | #else /* FOOTERS */ |
||
1939 | mstate m = get_mstate_for(mem2chunk(oldmem)); |
||
1940 | if (!ok_magic(m)) { |
||
1941 | USAGE_ERROR_ACTION(m, oldmem); |
||
1942 | return 0; |
||
1943 | } |
||
1944 | #endif /* FOOTERS */ |
||
1945 | return internal_realloc(reent_ptr, m, oldmem, bytes); |
||
1946 | } |
||
1947 | } |
||
1948 | |||
1949 | |||
1950 | |||
1951 | /* ----------------------------------------------------------------------- |
||
1952 | History: |
||
1953 | V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee) |
||
1954 | * Use zeros instead of prev foot for is_mmapped |
||
1955 | * Add mspace_track_large_chunks; thanks to Jean Brouwers |
||
1956 | * Fix set_inuse in internal_realloc; thanks to Jean Brouwers |
||
1957 | * Fix insufficient sys_alloc padding when using 16byte alignment |
||
1958 | * Fix bad error check in mspace_footprint |
||
1959 | * Adaptations for ptmalloc; thanks to Wolfram Gloger. |
||
1960 | * Reentrant spin locks; thanks to Earl Chew and others |
||
1961 | * Win32 improvements; thanks to Niall Douglas and Earl Chew |
||
1962 | * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options |
||
1963 | * Extension hook in malloc_state |
||
1964 | * Various small adjustments to reduce warnings on some compilers |
||
1965 | * Various configuration extensions/changes for more platforms. Thanks |
||
1966 | to all who contributed these. |
||
1967 | |||
1968 | V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) |
||
1969 | * Add max_footprint functions |
||
1970 | * Ensure all appropriate literals are size_t |
||
1971 | * Fix conditional compilation problem for some #define settings |
||
1972 | * Avoid concatenating segments with the one provided |
||
1973 | in create_mspace_with_base |
||
1974 | * Rename some variables to avoid compiler shadowing warnings |
||
1975 | * Use explicit lock initialization. |
||
1976 | * Better handling of sbrk interference. |
||
1977 | * Simplify and fix segment insertion, trimming and mspace_destroy |
||
1978 | * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x |
||
1979 | * Thanks especially to Dennis Flanagan for help on these. |
||
1980 | |||
1981 | V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) |
||
1982 | * Fix memalign brace error. |
||
1983 | |||
1984 | V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) |
||
1985 | * Fix improper #endif nesting in C++ |
||
1986 | * Add explicit casts needed for C++ |
||
1987 | |||
1988 | V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) |
||
1989 | * Use trees for large bins |
||
1990 | * Support mspaces |
||
1991 | * Use segments to unify sbrk-based and mmap-based system allocation, |
||
1992 | removing need for emulation on most platforms without sbrk. |
||
1993 | * Default safety checks |
||
1994 | * Optional footer checks. Thanks to William Robertson for the idea. |
||
1995 | * Internal code refactoring |
||
1996 | * Incorporate suggestions and platform-specific changes. |
||
1997 | Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, |
||
1998 | Aaron Bachmann, Emery Berger, and others. |
||
1999 | * Speed up non-fastbin processing enough to remove fastbins. |
||
2000 | * Remove useless cfree() to avoid conflicts with other apps. |
||
2001 | * Remove internal memcpy, memset. Compilers handle builtins better. |
||
2002 | * Remove some options that no one ever used and rename others. |
||
2003 | |||
2004 | V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) |
||
2005 | * Fix malloc_state bitmap array misdeclaration |
||
2006 | |||
2007 | V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) |
||
2008 | * Allow tuning of FIRST_SORTED_BIN_SIZE |
||
2009 | * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. |
||
2010 | * Better detection and support for non-contiguousness of MORECORE. |
||
2011 | Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger |
||
2012 | * Bypass most of malloc if no frees. Thanks To Emery Berger. |
||
2013 | * Fix freeing of old top non-contiguous chunk im sysmalloc. |
||
2014 | * Raised default trim and map thresholds to 256K. |
||
2015 | * Fix mmap-related #defines. Thanks to Lubos Lunak. |
||
2016 | * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. |
||
2017 | * Branch-free bin calculation |
||
2018 | * Default trim and mmap thresholds now 256K. |
||
2019 | |||
2020 | V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) |
||
2021 | * Introduce independent_comalloc and independent_calloc. |
||
2022 | Thanks to Michael Pachos for motivation and help. |
||
2023 | * Make optional .h file available |
||
2024 | * Allow > 2GB requests on 32bit systems. |
||
2025 | * new WIN32 sbrk, mmap, munmap, lock code from |
||
2026 | Thanks also to Andreas Mueller |
||
2027 | and Anonymous. |
||
2028 | * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for |
||
2029 | helping test this.) |
||
2030 | * memalign: check alignment arg |
||
2031 | * realloc: don't try to shift chunks backwards, since this |
||
2032 | leads to more fragmentation in some programs and doesn't |
||
2033 | seem to help in any others. |
||
2034 | * Collect all cases in malloc requiring system memory into sysmalloc |
||
2035 | * Use mmap as backup to sbrk |
||
2036 | * Place all internal state in malloc_state |
||
2037 | * Introduce fastbins (although similar to 2.5.1) |
||
2038 | * Many minor tunings and cosmetic improvements |
||
2039 | * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK |
||
2040 | * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS |
||
2041 | Thanks to Tony E. Bennett |
||
2042 | * Include errno.h to support default failure action. |
||
2043 | |||
2044 | V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) |
||
2045 | * return null for negative arguments |
||
2046 | * Added Several WIN32 cleanups from Martin C. Fong |
||
2047 | * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' |
||
2048 | (e.g. WIN32 platforms) |
||
2049 | * Cleanup header file inclusion for WIN32 platforms |
||
2050 | * Cleanup code to avoid Microsoft Visual C++ compiler complaints |
||
2051 | * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing |
||
2052 | memory allocation routines |
||
2053 | * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) |
||
2054 | * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to |
||
2055 | usage of 'assert' in non-WIN32 code |
||
2056 | * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to |
||
2057 | avoid infinite loop |
||
2058 | * Always call 'fREe()' rather than 'free()' |
||
2059 | |||
2060 | V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) |
||
2061 | * Fixed ordering problem with boundary-stamping |
||
2062 | |||
2063 | V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
||
2064 | * Added pvalloc, as recommended by H.J. Liu |
||
2065 | * Added 64bit pointer support mainly from Wolfram Gloger |
||
2066 | * Added anonymously donated WIN32 sbrk emulation |
||
2067 | * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
||
2068 | * malloc_extend_top: fix mask error that caused wastage after |
||
2069 | foreign sbrks |
||
2070 | * Add linux mremap support code from HJ Liu |
||
6099 | serge | 2071 | |
4349 | Serge | 2072 | V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
2073 | * Integrated most documentation with the code. |
||
6099 | serge | 2074 | * Add support for mmap, with help from |
4349 | Serge | 2075 | Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
2076 | * Use last_remainder in more cases. |
||
2077 | * Pack bins using idea from colin@nyx10.cs.du.edu |
||
2078 | * Use ordered bins instead of best-fit threshhold |
||
2079 | * Eliminate block-local decls to simplify tracing and debugging. |
||
2080 | * Support another case of realloc via move into top |
||
6099 | serge | 2081 | * Fix error occuring when initial sbrk_base not word-aligned. |
4349 | Serge | 2082 | * Rely on page size for units instead of SBRK_UNIT to |
2083 | avoid surprises about sbrk alignment conventions. |
||
2084 | * Add mallinfo, mallopt. Thanks to Raymond Nijssen |
||
6099 | serge | 2085 | (raymond@es.ele.tue.nl) for the suggestion. |
4349 | Serge | 2086 | * Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
2087 | * More precautions for cases where other routines call sbrk, |
||
2088 | courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
||
2089 | * Added macros etc., allowing use in linux libc from |
||
2090 | H.J. Lu (hjl@gnu.ai.mit.edu) |
||
2091 | * Inverted this history list |
||
2092 | |||
2093 | V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
||
2094 | * Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
||
2095 | * Removed all preallocation code since under current scheme |
||
2096 | the work required to undo bad preallocations exceeds |
||
2097 | the work saved in good cases for most test programs. |
||
2098 | * No longer use return list or unconsolidated bins since |
||
2099 | no scheme using them consistently outperforms those that don't |
||
2100 | given above changes. |
||
2101 | * Use best fit for very large chunks to prevent some worst-cases. |
||
2102 | * Added some support for debugging |
||
2103 | |||
2104 | V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
||
2105 | * Removed footers when chunks are in use. Thanks to |
||
2106 | Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
||
2107 | |||
2108 | V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
||
6099 | serge | 2109 | * Added malloc_trim, with help from Wolfram Gloger |
4349 | Serge | 2110 | (wmglo@Dent.MED.Uni-Muenchen.DE). |
2111 | |||
2112 | V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
||
2113 | |||
2114 | V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
||
2115 | * realloc: try to expand in both directions |
||
2116 | * malloc: swap order of clean-bin strategy; |
||
2117 | * realloc: only conditionally expand backwards |
||
2118 | * Try not to scavenge used bins |
||
2119 | * Use bin counts as a guide to preallocation |
||
2120 | * Occasionally bin return list chunks in first scan |
||
2121 | * Add a few optimizations from colin@nyx10.cs.du.edu |
||
2122 | |||
2123 | V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
||
2124 | * faster bin computation & slightly different binning |
||
2125 | * merged all consolidations to one part of malloc proper |
||
2126 | (eliminating old malloc_find_space & malloc_clean_bin) |
||
2127 | * Scan 2 returns chunks (not just 1) |
||
2128 | * Propagate failure in realloc if malloc returns 0 |
||
6099 | serge | 2129 | * Add stuff to allow compilation on non-ANSI compilers |
4349 | Serge | 2130 | from kpv@research.att.com |
6099 | serge | 2131 | |
4349 | Serge | 2132 | V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
2133 | * removed potential for odd address access in prev_chunk |
||
2134 | * removed dependency on getpagesize.h |
||
2135 | * misc cosmetics and a bit more internal documentation |
||
2136 | * anticosmetics: mangled names in macros to evade debugger strangeness |
||
6099 | serge | 2137 | * tested on sparc, hp-700, dec-mips, rs6000 |
4349 | Serge | 2138 | with gcc & native cc (hp, dec only) allowing |
2139 | Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
||
2140 | |||
2141 | Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
||
6099 | serge | 2142 | * Based loosely on libg++-1.2X malloc. (It retains some of the overall |
4349 | Serge | 2143 | structure of old version, but most details differ.) |
2144 | |||
*/