/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.  Send questions,
  comments, complaints, performance data, etc to dl@cs.oswego.edu

* Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that you may already, by default, be using a C
  library containing a malloc that is based on some version of this
  malloc (for example, in Linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.
*/
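
/*
  Minimal usage sketch (illustrative only, not part of this file): a client
  just calls the usual entry points this allocator provides (malloc, free,
  memalign, ...), after compiling the file with something like
  "gcc -O3 -c malloc.c".

      void *p = malloc(100);          small request, served from a bin
      void *q = memalign(64, 256);    64-byte-aligned block
      free(p);
      free(q);
*/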

#include 
#include 
#include 

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */



/* ------------------- size_t and alignment properties -------------------- */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T           (~(size_t)0)

/* KolibriOS system call 68.12: allocate "size" bytes of application memory. */
void *user_alloc(size_t size)
{
    void  *val;

    __asm__("int3");        /* int3: debugger breakpoint */

    __asm__ __volatile__(
    "int $0x40"
    :"=a"(val)
    :"a"(68),"b"(12),"c"(size));
    return val;
}

/* KolibriOS system call 68.13: free a block returned by 68.12. */
static inline
int user_free(void *mem)
{
    int  val;
    __asm__ __volatile__(
    "int $0x40"
    :"=a"(val)
    :"a"(68),"b"(13),"c"(mem));
    return val;
}


/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE             (sizeof(size_t))
#define SIZE_T_BITSIZE          (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO             ((size_t)0)
#define SIZE_T_ONE              ((size_t)1)
#define SIZE_T_TWO              ((size_t)2)
#define SIZE_T_FOUR             ((size_t)4)
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES       (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T         (MAX_SIZE_T / 2U)

#define USE_LOCK_BIT            (2U)
#define USE_MMAP_BIT            (SIZE_T_ONE)
#define USE_NONCONTIGUOUS_BIT   (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT              (8U)

#define HAVE_MMAP               1
#define CALL_MMAP(s)            MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s)       MUNMAP_DEFAULT((a), (s))
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
#define MAX_RELEASE_CHECK_RATE  4095
#define NO_SEGMENT_TRAVERSAL    1
#define MALLOC_ALIGNMENT        ((size_t)8U)
#define CHUNK_OVERHEAD          (SIZE_T_SIZE)
#define DEFAULT_GRANULARITY     ((size_t)64U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD  ((size_t)256U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD  ((size_t)512U * (size_t)1024U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
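
/*
  Worked example (illustrative, assuming the configuration above with
  MALLOC_ALIGNMENT == 8 and CHUNK_ALIGN_MASK == 7):

      align_offset(0x1000) == 0      already 8-byte aligned
      align_offset(0x1004) == 4      (8 - (0x1004 & 7)) & 7
      align_offset(0x1007) == 1

  i.e. the macro returns how many bytes must be added to an address to
  reach the next MALLOC_ALIGNMENT boundary, and 0 if it is already there.
*/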


#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */

/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
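
/*
  Worked example (illustrative, on this 32-bit configuration where
  SIZE_T_SIZE == 4, CHUNK_OVERHEAD == 4, CHUNK_ALIGN_MASK == 7,
  MIN_CHUNK_SIZE == 16, MIN_REQUEST == 11):

      request2size(8)  == 16     below MIN_REQUEST, rounded to a minimal chunk
      request2size(13) == (13 + 4 + 7) & ~7 == 24
      request2size(24) == 32

  chunk2mem(p) == (char*)p + 8, so the user pointer starts two words into
  the chunk; only one word of overhead is charged because an in-use chunk
  may reuse the following chunk's prev_foot word for user data.
*/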
179
 
180
/* ------------------ Operations on head and foot fields ----------------- */
181
 
182
/*
183
  The head field of a chunk is or'ed with PINUSE_BIT when previous
184
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
185
  use, unless mmapped, in which case both bits are cleared.
186
 
187
  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
188
*/
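
/*
  Worked example (illustrative, using the bit values defined just below
  and a 32-bit size_t):

      head == 48|CINUSE_BIT|PINUSE_BIT == 0x33
          in-use chunk, previous chunk in use, chunksize(p) == 0x33 & ~7 == 48
      head == 48|PINUSE_BIT == 0x31
          free chunk whose predecessor is in use; is_inuse(p) is false
      head with both bits clear
          chunk came from a direct mmap; is_mmapped(p) is true

  chunksize() masks off the low three flag bits, so it works in all cases.
*/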

#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG4_BIT           (SIZE_T_FOUR)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)

#define chunksize(p)        ((p)->head & ~(FLAG_BITS))

#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)


struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])


struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t       sflags;           /* mmap and extern flag */
};

#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/* ---------------------------- malloc_state ----------------------------- */

/*
   A malloc_state holds all of the bookkeeping for a space.
   The main fields are:

  Top
    The topmost chunk of the currently active segment. Its size is
    cached in topsize.  The actual size of topmost space is
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
    fenceposts and segment records if necessary when getting more
    space from the system.  The size at which to autotrim top is
    cached from mparams in trim_check, except that it is disabled if
    an autotrim fails.

  Designated victim (dv)
    This is the preferred chunk for servicing small requests that
    don't have exact fits.  It is normally the chunk split off most
    recently to service another small request.  Its size is cached in
    dvsize. The link fields of this chunk are not maintained since it
    is not kept in a bin.

  SmallBins
    An array of bin headers for free chunks.  These bins hold chunks
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
    chunks of all the same size, spaced 8 bytes apart.  To simplify
    use in double-linked lists, each bin header acts as a malloc_chunk
    pointing to the real first node, if it exists (else pointing to
    itself).  This avoids special-casing for headers.  But to avoid
    waste, we allocate only the fd/bk pointers of bins, and then use
    repositioning tricks to treat these as the fields of a chunk.

  TreeBins
    Treebins are pointers to the roots of trees holding a range of
    sizes. There are 2 equally spaced treebins for each power of two
    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
    larger.

  Bin maps
    There is one bit map for small bins ("smallmap") and one for
    treebins ("treemap").  Each bin sets its bit when non-empty, and
    clears the bit when empty.  Bit operations are then used to avoid
    bin-by-bin searching -- nearly all "search" is done without ever
    looking at bins that won't be selected.  The bit maps
    conservatively use 32 bits per map word, even on a 64-bit system.
    For a good description of some of the bit-based techniques used
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
    supplement at http://hackersdelight.org/). Many of these are
    intended to reduce the branchiness of paths through malloc etc, as
    well as to reduce the number of memory locations read or written.

  Segments
    A list of segments headed by an embedded malloc_segment record
    representing the initial space.

  Address check support
    The least_addr field is the least address ever obtained from
    MORECORE or MMAP. Attempted frees and reallocs of any address less
    than this are trapped (unless INSECURE is defined).

  Magic tag
    A cross-check field that should always hold the same value as mparams.magic.

  Flags
    Bits recording whether to use MMAP, locks, or contiguous MORECORE

  Statistics
    Each space keeps track of current and maximum system memory
    obtained via MORECORE or MMAP.

  Trim support
    Fields holding the amount of unused topmost memory that should trigger
    trimming, and a counter to force periodic scanning to release unused
    non-topmost segments.

  Locking
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
    around every public call using this mspace.

  Extension support
    A void* pointer and a size_t field that can be used to help implement
    extensions to this malloc.
*/

/* Bin types, widths and sizes */
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
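
/*
  Plugging in the values above on this 32-bit configuration (SIZE_T_SIZE == 4,
  CHUNK_OVERHEAD == 4, CHUNK_ALIGN_MASK == 7) -- an illustrative check, not
  extra configuration:

      MIN_LARGE_SIZE    == 256    smallest chunk size handled by treebins
      MAX_SMALL_SIZE    == 255
      MAX_SMALL_REQUEST == 244    largest user request whose padded size
                                  (request2size(244) == 248) still lands
                                  in a smallbin

  request2size(245) == 256, which equals MIN_LARGE_SIZE and therefore goes
  to the treebins instead.
*/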

struct malloc_state {
  binmap_t   smallmap;
  binmap_t   treemap;
  size_t     dvsize;
  size_t     topsize;
  char*      least_addr;
  mchunkptr  dv;
  mchunkptr  top;
  size_t     trim_check;
  size_t     release_checks;
  size_t     magic;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  size_t     footprint;
  size_t     max_footprint;
  flag_t     mflags;
  struct mutex  lock;     /* locate lock among fields that rarely change */
  msegment   seg;
  void*      extp;      /* Unused but available for extensions */
  size_t     exts;
};

typedef struct malloc_state*    mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams. Note that the non-zeroness of "magic"
  also serves as an initialization flag.
*/

struct malloc_params
{
    volatile size_t  magic;
    size_t  page_size;
    size_t  granularity;
    size_t  mmap_threshold;
    size_t  trim_threshold;
    flag_t  default_mflags;
};

static struct malloc_params mparams;

#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())

static struct malloc_state _gm_;
#define gm                 (&_gm_)
#define is_global(M)       ((M) == &_gm_)

#define is_initialized(M)  ((M)->top != 0)


//struct mutex malloc_global_mutex;

static DEFINE_MUTEX(malloc_global_mutex);

#define ACQUIRE_MALLOC_GLOBAL_LOCK()  MutexLock(&malloc_global_mutex);
#define RELEASE_MALLOC_GLOBAL_LOCK()  MutexUnlock(&malloc_global_mutex);

#define PREACTION(M)  ( MutexLock(&(M)->lock))
#define POSTACTION(M) { MutexUnlock(&(M)->lock); }


/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
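
/*
  Worked example (illustrative): with SMALLBIN_SHIFT == 3 the smallbins are
  spaced 8 bytes apart, so a padded chunk size of 40 maps as

      small_index(40)     == 40 >> 3 == 5
      small_index2size(5) == 5 << 3  == 40

  and MIN_SMALL_INDEX == small_index(16) == 2 on this configuration, so the
  first two smallbins never hold chunks.
*/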

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))


#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
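
/*
  Worked example (illustrative): for a padded size S == 1536 (0x600),

      X = 1536 >> 8 = 6,   K = bsr(6) = 2   (index of the highest set bit)
      I = (K << 1) + ((1536 >> (K + 7)) & 1) = 4 + 1 = 5

  so the chunk goes to treebin 5, which (see minsize_for_tree_index below)
  holds sizes 1536..2047; treebin 6 starts at 2048.
*/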

/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))


/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))


/* index corresponding to given bit. Use x86 asm if possible */

#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
  I = (bindex_t)J;\
}
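
/*
  Worked example (illustrative): suppose only smallbins 3 and 4 are
  non-empty, so smallmap == 0x18.

      least_bit(0x18)          == 0x08        isolate the lowest set bit
      compute_bit2idx(0x08, i) gives i == 3   (bsf finds that bit's index)
      left_bits(idx2bit(3))    == 0xFFFFFFF0  every bin at index >= 4

  tmalloc_large below uses exactly this pattern: it masks the tree map with
  left_bits() to find the nearest non-empty bin at or above the one it wants.
*/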


#define mark_inuse_foot(M,p,s)

/* Get/set size at footer */
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

/* Macros for setting head/foot of non-mmapped chunks */

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))


#define assert(x)
#define RTCHECK(e)  __builtin_expect(e, 1)

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p)     is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#define CORRUPTION_ERROR_ACTION(m)                             \
        do {                                                   \
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
            while(1)                                           \
            {                                                  \
                delay(100);                                    \
            }                                                  \
        }while(0)


#define USAGE_ERROR_ACTION(m, p)                               \
        do {                                                   \
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
            while(1)                                           \
            {                                                  \
                delay(100);                                    \
            }                                                  \
        }while(0)

/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros.  Even
  the ones for trees, which are very long but have very short typical
  paths.  This is ugly but reduces reliance on inlining support of
  compilers.
*/

/* Link a free chunk into a smallbin  */
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin  */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert(is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}


/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}

/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks.  We use the rightmost descendant
     of x.  We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed.  This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/
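
/*
  Illustrative restatement of how the replacement R is chosen (same field
  names as unlink_large_chunk below; error checks and the RP bookkeeping
  are omitted):

      if (X->bk != X)                      step 1: same-size chain
          R = X->bk;
      else {                               step 2: rightmost descendant
          R = (X->child[1] != 0) ? X->child[1] : X->child[0];
          while (R != 0 && (R->child[1] != 0 || R->child[0] != 0))
              R = (R->child[1] != 0) ? R->child[1] : R->child[0];
      }
      then X->parent and X's children are relinked to R (step 3).
*/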

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F))) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}

/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }


/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))


/* For mmap, use granularity alignment  */
#define mmap_align(S) granularity_align(S)

/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
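
/*
  Worked example (illustrative, using the defaults set in init_mparams:
  page_size == 4096, granularity == 64K):

      page_align(5000)          == (5000 + 4095) & ~4095     == 8192
      granularity_align(100000) == (100000 + 65535) & ~65535 == 131072

  sys_alloc below always rounds its requests up with granularity_align, so
  new segments grow in 64 KiB steps on this configuration.
*/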

/*  True if segment S holds address A */
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char* addr)
{
    msegmentptr sp = &m->seg;
    for (;;) {
        if (addr >= sp->base && addr < sp->base + sp->size)
            return sp;
        if ((sp = sp->next) == 0)
            return 0;
    }
}

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss)
{
    msegmentptr sp = &m->seg;
    for (;;) {
        if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
            return 1;
        if ((sp = sp->next) == 0)
            return 0;
    }
}

static inline void* os_mmap(size_t size)
{
  void* ptr = KernelAlloc(size);
  return (ptr != 0)? ptr: MFAIL;
}

static inline int os_munmap(void* ptr, size_t size)
{
    return (KernelFree(ptr) != 0) ? 0 : -1;
}

#define should_trim(M,s)  ((s) > (M)->trim_check)


#define MMAP_DEFAULT(s)        os_mmap(s)
#define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s)

#define internal_malloc(m, b) malloc(b)
#define internal_free(m, mem) free(mem)

/* -----------------------  Direct-mmapping chunks ----------------------- */

/*
  Directly mmapped chunks are set up with an offset to the start of
  the mmapped region stored in the prev_foot field of the chunk. This
  allows reconstruction of the required argument to MUNMAP when freed,
  and also allows adjustment of the returned chunk to meet alignment
  requirements (especially in memalign).
*/
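
/*
  Sketch of the reconstruction described above (the free path is not part
  of this excerpt; the arithmetic simply mirrors mmap_alloc below):

      size_t offset = p->prev_foot;                distance back to the
                                                   start of the mapping
      char*  base   = (char*)p - offset;
      size_t length = chunksize(p) + offset + MMAP_FOOT_PAD;
      CALL_MUNMAP(base, length);                   undoes the original
                                                   os_mmap of "length" bytes
*/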

/* Malloc using mmap */
static void* mmap_alloc(mstate m, size_t nb)
{
    size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    if (mmsize > nb)       /* Check for wrap around 0 */
    {
        char* mm = (char*)(os_mmap(mmsize));
        if (mm != CMFAIL)
        {
            size_t offset = align_offset(chunk2mem(mm));
            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
            mchunkptr p = (mchunkptr)(mm + offset);
            p->prev_foot = offset;
            p->head = psize;
            mark_inuse_foot(m, p, psize);
            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

            if (m->least_addr == 0 || mm < m->least_addr)
                m->least_addr = mm;
            if ((m->footprint += mmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            assert(is_aligned(chunk2mem(p)));
            check_mmapped_chunk(m, p);
            return chunk2mem(p);
        }
    }
    return 0;
}

/* Realloc using mmap */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
{
    size_t oldsize = chunksize(oldp);
    if (is_small(nb)) /* Can't shrink mmap regions below small size */
        return 0;
  /* Keep old chunk if big enough but not too big */
    if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
        return oldp;
    else
    {
        size_t offset = oldp->prev_foot;
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
        size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
        char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, 1);
        if (cp != CMFAIL)
        {
            mchunkptr newp = (mchunkptr)(cp + offset);
            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
            newp->head = psize;
            mark_inuse_foot(m, newp, psize);
            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

            if (cp < m->least_addr)
                m->least_addr = cp;
            if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            check_mmapped_chunk(m, newp);
            return newp;
        }
    }
    return 0;
}

/* ---------------------------- setting mparams -------------------------- */

/* Initialize mparams */
static int init_mparams(void) {

    ACQUIRE_MALLOC_GLOBAL_LOCK();

    if (mparams.magic == 0)
    {
        size_t magic;
        size_t psize;
        size_t gsize;

        psize = 4096;
        gsize = DEFAULT_GRANULARITY;

    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */

        mparams.granularity = gsize;
        mparams.page_size = psize;
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
        mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;

    /* Set up lock for main malloc area */
        gm->mflags = mparams.default_mflags;
        MutexInit(&gm->lock);

        magic = (size_t)(GetTimerTicks() ^ (size_t)0x55555555U);
        magic |= (size_t)8U;    /* ensure nonzero */
        magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
        mparams.magic = magic;
    }

    RELEASE_MALLOC_GLOBAL_LOCK();
    return 1;
}

/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize)
{
  /* Ensure alignment */
    size_t offset = align_offset(chunk2mem(p));
    p = (mchunkptr)((char*)p + offset);
    psize -= offset;

    m->top = p;
    m->topsize = psize;
    p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
    m->trim_check = mparams.trim_threshold; /* reset on each update */
}

/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m)
{
  /* Establish circular links for smallbins */
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i) {
        sbinptr bin = smallbin_at(m,i);
        bin->fd = bin->bk = bin;
    }
}

/* Allocate chunk and prepend remainder with chunk in successor base. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb)
{
    mchunkptr p = align_as_chunk(newbase);
    mchunkptr oldfirst = align_as_chunk(oldbase);
    size_t psize = (char*)oldfirst - (char*)p;
    mchunkptr q = chunk_plus_offset(p, nb);
    size_t qsize = psize - nb;
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);

    assert((char*)oldfirst > (char*)q);
    assert(pinuse(oldfirst));
    assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
    if (oldfirst == m->top) {
        size_t tsize = m->topsize += qsize;
        m->top = q;
        q->head = tsize | PINUSE_BIT;
        check_top_chunk(m, q);
    }
    else if (oldfirst == m->dv) {
        size_t dsize = m->dvsize += qsize;
        m->dv = q;
        set_size_and_pinuse_of_free_chunk(q, dsize);
    }
    else {
        if (!is_inuse(oldfirst)) {
            size_t nsize = chunksize(oldfirst);
            unlink_chunk(m, oldfirst, nsize);
            oldfirst = chunk_plus_offset(oldfirst, nsize);
            qsize += nsize;
        }
        set_free_with_pinuse(q, qsize, oldfirst);
        insert_chunk(m, q, qsize);
        check_free_chunk(m, q);
    }

    check_malloced_chunk(m, chunk2mem(p), nb);
    return chunk2mem(p);
}

/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped)
{
  /* Determine locations and sizes of segment, fenceposts, old top */
    char* old_top = (char*)m->top;
    msegmentptr oldsp = segment_holding(m, old_top);
    char* old_end = oldsp->base + oldsp->size;
    size_t ssize = pad_request(sizeof(struct malloc_segment));
    char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    size_t offset = align_offset(chunk2mem(rawsp));
    char* asp = rawsp + offset;
    char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
    mchunkptr sp = (mchunkptr)csp;
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
    mchunkptr tnext = chunk_plus_offset(sp, ssize);
    mchunkptr p = tnext;
    int nfences = 0;

  /* reset top to new space */
    init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
    assert(is_aligned(ss));
    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
    *ss = m->seg; /* Push current record */
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->seg.sflags = mmapped;
    m->seg.next = ss;

  /* Insert trailing fenceposts */
    for (;;) {
        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
        p->head = FENCEPOST_HEAD;
        ++nfences;
        if ((char*)(&(nextp->head)) < old_end)
            p = nextp;
        else
            break;
    }
    assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
    if (csp != old_top) {
        mchunkptr q = (mchunkptr)old_top;
        size_t psize = csp - old_top;
        mchunkptr tn = chunk_plus_offset(q, psize);
        set_free_with_pinuse(q, psize, tn);
        insert_chunk(m, q, psize);
    }

  check_top_chunk(m, m->top);
}

/* -------------------------- System allocation -------------------------- */

/* Get memory from system using MORECORE or MMAP */
static void* sys_alloc(mstate m, size_t nb)
{
    char* tbase = CMFAIL;
    size_t tsize = 0;
    flag_t mmap_flag = 0;

    ensure_initialization();

  /* Directly map large chunks, but only if already initialized */
    if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0)
    {
        void* mem = mmap_alloc(m, nb);
        if (mem != 0)
            return mem;
    }

  /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)

   In all cases, we need to request enough bytes from system to ensure
   we can malloc nb bytes upon success, so pad with enough space for
   top_foot, plus alignment-pad to make sure we don't lose bytes if
   not on boundary, and round this up to a granularity unit.
  */

    if (HAVE_MMAP && tbase == CMFAIL)   /* Try MMAP */
    {
        size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
        if (rsize > nb)  /* Fail if wraps around zero */
        {
            char* mp = (char*)(CALL_MMAP(rsize));
            if (mp != CMFAIL)
            {
                tbase = mp;
                tsize = rsize;
                mmap_flag = USE_MMAP_BIT;
            }
        }
    }


    if (tbase != CMFAIL)
    {

        if ((m->footprint += tsize) > m->max_footprint)
            m->max_footprint = m->footprint;

        if (!is_initialized(m))  /* first-time initialization */
        {
            if (m->least_addr == 0 || tbase < m->least_addr)
                m->least_addr = tbase;
            m->seg.base = tbase;
            m->seg.size = tsize;
            m->seg.sflags = mmap_flag;
            m->magic = mparams.magic;
            m->release_checks = MAX_RELEASE_CHECK_RATE;
            init_bins(m);

            if (is_global(m))
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
            else
            {
        /* Offset top by embedded malloc_state */
                mchunkptr mn = next_chunk(mem2chunk(m));
                init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
            }
        }
        else
        {
      /* Try to merge with an existing segment */
            msegmentptr sp = &m->seg;
      /* Only consider most recent segment if traversal suppressed */
            while (sp != 0 && tbase != sp->base + sp->size)
                sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
            if (sp != 0 && !is_extern_segment(sp) &&
               (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
                segment_holds(sp, m->top))  /* append */
            {
                sp->size += tsize;
                init_top(m, m->top, m->topsize + tsize);
            }
            else
            {
                if (tbase < m->least_addr)
                    m->least_addr = tbase;
                sp = &m->seg;
                while (sp != 0 && sp->base != tbase + tsize)
                    sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
                if (sp != 0 &&
                    !is_extern_segment(sp) &&
                    (sp->sflags & USE_MMAP_BIT) == mmap_flag)
                {
                    char* oldbase = sp->base;
                    sp->base = tbase;
                    sp->size += tsize;
                    return prepend_alloc(m, tbase, oldbase, nb);
                }
                else
                    add_segment(m, tbase, tsize, mmap_flag);
            }
        }

        if (nb < m->topsize)  /* Allocate from new or extended top space */
        {
            size_t rsize = m->topsize -= nb;
            mchunkptr p = m->top;
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
            check_top_chunk(m, m->top);
            check_malloced_chunk(m, chunk2mem(p), nb);
            return chunk2mem(p);
        }
    }

//    MALLOC_FAILURE_ACTION;
    return 0;
}


/* -----------------------  system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m)
{
    size_t released = 0;
    int nsegs = 0;
    msegmentptr pred = &m->seg;
    msegmentptr sp = pred->next;
    while (sp != 0)
    {
        char* base = sp->base;
        size_t size = sp->size;
        msegmentptr next = sp->next;
        ++nsegs;
        if (is_mmapped_segment(sp) && !is_extern_segment(sp))
        {
            mchunkptr p = align_as_chunk(base);
            size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
            if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE)
            {
                tchunkptr tp = (tchunkptr)p;
                assert(segment_holds(sp, (char*)sp));
                if (p == m->dv) {
                    m->dv = 0;
                    m->dvsize = 0;
                }
                else {
                    unlink_large_chunk(m, tp);
                }
                if (CALL_MUNMAP(base, size) == 0)
                {
                    released += size;
                    m->footprint -= size;
          /* unlink obsoleted record */
                    sp = pred;
                    sp->next = next;
                }
                else { /* back out if cannot unmap */
                    insert_large_chunk(m, tp, psize);
                }
            }
        }
        if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
            break;
        pred = sp;
        sp = next;
    }
  /* Reset check counter */
    m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
                       nsegs : MAX_RELEASE_CHECK_RATE);
    return released;
}

static int sys_trim(mstate m, size_t pad)
{
    size_t released = 0;
    ensure_initialization();
    if (pad < MAX_REQUEST && is_initialized(m))
    {
        pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

        if (m->topsize > pad)
        {
      /* Shrink top space in granularity-size units, keeping at least one */
            size_t unit = mparams.granularity;
            size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                             SIZE_T_ONE) * unit;
            msegmentptr sp = segment_holding(m, (char*)m->top);

            if (!is_extern_segment(sp))
            {
                if (is_mmapped_segment(sp))
                {
                    if (HAVE_MMAP &&
                        sp->size >= extra &&
                        !has_segment_link(m, sp))  /* can't shrink if pinned */
                    {
                        size_t newsize = sp->size - extra;
            /* Prefer mremap, fall back to munmap */
                        if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                            (CALL_MUNMAP(sp->base + newsize, extra) == 0))
                        {
                            released = extra;
                        }
                    }
                }
            }

            if (released != 0)
            {
                sp->size -= released;
                m->footprint -= released;
                init_top(m, m->top, m->topsize - released);
                check_top_chunk(m, m->top);
            }
        }

    /* Unmap any unused mmapped segments */
        if (HAVE_MMAP)
            released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
        if (released == 0 && m->topsize > m->trim_check)
            m->trim_check = MAX_SIZE_T;
    }

    return (released != 0)? 1 : 0;
}



/* ---------------------------- malloc support --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }

  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /*  If dv is a better fit, return 0 so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}

/* allocate a small request from the best fitting chunk in a treebin */
static void* tmalloc_small(mstate m, size_t nb)
{
    tchunkptr t, v;
    size_t rsize;
    bindex_t i;
    binmap_t leastbit = least_bit(m->treemap);
    compute_bit2idx(leastbit, i);
    v = t = *treebin_at(m, i);
    rsize = chunksize(t) - nb;

    while ((t = leftmost_child(t)) != 0) {
        size_t trem = chunksize(t) - nb;
        if (trem < rsize) {
            rsize = trem;
            v = t;
        }
    }

    if (RTCHECK(ok_address(m, v))) {
        mchunkptr r = chunk_plus_offset(v, nb);
        assert(chunksize(v) == rsize + nb);
        if (RTCHECK(ok_next(v, r))) {
            unlink_large_chunk(m, v);
            if (rsize < MIN_CHUNK_SIZE)
                set_inuse_and_pinuse(m, v, (rsize + nb));
            else {
                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
                set_size_and_pinuse_of_free_chunk(r, rsize);
                replace_dv(m, r, rsize);
            }
            return chunk2mem(v);
        }
    }

    CORRUPTION_ERROR_ACTION(m);
    return 0;
}

/* --------------------------- memalign support -------------------------- */

static void* internal_memalign(mstate m, size_t alignment, size_t bytes)
{
    if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
        return internal_malloc(m, bytes);
    if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
        alignment = MIN_CHUNK_SIZE;
    if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
        size_t a = MALLOC_ALIGNMENT << 1;
        while (a < alignment) a <<= 1;
        alignment = a;
    }

    if (bytes >= MAX_REQUEST - alignment) {
        if (m != 0)  { /* Test isn't needed but avoids compiler warning */
//            MALLOC_FAILURE_ACTION;
        }
    }
    else
    {
        size_t nb = request2size(bytes);
        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
        char* mem = (char*)internal_malloc(m, req);
        if (mem != 0)
        {
            void* leader = 0;
            void* trailer = 0;
            mchunkptr p = mem2chunk(mem);

            PREACTION(m);

            if ((((size_t)(mem)) % alignment) != 0)  /* misaligned */
            {
        /*
          Find an aligned spot inside chunk.  Since we need to give
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
          the first calculation places us at a spot with less than
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
          We've allocated enough total room so that this is always
          possible.
        */
1532
                char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
1533
                                                       alignment -
1534
                                                       SIZE_T_ONE)) &
1535
                                             -alignment));
1536
                char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
1537
                             br : br+alignment;
1538
                mchunkptr newp = (mchunkptr)pos;
1539
                size_t leadsize = pos - (char*)(p);
1540
                size_t newsize = chunksize(p) - leadsize;
1541
 
1542
                if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
1543
                    newp->prev_foot = p->prev_foot + leadsize;
1544
                    newp->head = newsize;
1545
                }
1546
                else { /* Otherwise, give back leader, use the rest */
1547
                    set_inuse(m, newp, newsize);
1548
                    set_inuse(m, p, leadsize);
1549
                    leader = chunk2mem(p);
1550
                }
1551
                p = newp;
1552
            }
1553
 
1554
      /* Give back spare room at the end */
1555
            if (!is_mmapped(p))
1556
            {
1557
                size_t size = chunksize(p);
1558
                if (size > nb + MIN_CHUNK_SIZE)
1559
                {
1560
                    size_t remainder_size = size - nb;
1561
                    mchunkptr remainder = chunk_plus_offset(p, nb);
1562
                    set_inuse(m, p, nb);
1563
                    set_inuse(m, remainder, remainder_size);
1564
                    trailer = chunk2mem(remainder);
1565
                }
1566
            }
1567
 
1568
            assert (chunksize(p) >= nb);
1569
            assert((((size_t)(chunk2mem(p))) % alignment) == 0);
1570
            check_inuse_chunk(m, p);
1571
            POSTACTION(m);
1572
            if (leader != 0) {
1573
                internal_free(m, leader);
1574
            }
1575
            if (trailer != 0) {
1576
                internal_free(m, trailer);
1577
            }
1578
            return chunk2mem(p);
1579
        }
1580
    }
1581
    return 0;
1582
}
1583
 
1584
void* memalign(size_t alignment, size_t bytes)
1585
{
1586
    return internal_memalign(gm, alignment, bytes);
1587
}
1588
 
1589
 
1590
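/*
  Illustrative sketch, not part of the allocator: one way a caller might use
  the memalign() wrapper above.  The 4096-byte alignment, the 100-byte size
  and the sketch function name are example values chosen here, not anything
  required by this file; the fragment is kept under #if 0 so it is never
  compiled into the library.
*/
#if 0
static void memalign_usage_sketch(void)
{
    /* Ask for 100 bytes whose address is a multiple of 4096.  A
       non-power-of-two alignment would first be rounded up to a power
       of two by internal_memalign. */
    void* buf = memalign(4096, 100);
    if (buf != 0) {
        /* internal_memalign asserts this same property before returning. */
        assert(((size_t)buf % 4096) == 0);
        /* Aligned blocks are ordinary in-use chunks, released with free(). */
        free(buf);
    }
}
#endif
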
void* malloc(size_t bytes)
{
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it

     The ugly goto's here ensure that postaction occurs along all paths.
  */

    ensure_initialization(); /* initialize in sys_alloc if not using locks */

    PREACTION(gm);
    {
        void* mem;
        size_t nb;

        if (bytes <= MAX_SMALL_REQUEST)
        {
            bindex_t idx;
            binmap_t smallbits;
            nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
            idx = small_index(nb);
            smallbits = gm->smallmap >> idx;

            if ((smallbits & 0x3U) != 0) /* Remainderless fit to a smallbin. */
            {
                mchunkptr b, p;
                idx += ~smallbits & 1;       /* Uses next bin if idx empty */
                b = smallbin_at(gm, idx);
                p = b->fd;
                assert(chunksize(p) == small_index2size(idx));
                unlink_first_small_chunk(gm, b, p, idx);
                set_inuse_and_pinuse(gm, p, small_index2size(idx));
                mem = chunk2mem(p);
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
            else if (nb > gm->dvsize)
            {
                if (smallbits != 0) /* Use chunk in next nonempty smallbin */
                {
                    mchunkptr b, p, r;
                    size_t rsize;
                    bindex_t i;
                    binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
                    binmap_t leastbit = least_bit(leftbits);
                    compute_bit2idx(leastbit, i);
                    b = smallbin_at(gm, i);
                    p = b->fd;
                    assert(chunksize(p) == small_index2size(i));
                    unlink_first_small_chunk(gm, b, p, i);
                    rsize = small_index2size(i) - nb;
                    /* Fit here cannot be remainderless if 4byte sizes */
                    if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                        set_inuse_and_pinuse(gm, p, small_index2size(i));
                    else
                    {
                        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
                        r = chunk_plus_offset(p, nb);
                        set_size_and_pinuse_of_free_chunk(r, rsize);
                        replace_dv(gm, r, rsize);
                    }
                    mem = chunk2mem(p);
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
                else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0)
                {
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
            }
        }
        else if (bytes >= MAX_REQUEST)
            nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
        else
        {
            nb = pad_request(bytes);
            if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0)
            {
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
        }

        if (nb <= gm->dvsize) {
            size_t rsize = gm->dvsize - nb;
            mchunkptr p = gm->dv;
            if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
                mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
                gm->dvsize = rsize;
                set_size_and_pinuse_of_free_chunk(r, rsize);
                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            }
            else { /* exhaust dv */
                size_t dvs = gm->dvsize;
                gm->dvsize = 0;
                gm->dv = 0;
                set_inuse_and_pinuse(gm, p, dvs);
            }
            mem = chunk2mem(p);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }
        else if (nb < gm->topsize) { /* Split top */
            size_t rsize = gm->topsize -= nb;
            mchunkptr p = gm->top;
            mchunkptr r = gm->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            mem = chunk2mem(p);
            check_top_chunk(gm, gm->top);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }

        mem = sys_alloc(gm, nb);

  postaction:
        POSTACTION(gm);
        return mem;
    }

    return 0;
}


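/*
  Illustrative sketch, not part of the allocator: how the request size picks
  one of the two branches described in the "Basic algorithm" comment inside
  malloc() above.  The byte counts are arbitrary example values, and the
  fragment is kept under #if 0 so it is never compiled into the library.
*/
#if 0
static void malloc_paths_sketch(void)
{
    /* 24 bytes is a small request: after padding (pad_request) it stays at
       or below MAX_SMALL_REQUEST, so it is served from a smallbin, the dv
       chunk, a treebin via tmalloc_small, the top chunk, or sys_alloc. */
    void* small = malloc(24);

    /* 65536 bytes is a large request: pad_request(65536) exceeds
       MAX_SMALL_REQUEST, so tmalloc_large searches the treebins for the
       smallest fitting chunk before falling back to dv, top, or sys_alloc. */
    void* large = malloc(65536);

    free(large);
    free(small);
}
#endif
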
void free(void* mem)
{
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */

    if (mem != 0)
    {
        mchunkptr p  = mem2chunk(mem);

#define fm gm

        PREACTION(fm);
        {
            check_inuse_chunk(fm, p);
            if (RTCHECK(ok_address(fm, p) && ok_inuse(p)))
            {
                size_t psize = chunksize(p);
                mchunkptr next = chunk_plus_offset(p, psize);
                if (!pinuse(p))
                {
                    size_t prevsize = p->prev_foot;
                    if (is_mmapped(p))
                    {
                        psize += prevsize + MMAP_FOOT_PAD;
                        if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                            fm->footprint -= psize;
                        goto postaction;
                    }
                    else
                    {
                        mchunkptr prev = chunk_minus_offset(p, prevsize);
                        psize += prevsize;
                        p = prev;
                        if (RTCHECK(ok_address(fm, prev)))  /* consolidate backward */
                        {
                            if (p != fm->dv)
                            {
                                unlink_chunk(fm, p, prevsize);
                            }
                            else if ((next->head & INUSE_BITS) == INUSE_BITS)
                            {
                                fm->dvsize = psize;
                                set_free_with_pinuse(p, psize, next);
                                goto postaction;
                            }
                        }
                        else
                            goto erroraction;
                    }
                }

                if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
                {
                    if (!cinuse(next))    /* consolidate forward */
                    {
                        if (next == fm->top)
                        {
                            size_t tsize = fm->topsize += psize;
                            fm->top = p;
                            p->head = tsize | PINUSE_BIT;
                            if (p == fm->dv)
                            {
                                fm->dv = 0;
                                fm->dvsize = 0;
                            }
                            if (should_trim(fm, tsize))
                                sys_trim(fm, 0);
                            goto postaction;
                        }
                        else if (next == fm->dv)
                        {
                            size_t dsize = fm->dvsize += psize;
                            fm->dv = p;
                            set_size_and_pinuse_of_free_chunk(p, dsize);
                            goto postaction;
                        }
                        else
                        {
                            size_t nsize = chunksize(next);
                            psize += nsize;
                            unlink_chunk(fm, next, nsize);
                            set_size_and_pinuse_of_free_chunk(p, psize);
                            if (p == fm->dv)
                            {
                                fm->dvsize = psize;
                                goto postaction;
                            }
                        }
                    }
                    else
                        set_free_with_pinuse(p, psize, next);

                    if (is_small(psize))
                    {
                        insert_small_chunk(fm, p, psize);
                        check_free_chunk(fm, p);
                    }
                    else
                    {
                        tchunkptr tp = (tchunkptr)p;
                        insert_large_chunk(fm, tp, psize);
                        check_free_chunk(fm, p);
                        if (--fm->release_checks == 0)
                            release_unused_segments(fm);
                    }
                    goto postaction;
                }
            }
    erroraction:
        USAGE_ERROR_ACTION(fm, p);
    postaction:
        POSTACTION(fm);
        }
    }
#undef fm
}

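/*
  Illustrative sketch, not part of the allocator: the consolidation that
  free() above performs.  The two allocations and their sizes are arbitrary
  example values; whether they end up adjacent depends on the heap state.
  Kept under #if 0 so it is never compiled into the library.
*/
#if 0
static void free_coalescing_sketch(void)
{
    void* a = malloc(100);
    void* b = malloc(100);   /* often carved right after a's chunk */

    /* Freeing a leaves a free chunk in front of b.  Freeing b then sees
       pinuse(b) clear, consolidates backward with a's chunk, and forward
       with the top or dv chunk if one borders it, leaving a single larger
       free region instead of two fragments. */
    free(a);
    free(b);
}
#endif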