/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.  Send questions,
  comments, complaints, performance data, etc to dl@cs.oswego.edu

* Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that by default you may already be using a C
  library containing a malloc that is based on some version of this
  malloc (for example in linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.

*/

#include 
#include 
#include 

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */



/* ------------------- size_t and alignment properties -------------------- */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T           (~(size_t)0)

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE             (sizeof(size_t))
#define SIZE_T_BITSIZE          (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO             ((size_t)0)
#define SIZE_T_ONE              ((size_t)1)
#define SIZE_T_TWO              ((size_t)2)
#define SIZE_T_FOUR             ((size_t)4)
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES       (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T         (MAX_SIZE_T / 2U)

#define USE_LOCK_BIT            (2U)
#define USE_MMAP_BIT            (SIZE_T_ONE)
#define USE_NONCONTIGUOUS_BIT   (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT              (8U)

#define HAVE_MMAP               1
#define CALL_MMAP(s)            MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s)       MUNMAP_DEFAULT((a), (s))
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
#define MAX_RELEASE_CHECK_RATE  4095
#define NO_SEGMENT_TRAVERSAL    1
#define MALLOC_ALIGNMENT        ((size_t)8U)
#define CHUNK_OVERHEAD          (SIZE_T_SIZE)
#define DEFAULT_GRANULARITY     ((size_t)64U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD  ((size_t)256U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD  ((size_t)512U * (size_t)1024U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
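
/*
  A quick worked example, assuming the default MALLOC_ALIGNMENT of 8:
  for A = 0x1005, (size_t)A & CHUNK_ALIGN_MASK is 5, so align_offset(A)
  is (8 - 5) & 7 = 3, and A + 3 = 0x1008 is 8-byte aligned.  For an
  already aligned A such as 0x1008, align_offset(A) is 0.
*/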


#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */

/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ... and additional padding for fake next-chunk at foot */
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
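
/*
  Worked example (illustrative; assumes a 32-bit build where
  SIZE_T_SIZE == 4, so CHUNK_OVERHEAD == 4, CHUNK_ALIGN_MASK == 7 and
  MIN_CHUNK_SIZE == 16): request2size(13) = pad_request(13) =
  (13 + 4 + 7) & ~7 = 24, while request2size(1) = MIN_CHUNK_SIZE = 16
  because 1 < MIN_REQUEST (= 16 - 4 - 1 = 11).
*/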

/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
  in use, unless mmapped, in which case both bits are cleared.

  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/

#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG4_BIT           (SIZE_T_FOUR)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)

#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
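
/*
  Example of the head encoding (chunk sizes are always multiples of 8,
  so the low three bits are free for flags): an in-use 24-byte chunk
  whose previous neighbor is also in use has
  head = 24 | CINUSE_BIT | PINUSE_BIT = 0x1B; chunksize() masks the
  flags off and returns 24, and is_mmapped() is false because at least
  one inuse bit is set.
*/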

#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)


struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])


struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t       sflags;           /* mmap and extern flag */
};

#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;
238
 
239
/* ---------------------------- malloc_state ----------------------------- */
240
 
241
/*
242
   A malloc_state holds all of the bookkeeping for a space.
243
   The main fields are:
244
 
245
  Top
246
    The topmost chunk of the currently active segment. Its size is
247
    cached in topsize.  The actual size of topmost space is
248
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
249
    fenceposts and segment records if necessary when getting more
250
    space from the system.  The size at which to autotrim top is
251
    cached from mparams in trim_check, except that it is disabled if
252
    an autotrim fails.
253
 
254
  Designated victim (dv)
255
    This is the preferred chunk for servicing small requests that
256
    don't have exact fits.  It is normally the chunk split off most
257
    recently to service another small request.  Its size is cached in
258
    dvsize. The link fields of this chunk are not maintained since it
259
    is not kept in a bin.
260
 
261
  SmallBins
262
    An array of bin headers for free chunks.  These bins hold chunks
263
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
264
    chunks of all the same size, spaced 8 bytes apart.  To simplify
265
    use in double-linked lists, each bin header acts as a malloc_chunk
266
    pointing to the real first node, if it exists (else pointing to
267
    itself).  This avoids special-casing for headers.  But to avoid
268
    waste, we allocate only the fd/bk pointers of bins, and then use
269
    repositioning tricks to treat these as the fields of a chunk.
270
 
271
  TreeBins
272
    Treebins are pointers to the roots of trees holding a range of
273
    sizes. There are 2 equally spaced treebins for each power of two
274
    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
275
    larger.
276
 
277
  Bin maps
278
    There is one bit map for small bins ("smallmap") and one for
279
    treebins ("treemap).  Each bin sets its bit when non-empty, and
280
    clears the bit when empty.  Bit operations are then used to avoid
281
    bin-by-bin searching -- nearly all "search" is done without ever
282
    looking at bins that won't be selected.  The bit maps
283
    conservatively use 32 bits per map word, even if on 64bit system.
284
    For a good description of some of the bit-based techniques used
285
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
286
    supplement at http://hackersdelight.org/). Many of these are
287
    intended to reduce the branchiness of paths through malloc etc, as
288
    well as to reduce the number of memory locations read or written.
289
 
290
  Segments
291
    A list of segments headed by an embedded malloc_segment record
292
    representing the initial space.
293
 
294
  Address check support
295
    The least_addr field is the least address ever obtained from
296
    MORECORE or MMAP. Attempted frees and reallocs of any address less
297
    than this are trapped (unless INSECURE is defined).
298
 
299
  Magic tag
300
    A cross-check field that should always hold same value as mparams.magic.
301
 
302
  Flags
303
    Bits recording whether to use MMAP, locks, or contiguous MORECORE
304
 
305
  Statistics
306
    Each space keeps track of current and maximum system memory
307
    obtained via MORECORE or MMAP.
308
 
309
  Trim support
310
    Fields holding the amount of unused topmost memory that should trigger
311
    timming, and a counter to force periodic scanning to release unused
312
    non-topmost segments.
313
 
314
  Locking
315
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
316
    around every public call using this mspace.
317
 
318
  Extension support
319
    A void* pointer and a size_t field that can be used to help implement
320
    extensions to this malloc.
321
*/
322
 
323
/* Bin types, widths and sizes */
324
#define NSMALLBINS        (32U)
325
#define NTREEBINS         (32U)
326
#define SMALLBIN_SHIFT    (3U)
327
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
328
#define TREEBIN_SHIFT     (8U)
329
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
330
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
331
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
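
/*
  With the defaults above (TREEBIN_SHIFT == 8, 8-byte alignment and, on
  a 32-bit build, CHUNK_OVERHEAD == 4): MIN_LARGE_SIZE is 256, so the
  smallbins serve chunks of 16..248 bytes and MAX_SMALL_REQUEST is
  255 - 7 - 4 = 244 user bytes; anything larger goes to the treebins.
*/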
332
 
333
struct malloc_state {
334
  binmap_t   smallmap;
335
  binmap_t   treemap;
336
  size_t     dvsize;
337
  size_t     topsize;
338
  char*      least_addr;
339
  mchunkptr  dv;
340
  mchunkptr  top;
341
  size_t     trim_check;
342
  size_t     release_checks;
343
  size_t     magic;
344
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
345
  tbinptr    treebins[NTREEBINS];
346
  size_t     footprint;
347
  size_t     max_footprint;
348
  flag_t     mflags;
349
  struct mutex  lock;     /* locate lock among fields that rarely change */
350
  msegment   seg;
351
  void*      extp;      /* Unused but available for extensions */
352
  size_t     exts;
353
};
354
 
355
typedef struct malloc_state*    mstate;
356
 
357
/* ------------- Global malloc_state and malloc_params ------------------- */
358
 
359
/*
360
  malloc_params holds global properties, including those that can be
361
  dynamically set using mallopt. There is a single instance, mparams,
362
  initialized in init_mparams. Note that the non-zeroness of "magic"
363
  also serves as an initialization flag.
364
*/
365
 
366
struct malloc_params
367
{
368
    volatile size_t  magic;
369
    size_t  page_size;
370
    size_t  granularity;
371
    size_t  mmap_threshold;
372
    size_t  trim_threshold;
373
    flag_t  default_mflags;
374
};
375
 
376
static struct malloc_params mparams;
377
 
378
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
379
 
380
static struct malloc_state _gm_;
381
#define gm                 (&_gm_)
382
#define is_global(M)       ((M) == &_gm_)
383
 
384
#define is_initialized(M)  ((M)->top != 0)
385
 
386
 
387
//struct mutex malloc_global_mutex;
388
 
389
static DEFINE_MUTEX(malloc_global_mutex);
390
 
391
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  MutexLock(&malloc_global_mutex);
392
#define RELEASE_MALLOC_GLOBAL_LOCK()  MutexUnlock(&malloc_global_mutex);
393
 
394
#define PREACTION(M)  ( MutexLock(&(M)->lock))
395
#define POSTACTION(M) { MutexUnlock(&(M)->lock); }
396
 
397
 
398
/* ---------------------------- Indexing Bins ---------------------------- */
399
 
400
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
401
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
402
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
403
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
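
/*
  For instance, a free chunk of 40 bytes is "small": is_small(40) holds
  since 40 >> 3 == 5 < 32, it lives in smallbin index small_index(40)
  == 5, and small_index2size(5) == 40 recovers the bin's chunk size.
*/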

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))


#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
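
/*
  Worked example of the treebin mapping: for a chunk size S = 768,
  X = S >> TREEBIN_SHIFT = 3, bsrl gives K = 1, so compute_tree_index
  yields I = (1 << 1) + ((768 >> 8) & 1) = 3.  Consistently,
  minsize_for_tree_index(3) = 768 and minsize_for_tree_index(4) = 1024,
  i.e. treebin 3 holds chunks in [768, 1024).
*/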


/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))


/* index corresponding to given bit. Use x86 asm if possible */

#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
  I = (bindex_t)J;\
}
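
/*
  Small bitmap example: if smallmap == 0x28 (bins 3 and 5 non-empty),
  least_bit(0x28) == 0x08, compute_bit2idx(0x08, i) sets i = 3, and
  left_bits(0x08) == 0xFFFFFFF0 selects every bin at index 4 or above,
  which is how the allocator skips straight to the next usable bin.
*/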


#define mark_inuse_foot(M,p,s)

/* Get/set size at footer */
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

/* Macros for setting head/foot of non-mmapped chunks */

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))


#define assert(x)
#define RTCHECK(e)  __builtin_expect(e, 1)

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p)     is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#define CORRUPTION_ERROR_ACTION(m)                             \
        do {                                                   \
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
            while(1)                                           \
            {                                                  \
                delay(100);                                    \
            }                                                  \
        }while(0)                                              \


#define USAGE_ERROR_ACTION(m, p)                               \
        do {                                                   \
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
            while(1)                                           \
            {                                                  \
                delay(100);                                    \
            }                                                  \
        }while(0)                                              \

/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros.  Even
  the ones for trees, which are very long but have very short typical
  paths.  This is ugly but reduces reliance on inlining support of
  compilers.
*/

/* Link a free chunk into a smallbin  */
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin  */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert(is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}


/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}

/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks.  We use the rightmost descendant
     of x.  We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed.  This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F))) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}

/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }


/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))
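
/*
  With the defaults set up in init_mparams below (page_size = 4096,
  granularity = 64 KiB): page_align(5000) = 8192 and
  granularity_align(5000) = 65536; sys_alloc always rounds its system
  requests up with granularity_align.
*/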
794
 
795
 
796
/* For mmap, use granularity alignment  */
797
#define mmap_align(S) granularity_align(S)
798
 
799
/* For sys_alloc, enough padding to ensure can malloc request on success */
800
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
801
 
802
#define is_page_aligned(S)\
803
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
804
#define is_granularity_aligned(S)\
805
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
806
 
807
/*  True if segment S holds address A */
808
#define segment_holds(S, A)\
809
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
810
 
811
/* Return segment holding given address */
812
static msegmentptr segment_holding(mstate m, char* addr)
813
{
814
    msegmentptr sp = &m->seg;
815
    for (;;) {
816
        if (addr >= sp->base && addr < sp->base + sp->size)
817
            return sp;
818
        if ((sp = sp->next) == 0)
819
            return 0;
820
    }
821
}
822
 
823
/* Return true if segment contains a segment link */
824
static int has_segment_link(mstate m, msegmentptr ss)
825
{
826
    msegmentptr sp = &m->seg;
827
    for (;;) {
828
        if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
829
            return 1;
830
        if ((sp = sp->next) == 0)
831
            return 0;
832
    }
833
}
834
 
835
static inline void* os_mmap(size_t size)
836
{
837
  void* ptr = KernelAlloc(size);
838
  return (ptr != 0)? ptr: MFAIL;
839
}
840
 
841
static inline int os_munmap(void* ptr, size_t size)
842
{
843
    return (KernelFree(ptr) != 0) ? 0 : -1;
844
}
845
 
846
#define should_trim(M,s)  ((s) > (M)->trim_check)
847
 
848
 
849
#define MMAP_DEFAULT(s)        os_mmap(s)
850
#define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
851
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s)
852
 
853
#define internal_malloc(m, b) malloc(b)
854
#define internal_free(m, mem) free(mem)
855
 
856
/* -----------------------  Direct-mmapping chunks ----------------------- */
857
 
858
/*
859
  Directly mmapped chunks are set up with an offset to the start of
860
  the mmapped region stored in the prev_foot field of the chunk. This
861
  allows reconstruction of the required argument to MUNMAP when freed,
862
  and also allows adjustment of the returned chunk to meet alignment
863
  requirements (especially in memalign).
864
*/
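
/*
  Illustrative sketch of that layout (assuming KernelAlloc returns a
  page-aligned block, so align_offset(chunk2mem(mm)) is 0 for an
  8-byte-aligned base): mmap_alloc below places the chunk header at the
  very start of the mapping, records the offset in prev_foot, and lays
  a FENCEPOST_HEAD plus a zero-sized header after the usable area; the
  mapping base can later be recovered as (char*)p - p->prev_foot before
  handing it back to os_munmap.
*/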
865
 
866
/* Malloc using mmap */
867
static void* mmap_alloc(mstate m, size_t nb)
868
{
869
    size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
870
    if (mmsize > nb)       /* Check for wrap around 0 */
871
    {
872
        char* mm = (char*)(os_mmap(mmsize));
873
        if (mm != CMFAIL)
874
        {
875
            size_t offset = align_offset(chunk2mem(mm));
876
            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
877
            mchunkptr p = (mchunkptr)(mm + offset);
878
            p->prev_foot = offset;
879
            p->head = psize;
880
            mark_inuse_foot(m, p, psize);
881
            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
882
            chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
883
 
884
            if (m->least_addr == 0 || mm < m->least_addr)
885
                m->least_addr = mm;
886
            if ((m->footprint += mmsize) > m->max_footprint)
887
                m->max_footprint = m->footprint;
888
            assert(is_aligned(chunk2mem(p)));
889
            check_mmapped_chunk(m, p);
890
            return chunk2mem(p);
891
        }
892
    }
893
    return 0;
894
}
895
 
896
/* Realloc using mmap */
897
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
898
{
899
    size_t oldsize = chunksize(oldp);
900
    if (is_small(nb)) /* Can't shrink mmap regions below small size */
901
        return 0;
902
  /* Keep old chunk if big enough but not too big */
903
    if (oldsize >= nb + SIZE_T_SIZE &&
904
      (oldsize - nb) <= (mparams.granularity << 1))
905
        return oldp;
906
    else
907
    {
908
        size_t offset = oldp->prev_foot;
909
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
910
        size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
911
        char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
912
                                  oldmmsize, newmmsize, 1);
913
        if (cp != CMFAIL)
914
        {
915
            mchunkptr newp = (mchunkptr)(cp + offset);
916
            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
917
            newp->head = psize;
918
            mark_inuse_foot(m, newp, psize);
919
            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
920
            chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
921
 
922
            if (cp < m->least_addr)
923
                m->least_addr = cp;
924
            if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
925
                m->max_footprint = m->footprint;
926
            check_mmapped_chunk(m, newp);
927
            return newp;
928
        }
929
    }
930
    return 0;
931
}
932
 
933
/* ---------------------------- setting mparams -------------------------- */
934
 
935
/* Initialize mparams */
936
static int init_mparams(void) {
937
 
938
    ACQUIRE_MALLOC_GLOBAL_LOCK();
939
 
940
    if (mparams.magic == 0)
941
    {
942
        size_t magic;
943
        size_t psize;
944
        size_t gsize;
945
 
946
        psize = 4096;
947
        gsize = DEFAULT_GRANULARITY;
948
 
949
    /* Sanity-check configuration:
950
       size_t must be unsigned and as wide as pointer type.
951
       ints must be at least 4 bytes.
952
       alignment must be at least 8.
953
       Alignment, min chunk size, and page size must all be powers of 2.
954
    */
955
 
956
        mparams.granularity = gsize;
957
        mparams.page_size = psize;
958
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
959
        mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
960
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
961
 
962
    /* Set up lock for main malloc area */
963
        gm->mflags = mparams.default_mflags;
964
        MutexInit(&gm->lock);
965
 
966
        magic = (size_t)(GetTimerTicks() ^ (size_t)0x55555555U);
967
        magic |= (size_t)8U;    /* ensure nonzero */
968
        magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
969
        mparams.magic = magic;
970
    }
971
 
972
    RELEASE_MALLOC_GLOBAL_LOCK();
973
    return 1;
974
}
975
 
976
/* -------------------------- mspace management -------------------------- */
977
 
978
/* Initialize top chunk and its size */
979
static void init_top(mstate m, mchunkptr p, size_t psize)
980
{
981
  /* Ensure alignment */
982
    size_t offset = align_offset(chunk2mem(p));
983
    p = (mchunkptr)((char*)p + offset);
984
    psize -= offset;
985
 
986
    m->top = p;
987
    m->topsize = psize;
988
    p->head = psize | PINUSE_BIT;
989
  /* set size of fake trailing chunk holding overhead space only once */
990
    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
991
    m->trim_check = mparams.trim_threshold; /* reset on each update */
992
}
993
 
994
/* Initialize bins for a new mstate that is otherwise zeroed out */
995
static void init_bins(mstate m)
996
{
997
  /* Establish circular links for smallbins */
998
    bindex_t i;
999
    for (i = 0; i < NSMALLBINS; ++i) {
1000
        sbinptr bin = smallbin_at(m,i);
1001
        bin->fd = bin->bk = bin;
1002
    }
1003
}
1004
 
1005
/* Allocate chunk and prepend remainder with chunk in successor base. */
1006
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
1007
                           size_t nb)
1008
{
1009
    mchunkptr p = align_as_chunk(newbase);
1010
    mchunkptr oldfirst = align_as_chunk(oldbase);
1011
    size_t psize = (char*)oldfirst - (char*)p;
1012
    mchunkptr q = chunk_plus_offset(p, nb);
1013
    size_t qsize = psize - nb;
1014
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1015
 
1016
    assert((char*)oldfirst > (char*)q);
1017
    assert(pinuse(oldfirst));
1018
    assert(qsize >= MIN_CHUNK_SIZE);
1019
 
1020
  /* consolidate remainder with first chunk of old base */
1021
    if (oldfirst == m->top) {
1022
        size_t tsize = m->topsize += qsize;
1023
        m->top = q;
1024
        q->head = tsize | PINUSE_BIT;
1025
        check_top_chunk(m, q);
1026
    }
1027
    else if (oldfirst == m->dv) {
1028
        size_t dsize = m->dvsize += qsize;
1029
        m->dv = q;
1030
        set_size_and_pinuse_of_free_chunk(q, dsize);
1031
    }
1032
    else {
1033
        if (!is_inuse(oldfirst)) {
1034
            size_t nsize = chunksize(oldfirst);
1035
            unlink_chunk(m, oldfirst, nsize);
1036
            oldfirst = chunk_plus_offset(oldfirst, nsize);
1037
            qsize += nsize;
1038
        }
1039
        set_free_with_pinuse(q, qsize, oldfirst);
1040
        insert_chunk(m, q, qsize);
1041
        check_free_chunk(m, q);
1042
    }
1043
 
1044
    check_malloced_chunk(m, chunk2mem(p), nb);
1045
    return chunk2mem(p);
1046
}
1047
 
1048
/* Add a segment to hold a new noncontiguous region */
1049
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped)
1050
{
1051
  /* Determine locations and sizes of segment, fenceposts, old top */
1052
    char* old_top = (char*)m->top;
1053
    msegmentptr oldsp = segment_holding(m, old_top);
1054
    char* old_end = oldsp->base + oldsp->size;
1055
    size_t ssize = pad_request(sizeof(struct malloc_segment));
1056
    char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
1057
    size_t offset = align_offset(chunk2mem(rawsp));
1058
    char* asp = rawsp + offset;
1059
    char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
1060
    mchunkptr sp = (mchunkptr)csp;
1061
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
1062
    mchunkptr tnext = chunk_plus_offset(sp, ssize);
1063
    mchunkptr p = tnext;
1064
    int nfences = 0;
1065
 
1066
  /* reset top to new space */
1067
    init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
1068
 
1069
  /* Set up segment record */
1070
    assert(is_aligned(ss));
1071
    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
1072
    *ss = m->seg; /* Push current record */
1073
    m->seg.base = tbase;
1074
    m->seg.size = tsize;
1075
    m->seg.sflags = mmapped;
1076
    m->seg.next = ss;
1077
 
1078
  /* Insert trailing fenceposts */
1079
    for (;;) {
1080
        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
1081
        p->head = FENCEPOST_HEAD;
1082
        ++nfences;
1083
        if ((char*)(&(nextp->head)) < old_end)
1084
            p = nextp;
1085
        else
1086
            break;
1087
    }
1088
    assert(nfences >= 2);
1089
 
1090
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1091
    if (csp != old_top) {
1092
        mchunkptr q = (mchunkptr)old_top;
1093
        size_t psize = csp - old_top;
1094
        mchunkptr tn = chunk_plus_offset(q, psize);
1095
        set_free_with_pinuse(q, psize, tn);
1096
        insert_chunk(m, q, psize);
1097
    }
1098
 
1099
  check_top_chunk(m, m->top);
1100
}
1101
 
1102
/* -------------------------- System allocation -------------------------- */
1103
 
1104
/* Get memory from system using MORECORE or MMAP */
1105
static void* sys_alloc(mstate m, size_t nb)
1106
{
1107
    char* tbase = CMFAIL;
1108
    size_t tsize = 0;
1109
    flag_t mmap_flag = 0;
1110
 
1111
    ensure_initialization();
1112
 
1113
  /* Directly map large chunks, but only if already initialized */
1114
    if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0)
1115
    {
1116
        void* mem = mmap_alloc(m, nb);
1117
        if (mem != 0)
1118
            return mem;
1119
    }
1120
 
1121
  /*
1122
    Try getting memory in any of three ways (in most-preferred to
1123
    least-preferred order):
1124
    1. A call to MORECORE that can normally contiguously extend memory.
1125
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
1126
       or main space is mmapped or a previous contiguous call failed)
1127
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
1128
       Note that under the default settings, if MORECORE is unable to
1129
       fulfill a request, and HAVE_MMAP is true, then mmap is
1130
       used as a noncontiguous system allocator. This is a useful backup
1131
       strategy for systems with holes in address spaces -- in this case
1132
       sbrk cannot contiguously expand the heap, but mmap may be able to
1133
       find space.
1134
    3. A call to MORECORE that cannot usually contiguously extend memory.
1135
       (disabled if not HAVE_MORECORE)
1136
 
1137
   In all cases, we need to request enough bytes from system to ensure
1138
   we can malloc nb bytes upon success, so pad with enough space for
1139
   top_foot, plus alignment-pad to make sure we don't lose bytes if
1140
   not on boundary, and round this up to a granularity unit.
1141
  */
1142
 
1143
    if (HAVE_MMAP && tbase == CMFAIL)   /* Try MMAP */
1144
    {
1145
        size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
1146
        if (rsize > nb)  /* Fail if wraps around zero */
1147
        {
1148
            char* mp = (char*)(CALL_MMAP(rsize));
1149
            if (mp != CMFAIL)
1150
            {
1151
                tbase = mp;
1152
                tsize = rsize;
1153
                mmap_flag = USE_MMAP_BIT;
1154
            }
1155
        }
1156
    }
1157
 
1158
 
1159
    if (tbase != CMFAIL)
1160
    {
1161
 
1162
        if ((m->footprint += tsize) > m->max_footprint)
1163
            m->max_footprint = m->footprint;
1164
 
1165
        if (!is_initialized(m))  /* first-time initialization */
1166
        {
1167
            if (m->least_addr == 0 || tbase < m->least_addr)
1168
                m->least_addr = tbase;
1169
            m->seg.base = tbase;
1170
             m->seg.size = tsize;
1171
            m->seg.sflags = mmap_flag;
1172
            m->magic = mparams.magic;
1173
            m->release_checks = MAX_RELEASE_CHECK_RATE;
1174
            init_bins(m);
1175
 
1176
            if (is_global(m))
1177
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
1178
            else
1179
            {
1180
        /* Offset top by embedded malloc_state */
1181
                mchunkptr mn = next_chunk(mem2chunk(m));
1182
                init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
1183
            }
1184
        }
1185
        else
1186
        {
1187
      /* Try to merge with an existing segment */
1188
            msegmentptr sp = &m->seg;
1189
      /* Only consider most recent segment if traversal suppressed */
1190
            while (sp != 0 && tbase != sp->base + sp->size)
1191
                sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
1192
            if (sp != 0 && !is_extern_segment(sp) &&
1193
               (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
1194
                segment_holds(sp, m->top))  /* append */
1195
            {
1196
                sp->size += tsize;
1197
                init_top(m, m->top, m->topsize + tsize);
1198
            }
1199
            else
1200
            {
1201
                if (tbase < m->least_addr)
1202
                    m->least_addr = tbase;
1203
                sp = &m->seg;
1204
                while (sp != 0 && sp->base != tbase + tsize)
1205
                    sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
1206
                if (sp != 0 &&
1207
                    !is_extern_segment(sp) &&
1208
                    (sp->sflags & USE_MMAP_BIT) == mmap_flag)
1209
                {
1210
                    char* oldbase = sp->base;
1211
                    sp->base = tbase;
1212
                    sp->size += tsize;
1213
                    return prepend_alloc(m, tbase, oldbase, nb);
1214
                }
1215
                else
1216
                    add_segment(m, tbase, tsize, mmap_flag);
1217
            }
1218
        }
1219
 
1220
        if (nb < m->topsize)  /* Allocate from new or extended top space */
1221
        {
1222
            size_t rsize = m->topsize -= nb;
1223
            mchunkptr p = m->top;
1224
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
1225
            r->head = rsize | PINUSE_BIT;
1226
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1227
            check_top_chunk(m, m->top);
1228
            check_malloced_chunk(m, chunk2mem(p), nb);
1229
            return chunk2mem(p);
1230
        }
1231
    }
1232
 
1233
//    MALLOC_FAILURE_ACTION;
1234
    return 0;
1235
}
1236
 
1237
 
1238
/* -----------------------  system deallocation -------------------------- */
1239
 
1240
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1241
static size_t release_unused_segments(mstate m)
1242
{
1243
    size_t released = 0;
1244
    int nsegs = 0;
1245
    msegmentptr pred = &m->seg;
1246
    msegmentptr sp = pred->next;
1247
    while (sp != 0)
1248
    {
1249
        char* base = sp->base;
1250
        size_t size = sp->size;
1251
        msegmentptr next = sp->next;
1252
        ++nsegs;
1253
        if (is_mmapped_segment(sp) && !is_extern_segment(sp))
1254
        {
1255
            mchunkptr p = align_as_chunk(base);
1256
            size_t psize = chunksize(p);
1257
      /* Can unmap if first chunk holds entire segment and not pinned */
1258
            if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE)
1259
            {
1260
                tchunkptr tp = (tchunkptr)p;
1261
                assert(segment_holds(sp, (char*)sp));
1262
                if (p == m->dv) {
1263
                    m->dv = 0;
1264
                    m->dvsize = 0;
1265
                }
1266
                else {
1267
                    unlink_large_chunk(m, tp);
1268
                }
1269
                if (CALL_MUNMAP(base, size) == 0)
1270
                {
1271
                    released += size;
1272
                    m->footprint -= size;
1273
          /* unlink obsoleted record */
1274
                    sp = pred;
1275
                    sp->next = next;
1276
                }
1277
                else { /* back out if cannot unmap */
1278
                    insert_large_chunk(m, tp, psize);
1279
                }
1280
            }
1281
        }
1282
        if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
1283
            break;
1284
        pred = sp;
1285
        sp = next;
1286
    }
1287
  /* Reset check counter */
1288
    m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
1289
                       nsegs : MAX_RELEASE_CHECK_RATE);
1290
    return released;
1291
}
1292
 
1293
static int sys_trim(mstate m, size_t pad)
1294
{
1295
    size_t released = 0;
1296
    ensure_initialization();
1297
    if (pad < MAX_REQUEST && is_initialized(m))
1298
    {
1299
        pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
1300
 
1301
        if (m->topsize > pad)
1302
        {
1303
      /* Shrink top space in granularity-size units, keeping at least one */
1304
            size_t unit = mparams.granularity;
1305
            size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
1306
                             SIZE_T_ONE) * unit;
1307
            msegmentptr sp = segment_holding(m, (char*)m->top);
1308
 
1309
            if (!is_extern_segment(sp))
1310
            {
1311
                if (is_mmapped_segment(sp))
1312
                {
1313
                    if (HAVE_MMAP &&
1314
                        sp->size >= extra &&
1315
                        !has_segment_link(m, sp))  /* can't shrink if pinned */
1316
                    {
1317
                        size_t newsize = sp->size - extra;
1318
            /* Prefer mremap, fall back to munmap */
1319
                        if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
1320
                            (CALL_MUNMAP(sp->base + newsize, extra) == 0))
1321
                        {
1322
                            released = extra;
1323
                        }
1324
                    }
1325
                }
1326
            }
1327
 
1328
            if (released != 0)
1329
            {
1330
                sp->size -= released;
1331
                m->footprint -= released;
1332
                init_top(m, m->top, m->topsize - released);
1333
                check_top_chunk(m, m->top);
1334
            }
1335
        }
1336
 
1337
    /* Unmap any unused mmapped segments */
1338
        if (HAVE_MMAP)
1339
            released += release_unused_segments(m);
1340
 
1341
    /* On failure, disable autotrim to avoid repeated failed future calls */
1342
        if (released == 0 && m->topsize > m->trim_check)
1343
        m->trim_check = MAX_SIZE_T;
1344
    }
1345
 
1346
    return (released != 0)? 1 : 0;
1347
}
1348
 
1349
 
1350
 
1351
/* ---------------------------- malloc support --------------------------- */
1352
 
1353
/* allocate a large request from the best fitting chunk in a treebin */
1354
static void* tmalloc_large(mstate m, size_t nb) {
1355
  tchunkptr v = 0;
1356
  size_t rsize = -nb; /* Unsigned negation */
1357
  tchunkptr t;
1358
  bindex_t idx;
1359
  compute_tree_index(nb, idx);
1360
  if ((t = *treebin_at(m, idx)) != 0) {
1361
    /* Traverse tree for this bin looking for node with size == nb */
1362
    size_t sizebits = nb << leftshift_for_tree_index(idx);
1363
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
1364
    for (;;) {
1365
      tchunkptr rt;
1366
      size_t trem = chunksize(t) - nb;
1367
      if (trem < rsize) {
1368
        v = t;
1369
        if ((rsize = trem) == 0)
1370
          break;
1371
      }
1372
      rt = t->child[1];
1373
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1374
      if (rt != 0 && rt != t)
1375
        rst = rt;
1376
      if (t == 0) {
1377
        t = rst; /* set t to least subtree holding sizes > nb */
1378
        break;
1379
      }
1380
      sizebits <<= 1;
1381
    }
1382
  }
1383
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
1384
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
1385
    if (leftbits != 0) {
1386
      bindex_t i;
1387
      binmap_t leastbit = least_bit(leftbits);
1388
      compute_bit2idx(leastbit, i);
1389
      t = *treebin_at(m, i);
1390
    }
1391
  }
1392
 
1393
  while (t != 0) { /* find smallest of tree or subtree */
1394
    size_t trem = chunksize(t) - nb;
1395
    if (trem < rsize) {
1396
      rsize = trem;
1397
      v = t;
1398
    }
1399
    t = leftmost_child(t);
1400
  }
1401
 
1402
  /*  If dv is a better fit, return 0 so malloc will use it */
1403
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
1404
    if (RTCHECK(ok_address(m, v))) { /* split */
1405
      mchunkptr r = chunk_plus_offset(v, nb);
1406
      assert(chunksize(v) == rsize + nb);
1407
      if (RTCHECK(ok_next(v, r))) {
1408
        unlink_large_chunk(m, v);
1409
        if (rsize < MIN_CHUNK_SIZE)
1410
          set_inuse_and_pinuse(m, v, (rsize + nb));
1411
        else {
1412
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1413
          set_size_and_pinuse_of_free_chunk(r, rsize);
1414
          insert_chunk(m, r, rsize);
1415
        }
1416
        return chunk2mem(v);
1417
      }
1418
    }
1419
    CORRUPTION_ERROR_ACTION(m);
1420
  }
1421
  return 0;
1422
}
1423
 
1424
/* allocate a small request from the best fitting chunk in a treebin */
1425
static void* tmalloc_small(mstate m, size_t nb)
1426
{
1427
    tchunkptr t, v;
1428
    size_t rsize;
1429
    bindex_t i;
1430
    binmap_t leastbit = least_bit(m->treemap);
1431
    compute_bit2idx(leastbit, i);
1432
    v = t = *treebin_at(m, i);
1433
    rsize = chunksize(t) - nb;
1434
 
1435
    while ((t = leftmost_child(t)) != 0) {
1436
        size_t trem = chunksize(t) - nb;
1437
        if (trem < rsize) {
1438
            rsize = trem;
1439
            v = t;
1440
        }
1441
    }
1442
 
1443
    if (RTCHECK(ok_address(m, v))) {
1444
        mchunkptr r = chunk_plus_offset(v, nb);
1445
        assert(chunksize(v) == rsize + nb);
1446
        if (RTCHECK(ok_next(v, r))) {
1447
            unlink_large_chunk(m, v);
1448
            if (rsize < MIN_CHUNK_SIZE)
1449
                set_inuse_and_pinuse(m, v, (rsize + nb));
1450
            else {
1451
                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1452
                set_size_and_pinuse_of_free_chunk(r, rsize);
1453
                replace_dv(m, r, rsize);
1454
            }
1455
            return chunk2mem(v);
1456
        }
1457
    }
1458
 
1459
    CORRUPTION_ERROR_ACTION(m);
1460
    return 0;
1461
}
1462
 
1463
/* --------------------------- memalign support -------------------------- */
1464
 
1465
static void* internal_memalign(mstate m, size_t alignment, size_t bytes)
1466
{
1467
    if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
1468
        return internal_malloc(m, bytes);
1469
    if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
1470
        alignment = MIN_CHUNK_SIZE;
1471
    if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
1472
        size_t a = MALLOC_ALIGNMENT << 1;
1473
        while (a < alignment) a <<= 1;
1474
        alignment = a;
1475
    }
1476
 
1477
    if (bytes >= MAX_REQUEST - alignment) {
1478
        if (m != 0)  { /* Test isn't needed but avoids compiler warning */
1479
//            MALLOC_FAILURE_ACTION;
1480
        }
1481
    }
1482
    else
1483
    {
1484
        size_t nb = request2size(bytes);
1485
        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
1486
        char* mem = (char*)internal_malloc(m, req);
1487
        if (mem != 0)
1488
        {
1489
            void* leader = 0;
1490
            void* trailer = 0;
1491
            mchunkptr p = mem2chunk(mem);
1492
 
1493
            PREACTION(m);
1494
 
1495
            if ((((size_t)(mem)) % alignment) != 0)  /* misaligned */
1496
            {
1497
        /*
1498
          Find an aligned spot inside chunk.  Since we need to give
1499
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
1500
          the first calculation places us at a spot with less than
1501
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
1502
          We've allocated enough total room so that this is always
1503
          possible.
1504
        */
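        /*
          Worked example (added for illustration; the numbers are assumed,
          not from the original text): with alignment == 0x100 and
          mem == 0x20010, rounding up via (mem + alignment - 1) & -alignment
          gives 0x20100.  If the leading gap in front of that spot were
          smaller than MIN_CHUNK_SIZE, the code below moves on to the next
          aligned address (0x20200); the extra alignment + MIN_CHUNK_SIZE
          bytes requested above guarantee that this still fits in the chunk.
        */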
                char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
                                                       alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
                char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
                             br : br+alignment;
                mchunkptr newp = (mchunkptr)pos;
                size_t leadsize = pos - (char*)(p);
                size_t newsize = chunksize(p) - leadsize;

                if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
                    newp->prev_foot = p->prev_foot + leadsize;
                    newp->head = newsize;
                }
                else { /* Otherwise, give back leader, use the rest */
                    set_inuse(m, newp, newsize);
                    set_inuse(m, p, leadsize);
                    leader = chunk2mem(p);
                }
                p = newp;
            }

      /* Give back spare room at the end */
            if (!is_mmapped(p))
            {
                size_t size = chunksize(p);
                if (size > nb + MIN_CHUNK_SIZE)
                {
                    size_t remainder_size = size - nb;
                    mchunkptr remainder = chunk_plus_offset(p, nb);
                    set_inuse(m, p, nb);
                    set_inuse(m, remainder, remainder_size);
                    trailer = chunk2mem(remainder);
                }
            }

            assert (chunksize(p) >= nb);
            assert((((size_t)(chunk2mem(p))) % alignment) == 0);
            check_inuse_chunk(m, p);
            POSTACTION(m);
            if (leader != 0) {
                internal_free(m, leader);
            }
            if (trailer != 0) {
                internal_free(m, trailer);
            }
            return chunk2mem(p);
        }
    }
    return 0;
}

void* memalign(size_t alignment, size_t bytes)
{
    return internal_memalign(gm, alignment, bytes);
}
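
/*
  Usage sketch (illustrative only, not part of the original sources): the
  alignment and size below are arbitrary examples.  A block obtained from
  memalign is released with the ordinary free().

      void* buf = memalign(4096, 2 * 4096);   // page-aligned 8 KiB block
      if (buf != 0) {
          // ((size_t)buf % 4096) == 0 is guaranteed here
          free(buf);
      }
*/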


void* malloc(size_t bytes)
{
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it.
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it.

     The ugly gotos here ensure that postaction occurs along all paths.
  */
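  /*
     Illustrative trace (added here, assuming a 32-bit build; not part of
     the original text): a request for 20 bytes is padded by pad_request to
     a 24-byte chunk and mapped to a smallbin index.  If that bin or the
     next one is nonempty, the chunk is taken remainderlessly (step 1);
     otherwise the dv chunk, a larger smallbin, the treebins, the top chunk
     and finally sys_alloc are tried in the order listed above.
  */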

    ensure_initialization(); /* initialize in sys_alloc if not using locks */

    PREACTION(gm);
    {
        void* mem;
        size_t nb;

        if (bytes <= MAX_SMALL_REQUEST)
        {
            bindex_t idx;
            binmap_t smallbits;
            nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
            idx = small_index(nb);
            smallbits = gm->smallmap >> idx;

            if ((smallbits & 0x3U) != 0) /* Remainderless fit to a smallbin. */
            {
                mchunkptr b, p;
                idx += ~smallbits & 1;       /* Uses next bin if idx empty */
                b = smallbin_at(gm, idx);
                p = b->fd;
                assert(chunksize(p) == small_index2size(idx));
                unlink_first_small_chunk(gm, b, p, idx);
                set_inuse_and_pinuse(gm, p, small_index2size(idx));
                mem = chunk2mem(p);
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
            else if (nb > gm->dvsize)
            {
                if (smallbits != 0) /* Use chunk in next nonempty smallbin */
                {
                    mchunkptr b, p, r;
                    size_t rsize;
                    bindex_t i;
                    binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
                    binmap_t leastbit = least_bit(leftbits);
                    compute_bit2idx(leastbit, i);
                    b = smallbin_at(gm, i);
                    p = b->fd;
                    assert(chunksize(p) == small_index2size(i));
                    unlink_first_small_chunk(gm, b, p, i);
                    rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
                    if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                        set_inuse_and_pinuse(gm, p, small_index2size(i));
                    else
                    {
                        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
                        r = chunk_plus_offset(p, nb);
                        set_size_and_pinuse_of_free_chunk(r, rsize);
                        replace_dv(gm, r, rsize);
                    }
                    mem = chunk2mem(p);
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
                else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0)
                {
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
            }
        }
        else if (bytes >= MAX_REQUEST)
            nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
        else
        {
            nb = pad_request(bytes);
            if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0)
            {
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
        }

        if (nb <= gm->dvsize) {
            size_t rsize = gm->dvsize - nb;
            mchunkptr p = gm->dv;
            if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
                mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
                gm->dvsize = rsize;
                set_size_and_pinuse_of_free_chunk(r, rsize);
                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            }
            else { /* exhaust dv */
                size_t dvs = gm->dvsize;
                gm->dvsize = 0;
                gm->dv = 0;
                set_inuse_and_pinuse(gm, p, dvs);
            }
            mem = chunk2mem(p);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }
        else if (nb < gm->topsize) { /* Split top */
            size_t rsize = gm->topsize -= nb;
            mchunkptr p = gm->top;
            mchunkptr r = gm->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            mem = chunk2mem(p);
            check_top_chunk(gm, gm->top);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }

        mem = sys_alloc(gm, nb);

  postaction:
        POSTACTION(gm);
        return mem;
    }

    return 0;
}


void free(void* mem)
{
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */
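  /*
     Illustrative note (added, not in the original sources): if the chunk
     being freed has a free predecessor and its successor is the top chunk,
     it is first merged backward with the predecessor and the combined chunk
     is then absorbed into top, possibly triggering sys_trim; no bin
     insertion happens in that case.
  */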
1713
 
1714
    if (mem != 0)
1715
    {
1716
        mchunkptr p  = mem2chunk(mem);
1717
 
1718
#define fm gm
1719
 
1720
        PREACTION(fm);
1721
        {
1722
            check_inuse_chunk(fm, p);
1723
            if (RTCHECK(ok_address(fm, p) && ok_inuse(p)))
1724
            {
1725
                size_t psize = chunksize(p);
1726
                mchunkptr next = chunk_plus_offset(p, psize);
1727
                if (!pinuse(p))
1728
                {
1729
                    size_t prevsize = p->prev_foot;
1730
                    if (is_mmapped(p))
1731
                    {
1732
                        psize += prevsize + MMAP_FOOT_PAD;
1733
                        if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                            fm->footprint -= psize;
                        goto postaction;
                    }
                    else
                    {
                        mchunkptr prev = chunk_minus_offset(p, prevsize);
                        psize += prevsize;
                        p = prev;
                        if (RTCHECK(ok_address(fm, prev)))  /* consolidate backward */
                        {
                            if (p != fm->dv)
                            {
                                unlink_chunk(fm, p, prevsize);
                            }
                            else if ((next->head & INUSE_BITS) == INUSE_BITS)
                            {
                                fm->dvsize = psize;
                                set_free_with_pinuse(p, psize, next);
                                goto postaction;
                            }
                        }
                        else
                            goto erroraction;
                    }
                }

                if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
                {
                    if (!cinuse(next))    /* consolidate forward */
                    {
                        if (next == fm->top)
                        {
                            size_t tsize = fm->topsize += psize;
                            fm->top = p;
                            p->head = tsize | PINUSE_BIT;
                            if (p == fm->dv)
                            {
                                fm->dv = 0;
                                fm->dvsize = 0;
                            }
                            if (should_trim(fm, tsize))
                                sys_trim(fm, 0);
                            goto postaction;
                        }
                        else if (next == fm->dv)
                        {
                            size_t dsize = fm->dvsize += psize;
                            fm->dv = p;
                            set_size_and_pinuse_of_free_chunk(p, dsize);
                            goto postaction;
                        }
                        else
                        {
                            size_t nsize = chunksize(next);
                            psize += nsize;
                            unlink_chunk(fm, next, nsize);
                            set_size_and_pinuse_of_free_chunk(p, psize);
                            if (p == fm->dv)
                            {
                                fm->dvsize = psize;
                                goto postaction;
                            }
                        }
                    }
                    else
                        set_free_with_pinuse(p, psize, next);

                    if (is_small(psize))
                    {
                        insert_small_chunk(fm, p, psize);
                        check_free_chunk(fm, p);
                    }
                    else
                    {
                        tchunkptr tp = (tchunkptr)p;
                        insert_large_chunk(fm, tp, psize);
                        check_free_chunk(fm, p);
                        if (--fm->release_checks == 0)
                            release_unused_segments(fm);
                    }
                    goto postaction;
                }
            }
    erroraction:
        USAGE_ERROR_ACTION(fm, p);
    postaction:
        POSTACTION(fm);
        }
    }
#undef fm
}
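
/*
  Usage sketch (illustrative only, not part of the original sources): the
  allocation size below is an arbitrary example.

      char* s = (char*)malloc(64);     // may return 0 on exhaustion
      if (s != 0) {
          s[0] = '\0';                 // use the block ...
          free(s);                     // ... then return it exactly once
      }
*/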
 