/*
2
  This is a version (aka dlmalloc) of malloc/free/realloc written by
3
  Doug Lea and released to the public domain, as explained at
4
  http://creativecommons.org/licenses/publicdomain.  Send questions,
5
  comments, complaints, performance data, etc to dl@cs.oswego.edu
6
 
7
* Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)
8
 
9
   Note: There may be an updated version of this malloc obtainable at
10
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
11
         Check before installing!
12
 
13
* Quickstart
14
 
15
  This library is all in one file to simplify the most common usage:
16
  ftp it, compile it (-O3), and link it into another program. All of
17
  the compile-time options default to reasonable values for use on
18
  most platforms.  You might later want to step through various
19
  compile-time and dynamic tuning options.
20
 
21
  For convenience, an include file for code using this malloc is at:
22
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
23
  You don't really need this .h file unless you call functions not
24
  defined in your system include files.  The .h file contains only the
25
  excerpts from this file needed for using this malloc on ANSI C/C++
26
  systems, so long as you haven't changed compile-time options about
27
  naming and tuning parameters.  If you do, then you can create your
28
  own malloc.h that does include all settings by cutting at the point
29
  indicated below. Note that you may already by default be using a C
30
  library containing a malloc that is based on some version of this
31
  malloc (for example in linux). You might still want to use the one
32
  in this file to customize settings or to avoid overheads associated
33
  with library versions.
34
 
35
*/
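 
/*
  A minimal usage sketch (illustrative only; it exercises the entry points
  this file provides -- malloc, memalign and free -- under the default
  compile-time settings described above):
 
      void *p = malloc(100);         // small request, served from a smallbin
      void *q = memalign(64, 256);   // block aligned to a 64-byte boundary
      free(q);
      free(p);
*/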
36
 
37
#include 
38
#include 
39
#include 
40
 
41
struct malloc_chunk {
42
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
43
  size_t               head;       /* Size and inuse bits. */
44
  struct malloc_chunk* fd;         /* double links -- used only if free. */
45
  struct malloc_chunk* bk;
46
};
47
 
48
typedef struct malloc_chunk  mchunk;
49
typedef struct malloc_chunk* mchunkptr;
50
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
51
typedef unsigned int bindex_t;         /* Described below */
52
typedef unsigned int binmap_t;         /* Described below */
53
typedef unsigned int flag_t;           /* The type of various bit flag sets */
54
 
55
 
56
 
57
/* ------------------- size_t and alignment properties -------------------- */
58
 
59
/* The maximum possible size_t value has all bits set */
60
#define MAX_SIZE_T           (~(size_t)0)
61
 
62
/* The byte and bit size of a size_t */
63
#define SIZE_T_SIZE             (sizeof(size_t))
64
#define SIZE_T_BITSIZE          (sizeof(size_t) << 3)
65
 
66
/* Some constants coerced to size_t */
67
/* Annoying but necessary to avoid errors on some platforms */
68
#define SIZE_T_ZERO             ((size_t)0)
69
#define SIZE_T_ONE              ((size_t)1)
70
#define SIZE_T_TWO              ((size_t)2)
71
#define SIZE_T_FOUR             ((size_t)4)
72
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
73
#define FOUR_SIZE_T_SIZES       (SIZE_T_SIZE<<2)
74
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
75
#define HALF_MAX_SIZE_T         (MAX_SIZE_T / 2U)
76
 
77
#define USE_LOCK_BIT            (2U)
78
#define USE_MMAP_BIT            (SIZE_T_ONE)
79
#define USE_NONCONTIGUOUS_BIT   (4U)
80
 
81
/* segment bit set in create_mspace_with_base */
82
#define EXTERN_BIT              (8U)
83
 
84
#define HAVE_MMAP               1
85
#define CALL_MMAP(s)            MMAP_DEFAULT(s)
86
#define CALL_MUNMAP(a, s)       MUNMAP_DEFAULT((a), (s))
87
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
88
#define MAX_RELEASE_CHECK_RATE  4095
89
#define NO_SEGMENT_TRAVERSAL    1
90
#define MALLOC_ALIGNMENT        ((size_t)8U)
91
#define CHUNK_OVERHEAD          (SIZE_T_SIZE)
3039 serge 92
#define DEFAULT_GRANULARITY     ((size_t)128U * (size_t)1024U)
93
#define DEFAULT_MMAP_THRESHOLD  ((size_t)512U * (size_t)1024U)
94
#define DEFAULT_TRIM_THRESHOLD  ((size_t)1024U * (size_t)1024U)
1616 serge 95
 
96
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
97
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
98
 
99
/* True if address a has acceptable alignment */
100
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
101
 
102
/* the number of bytes to offset an address to align it */
103
#define align_offset(A)\
104
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
105
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
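 
/* Worked example (illustrative, MALLOC_ALIGNMENT == 8): for A == 0x1005,
   (size_t)A & CHUNK_ALIGN_MASK is 5, so align_offset(A) is (8 - 5) & 7 == 3
   and A + 3 == 0x1008 is the next 8-byte-aligned address; an already
   aligned address yields an offset of 0. */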
106
 
107
 
108
#define MFAIL                ((void*)(MAX_SIZE_T))
109
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
110
 
111
/* For sys_alloc, enough padding to ensure we can malloc the request on success */
112
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
113
 
114
/*
115
  TOP_FOOT_SIZE is padding at the end of a segment, including space
116
  that may be needed to place segment records and fenceposts when new
117
  noncontiguous segments are added.
118
*/
119
#define TOP_FOOT_SIZE\
120
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
121
 
122
/* ------------------- Chunks sizes and alignments ----------------------- */
123
 
124
#define MCHUNK_SIZE         (sizeof(mchunk))
125
 
126
/* MMapped chunks need a second word of overhead ... */
127
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
128
/* ... and additional padding for fake next-chunk at foot */
129
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
130
 
131
/* The smallest size we can malloc is an aligned minimal chunk */
132
#define MIN_CHUNK_SIZE\
133
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
134
 
135
/* conversion from malloc headers to user pointers, and back */
136
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
137
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
138
/* chunk associated with aligned address A */
139
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
140
 
141
/* Bounds on request (not chunk) sizes. */
142
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
143
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
144
 
145
/* pad request bytes into a usable size */
146
#define pad_request(req) \
147
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
148
 
149
/* pad request, checking for minimum (but not maximum) */
150
#define request2size(req) \
151
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
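 
/* Worked example (illustrative, 32-bit size_t, MALLOC_ALIGNMENT == 8):
   CHUNK_OVERHEAD is 4 and CHUNK_ALIGN_MASK is 7, so
   request2size(13) == (13 + 4 + 7) & ~7 == 24, i.e. a 13-byte request
   occupies a 24-byte chunk.  A tiny request such as 8 bytes falls below
   MIN_REQUEST and is rounded up to MIN_CHUNK_SIZE (16 bytes here). */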
152
 
153
/* ------------------ Operations on head and foot fields ----------------- */
154
 
155
/*
156
  The head field of a chunk is or'ed with PINUSE_BIT when previous
157
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
158
  use, unless mmapped, in which case both bits are cleared.
159
 
160
  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
161
*/
162
 
163
#define PINUSE_BIT          (SIZE_T_ONE)
164
#define CINUSE_BIT          (SIZE_T_TWO)
165
#define FLAG4_BIT           (SIZE_T_FOUR)
166
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
167
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
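 
/* Worked example (illustrative): a head word of 0x33, i.e.
   48 | CINUSE_BIT | PINUSE_BIT, describes a 48-byte chunk that is in use
   and whose lower neighbour is also in use; chunksize() masks the flag
   bits back off: 0x33 & ~FLAG_BITS == 48. */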
168
 
169
/* Head value for fenceposts */
170
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
171
 
172
/* extraction of fields from head words */
173
#define cinuse(p)           ((p)->head & CINUSE_BIT)
174
#define pinuse(p)           ((p)->head & PINUSE_BIT)
175
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
176
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
177
 
178
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
179
 
180
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
181
 
182
/* Treat space at ptr +/- offset as a chunk */
183
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
184
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
185
 
186
/* Ptr to next or previous physical malloc_chunk. */
187
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
188
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
189
 
190
/* extract next chunk's pinuse bit */
191
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
192
 
193
/* Set size, pinuse bit, and foot */
194
#define set_size_and_pinuse_of_free_chunk(p, s)\
195
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
196
 
197
/* Set size, pinuse bit, foot, and clear next pinuse */
198
#define set_free_with_pinuse(p, s, n)\
199
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
200
 
201
/* Get the internal overhead associated with chunk p */
202
#define overhead_for(p)\
203
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
204
 
205
 
206
struct malloc_tree_chunk {
207
  /* The first four fields must be compatible with malloc_chunk */
208
  size_t                    prev_foot;
209
  size_t                    head;
210
  struct malloc_tree_chunk* fd;
211
  struct malloc_tree_chunk* bk;
212
 
213
  struct malloc_tree_chunk* child[2];
214
  struct malloc_tree_chunk* parent;
215
  bindex_t                  index;
216
};
217
 
218
typedef struct malloc_tree_chunk  tchunk;
219
typedef struct malloc_tree_chunk* tchunkptr;
220
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
221
 
222
/* A little helper macro for trees */
223
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
224
 
225
 
226
struct malloc_segment {
227
  char*        base;             /* base address */
228
  size_t       size;             /* allocated size */
229
  struct malloc_segment* next;   /* ptr to next segment */
230
  flag_t       sflags;           /* mmap and extern flag */
231
};
232
 
233
#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
234
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
235
 
236
typedef struct malloc_segment  msegment;
237
typedef struct malloc_segment* msegmentptr;
238
 
239
/* ---------------------------- malloc_state ----------------------------- */
240
 
241
/*
242
   A malloc_state holds all of the bookkeeping for a space.
243
   The main fields are:
244
 
245
  Top
246
    The topmost chunk of the currently active segment. Its size is
247
    cached in topsize.  The actual size of topmost space is
248
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
249
    fenceposts and segment records if necessary when getting more
250
    space from the system.  The size at which to autotrim top is
251
    cached from mparams in trim_check, except that it is disabled if
252
    an autotrim fails.
253
 
254
  Designated victim (dv)
255
    This is the preferred chunk for servicing small requests that
256
    don't have exact fits.  It is normally the chunk split off most
257
    recently to service another small request.  Its size is cached in
258
    dvsize. The link fields of this chunk are not maintained since it
259
    is not kept in a bin.
260
 
261
  SmallBins
262
    An array of bin headers for free chunks.  These bins hold chunks
263
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
264
    chunks of all the same size, spaced 8 bytes apart.  To simplify
265
    use in double-linked lists, each bin header acts as a malloc_chunk
266
    pointing to the real first node, if it exists (else pointing to
267
    itself).  This avoids special-casing for headers.  But to avoid
268
    waste, we allocate only the fd/bk pointers of bins, and then use
269
    repositioning tricks to treat these as the fields of a chunk.
270
 
271
  TreeBins
272
    Treebins are pointers to the roots of trees holding a range of
273
    sizes. There are 2 equally spaced treebins for each power of two
274
    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
275
    larger.
276
 
277
  Bin maps
278
    There is one bit map for small bins ("smallmap") and one for
279
    treebins ("treemap).  Each bin sets its bit when non-empty, and
280
    clears the bit when empty.  Bit operations are then used to avoid
281
    bin-by-bin searching -- nearly all "search" is done without ever
282
    looking at bins that won't be selected.  The bit maps
283
    conservatively use 32 bits per map word, even on a 64-bit system.
284
    For a good description of some of the bit-based techniques used
285
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
286
    supplement at http://hackersdelight.org/). Many of these are
287
    intended to reduce the branchiness of paths through malloc etc, as
288
    well as to reduce the number of memory locations read or written.
289
 
290
  Segments
291
    A list of segments headed by an embedded malloc_segment record
292
    representing the initial space.
293
 
294
  Address check support
295
    The least_addr field is the least address ever obtained from
296
    MORECORE or MMAP. Attempted frees and reallocs of any address less
297
    than this are trapped (unless INSECURE is defined).
298
 
299
  Magic tag
300
    A cross-check field that should always hold same value as mparams.magic.
301
 
302
  Flags
303
    Bits recording whether to use MMAP, locks, or contiguous MORECORE
304
 
305
  Statistics
306
    Each space keeps track of current and maximum system memory
307
    obtained via MORECORE or MMAP.
308
 
309
  Trim support
310
    Fields holding the amount of unused topmost memory that should trigger
311
    trimming, and a counter to force periodic scanning to release unused
312
    non-topmost segments.
313
 
314
  Locking
315
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
316
    around every public call using this mspace.
317
 
318
  Extension support
319
    A void* pointer and a size_t field that can be used to help implement
320
    extensions to this malloc.
321
*/
322
 
323
/* Bin types, widths and sizes */
324
#define NSMALLBINS        (32U)
325
#define NTREEBINS         (32U)
326
#define SMALLBIN_SHIFT    (3U)
327
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
328
#define TREEBIN_SHIFT     (8U)
329
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
330
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
331
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
332
 
333
struct malloc_state {
334
  binmap_t   smallmap;
335
  binmap_t   treemap;
336
  size_t     dvsize;
337
  size_t     topsize;
338
  char*      least_addr;
339
  mchunkptr  dv;
340
  mchunkptr  top;
341
  size_t     trim_check;
342
  size_t     release_checks;
343
  size_t     magic;
344
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
345
  tbinptr    treebins[NTREEBINS];
346
  size_t     footprint;
347
  size_t     max_footprint;
348
  flag_t     mflags;
349
  struct mutex  lock;     /* locate lock among fields that rarely change */
350
  msegment   seg;
351
  void*      extp;      /* Unused but available for extensions */
352
  size_t     exts;
353
};
354
 
355
typedef struct malloc_state*    mstate;
356
 
357
/* ------------- Global malloc_state and malloc_params ------------------- */
358
 
359
/*
360
  malloc_params holds global properties, including those that can be
361
  dynamically set using mallopt. There is a single instance, mparams,
362
  initialized in init_mparams. Note that the non-zeroness of "magic"
363
  also serves as an initialization flag.
364
*/
365
 
366
struct malloc_params
367
{
368
    volatile size_t  magic;
369
    size_t  page_size;
370
    size_t  granularity;
371
    size_t  mmap_threshold;
372
    size_t  trim_threshold;
373
    flag_t  default_mflags;
374
};
375
 
376
static struct malloc_params mparams;
377
 
378
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
379
 
380
static struct malloc_state _gm_;
381
#define gm                 (&_gm_)
382
#define is_global(M)       ((M) == &_gm_)
383
 
384
#define is_initialized(M)  ((M)->top != 0)
385
 
386
 
387
//struct mutex malloc_global_mutex;
388
 
389
static DEFINE_MUTEX(malloc_global_mutex);
390
 
391
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  MutexLock(&malloc_global_mutex);
392
#define RELEASE_MALLOC_GLOBAL_LOCK()  MutexUnlock(&malloc_global_mutex);
393
 
394
#define PREACTION(M)  ( MutexLock(&(M)->lock))
395
#define POSTACTION(M) { MutexUnlock(&(M)->lock); }
396
 
397
 
398
/* ---------------------------- Indexing Bins ---------------------------- */
399
 
400
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
401
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
402
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
403
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
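 
/* Worked example (illustrative): a free chunk of 40 bytes is "small"
   (40 >> SMALLBIN_SHIFT == 5 < NSMALLBINS), so it is kept in smallbin
   index 5, and small_index2size(5) == 40 -- every chunk in a given
   smallbin has exactly the same size. */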
404
 
405
/* addressing by index. See above about smallbin repositioning */
406
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
407
#define treebin_at(M,i)     (&((M)->treebins[i]))
408
 
409
 
410
#define compute_tree_index(S, I)\
411
{\
412
  unsigned int X = S >> TREEBIN_SHIFT;\
413
  if (X == 0)\
414
    I = 0;\
415
  else if (X > 0xFFFF)\
416
    I = NTREEBINS-1;\
417
  else {\
418
    unsigned int K;\
419
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
420
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
421
  }\
422
}
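 
/* Worked example (illustrative): for S == 1536, X == 1536 >> 8 == 6 and
   bsr yields K == 2, so I == (2 << 1) + ((1536 >> 9) & 1) == 5;
   minsize_for_tree_index(5) below is (1 << 10) | (1 << 9) == 1536, i.e.
   the chunk lands in the treebin whose size range starts at its own size. */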
423
 
424
/* Bit representing maximum resolved size in a treebin at i */
425
#define bit_for_tree_index(i) \
426
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
427
 
428
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
429
#define leftshift_for_tree_index(i) \
430
   ((i == NTREEBINS-1)? 0 : \
431
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
432
 
433
/* The size of the smallest chunk held in bin with index i */
434
#define minsize_for_tree_index(i) \
435
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
436
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
437
 
438
 
439
/* ------------------------ Operations on bin maps ----------------------- */
440
 
441
/* bit corresponding to given index */
442
#define idx2bit(i)              ((binmap_t)(1) << (i))
443
 
444
/* Mark/Clear bits with given index */
445
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
446
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
447
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
448
 
449
#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
450
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
451
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
452
 
453
/* isolate the least set bit of a bitmap */
454
#define least_bit(x)         ((x) & -(x))
455
 
456
/* mask with all bits to left of least bit of x on */
457
#define left_bits(x)         ((x<<1) | -(x<<1))
458
 
459
/* mask with all bits to left of or equal to least bit of x on */
460
#define same_or_left_bits(x) ((x) | -(x))
461
 
462
 
463
/* index corresponding to given bit. Use x86 asm if possible */
464
 
465
#define compute_bit2idx(X, I)\
466
{\
467
  unsigned int J;\
468
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
469
  I = (bindex_t)J;\
470
}
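 
/* Worked example (illustrative): if smallmap == 0x28 (bins 3 and 5
   non-empty), least_bit(0x28) == 0x08 and compute_bit2idx maps that bit
   back to index 3, so the smallest eligible non-empty bin is found
   without scanning bin by bin. */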
471
 
472
 
473
#define mark_inuse_foot(M,p,s)
474
 
475
/* Get/set size at footer */
476
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
477
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
478
 
479
/* Macros for setting head/foot of non-mmapped chunks */
480
 
481
/* Set cinuse bit and pinuse bit of next chunk */
482
#define set_inuse(M,p,s)\
483
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
484
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
485
 
486
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
487
#define set_inuse_and_pinuse(M,p,s)\
488
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
489
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
490
 
491
/* Set size, cinuse and pinuse bit of this chunk */
492
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
493
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
494
 
495
 
496
#define assert(x)
497
#define RTCHECK(e)  __builtin_expect(e, 1)
498
 
499
#define check_free_chunk(M,P)
500
#define check_inuse_chunk(M,P)
501
#define check_malloced_chunk(M,P,N)
502
#define check_mmapped_chunk(M,P)
503
#define check_malloc_state(M)
504
#define check_top_chunk(M,P)
505
 
506
/* Check if address a is at least as high as any from MORECORE or MMAP */
507
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
508
/* Check if address of next chunk n is higher than base chunk p */
509
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
510
/* Check if p has inuse status */
511
#define ok_inuse(p)     is_inuse(p)
512
/* Check if p has its pinuse bit on */
513
#define ok_pinuse(p)     pinuse(p)
514
 
515
#define CORRUPTION_ERROR_ACTION(m)                             \
516
        do {                                                   \
517
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
518
            while(1)                                           \
519
            {                                                  \
520
                delay(100);                                    \
521
            }                                                  \
522
        }while(0)                                              \
523
 
524
 
525
#define USAGE_ERROR_ACTION(m, p)                               \
526
        do {                                                   \
527
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
528
            while(1)                                           \
529
            {                                                  \
530
                delay(100);                                    \
531
            }                                                  \
532
        }while(0)                                              \
533
 
534
/* ----------------------- Operations on smallbins ----------------------- */
535
 
536
/*
537
  Various forms of linking and unlinking are defined as macros.  Even
538
  the ones for trees, which are very long but have very short typical
539
  paths.  This is ugly but reduces reliance on inlining support of
540
  compilers.
541
*/
542
 
543
/* Link a free chunk into a smallbin  */
544
#define insert_small_chunk(M, P, S) {\
545
  bindex_t I  = small_index(S);\
546
  mchunkptr B = smallbin_at(M, I);\
547
  mchunkptr F = B;\
548
  assert(S >= MIN_CHUNK_SIZE);\
549
  if (!smallmap_is_marked(M, I))\
550
    mark_smallmap(M, I);\
551
  else if (RTCHECK(ok_address(M, B->fd)))\
552
    F = B->fd;\
553
  else {\
554
    CORRUPTION_ERROR_ACTION(M);\
555
  }\
556
  B->fd = P;\
557
  F->bk = P;\
558
  P->fd = F;\
559
  P->bk = B;\
560
}
561
 
562
/* Unlink a chunk from a smallbin  */
563
#define unlink_small_chunk(M, P, S) {\
564
  mchunkptr F = P->fd;\
565
  mchunkptr B = P->bk;\
566
  bindex_t I = small_index(S);\
567
  assert(P != B);\
568
  assert(P != F);\
569
  assert(chunksize(P) == small_index2size(I));\
570
  if (F == B)\
571
    clear_smallmap(M, I);\
572
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
573
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
574
    F->bk = B;\
575
    B->fd = F;\
576
  }\
577
  else {\
578
    CORRUPTION_ERROR_ACTION(M);\
579
  }\
580
}
581
 
582
/* Unlink the first chunk from a smallbin */
583
#define unlink_first_small_chunk(M, B, P, I) {\
584
  mchunkptr F = P->fd;\
585
  assert(P != B);\
586
  assert(P != F);\
587
  assert(chunksize(P) == small_index2size(I));\
588
  if (B == F)\
589
    clear_smallmap(M, I);\
590
  else if (RTCHECK(ok_address(M, F))) {\
591
    B->fd = F;\
592
    F->bk = B;\
593
  }\
594
  else {\
595
    CORRUPTION_ERROR_ACTION(M);\
596
  }\
597
}
598
 
599
/* Replace dv node, binning the old one */
600
/* Used only when dvsize known to be small */
601
#define replace_dv(M, P, S) {\
602
  size_t DVS = M->dvsize;\
603
  if (DVS != 0) {\
604
    mchunkptr DV = M->dv;\
605
    assert(is_small(DVS));\
606
    insert_small_chunk(M, DV, DVS);\
607
  }\
608
  M->dvsize = S;\
609
  M->dv = P;\
610
}
611
 
612
 
613
/* ------------------------- Operations on trees ------------------------- */
614
 
615
/* Insert chunk into tree */
616
#define insert_large_chunk(M, X, S) {\
617
  tbinptr* H;\
618
  bindex_t I;\
619
  compute_tree_index(S, I);\
620
  H = treebin_at(M, I);\
621
  X->index = I;\
622
  X->child[0] = X->child[1] = 0;\
623
  if (!treemap_is_marked(M, I)) {\
624
    mark_treemap(M, I);\
625
    *H = X;\
626
    X->parent = (tchunkptr)H;\
627
    X->fd = X->bk = X;\
628
  }\
629
  else {\
630
    tchunkptr T = *H;\
631
    size_t K = S << leftshift_for_tree_index(I);\
632
    for (;;) {\
633
      if (chunksize(T) != S) {\
634
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
635
        K <<= 1;\
636
        if (*C != 0)\
637
          T = *C;\
638
        else if (RTCHECK(ok_address(M, C))) {\
639
          *C = X;\
640
          X->parent = T;\
641
          X->fd = X->bk = X;\
642
          break;\
643
        }\
644
        else {\
645
          CORRUPTION_ERROR_ACTION(M);\
646
          break;\
647
        }\
648
      }\
649
      else {\
650
        tchunkptr F = T->fd;\
651
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
652
          T->fd = F->bk = X;\
653
          X->fd = F;\
654
          X->bk = T;\
655
          X->parent = 0;\
656
          break;\
657
        }\
658
        else {\
659
          CORRUPTION_ERROR_ACTION(M);\
660
          break;\
661
        }\
662
      }\
663
    }\
664
  }\
665
}
666
 
667
/*
668
  Unlink steps:
669
 
670
  1. If x is a chained node, unlink it from its same-sized fd/bk links
671
     and choose its bk node as its replacement.
672
  2. If x was the last node of its size, but not a leaf node, it must
673
     be replaced with a leaf node (not merely one with an open left or
674
     right), to make sure that lefts and rights of descendents
675
     correspond properly to bit masks.  We use the rightmost descendent
676
     of x.  We could use any other leaf, but this is easy to locate and
677
     tends to counteract removal of leftmosts elsewhere, and so keeps
678
     paths shorter than minimally guaranteed.  This doesn't loop much
679
     because on average a node in a tree is near the bottom.
680
  3. If x is the base of a chain (i.e., has parent links) relink
681
     x's parent and children to x's replacement (or null if none).
682
*/
683
 
684
#define unlink_large_chunk(M, X) {\
685
  tchunkptr XP = X->parent;\
686
  tchunkptr R;\
687
  if (X->bk != X) {\
688
    tchunkptr F = X->fd;\
689
    R = X->bk;\
690
    if (RTCHECK(ok_address(M, F))) {\
691
      F->bk = R;\
692
      R->fd = F;\
693
    }\
694
    else {\
695
      CORRUPTION_ERROR_ACTION(M);\
696
    }\
697
  }\
698
  else {\
699
    tchunkptr* RP;\
700
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
701
        ((R = *(RP = &(X->child[0]))) != 0)) {\
702
      tchunkptr* CP;\
703
      while ((*(CP = &(R->child[1])) != 0) ||\
704
             (*(CP = &(R->child[0])) != 0)) {\
705
        R = *(RP = CP);\
706
      }\
707
      if (RTCHECK(ok_address(M, RP)))\
708
        *RP = 0;\
709
      else {\
710
        CORRUPTION_ERROR_ACTION(M);\
711
      }\
712
    }\
713
  }\
714
  if (XP != 0) {\
715
    tbinptr* H = treebin_at(M, X->index);\
716
    if (X == *H) {\
717
      if ((*H = R) == 0) \
718
        clear_treemap(M, X->index);\
719
    }\
720
    else if (RTCHECK(ok_address(M, XP))) {\
721
      if (XP->child[0] == X) \
722
        XP->child[0] = R;\
723
      else \
724
        XP->child[1] = R;\
725
    }\
726
    else\
727
      CORRUPTION_ERROR_ACTION(M);\
728
    if (R != 0) {\
729
      if (RTCHECK(ok_address(M, R))) {\
730
        tchunkptr C0, C1;\
731
        R->parent = XP;\
732
        if ((C0 = X->child[0]) != 0) {\
733
          if (RTCHECK(ok_address(M, C0))) {\
734
            R->child[0] = C0;\
735
            C0->parent = R;\
736
          }\
737
          else\
738
            CORRUPTION_ERROR_ACTION(M);\
739
        }\
740
        if ((C1 = X->child[1]) != 0) {\
741
          if (RTCHECK(ok_address(M, C1))) {\
742
            R->child[1] = C1;\
743
            C1->parent = R;\
744
          }\
745
          else\
746
            CORRUPTION_ERROR_ACTION(M);\
747
        }\
748
      }\
749
      else\
750
        CORRUPTION_ERROR_ACTION(M);\
751
    }\
752
  }\
753
}
754
 
755
/* Relays to large vs small bin operations */
756
 
757
#define insert_chunk(M, P, S)\
758
  if (is_small(S)) insert_small_chunk(M, P, S)\
759
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
760
 
761
#define unlink_chunk(M, P, S)\
762
  if (is_small(S)) unlink_small_chunk(M, P, S)\
763
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
764
 
765
 
766
/* -------------------------- system alloc setup ------------------------- */
767
 
768
/* Operations on mflags */
769
 
770
#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
771
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
772
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
773
 
774
#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
775
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
776
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
777
 
778
#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
779
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
780
 
781
#define set_lock(M,L)\
782
 ((M)->mflags = (L)?\
783
  ((M)->mflags | USE_LOCK_BIT) :\
784
  ((M)->mflags & ~USE_LOCK_BIT))
785
 
786
/* page-align a size */
787
#define page_align(S)\
788
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
789
 
790
/* granularity-align a size */
791
#define granularity_align(S)\
792
  (((S) + (mparams.granularity - SIZE_T_ONE))\
793
   & ~(mparams.granularity - SIZE_T_ONE))
794
 
795
 
796
/* For mmap, use granularity alignment  */
797
#define mmap_align(S) granularity_align(S)
798
 
799
/* For sys_alloc, enough padding to ensure we can malloc the request on success */
800
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
801
 
802
#define is_page_aligned(S)\
803
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
804
#define is_granularity_aligned(S)\
805
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
806
 
807
/*  True if segment S holds address A */
808
#define segment_holds(S, A)\
809
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
810
 
811
/* Return segment holding given address */
812
static msegmentptr segment_holding(mstate m, char* addr)
813
{
814
    msegmentptr sp = &m->seg;
815
    for (;;) {
816
        if (addr >= sp->base && addr < sp->base + sp->size)
817
            return sp;
818
        if ((sp = sp->next) == 0)
819
            return 0;
820
    }
821
}
822
 
823
/* Return true if segment contains a segment link */
824
static int has_segment_link(mstate m, msegmentptr ss)
825
{
826
    msegmentptr sp = &m->seg;
827
    for (;;) {
828
        if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
829
            return 1;
830
        if ((sp = sp->next) == 0)
831
            return 0;
832
    }
833
}
834
 
835
static inline void* os_mmap(size_t size)
836
{
837
  void* ptr = KernelAlloc(size);
3039 serge 838
  printf("%s %x %d bytes\n",__FUNCTION__, ptr, size);
1616 serge 839
  return (ptr != 0)? ptr: MFAIL;
840
}
841
 
842
static inline int os_munmap(void* ptr, size_t size)
843
{
844
    return (KernelFree(ptr) != 0) ? 0 : -1;
845
}
846
 
847
#define should_trim(M,s)  ((s) > (M)->trim_check)
848
 
849
 
850
#define MMAP_DEFAULT(s)        os_mmap(s)
851
#define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
852
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s)
853
 
854
#define internal_malloc(m, b) malloc(b)
855
#define internal_free(m, mem) free(mem)
856
 
857
/* -----------------------  Direct-mmapping chunks ----------------------- */
858
 
859
/*
860
  Directly mmapped chunks are set up with an offset to the start of
861
  the mmapped region stored in the prev_foot field of the chunk. This
862
  allows reconstruction of the required argument to MUNMAP when freed,
863
  and also allows adjustment of the returned chunk to meet alignment
864
  requirements (especially in memalign).
865
*/
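 
/* Worked example (illustrative, 32-bit size_t, 128 KiB granularity): a
   request of nb == 600*1024 bytes exceeds DEFAULT_MMAP_THRESHOLD, so
   mmap_alloc below asks the system for
   mmap_align(600*1024 + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK) == 655360
   bytes (five granules) and stores the alignment offset of the returned
   region in the chunk's prev_foot, so the exact mapping can be unmapped
   again when the chunk is freed. */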
866
 
867
/* Malloc using mmap */
868
static void* mmap_alloc(mstate m, size_t nb)
869
{
870
    size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
871
    if (mmsize > nb)       /* Check for wrap around 0 */
872
    {
873
        char* mm = (char*)(os_mmap(mmsize));
874
        if (mm != CMFAIL)
875
        {
876
            size_t offset = align_offset(chunk2mem(mm));
877
            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
878
            mchunkptr p = (mchunkptr)(mm + offset);
879
            p->prev_foot = offset;
880
            p->head = psize;
881
            mark_inuse_foot(m, p, psize);
882
            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
883
            chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
884
 
885
            if (m->least_addr == 0 || mm < m->least_addr)
886
                m->least_addr = mm;
887
            if ((m->footprint += mmsize) > m->max_footprint)
888
                m->max_footprint = m->footprint;
889
            assert(is_aligned(chunk2mem(p)));
890
            check_mmapped_chunk(m, p);
891
            return chunk2mem(p);
892
        }
893
    }
894
    return 0;
895
}
896
 
897
/* Realloc using mmap */
898
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
899
{
900
    size_t oldsize = chunksize(oldp);
901
    if (is_small(nb)) /* Can't shrink mmap regions below small size */
902
        return 0;
903
  /* Keep old chunk if big enough but not too big */
904
    if (oldsize >= nb + SIZE_T_SIZE &&
905
      (oldsize - nb) <= (mparams.granularity << 1))
906
        return oldp;
907
    else
908
    {
909
        size_t offset = oldp->prev_foot;
910
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
911
        size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
912
        char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
913
                                  oldmmsize, newmmsize, 1);
914
        if (cp != CMFAIL)
915
        {
916
            mchunkptr newp = (mchunkptr)(cp + offset);
917
            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
918
            newp->head = psize;
919
            mark_inuse_foot(m, newp, psize);
920
            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
921
            chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
922
 
923
            if (cp < m->least_addr)
924
                m->least_addr = cp;
925
            if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
926
                m->max_footprint = m->footprint;
927
            check_mmapped_chunk(m, newp);
928
            return newp;
929
        }
930
    }
931
    return 0;
932
}
933
 
934
/* ---------------------------- setting mparams -------------------------- */
935
 
936
/* Initialize mparams */
937
static int init_mparams(void) {
938
 
939
    ACQUIRE_MALLOC_GLOBAL_LOCK();
940
 
941
    if (mparams.magic == 0)
942
    {
943
        size_t magic;
944
        size_t psize;
945
        size_t gsize;
946
 
947
        psize = 4096;
948
        gsize = DEFAULT_GRANULARITY;
949
 
950
    /* Sanity-check configuration:
951
       size_t must be unsigned and as wide as pointer type.
952
       ints must be at least 4 bytes.
953
       alignment must be at least 8.
954
       Alignment, min chunk size, and page size must all be powers of 2.
955
    */
956
 
957
        mparams.granularity = gsize;
958
        mparams.page_size = psize;
959
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
960
        mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
961
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
962
 
963
    /* Set up lock for main malloc area */
964
        gm->mflags = mparams.default_mflags;
965
        MutexInit(&gm->lock);
966
 
967
        magic = (size_t)(GetTimerTicks() ^ (size_t)0x55555555U);
968
        magic |= (size_t)8U;    /* ensure nonzero */
969
        magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
970
        mparams.magic = magic;
971
    }
972
 
973
    RELEASE_MALLOC_GLOBAL_LOCK();
974
    return 1;
975
}
976
 
977
/* -------------------------- mspace management -------------------------- */
978
 
979
/* Initialize top chunk and its size */
980
static void init_top(mstate m, mchunkptr p, size_t psize)
981
{
982
  /* Ensure alignment */
983
    size_t offset = align_offset(chunk2mem(p));
984
    p = (mchunkptr)((char*)p + offset);
985
    psize -= offset;
986
 
987
    m->top = p;
988
    m->topsize = psize;
989
    p->head = psize | PINUSE_BIT;
990
  /* set size of fake trailing chunk holding overhead space only once */
991
    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
992
    m->trim_check = mparams.trim_threshold; /* reset on each update */
993
}
994
 
995
/* Initialize bins for a new mstate that is otherwise zeroed out */
996
static void init_bins(mstate m)
997
{
998
  /* Establish circular links for smallbins */
999
    bindex_t i;
1000
    for (i = 0; i < NSMALLBINS; ++i) {
1001
        sbinptr bin = smallbin_at(m,i);
1002
        bin->fd = bin->bk = bin;
1003
    }
1004
}
1005
 
1006
/* Allocate chunk and prepend remainder with chunk in successor base. */
1007
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
1008
                           size_t nb)
1009
{
1010
    mchunkptr p = align_as_chunk(newbase);
1011
    mchunkptr oldfirst = align_as_chunk(oldbase);
1012
    size_t psize = (char*)oldfirst - (char*)p;
1013
    mchunkptr q = chunk_plus_offset(p, nb);
1014
    size_t qsize = psize - nb;
1015
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1016
 
1017
    assert((char*)oldfirst > (char*)q);
1018
    assert(pinuse(oldfirst));
1019
    assert(qsize >= MIN_CHUNK_SIZE);
1020
 
1021
  /* consolidate remainder with first chunk of old base */
1022
    if (oldfirst == m->top) {
1023
        size_t tsize = m->topsize += qsize;
1024
        m->top = q;
1025
        q->head = tsize | PINUSE_BIT;
1026
        check_top_chunk(m, q);
1027
    }
1028
    else if (oldfirst == m->dv) {
1029
        size_t dsize = m->dvsize += qsize;
1030
        m->dv = q;
1031
        set_size_and_pinuse_of_free_chunk(q, dsize);
1032
    }
1033
    else {
1034
        if (!is_inuse(oldfirst)) {
1035
            size_t nsize = chunksize(oldfirst);
1036
            unlink_chunk(m, oldfirst, nsize);
1037
            oldfirst = chunk_plus_offset(oldfirst, nsize);
1038
            qsize += nsize;
1039
        }
1040
        set_free_with_pinuse(q, qsize, oldfirst);
1041
        insert_chunk(m, q, qsize);
1042
        check_free_chunk(m, q);
1043
    }
1044
 
1045
    check_malloced_chunk(m, chunk2mem(p), nb);
1046
    return chunk2mem(p);
1047
}
1048
 
1049
/* Add a segment to hold a new noncontiguous region */
1050
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped)
1051
{
1052
  /* Determine locations and sizes of segment, fenceposts, old top */
1053
    char* old_top = (char*)m->top;
1054
    msegmentptr oldsp = segment_holding(m, old_top);
1055
    char* old_end = oldsp->base + oldsp->size;
1056
    size_t ssize = pad_request(sizeof(struct malloc_segment));
1057
    char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
1058
    size_t offset = align_offset(chunk2mem(rawsp));
1059
    char* asp = rawsp + offset;
1060
    char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
1061
    mchunkptr sp = (mchunkptr)csp;
1062
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
1063
    mchunkptr tnext = chunk_plus_offset(sp, ssize);
1064
    mchunkptr p = tnext;
1065
    int nfences = 0;
1066
 
1067
  /* reset top to new space */
1068
    init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
1069
 
1070
  /* Set up segment record */
1071
    assert(is_aligned(ss));
1072
    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
1073
    *ss = m->seg; /* Push current record */
1074
    m->seg.base = tbase;
1075
    m->seg.size = tsize;
1076
    m->seg.sflags = mmapped;
1077
    m->seg.next = ss;
1078
 
1079
  /* Insert trailing fenceposts */
1080
    for (;;) {
1081
        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
1082
        p->head = FENCEPOST_HEAD;
1083
        ++nfences;
1084
        if ((char*)(&(nextp->head)) < old_end)
1085
            p = nextp;
1086
        else
1087
            break;
1088
    }
1089
    assert(nfences >= 2);
1090
 
1091
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1092
    if (csp != old_top) {
1093
        mchunkptr q = (mchunkptr)old_top;
1094
        size_t psize = csp - old_top;
1095
        mchunkptr tn = chunk_plus_offset(q, psize);
1096
        set_free_with_pinuse(q, psize, tn);
1097
        insert_chunk(m, q, psize);
1098
    }
1099
 
1100
  check_top_chunk(m, m->top);
1101
}
1102
 
1103
/* -------------------------- System allocation -------------------------- */
1104
 
1105
/* Get memory from system using MORECORE or MMAP */
1106
static void* sys_alloc(mstate m, size_t nb)
1107
{
1108
    char* tbase = CMFAIL;
1109
    size_t tsize = 0;
1110
    flag_t mmap_flag = 0;
1111
 
1112
    ensure_initialization();
1113
 
3039 serge 1114
    printf("%s %d bytes\n", __FUNCTION__, nb);
1115
 
1616 serge 1116
  /* Directly map large chunks, but only if already initialized */
1117
    if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0)
1118
    {
1119
        void* mem = mmap_alloc(m, nb);
1120
        if (mem != 0)
1121
            return mem;
1122
    }
1123
 
1124
  /*
1125
    Try getting memory in any of three ways (in most-preferred to
1126
    least-preferred order):
1127
    1. A call to MORECORE that can normally contiguously extend memory.
1128
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
1129
       main space is mmapped or a previous contiguous call failed)
1130
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
1131
       Note that under the default settings, if MORECORE is unable to
1132
       fulfill a request, and HAVE_MMAP is true, then mmap is
1133
       used as a noncontiguous system allocator. This is a useful backup
1134
       strategy for systems with holes in address spaces -- in this case
1135
       sbrk cannot contiguously expand the heap, but mmap may be able to
1136
       find space.
1137
    3. A call to MORECORE that cannot usually contiguously extend memory.
1138
       (disabled if not HAVE_MORECORE)
1139
 
1140
   In all cases, we need to request enough bytes from system to ensure
1141
   we can malloc nb bytes upon success, so pad with enough space for
1142
   top_foot, plus alignment-pad to make sure we don't lose bytes if
1143
   not on boundary, and round this up to a granularity unit.
1144
  */
1145
 
1146
    if (HAVE_MMAP && tbase == CMFAIL)   /* Try MMAP */
1147
    {
1148
        size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
1149
        if (rsize > nb)  /* Fail if wraps around zero */
1150
        {
1151
            char* mp = (char*)(CALL_MMAP(rsize));
1152
            if (mp != CMFAIL)
1153
            {
1154
                tbase = mp;
1155
                tsize = rsize;
1156
                mmap_flag = USE_MMAP_BIT;
1157
            }
1158
        }
1159
    }
1160
 
1161
 
1162
    if (tbase != CMFAIL)
1163
    {
1164
 
1165
        if ((m->footprint += tsize) > m->max_footprint)
1166
            m->max_footprint = m->footprint;
1167
 
1168
        if (!is_initialized(m))  /* first-time initialization */
1169
        {
1170
            if (m->least_addr == 0 || tbase < m->least_addr)
1171
                m->least_addr = tbase;
1172
            m->seg.base = tbase;
1173
             m->seg.size = tsize;
1174
            m->seg.sflags = mmap_flag;
1175
            m->magic = mparams.magic;
1176
            m->release_checks = MAX_RELEASE_CHECK_RATE;
1177
            init_bins(m);
1178
 
1179
            if (is_global(m))
1180
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
1181
            else
1182
            {
1183
        /* Offset top by embedded malloc_state */
1184
                mchunkptr mn = next_chunk(mem2chunk(m));
1185
                init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
1186
            }
1187
        }
1188
        else
1189
        {
1190
      /* Try to merge with an existing segment */
1191
            msegmentptr sp = &m->seg;
1192
      /* Only consider most recent segment if traversal suppressed */
1193
            while (sp != 0 && tbase != sp->base + sp->size)
1194
                sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
1195
            if (sp != 0 && !is_extern_segment(sp) &&
1196
               (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
1197
                segment_holds(sp, m->top))  /* append */
1198
            {
1199
                sp->size += tsize;
1200
                init_top(m, m->top, m->topsize + tsize);
1201
            }
1202
            else
1203
            {
1204
                if (tbase < m->least_addr)
1205
                    m->least_addr = tbase;
1206
                sp = &m->seg;
1207
                while (sp != 0 && sp->base != tbase + tsize)
1208
                    sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
1209
                if (sp != 0 &&
1210
                    !is_extern_segment(sp) &&
1211
                    (sp->sflags & USE_MMAP_BIT) == mmap_flag)
1212
                {
1213
                    char* oldbase = sp->base;
1214
                    sp->base = tbase;
1215
                    sp->size += tsize;
1216
                    return prepend_alloc(m, tbase, oldbase, nb);
1217
                }
1218
                else
1219
                    add_segment(m, tbase, tsize, mmap_flag);
1220
            }
1221
        }
1222
 
1223
        if (nb < m->topsize)  /* Allocate from new or extended top space */
1224
        {
1225
            size_t rsize = m->topsize -= nb;
1226
            mchunkptr p = m->top;
1227
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
1228
            r->head = rsize | PINUSE_BIT;
1229
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1230
            check_top_chunk(m, m->top);
1231
            check_malloced_chunk(m, chunk2mem(p), nb);
1232
            return chunk2mem(p);
1233
        }
1234
    }
1235
 
1236
//    MALLOC_FAILURE_ACTION;
1237
    return 0;
1238
}
1239
 
1240
 
1241
/* -----------------------  system deallocation -------------------------- */
1242
 
1243
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1244
static size_t release_unused_segments(mstate m)
1245
{
1246
    size_t released = 0;
1247
    int nsegs = 0;
1248
    msegmentptr pred = &m->seg;
1249
    msegmentptr sp = pred->next;
1250
    while (sp != 0)
1251
    {
1252
        char* base = sp->base;
1253
        size_t size = sp->size;
1254
        msegmentptr next = sp->next;
1255
        ++nsegs;
1256
        if (is_mmapped_segment(sp) && !is_extern_segment(sp))
1257
        {
1258
            mchunkptr p = align_as_chunk(base);
1259
            size_t psize = chunksize(p);
1260
      /* Can unmap if first chunk holds entire segment and not pinned */
1261
            if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE)
1262
            {
1263
                tchunkptr tp = (tchunkptr)p;
1264
                assert(segment_holds(sp, (char*)sp));
1265
                if (p == m->dv) {
1266
                    m->dv = 0;
1267
                    m->dvsize = 0;
1268
                }
1269
                else {
1270
                    unlink_large_chunk(m, tp);
1271
                }
1272
                if (CALL_MUNMAP(base, size) == 0)
1273
                {
1274
                    released += size;
1275
                    m->footprint -= size;
1276
          /* unlink obsoleted record */
1277
                    sp = pred;
1278
                    sp->next = next;
1279
                }
1280
                else { /* back out if cannot unmap */
1281
                    insert_large_chunk(m, tp, psize);
1282
                }
1283
            }
1284
        }
1285
        if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
1286
            break;
1287
        pred = sp;
1288
        sp = next;
1289
    }
1290
  /* Reset check counter */
1291
    m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
1292
                       nsegs : MAX_RELEASE_CHECK_RATE);
1293
    return released;
1294
}
1295
 
1296
static int sys_trim(mstate m, size_t pad)
1297
{
1298
    size_t released = 0;
1299
    ensure_initialization();
1300
    if (pad < MAX_REQUEST && is_initialized(m))
1301
    {
1302
        pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
1303
 
1304
        if (m->topsize > pad)
1305
        {
1306
      /* Shrink top space in granularity-size units, keeping at least one */
1307
            size_t unit = mparams.granularity;
1308
            size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
1309
                             SIZE_T_ONE) * unit;
1310
            msegmentptr sp = segment_holding(m, (char*)m->top);
1311
 
1312
            if (!is_extern_segment(sp))
1313
            {
1314
                if (is_mmapped_segment(sp))
1315
                {
1316
                    if (HAVE_MMAP &&
1317
                        sp->size >= extra &&
1318
                        !has_segment_link(m, sp))  /* can't shrink if pinned */
1319
                    {
1320
                        size_t newsize = sp->size - extra;
1321
            /* Prefer mremap, fall back to munmap */
1322
                        if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
1323
                            (CALL_MUNMAP(sp->base + newsize, extra) == 0))
1324
                        {
1325
                            released = extra;
1326
                        }
1327
                    }
1328
                }
1329
            }
1330
 
1331
            if (released != 0)
1332
            {
1333
                sp->size -= released;
1334
                m->footprint -= released;
1335
                init_top(m, m->top, m->topsize - released);
1336
                check_top_chunk(m, m->top);
1337
            }
1338
        }
1339
 
1340
    /* Unmap any unused mmapped segments */
1341
        if (HAVE_MMAP)
1342
            released += release_unused_segments(m);
1343
 
1344
    /* On failure, disable autotrim to avoid repeated failed future calls */
1345
        if (released == 0 && m->topsize > m->trim_check)
1346
        m->trim_check = MAX_SIZE_T;
1347
    }
1348
 
1349
    return (released != 0)? 1 : 0;
1350
}
1351
 
1352
 
1353
 
1354
/* ---------------------------- malloc support --------------------------- */
1355
 
1356
/* allocate a large request from the best fitting chunk in a treebin */
1357
static void* tmalloc_large(mstate m, size_t nb) {
1358
  tchunkptr v = 0;
1359
  size_t rsize = -nb; /* Unsigned negation */
1360
  tchunkptr t;
1361
  bindex_t idx;
1362
  compute_tree_index(nb, idx);
1363
  if ((t = *treebin_at(m, idx)) != 0) {
1364
    /* Traverse tree for this bin looking for node with size == nb */
1365
    size_t sizebits = nb << leftshift_for_tree_index(idx);
1366
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
1367
    for (;;) {
1368
      tchunkptr rt;
1369
      size_t trem = chunksize(t) - nb;
1370
      if (trem < rsize) {
1371
        v = t;
1372
        if ((rsize = trem) == 0)
1373
          break;
1374
      }
1375
      rt = t->child[1];
1376
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1377
      if (rt != 0 && rt != t)
1378
        rst = rt;
1379
      if (t == 0) {
1380
        t = rst; /* set t to least subtree holding sizes > nb */
1381
        break;
1382
      }
1383
      sizebits <<= 1;
1384
    }
1385
  }
1386
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
1387
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
1388
    if (leftbits != 0) {
1389
      bindex_t i;
1390
      binmap_t leastbit = least_bit(leftbits);
1391
      compute_bit2idx(leastbit, i);
1392
      t = *treebin_at(m, i);
1393
    }
1394
  }
1395
 
1396
  while (t != 0) { /* find smallest of tree or subtree */
1397
    size_t trem = chunksize(t) - nb;
1398
    if (trem < rsize) {
1399
      rsize = trem;
1400
      v = t;
1401
    }
1402
    t = leftmost_child(t);
1403
  }
1404
 
1405
  /*  If dv is a better fit, return 0 so malloc will use it */
1406
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
1407
    if (RTCHECK(ok_address(m, v))) { /* split */
1408
      mchunkptr r = chunk_plus_offset(v, nb);
1409
      assert(chunksize(v) == rsize + nb);
1410
      if (RTCHECK(ok_next(v, r))) {
1411
        unlink_large_chunk(m, v);
1412
        if (rsize < MIN_CHUNK_SIZE)
1413
          set_inuse_and_pinuse(m, v, (rsize + nb));
1414
        else {
1415
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1416
          set_size_and_pinuse_of_free_chunk(r, rsize);
1417
          insert_chunk(m, r, rsize);
1418
        }
1419
        return chunk2mem(v);
1420
      }
1421
    }
1422
    CORRUPTION_ERROR_ACTION(m);
1423
  }
1424
  return 0;
1425
}
1426
 
1427
/* allocate a small request from the best fitting chunk in a treebin */
1428
static void* tmalloc_small(mstate m, size_t nb)
1429
{
1430
    tchunkptr t, v;
1431
    size_t rsize;
1432
    bindex_t i;
1433
    binmap_t leastbit = least_bit(m->treemap);
1434
    compute_bit2idx(leastbit, i);
1435
    v = t = *treebin_at(m, i);
1436
    rsize = chunksize(t) - nb;
1437
 
1438
    while ((t = leftmost_child(t)) != 0) {
1439
        size_t trem = chunksize(t) - nb;
1440
        if (trem < rsize) {
1441
            rsize = trem;
1442
            v = t;
1443
        }
1444
    }
1445
 
1446
    if (RTCHECK(ok_address(m, v))) {
1447
        mchunkptr r = chunk_plus_offset(v, nb);
1448
        assert(chunksize(v) == rsize + nb);
1449
        if (RTCHECK(ok_next(v, r))) {
1450
            unlink_large_chunk(m, v);
1451
            if (rsize < MIN_CHUNK_SIZE)
1452
                set_inuse_and_pinuse(m, v, (rsize + nb));
1453
            else {
1454
                set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1455
                set_size_and_pinuse_of_free_chunk(r, rsize);
1456
                replace_dv(m, r, rsize);
1457
            }
1458
            return chunk2mem(v);
1459
        }
1460
    }
1461
 
1462
    CORRUPTION_ERROR_ACTION(m);
1463
    return 0;
1464
}
1465
 
/* --------------------------- memalign support -------------------------- */

static void* internal_memalign(mstate m, size_t alignment, size_t bytes)
{
    if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
        return internal_malloc(m, bytes);
    if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
        alignment = MIN_CHUNK_SIZE;
    if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
        size_t a = MALLOC_ALIGNMENT << 1;
        while (a < alignment) a <<= 1;
        alignment = a;
    }

    if (bytes >= MAX_REQUEST - alignment) {
        if (m != 0)  { /* Test isn't needed but avoids compiler warning */
//            MALLOC_FAILURE_ACTION;
        }
    }
    else
    {
        size_t nb = request2size(bytes);
        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
        char* mem = (char*)internal_malloc(m, req);
        if (mem != 0)
        {
            void* leader = 0;
            void* trailer = 0;
            mchunkptr p = mem2chunk(mem);

            PREACTION(m);

            if ((((size_t)(mem)) % alignment) != 0)  /* misaligned */
            {
                /*
                  Find an aligned spot inside chunk.  Since we need to give
                  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
                  the first calculation places us at a spot with less than
                  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
                  We've allocated enough total room so that this is always
                  possible.
                */
                char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
                                                       alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
                char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
                             br : br+alignment;
                mchunkptr newp = (mchunkptr)pos;
                size_t leadsize = pos - (char*)(p);
                size_t newsize = chunksize(p) - leadsize;

                if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
                    newp->prev_foot = p->prev_foot + leadsize;
                    newp->head = newsize;
                }
                else { /* Otherwise, give back leader, use the rest */
                    set_inuse(m, newp, newsize);
                    set_inuse(m, p, leadsize);
                    leader = chunk2mem(p);
                }
                p = newp;
            }

            /* Give back spare room at the end */
            if (!is_mmapped(p))
            {
                size_t size = chunksize(p);
                if (size > nb + MIN_CHUNK_SIZE)
                {
                    size_t remainder_size = size - nb;
                    mchunkptr remainder = chunk_plus_offset(p, nb);
                    set_inuse(m, p, nb);
                    set_inuse(m, remainder, remainder_size);
                    trailer = chunk2mem(remainder);
                }
            }

            assert(chunksize(p) >= nb);
            assert((((size_t)(chunk2mem(p))) % alignment) == 0);
            check_inuse_chunk(m, p);
            POSTACTION(m);
            if (leader != 0) {
                internal_free(m, leader);
            }
            if (trailer != 0) {
                internal_free(m, trailer);
            }
            return chunk2mem(p);
        }
    }
    return 0;
}
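/*
  Standalone sketch of the two pieces of address arithmetic used by
  internal_memalign() above: rounding a requested alignment up to a power of
  two, and rounding an address up to the next aligned boundary.  The value 8
  stands in for MALLOC_ALIGNMENT and is an assumption about a typical build.
*/
#if 0   /* illustrative only -- not compiled */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* Round a requested alignment up to the next power of two,
       as the loop above does (MALLOC_ALIGNMENT assumed to be 8). */
    size_t alignment = 24;             /* not a power of two */
    size_t a = (size_t)8 << 1;
    while (a < alignment) a <<= 1;
    printf("alignment 24 rounds up to %zu\n", a);          /* 32 */

    /* Find the first address >= addr that is a multiple of the (power
       of two) alignment -- the same arithmetic used for 'br' above. */
    size_t addr = 0x1004;
    size_t aligned = (addr + a - 1) & (size_t)(0 - a);
    printf("0x%zx aligned up to 0x%zx\n", addr, aligned);  /* 0x1020 */
    return 0;
}
#endif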
 
void* memalign(size_t alignment, size_t bytes)
{
    return internal_memalign(gm, alignment, bytes);
}
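/*
  Usage sketch for the memalign() wrapper above: the returned block is an
  ordinary allocation and may be released with free().
*/
#if 0   /* illustrative usage only -- not compiled */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* Request 1000 bytes aligned to a 64-byte boundary. */
    void* p = memalign(64, 1000);
    if (p != 0) {
        printf("p=%p, 64-byte aligned: %s\n",
               p, (((size_t)p & 63) == 0) ? "yes" : "no");
        free(p);
    }
    return 0;
}
#endif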
 

void* malloc(size_t bytes)
{
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it

     The ugly gotos here ensure that postaction occurs along all paths.
     A standalone sketch of the small-request size-to-bin mapping follows
     this function.
  */

    ensure_initialization(); /* initialize in sys_alloc if not using locks */

    PREACTION(gm);
    {
        void* mem;
        size_t nb;

        if (bytes <= MAX_SMALL_REQUEST)
        {
            bindex_t idx;
            binmap_t smallbits;
            nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
            idx = small_index(nb);
            smallbits = gm->smallmap >> idx;

            if ((smallbits & 0x3U) != 0) /* Remainderless fit to a smallbin. */
            {
                mchunkptr b, p;
                idx += ~smallbits & 1;       /* Uses next bin if idx empty */
                b = smallbin_at(gm, idx);
                p = b->fd;
                assert(chunksize(p) == small_index2size(idx));
                unlink_first_small_chunk(gm, b, p, idx);
                set_inuse_and_pinuse(gm, p, small_index2size(idx));
                mem = chunk2mem(p);
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
            else if (nb > gm->dvsize)
            {
                if (smallbits != 0) /* Use chunk in next nonempty smallbin */
                {
                    mchunkptr b, p, r;
                    size_t rsize;
                    bindex_t i;
                    binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
                    binmap_t leastbit = least_bit(leftbits);
                    compute_bit2idx(leastbit, i);
                    b = smallbin_at(gm, i);
                    p = b->fd;
                    assert(chunksize(p) == small_index2size(i));
                    unlink_first_small_chunk(gm, b, p, i);
                    rsize = small_index2size(i) - nb;
                    /* Fit here cannot be remainderless if 4-byte sizes */
                    if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                        set_inuse_and_pinuse(gm, p, small_index2size(i));
                    else
                    {
                        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
                        r = chunk_plus_offset(p, nb);
                        set_size_and_pinuse_of_free_chunk(r, rsize);
                        replace_dv(gm, r, rsize);
                    }
                    mem = chunk2mem(p);
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
                else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0)
                {
                    check_malloced_chunk(gm, mem, nb);
                    goto postaction;
                }
            }
        }
        else if (bytes >= MAX_REQUEST)
            nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
        else
        {
            nb = pad_request(bytes);
            if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0)
            {
                check_malloced_chunk(gm, mem, nb);
                goto postaction;
            }
        }

        if (nb <= gm->dvsize) {
            size_t rsize = gm->dvsize - nb;
            mchunkptr p = gm->dv;
            if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
                mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
                gm->dvsize = rsize;
                set_size_and_pinuse_of_free_chunk(r, rsize);
                set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            }
            else { /* exhaust dv */
                size_t dvs = gm->dvsize;
                gm->dvsize = 0;
                gm->dv = 0;
                set_inuse_and_pinuse(gm, p, dvs);
            }
            mem = chunk2mem(p);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }
        else if (nb < gm->topsize) { /* Split top */
            size_t rsize = gm->topsize -= nb;
            mchunkptr p = gm->top;
            mchunkptr r = gm->top = chunk_plus_offset(p, nb);
            r->head = rsize | PINUSE_BIT;
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            mem = chunk2mem(p);
            check_top_chunk(gm, gm->top);
            check_malloced_chunk(gm, mem, nb);
            goto postaction;
        }

        mem = sys_alloc(gm, nb);

  postaction:
        POSTACTION(gm);
        return mem;
    }

    return 0;
}
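/*
  Sketch of the small-request size-to-bin mapping referenced in the comment at
  the top of malloc(): pad_request() adds the per-chunk overhead and rounds up
  to the allocation alignment, and small_index() selects an 8-byte-spaced
  smallbin.  The EX_* constants are assumed typical 32-bit values, not the
  definitive configuration of this file.
*/
#if 0   /* illustrative only -- not compiled */
#include <stdio.h>
#include <stddef.h>

#define EX_MALLOC_ALIGNMENT ((size_t)8)
#define EX_CHUNK_OVERHEAD   ((size_t)4)     /* one size_t of header */
#define EX_MIN_CHUNK_SIZE   ((size_t)16)
#define EX_SMALLBIN_SHIFT   3

int main(void)
{
    size_t bytes = 20;                                  /* user request */
    size_t min_request = EX_MIN_CHUNK_SIZE - EX_CHUNK_OVERHEAD - 1;

    /* pad_request(): add header overhead and round up to the alignment. */
    size_t nb = (bytes < min_request)
                  ? EX_MIN_CHUNK_SIZE
                  : ((bytes + EX_CHUNK_OVERHEAD + EX_MALLOC_ALIGNMENT - 1)
                     & ~(EX_MALLOC_ALIGNMENT - 1));

    /* small_index(): each smallbin holds one 8-byte size class. */
    size_t idx = nb >> EX_SMALLBIN_SHIFT;

    printf("request %zu bytes -> chunk size %zu -> smallbin %zu\n",
           bytes, nb, idx);                             /* 20 -> 24 -> 3 */
    return 0;
}
#endif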
 

void free(void* mem)
{
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */

    if (mem != 0)
    {
        mchunkptr p  = mem2chunk(mem);

#define fm gm

        PREACTION(fm);
        {
            check_inuse_chunk(fm, p);
            if (RTCHECK(ok_address(fm, p) && ok_inuse(p)))
            {
                size_t psize = chunksize(p);
                mchunkptr next = chunk_plus_offset(p, psize);
                if (!pinuse(p))
                {
                    size_t prevsize = p->prev_foot;
                    if (is_mmapped(p))
                    {
                        psize += prevsize + MMAP_FOOT_PAD;
                        if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                            fm->footprint -= psize;
                        goto postaction;
                    }
                    else
                    {
                        mchunkptr prev = chunk_minus_offset(p, prevsize);
                        psize += prevsize;
                        p = prev;
                        if (RTCHECK(ok_address(fm, prev)))  /* consolidate backward */
                        {
                            if (p != fm->dv)
                            {
                                unlink_chunk(fm, p, prevsize);
                            }
                            else if ((next->head & INUSE_BITS) == INUSE_BITS)
                            {
                                fm->dvsize = psize;
                                set_free_with_pinuse(p, psize, next);
                                goto postaction;
                            }
                        }
                        else
                            goto erroraction;
                    }
                }

                if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
                {
                    if (!cinuse(next))    /* consolidate forward */
                    {
                        if (next == fm->top)
                        {
                            size_t tsize = fm->topsize += psize;
                            fm->top = p;
                            p->head = tsize | PINUSE_BIT;
                            if (p == fm->dv)
                            {
                                fm->dv = 0;
                                fm->dvsize = 0;
                            }
                            if (should_trim(fm, tsize))
                                sys_trim(fm, 0);
                            goto postaction;
                        }
                        else if (next == fm->dv)
                        {
                            size_t dsize = fm->dvsize += psize;
                            fm->dv = p;
                            set_size_and_pinuse_of_free_chunk(p, dsize);
                            goto postaction;
                        }
                        else
                        {
                            size_t nsize = chunksize(next);
                            psize += nsize;
                            unlink_chunk(fm, next, nsize);
                            set_size_and_pinuse_of_free_chunk(p, psize);
                            if (p == fm->dv)
                            {
                                fm->dvsize = psize;
                                goto postaction;
                            }
                        }
                    }
                    else
                        set_free_with_pinuse(p, psize, next);

                    if (is_small(psize))
                    {
                        insert_small_chunk(fm, p, psize);
                        check_free_chunk(fm, p);
                    }
                    else
                    {
                        tchunkptr tp = (tchunkptr)p;
                        insert_large_chunk(fm, tp, psize);
                        check_free_chunk(fm, p);
                        if (--fm->release_checks == 0)
                            release_unused_segments(fm);
                    }
                    goto postaction;
                }
            }
    erroraction:
        USAGE_ERROR_ACTION(fm, p);
    postaction:
        POSTACTION(fm);
        }
    }
#undef fm
}
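/*
  Simplified model (not dlmalloc code) of the coalescing performed by free()
  above: a freed chunk absorbs a free predecessor (found through prev_foot)
  and a free successor before being binned.  Sizes and in-use flags are plain
  variables here purely for illustration.
*/
#if 0   /* illustrative only -- not compiled */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* Three adjacent chunks; the middle one is being freed.
       Sizes play the role of chunksize(p), prev_foot and chunksize(next). */
    size_t prevsize = 32;   /* previous neighbour, already free   */
    size_t psize    = 48;   /* chunk passed to free()             */
    size_t nextsize = 64;   /* following neighbour, also free     */
    int prev_in_use = 0;    /* pinuse(p)                          */
    int next_in_use = 0;    /* cinuse(next)                       */

    /* Consolidate backward: absorb the free predecessor. */
    if (!prev_in_use)
        psize += prevsize;          /* p "moves back" by prevsize bytes */

    /* Consolidate forward: absorb the free successor. */
    if (!next_in_use)
        psize += nextsize;

    printf("resulting free chunk: %zu bytes\n", psize);   /* 144 */
    return 0;
}
#endif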
 