/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.  Send questions,
  comments, complaints, performance data, etc to dl@cs.oswego.edu

* Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that by default you may already be using a C
  library containing a malloc that is based on some version of this
  malloc (for example in Linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.

*/
#include <stdlib.h>
#include <string.h>
#include <reent.h>


struct malloc_chunk {
43
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
44
  size_t               head;       /* Size and inuse bits. */
45
  struct malloc_chunk* fd;         /* double links -- used only if free. */
46
  struct malloc_chunk* bk;
47
};
48
 
49
typedef struct malloc_chunk  mchunk;
50
typedef struct malloc_chunk* mchunkptr;
51
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
52
typedef unsigned int bindex_t;         /* Described below */
53
typedef unsigned int binmap_t;         /* Described below */
54
typedef unsigned int flag_t;           /* The type of various bit flag sets */
55
 
56
 
57
 
58
/* ------------------- size_t and alignment properties -------------------- */
59
 
60
/* The maximum possible size_t value has all bits set */
61
#define MAX_SIZE_T           (~(size_t)0)
62
 
63
void *user_alloc(size_t size)
64
{
65
    void  *val;
66
 
67
//    __asm__("int3");
68
 
69
    __asm__ __volatile__(
70
    "int $0x40"
71
    :"=a"(val)
72
    :"a"(68),"b"(12),"c"(size));
73
    return val;
74
}
75
 
76
static inline
77
int user_free(void *mem)
78
{
79
    int  val;
80
 
81
//    __asm__("int3");
82
 
83
    __asm__ __volatile__(
84
    "int $0x40"
85
    :"=a"(val)
86
    :"a"(68),"b"(13),"c"(mem));
87
    return val;
88
}
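
/*
  user_alloc and user_free are thin wrappers over the KolibriOS system
  call interface: "int $0x40" with eax=68 and ebx=12 allocates a block of
  ecx bytes, while ebx=13 releases the block whose address is passed in
  ecx; the result comes back in eax.  A minimal usage sketch follows; it
  is illustrative only and is not compiled into the allocator.
*/
#if 0
static void user_alloc_example(void)
{
    void *block = user_alloc(4096);   /* ask the kernel for 4096 bytes */
    if (block != 0)
        user_free(block);             /* hand the block back */
}
#endif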
89
 
90
 
91
/* ------------------- size_t and alignment properties -------------------- */
92
 
93
/* The byte and bit size of a size_t */
94
#define SIZE_T_SIZE         (sizeof(size_t))
95
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
96
 
97
/* Some constants coerced to size_t */
98
/* Annoying but necessary to avoid errors on some platforms */
99
#define SIZE_T_ZERO         ((size_t)0)
100
#define SIZE_T_ONE          ((size_t)1)
101
#define SIZE_T_TWO          ((size_t)2)
102
#define SIZE_T_FOUR         ((size_t)4)
103
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
104
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
105
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
106
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
107
 
108
#define USE_LOCK_BIT            (2U)
109
#define USE_MMAP_BIT            (SIZE_T_ONE)
110
#define USE_NONCONTIGUOUS_BIT   (4U)
111
 
112
/* segment bit set in create_mspace_with_base */
113
#define EXTERN_BIT              (8U)
114
 
115
#define HAVE_MMAP               1
116
#define CALL_MMAP(s)            MMAP_DEFAULT(s)
117
#define CALL_MUNMAP(a, s)       MUNMAP_DEFAULT((a), (s))
118
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
119
 
120
#define calloc_must_clear(p)    (!is_mmapped(p))
121
 
122
#define MALLOC_FAILURE_ACTION
123
 
124
#define MAX_RELEASE_CHECK_RATE  4095
125
#define NO_SEGMENT_TRAVERSAL    1
126
#define MALLOC_ALIGNMENT        ((size_t)8U)
127
#define CHUNK_OVERHEAD          (SIZE_T_SIZE)
128
#define DEFAULT_GRANULARITY     ((size_t)512U * (size_t)1024U)
129
#define DEFAULT_MMAP_THRESHOLD  ((size_t)1024U * (size_t)1024U)
130
#define DEFAULT_TRIM_THRESHOLD  ((size_t)2048U * (size_t)1024U)
131
 
132
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
133
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
134
 
135
/* True if address a has acceptable alignment */
136
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
137
 
138
/* the number of bytes to offset an address to align it */
139
#define align_offset(A)\
140
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
141
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
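
/*
  Worked example with the 8-byte MALLOC_ALIGNMENT used here (so
  CHUNK_ALIGN_MASK == 7): align_offset(0x1000) == 0 since the address is
  already 8-aligned, while align_offset(0x1004) == (8 - 4) & 7 == 4, the
  number of bytes needed to reach the next 8-byte boundary.
*/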
142
 
143
 
144
#define MFAIL                ((void*)(MAX_SIZE_T))
145
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
146
 
147
/* For sys_alloc, enough padding to ensure the request can be malloc'ed on success */
148
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
149
 
150
/*
151
  TOP_FOOT_SIZE is padding at the end of a segment, including space
152
  that may be needed to place segment records and fenceposts when new
153
  noncontiguous segments are added.
154
*/
155
#define TOP_FOOT_SIZE\
156
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
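
/*
  Concrete value on this 32-bit target: chunk2mem(0) is already 8-aligned
  so the align_offset term is 0, pad_request(sizeof(struct malloc_segment))
  == pad_request(16) == 24, and MIN_CHUNK_SIZE == 16, so TOP_FOOT_SIZE
  reserves 40 bytes of tail space per segment.
*/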
157
 
158
/* ------------------- Chunk sizes and alignments ------------------------ */
159
 
160
#define MCHUNK_SIZE         (sizeof(mchunk))
161
 
162
/* MMapped chunks need a second word of overhead ... */
163
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
164
/* ... and additional padding for fake next-chunk at foot */
165
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
166
 
167
/* The smallest size we can malloc is an aligned minimal chunk */
168
#define MIN_CHUNK_SIZE\
169
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
170
 
171
/* conversion from malloc headers to user pointers, and back */
172
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
173
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
174
/* chunk associated with aligned address A */
175
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
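
/*
  On this 32-bit target size_t is 4 bytes, so TWO_SIZE_T_SIZES == 8:
  chunk2mem() returns the address just past the prev_foot/head pair (a
  chunk header at 0x2000 yields the user pointer 0x2008), and mem2chunk()
  steps the same 8 bytes back.
*/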
176
 
177
/* Bounds on request (not chunk) sizes. */
178
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
179
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
180
 
181
/* pad request bytes into a usable size */
182
#define pad_request(req) \
183
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
184
 
185
/* pad request, checking for minimum (but not maximum) */
186
#define request2size(req) \
187
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
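
/*
  Worked example (32-bit target: CHUNK_OVERHEAD == 4, MIN_CHUNK_SIZE == 16,
  hence MIN_REQUEST == 11): request2size(12) == (12 + 4 + 7) & ~7 == 16,
  request2size(13) == 24, and anything below MIN_REQUEST, e.g.
  request2size(8), is rounded up to MIN_CHUNK_SIZE == 16.
*/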
188
 
189
/* ------------------ Operations on head and foot fields ----------------- */
190
 
191
/*
192
  The head field of a chunk is or'ed with PINUSE_BIT when previous
193
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
194
  use, unless mmapped, in which case both bits are cleared.
195
 
196
  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
197
*/
198
 
199
#define PINUSE_BIT          (SIZE_T_ONE)
200
#define CINUSE_BIT          (SIZE_T_TWO)
201
#define FLAG4_BIT           (SIZE_T_FOUR)
202
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
203
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
204
 
205
/* Head value for fenceposts */
206
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
207
 
208
/* extraction of fields from head words */
209
#define cinuse(p)           ((p)->head & CINUSE_BIT)
210
#define pinuse(p)           ((p)->head & PINUSE_BIT)
211
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
212
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
213
 
214
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
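
/*
  Example of the head encoding: a 24-byte chunk that is itself in use and
  whose preceding chunk is also in use stores head == 24|CINUSE_BIT|PINUSE_BIT
  == 0x1B; chunksize() masks the three flag bits back off to recover 24.  A
  directly mmapped chunk clears both inuse bits, which is exactly the state
  is_mmapped() tests for.
*/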
215
 
216
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
217
 
218
/* Treat space at ptr +/- offset as a chunk */
219
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
220
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
221
 
222
/* Ptr to next or previous physical malloc_chunk. */
223
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
224
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
225
 
226
/* extract next chunk's pinuse bit */
227
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
228
 
229
/* Set size, pinuse bit, and foot */
230
#define set_size_and_pinuse_of_free_chunk(p, s)\
231
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
232
 
233
/* Set size, pinuse bit, foot, and clear next pinuse */
234
#define set_free_with_pinuse(p, s, n)\
235
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
236
 
237
/* Get the internal overhead associated with chunk p */
238
#define overhead_for(p)\
239
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
240
 
241
 
242
struct malloc_tree_chunk {
243
  /* The first four fields must be compatible with malloc_chunk */
244
  size_t                    prev_foot;
245
  size_t                    head;
246
  struct malloc_tree_chunk* fd;
247
  struct malloc_tree_chunk* bk;
248
 
249
  struct malloc_tree_chunk* child[2];
250
  struct malloc_tree_chunk* parent;
251
  bindex_t                  index;
252
};
253
 
254
typedef struct malloc_tree_chunk  tchunk;
255
typedef struct malloc_tree_chunk* tchunkptr;
256
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
257
 
258
/* A little helper macro for trees */
259
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
260
 
261
 
262
struct malloc_segment {
263
  char*        base;             /* base address */
264
  size_t       size;             /* allocated size */
265
  struct malloc_segment* next;   /* ptr to next segment */
266
  flag_t       sflags;           /* mmap and extern flag */
267
};
268
 
269
#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
270
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
271
 
272
typedef struct malloc_segment  msegment;
273
typedef struct malloc_segment* msegmentptr;
274
 
275
/* ---------------------------- malloc_state ----------------------------- */
276
 
277
/*
278
   A malloc_state holds all of the bookkeeping for a space.
279
   The main fields are:
280
 
281
  Top
282
    The topmost chunk of the currently active segment. Its size is
283
    cached in topsize.  The actual size of topmost space is
284
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
285
    fenceposts and segment records if necessary when getting more
286
    space from the system.  The size at which to autotrim top is
287
    cached from mparams in trim_check, except that it is disabled if
288
    an autotrim fails.
289
 
290
  Designated victim (dv)
291
    This is the preferred chunk for servicing small requests that
292
    don't have exact fits.  It is normally the chunk split off most
293
    recently to service another small request.  Its size is cached in
294
    dvsize. The link fields of this chunk are not maintained since it
295
    is not kept in a bin.
296
 
297
  SmallBins
298
    An array of bin headers for free chunks.  These bins hold chunks
299
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
300
    chunks of all the same size, spaced 8 bytes apart.  To simplify
301
    use in double-linked lists, each bin header acts as a malloc_chunk
302
    pointing to the real first node, if it exists (else pointing to
303
    itself).  This avoids special-casing for headers.  But to avoid
304
    waste, we allocate only the fd/bk pointers of bins, and then use
305
    repositioning tricks to treat these as the fields of a chunk.
306
 
307
  TreeBins
308
    Treebins are pointers to the roots of trees holding a range of
309
    sizes. There are 2 equally spaced treebins for each power of two
310
    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
311
    larger.
312
 
313
  Bin maps
314
    There is one bit map for small bins ("smallmap") and one for
315
    treebins ("treemap).  Each bin sets its bit when non-empty, and
316
    clears the bit when empty.  Bit operations are then used to avoid
317
    bin-by-bin searching -- nearly all "search" is done without ever
318
    looking at bins that won't be selected.  The bit maps
319
    conservatively use 32 bits per map word, even on a 64-bit system.
320
    For a good description of some of the bit-based techniques used
321
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
322
    supplement at http://hackersdelight.org/). Many of these are
323
    intended to reduce the branchiness of paths through malloc etc, as
324
    well as to reduce the number of memory locations read or written.
325
 
326
  Segments
327
    A list of segments headed by an embedded malloc_segment record
328
    representing the initial space.
329
 
330
  Address check support
331
    The least_addr field is the least address ever obtained from
332
    MORECORE or MMAP. Attempted frees and reallocs of any address less
333
    than this are trapped (unless INSECURE is defined).
334
 
335
  Magic tag
336
    A cross-check field that should always hold same value as mparams.magic.
337
 
338
  Flags
339
    Bits recording whether to use MMAP, locks, or contiguous MORECORE
340
 
341
  Statistics
342
    Each space keeps track of current and maximum system memory
343
    obtained via MORECORE or MMAP.
344
 
345
  Trim support
346
    Fields holding the amount of unused topmost memory that should trigger
347
    trimming, and a counter to force periodic scanning to release unused
348
    non-topmost segments.
349
 
350
  Locking
351
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
352
    around every public call using this mspace.
353
 
354
  Extension support
355
    A void* pointer and a size_t field that can be used to help implement
356
    extensions to this malloc.
357
*/
358
 
359
/* Bin types, widths and sizes */
360
#define NSMALLBINS        (32U)
361
#define NTREEBINS         (32U)
362
#define SMALLBIN_SHIFT    (3U)
363
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
364
#define TREEBIN_SHIFT     (8U)
365
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
366
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
367
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
368
 
369
struct malloc_state {
370
  binmap_t   smallmap;
371
  binmap_t   treemap;
372
  size_t     dvsize;
373
  size_t     topsize;
374
  char*      least_addr;
375
  mchunkptr  dv;
376
  mchunkptr  top;
377
  size_t     trim_check;
378
  size_t     release_checks;
379
  size_t     magic;
380
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
381
  tbinptr    treebins[NTREEBINS];
382
  size_t     footprint;
383
  size_t     max_footprint;
384
  flag_t     mflags;
6536 serge 385
  _LOCK_RECURSIVE_T    lock;     /* locate lock among fields that rarely change */
4349 Serge 386
  msegment   seg;
387
  void*      extp;      /* Unused but available for extensions */
388
  size_t     exts;
389
};
390
 
391
typedef struct malloc_state*    mstate;
392
 
393
/* ------------- Global malloc_state and malloc_params ------------------- */
394
 
395
/*
396
  malloc_params holds global properties, including those that can be
397
  dynamically set using mallopt. There is a single instance, mparams,
398
  initialized in init_mparams. Note that the non-zeroness of "magic"
399
  also serves as an initialization flag.
400
*/
401
 
402
struct malloc_params
403
{
404
  volatile size_t magic;
405
  size_t page_size;
406
  size_t granularity;
407
  size_t mmap_threshold;
408
  size_t trim_threshold;
409
  flag_t default_mflags;
410
};
411
 
412
static struct malloc_params mparams;
413
 
414
/* Ensure mparams initialized */
415
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
416
 
417
static struct malloc_state _gm_;
418
#define gm                 (&_gm_)
419
#define is_global(M)       ((M) == &_gm_)
420
 
421
#define is_initialized(M)  ((M)->top != 0)
422
 
423
 
424
 
425
 
426
__LOCK_INIT_RECURSIVE(static, malloc_global_mutex);
427
 
6536 serge 428
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  __lock_acquire_recursive(malloc_global_mutex);
429
#define RELEASE_MALLOC_GLOBAL_LOCK()  __lock_release_recursive(malloc_global_mutex);
4349 Serge 430
 
6536 serge 431
#define PREACTION(M)  ( __lock_acquire_recursive((M)->lock))
432
#define POSTACTION(M) { __lock_release_recursive((M)->lock); }
4349 Serge 433
 
434
/* ---------------------------- Indexing Bins ---------------------------- */
435
 
436
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
437
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
438
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
439
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
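
/*
  Example: with SMALLBIN_SHIFT == 3 the small bins are spaced 8 bytes
  apart, so small_index(24) == 3 and small_index2size(3) == 24.
  is_small(s) holds exactly for chunk sizes below NSMALLBINS << 3 == 256,
  which matches MIN_LARGE_SIZE, and MIN_SMALL_INDEX == small_index(16) == 2.
*/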
440
 
441
/* addressing by index. See above about smallbin repositioning */
442
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
443
#define treebin_at(M,i)     (&((M)->treebins[i]))
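
/*
  The repositioning trick behind smallbin_at(): the returned "bin header"
  pointer aims two words before the fd/bk pair stored for bin i, so that,
  viewed as a malloc_chunk, its fd and bk fields land exactly on
  smallbins[(i<<1)+2] and smallbins[(i<<1)+3].  Only those two words per
  bin are real storage, which is why smallbins[] is sized (NSMALLBINS+1)*2
  words instead of holding full chunk headers.
*/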
444
 
445
 
446
#define compute_tree_index(S, I)\
447
{\
448
  unsigned int X = S >> TREEBIN_SHIFT;\
449
  if (X == 0)\
450
    I = 0;\
451
  else if (X > 0xFFFF)\
452
    I = NTREEBINS-1;\
453
  else {\
454
    unsigned int K;\
455
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
456
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
457
  }\
458
}
459
 
460
/* Bit representing maximum resolved size in a treebin at i */
461
#define bit_for_tree_index(i) \
462
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
463
 
464
/* Shift placing maximum resolved bit in a treebin at i as sign bit */
465
#define leftshift_for_tree_index(i) \
466
   ((i == NTREEBINS-1)? 0 : \
467
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
468
 
469
/* The size of the smallest chunk held in bin with index i */
470
#define minsize_for_tree_index(i) \
471
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
472
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
473
 
474
 
475
/* ------------------------ Operations on bin maps ----------------------- */
476
 
477
/* bit corresponding to given index */
478
#define idx2bit(i)              ((binmap_t)(1) << (i))
479
 
480
/* Mark/Clear bits with given index */
481
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
482
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
483
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
484
 
485
#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
486
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
487
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
488
 
489
/* isolate the least set bit of a bitmap */
490
#define least_bit(x)         ((x) & -(x))
491
 
492
/* mask with all bits to left of least bit of x on */
493
#define left_bits(x)         ((x<<1) | -(x<<1))
494
 
495
/* mask with all bits to left of or equal to least bit of x on */
496
#define same_or_left_bits(x) ((x) | -(x))
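
/*
  Example with a 32-bit binmap: for x == 0x28 (bits 3 and 5 set),
  least_bit(x) == 0x08, left_bits(x) == 0xFFFFFFF0 (every bit strictly
  above the least set bit), and same_or_left_bits(x) == 0xFFFFFFF8.  These
  masks let the allocator jump straight to the first non-empty bin at or
  above a target index instead of scanning bin by bin.
*/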
497
 
498
 
499
/* index corresponding to given bit. Use x86 asm if possible */
500
 
501
#define compute_bit2idx(X, I)\
502
{\
503
  unsigned int J;\
504
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
505
  I = (bindex_t)J;\
506
}
507
 
508
 
509
#define mark_inuse_foot(M,p,s)
510
 
511
/* Get/set size at footer */
512
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
513
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
514
 
515
/* Macros for setting head/foot of non-mmapped chunks */
516
 
517
/* Set cinuse bit and pinuse bit of next chunk */
518
#define set_inuse(M,p,s)\
519
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
520
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
521
 
522
/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
523
#define set_inuse_and_pinuse(M,p,s)\
524
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
525
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
526
 
527
/* Set size, cinuse and pinuse bit of this chunk */
528
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
529
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
530
 
531
 
532
#define assert(x)
533
#define RTCHECK(e)  __builtin_expect(e, 1)
534
 
535
#define check_free_chunk(M,P)
536
#define check_inuse_chunk(M,P)
537
#define check_malloced_chunk(M,P,N)
538
#define check_mmapped_chunk(M,P)
539
#define check_malloc_state(M)
540
#define check_top_chunk(M,P)
541
 
542
/* Check if address a is at least as high as any from MORECORE or MMAP */
543
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
544
/* Check if address of next chunk n is higher than base chunk p */
545
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
546
/* Check if p has inuse status */
547
#define ok_inuse(p)     is_inuse(p)
548
/* Check if p has its pinuse bit on */
549
#define ok_pinuse(p)     pinuse(p)
550
 
551
#define CORRUPTION_ERROR_ACTION(m)                             \
552
        do {                                                   \
6068 serge 553
            /*printf("%s malloc heap corrupted\n",__FUNCTION__); */\
4349 Serge 554
            __asm__("int3");                                   \
555
        }while(0)                                              \
556
 
557
 
558
#define USAGE_ERROR_ACTION(m, p)                               \
559
        do {                                                   \
6068 serge 560
            /*printf("%s malloc heap corrupted\n",__FUNCTION__); */\
4349 Serge 561
            __asm__("int3");                                   \
562
        }while(0)                                              \
563
 
564
/* ----------------------- Operations on smallbins ----------------------- */
565
 
566
/*
567
  Various forms of linking and unlinking are defined as macros.  Even
568
  the ones for trees, which are very long but have very short typical
569
  paths.  This is ugly but reduces reliance on inlining support of
570
  compilers.
571
*/
572
 
573
/* Link a free chunk into a smallbin  */
574
#define insert_small_chunk(M, P, S) {\
575
  bindex_t I  = small_index(S);\
576
  mchunkptr B = smallbin_at(M, I);\
577
  mchunkptr F = B;\
578
  assert(S >= MIN_CHUNK_SIZE);\
579
  if (!smallmap_is_marked(M, I))\
580
    mark_smallmap(M, I);\
581
  else if (RTCHECK(ok_address(M, B->fd)))\
582
    F = B->fd;\
583
  else {\
584
    CORRUPTION_ERROR_ACTION(M);\
585
  }\
586
  B->fd = P;\
587
  F->bk = P;\
588
  P->fd = F;\
589
  P->bk = B;\
590
}
591
 
592
/* Unlink a chunk from a smallbin  */
593
#define unlink_small_chunk(M, P, S) {\
594
  mchunkptr F = P->fd;\
595
  mchunkptr B = P->bk;\
596
  bindex_t I = small_index(S);\
597
  assert(P != B);\
598
  assert(P != F);\
599
  assert(chunksize(P) == small_index2size(I));\
600
  if (F == B)\
601
    clear_smallmap(M, I);\
602
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
603
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
604
    F->bk = B;\
605
    B->fd = F;\
606
  }\
607
  else {\
608
    CORRUPTION_ERROR_ACTION(M);\
609
  }\
610
}
611
 
612
/* Unlink the first chunk from a smallbin */
613
#define unlink_first_small_chunk(M, B, P, I) {\
614
  mchunkptr F = P->fd;\
615
  assert(P != B);\
616
  assert(P != F);\
617
  assert(chunksize(P) == small_index2size(I));\
618
  if (B == F)\
619
    clear_smallmap(M, I);\
620
  else if (RTCHECK(ok_address(M, F))) {\
621
    B->fd = F;\
622
    F->bk = B;\
623
  }\
624
  else {\
625
    CORRUPTION_ERROR_ACTION(M);\
626
  }\
627
}
628
 
629
/* Replace dv node, binning the old one */
630
/* Used only when dvsize known to be small */
631
#define replace_dv(M, P, S) {\
632
  size_t DVS = M->dvsize;\
633
  if (DVS != 0) {\
634
    mchunkptr DV = M->dv;\
635
    assert(is_small(DVS));\
636
    insert_small_chunk(M, DV, DVS);\
637
  }\
638
  M->dvsize = S;\
639
  M->dv = P;\
640
}
641
 
642
 
643
/* ------------------------- Operations on trees ------------------------- */
644
 
645
/* Insert chunk into tree */
646
#define insert_large_chunk(M, X, S) {\
647
  tbinptr* H;\
648
  bindex_t I;\
649
  compute_tree_index(S, I);\
650
  H = treebin_at(M, I);\
651
  X->index = I;\
652
  X->child[0] = X->child[1] = 0;\
653
  if (!treemap_is_marked(M, I)) {\
654
    mark_treemap(M, I);\
655
    *H = X;\
656
    X->parent = (tchunkptr)H;\
657
    X->fd = X->bk = X;\
658
  }\
659
  else {\
660
    tchunkptr T = *H;\
661
    size_t K = S << leftshift_for_tree_index(I);\
662
    for (;;) {\
663
      if (chunksize(T) != S) {\
664
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
665
        K <<= 1;\
666
        if (*C != 0)\
667
          T = *C;\
668
        else if (RTCHECK(ok_address(M, C))) {\
669
          *C = X;\
670
          X->parent = T;\
671
          X->fd = X->bk = X;\
672
          break;\
673
        }\
674
        else {\
675
          CORRUPTION_ERROR_ACTION(M);\
676
          break;\
677
        }\
678
      }\
679
      else {\
680
        tchunkptr F = T->fd;\
681
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
682
          T->fd = F->bk = X;\
683
          X->fd = F;\
684
          X->bk = T;\
685
          X->parent = 0;\
686
          break;\
687
        }\
688
        else {\
689
          CORRUPTION_ERROR_ACTION(M);\
690
          break;\
691
        }\
692
      }\
693
    }\
694
  }\
695
}
696
 
697
/*
698
  Unlink steps:
699
 
700
  1. If x is a chained node, unlink it from its same-sized fd/bk links
701
     and choose its bk node as its replacement.
702
  2. If x was the last node of its size, but not a leaf node, it must
703
     be replaced with a leaf node (not merely one with an open left or
704
     right), to make sure that lefts and rights of descendants
705
     correspond properly to bit masks.  We use the rightmost descendant
706
     of x.  We could use any other leaf, but this is easy to locate and
707
     tends to counteract removal of leftmosts elsewhere, and so keeps
708
     paths shorter than minimally guaranteed.  This doesn't loop much
709
     because on average a node in a tree is near the bottom.
710
  3. If x is the base of a chain (i.e., has parent links) relink
711
     x's parent and children to x's replacement (or null if none).
712
*/
713
 
714
#define unlink_large_chunk(M, X) {\
715
  tchunkptr XP = X->parent;\
716
  tchunkptr R;\
717
  if (X->bk != X) {\
718
    tchunkptr F = X->fd;\
719
    R = X->bk;\
720
    if (RTCHECK(ok_address(M, F))) {\
721
      F->bk = R;\
722
      R->fd = F;\
723
    }\
724
    else {\
725
      CORRUPTION_ERROR_ACTION(M);\
726
    }\
727
  }\
728
  else {\
729
    tchunkptr* RP;\
730
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
731
        ((R = *(RP = &(X->child[0]))) != 0)) {\
732
      tchunkptr* CP;\
733
      while ((*(CP = &(R->child[1])) != 0) ||\
734
             (*(CP = &(R->child[0])) != 0)) {\
735
        R = *(RP = CP);\
736
      }\
737
      if (RTCHECK(ok_address(M, RP)))\
738
        *RP = 0;\
739
      else {\
740
        CORRUPTION_ERROR_ACTION(M);\
741
      }\
742
    }\
743
  }\
744
  if (XP != 0) {\
745
    tbinptr* H = treebin_at(M, X->index);\
746
    if (X == *H) {\
747
      if ((*H = R) == 0) \
748
        clear_treemap(M, X->index);\
749
    }\
750
    else if (RTCHECK(ok_address(M, XP))) {\
751
      if (XP->child[0] == X) \
752
        XP->child[0] = R;\
753
      else \
754
        XP->child[1] = R;\
755
    }\
756
    else\
757
      CORRUPTION_ERROR_ACTION(M);\
758
    if (R != 0) {\
759
      if (RTCHECK(ok_address(M, R))) {\
760
        tchunkptr C0, C1;\
761
        R->parent = XP;\
762
        if ((C0 = X->child[0]) != 0) {\
763
          if (RTCHECK(ok_address(M, C0))) {\
764
            R->child[0] = C0;\
765
            C0->parent = R;\
766
          }\
767
          else\
768
            CORRUPTION_ERROR_ACTION(M);\
769
        }\
770
        if ((C1 = X->child[1]) != 0) {\
771
          if (RTCHECK(ok_address(M, C1))) {\
772
            R->child[1] = C1;\
773
            C1->parent = R;\
774
          }\
775
          else\
776
            CORRUPTION_ERROR_ACTION(M);\
777
        }\
778
      }\
779
      else\
780
        CORRUPTION_ERROR_ACTION(M);\
781
    }\
782
  }\
783
}
784
 
785
/* Relays to large vs small bin operations */
786
 
787
#define insert_chunk(M, P, S)\
788
  if (is_small(S)) insert_small_chunk(M, P, S)\
789
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
790
 
791
#define unlink_chunk(M, P, S)\
792
  if (is_small(S)) unlink_small_chunk(M, P, S)\
793
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
794
 
795
 
796
/* -------------------------- system alloc setup ------------------------- */
797
 
798
/* Operations on mflags */
799
 
800
#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
801
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
802
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
803
 
804
#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
805
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
806
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
807
 
808
#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
809
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
810
 
811
#define set_lock(M,L)\
812
 ((M)->mflags = (L)?\
813
  ((M)->mflags | USE_LOCK_BIT) :\
814
  ((M)->mflags & ~USE_LOCK_BIT))
815
 
816
/* page-align a size */
817
#define page_align(S)\
818
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
819
 
820
/* granularity-align a size */
821
#define granularity_align(S)\
822
  (((S) + (mparams.granularity - SIZE_T_ONE))\
823
   & ~(mparams.granularity - SIZE_T_ONE))
824
 
825
 
826
/* For mmap, use granularity alignment  */
827
#define mmap_align(S) granularity_align(S)
828
 
829
/* For sys_alloc, enough padding to ensure the request can be malloc'ed on success */
830
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
831
 
832
#define is_page_aligned(S)\
833
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
834
#define is_granularity_aligned(S)\
835
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
836
 
837
/*  True if segment S holds address A */
838
#define segment_holds(S, A)\
839
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
840
 
841
/* Return segment holding given address */
842
static msegmentptr segment_holding(mstate m, char* addr)
843
{
844
    msegmentptr sp = &m->seg;
845
    for (;;) {
846
        if (addr >= sp->base && addr < sp->base + sp->size)
847
            return sp;
848
        if ((sp = sp->next) == 0)
849
            return 0;
850
    }
851
}
852
 
853
/* Return true if segment contains a segment link */
854
static int has_segment_link(mstate m, msegmentptr ss)
855
{
856
    msegmentptr sp = &m->seg;
857
    for (;;) {
858
        if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
859
            return 1;
860
        if ((sp = sp->next) == 0)
861
            return 0;
862
    }
863
}
864
 
865
static inline void* os_mmap(size_t size)
866
{
867
  void* ptr = user_alloc(size);
868
  return (ptr != 0)? ptr: MFAIL;
869
}
870
 
871
static inline int os_munmap(void* ptr, size_t size)
872
{
873
    return (user_free(ptr) != 0) ? 0 : -1;
874
}
875
 
876
#define should_trim(M,s)  ((s) > (M)->trim_check)
877
 
878
 
879
#define MMAP_DEFAULT(s)        os_mmap(s)
880
#define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
881
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s)
882
 
883
#define internal_malloc(m, b) malloc(b)
884
#define internal_free(m, mem) free(mem)
885
 
886
/* -----------------------  Direct-mmapping chunks ----------------------- */
887
 
888
/*
889
  Directly mmapped chunks are set up with an offset to the start of
890
  the mmapped region stored in the prev_foot field of the chunk. This
891
  allows reconstruction of the required argument to MUNMAP when freed,
892
  and also allows adjustment of the returned chunk to meet alignment
893
  requirements (especially in memalign).
894
*/
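
/*
  Layout sketch for mmap_alloc below (sizes for this 32-bit target): the
  system is asked for mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK)
  bytes, i.e. nb plus 24 + 7 rounded up to the 512 KiB granularity.  The
  chunk is placed at the first properly aligned spot in the mapping, its
  prev_foot records the offset back to the mapping base, and the region is
  closed with a FENCEPOST_HEAD word followed by a zero head, so a later
  free can rebuild the exact (base, length) pair for os_munmap.
*/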
895
 
896
/* Malloc using mmap */
897
static void* mmap_alloc(mstate m, size_t nb)
898
{
899
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
900
    if (mmsize > nb)       /* Check for wrap around 0 */
901
    {
902
        char* mm = (char*)(os_mmap(mmsize));
903
        if (mm != CMFAIL)
904
        {
905
      size_t offset = align_offset(chunk2mem(mm));
906
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
907
      mchunkptr p = (mchunkptr)(mm + offset);
908
      p->prev_foot = offset;
909
      p->head = psize;
910
      mark_inuse_foot(m, p, psize);
911
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
912
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
913
 
914
      if (m->least_addr == 0 || mm < m->least_addr)
915
        m->least_addr = mm;
916
      if ((m->footprint += mmsize) > m->max_footprint)
917
        m->max_footprint = m->footprint;
918
      assert(is_aligned(chunk2mem(p)));
919
      check_mmapped_chunk(m, p);
920
      return chunk2mem(p);
921
    }
922
  }
923
  return 0;
924
}
925
 
926
/* Realloc using mmap */
927
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb)
928
{
929
  size_t oldsize = chunksize(oldp);
930
  if (is_small(nb)) /* Can't shrink mmap regions below small size */
931
    return 0;
932
  /* Keep old chunk if big enough but not too big */
933
  if (oldsize >= nb + SIZE_T_SIZE &&
934
      (oldsize - nb) <= (mparams.granularity << 1))
935
    return oldp;
936
    else
937
    {
938
    size_t offset = oldp->prev_foot;
939
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
940
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
941
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
942
                                  oldmmsize, newmmsize, 1);
943
        if (cp != CMFAIL)
944
        {
945
      mchunkptr newp = (mchunkptr)(cp + offset);
946
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
947
      newp->head = psize;
948
      mark_inuse_foot(m, newp, psize);
949
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
950
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
951
 
952
      if (cp < m->least_addr)
953
        m->least_addr = cp;
954
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
955
        m->max_footprint = m->footprint;
956
      check_mmapped_chunk(m, newp);
957
      return newp;
958
    }
959
  }
960
  return 0;
961
}
962
 
963
/* ---------------------------- setting mparams -------------------------- */
964
 
965
/* Initialize mparams */
966
static int init_mparams(void) {
967
 
968
    ACQUIRE_MALLOC_GLOBAL_LOCK();
969
 
970
    if (mparams.magic == 0)
971
    {
972
        size_t magic;
973
        size_t psize;
974
        size_t gsize;
975
 
976
        psize = 4096;
977
        gsize = DEFAULT_GRANULARITY;
978
 
979
    /* Sanity-check configuration:
980
       size_t must be unsigned and as wide as pointer type.
981
       ints must be at least 4 bytes.
982
       alignment must be at least 8.
983
       Alignment, min chunk size, and page size must all be powers of 2.
984
    */
985
 
986
        mparams.granularity = gsize;
987
        mparams.page_size = psize;
988
        mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
989
        mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
990
        mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
991
 
992
    /* Set up lock for main malloc area */
993
        gm->mflags = mparams.default_mflags;
6536 serge 994
        __lock_init_recursive(gm->lock);
4349 Serge 995
 
996
        magic = (size_t)(0x12345678 ^ (size_t)0x55555555U);
997
        magic |= (size_t)8U;    /* ensure nonzero */
998
        magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
999
        mparams.magic = magic;
1000
    }
1001
 
1002
    RELEASE_MALLOC_GLOBAL_LOCK();
1003
    return 1;
1004
}
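
/*
  Note on the magic value computed above: this port has no entropy source,
  so the expression is a compile-time constant -- 0x12345678 ^ 0x55555555
  == 0x4761032D, which already has bit 3 set and then has its low three
  bits cleared, giving mparams.magic == 0x47610328.  Its main job in this
  configuration is to serve as the "already initialized" flag tested by
  ensure_initialization().
*/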
1005
 
1006
/* -------------------------- mspace management -------------------------- */
1007
 
1008
/* Initialize top chunk and its size */
1009
static void init_top(mstate m, mchunkptr p, size_t psize)
1010
{
1011
  /* Ensure alignment */
1012
  size_t offset = align_offset(chunk2mem(p));
1013
  p = (mchunkptr)((char*)p + offset);
1014
  psize -= offset;
1015
 
1016
  m->top = p;
1017
  m->topsize = psize;
1018
  p->head = psize | PINUSE_BIT;
1019
  /* set size of fake trailing chunk holding overhead space only once */
1020
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
1021
  m->trim_check = mparams.trim_threshold; /* reset on each update */
1022
}
1023
 
1024
/* Initialize bins for a new mstate that is otherwise zeroed out */
1025
static void init_bins(mstate m)
1026
{
1027
  /* Establish circular links for smallbins */
1028
  bindex_t i;
1029
  for (i = 0; i < NSMALLBINS; ++i) {
1030
    sbinptr bin = smallbin_at(m,i);
1031
    bin->fd = bin->bk = bin;
1032
  }
1033
}
1034
 
1035
/* Allocate chunk and prepend remainder with chunk in successor base. */
1036
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
1037
                           size_t nb)
1038
{
1039
  mchunkptr p = align_as_chunk(newbase);
1040
  mchunkptr oldfirst = align_as_chunk(oldbase);
1041
  size_t psize = (char*)oldfirst - (char*)p;
1042
  mchunkptr q = chunk_plus_offset(p, nb);
1043
  size_t qsize = psize - nb;
1044
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1045
 
1046
  assert((char*)oldfirst > (char*)q);
1047
  assert(pinuse(oldfirst));
1048
  assert(qsize >= MIN_CHUNK_SIZE);
1049
 
1050
  /* consolidate remainder with first chunk of old base */
1051
  if (oldfirst == m->top) {
1052
    size_t tsize = m->topsize += qsize;
1053
    m->top = q;
1054
    q->head = tsize | PINUSE_BIT;
1055
    check_top_chunk(m, q);
1056
  }
1057
  else if (oldfirst == m->dv) {
1058
    size_t dsize = m->dvsize += qsize;
1059
    m->dv = q;
1060
    set_size_and_pinuse_of_free_chunk(q, dsize);
1061
  }
1062
  else {
1063
    if (!is_inuse(oldfirst)) {
1064
      size_t nsize = chunksize(oldfirst);
1065
      unlink_chunk(m, oldfirst, nsize);
1066
      oldfirst = chunk_plus_offset(oldfirst, nsize);
1067
      qsize += nsize;
1068
    }
1069
    set_free_with_pinuse(q, qsize, oldfirst);
1070
    insert_chunk(m, q, qsize);
1071
    check_free_chunk(m, q);
1072
  }
1073
 
1074
  check_malloced_chunk(m, chunk2mem(p), nb);
1075
  return chunk2mem(p);
1076
}
1077
 
1078
/* Add a segment to hold a new noncontiguous region */
1079
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped)
1080
{
1081
  /* Determine locations and sizes of segment, fenceposts, old top */
1082
  char* old_top = (char*)m->top;
1083
  msegmentptr oldsp = segment_holding(m, old_top);
1084
  char* old_end = oldsp->base + oldsp->size;
1085
  size_t ssize = pad_request(sizeof(struct malloc_segment));
1086
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
1087
  size_t offset = align_offset(chunk2mem(rawsp));
1088
  char* asp = rawsp + offset;
1089
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
1090
  mchunkptr sp = (mchunkptr)csp;
1091
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
1092
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
1093
  mchunkptr p = tnext;
1094
  int nfences = 0;
1095
 
1096
  /* reset top to new space */
1097
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
1098
 
1099
  /* Set up segment record */
1100
  assert(is_aligned(ss));
1101
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
1102
  *ss = m->seg; /* Push current record */
1103
  m->seg.base = tbase;
1104
  m->seg.size = tsize;
1105
  m->seg.sflags = mmapped;
1106
  m->seg.next = ss;
1107
 
1108
  /* Insert trailing fenceposts */
1109
  for (;;) {
1110
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
1111
    p->head = FENCEPOST_HEAD;
1112
    ++nfences;
1113
    if ((char*)(&(nextp->head)) < old_end)
1114
      p = nextp;
1115
    else
1116
      break;
1117
  }
1118
  assert(nfences >= 2);
1119
 
1120
  /* Insert the rest of old top into a bin as an ordinary free chunk */
1121
  if (csp != old_top) {
1122
    mchunkptr q = (mchunkptr)old_top;
1123
    size_t psize = csp - old_top;
1124
    mchunkptr tn = chunk_plus_offset(q, psize);
1125
    set_free_with_pinuse(q, psize, tn);
1126
    insert_chunk(m, q, psize);
1127
  }
1128
 
1129
  check_top_chunk(m, m->top);
1130
}
1131
 
1132
/* -------------------------- System allocation -------------------------- */
1133
 
1134
/* Get memory from system using MORECORE or MMAP */
1135
static void* sys_alloc(mstate m, size_t nb)
1136
{
1137
    char* tbase = CMFAIL;
1138
    size_t tsize = 0;
1139
    flag_t mmap_flag = 0;
1140
 
1141
    ensure_initialization();
1142
 
1143
  /* Directly map large chunks, but only if already initialized */
1144
    if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0)
1145
    {
1146
        void* mem = mmap_alloc(m, nb);
1147
        if (mem != 0)
1148
            return mem;
1149
    }
1150
 
1151
  /*
1152
    Try getting memory in any of three ways (in most-preferred to
1153
    least-preferred order):
1154
    1. A call to MORECORE that can normally contiguously extend memory.
1155
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
1156
       or main space is mmapped or a previous contiguous call failed)
1157
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
1158
       Note that under the default settings, if MORECORE is unable to
1159
       fulfill a request, and HAVE_MMAP is true, then mmap is
1160
       used as a noncontiguous system allocator. This is a useful backup
1161
       strategy for systems with holes in address spaces -- in this case
1162
       sbrk cannot contiguously expand the heap, but mmap may be able to
1163
       find space.
1164
    3. A call to MORECORE that cannot usually contiguously extend memory.
1165
       (disabled if not HAVE_MORECORE)
1166
 
1167
   In all cases, we need to request enough bytes from system to ensure
1168
   we can malloc nb bytes upon success, so pad with enough space for
1169
   top_foot, plus alignment-pad to make sure we don't lose bytes if
1170
   not on boundary, and round this up to a granularity unit.
1171
  */
1172
 
1173
    if (HAVE_MMAP && tbase == CMFAIL)   /* Try MMAP */
1174
    {
1175
        size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
1176
        if (rsize > nb)  /* Fail if wraps around zero */
1177
        {
1178
            char* mp = (char*)(CALL_MMAP(rsize));
1179
            if (mp != CMFAIL)
1180
            {
1181
                tbase = mp;
1182
                tsize = rsize;
1183
                mmap_flag = USE_MMAP_BIT;
1184
            }
1185
        }
1186
    }
1187
 
1188
    if (tbase != CMFAIL)
1189
    {
1190
 
1191
        if ((m->footprint += tsize) > m->max_footprint)
1192
            m->max_footprint = m->footprint;
1193
 
1194
        if (!is_initialized(m))  /* first-time initialization */
1195
        {
1196
            if (m->least_addr == 0 || tbase < m->least_addr)
1197
            m->least_addr = tbase;
1198
            m->seg.base = tbase;
1199
            m->seg.size = tsize;
1200
            m->seg.sflags = mmap_flag;
1201
            m->magic = mparams.magic;
1202
            m->release_checks = MAX_RELEASE_CHECK_RATE;
1203
            init_bins(m);
1204
 
1205
            if (is_global(m))
1206
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
1207
            else
1208
            {
1209
        /* Offset top by embedded malloc_state */
1210
                mchunkptr mn = next_chunk(mem2chunk(m));
1211
                init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
1212
            }
1213
        }
1214
        else
1215
        {
1216
      /* Try to merge with an existing segment */
1217
            msegmentptr sp = &m->seg;
1218
      /* Only consider most recent segment if traversal suppressed */
1219
            while (sp != 0 && tbase != sp->base + sp->size)
1220
                sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
1221
            if (sp != 0 && !is_extern_segment(sp) &&
1222
                (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
1223
                segment_holds(sp, m->top))  /* append */
1224
            {
1225
                sp->size += tsize;
1226
                init_top(m, m->top, m->topsize + tsize);
1227
            }
1228
            else
1229
            {
1230
                if (tbase < m->least_addr)
1231
                    m->least_addr = tbase;
1232
                sp = &m->seg;
1233
                while (sp != 0 && sp->base != tbase + tsize)
1234
                    sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
1235
                if (sp != 0 && !is_extern_segment(sp) &&
1236
                   (sp->sflags & USE_MMAP_BIT) == mmap_flag)
1237
                {
1238
                    char* oldbase = sp->base;
1239
                    sp->base = tbase;
1240
                    sp->size += tsize;
1241
                    return prepend_alloc(m, tbase, oldbase, nb);
1242
                }
1243
                else
1244
                    add_segment(m, tbase, tsize, mmap_flag);
1245
            }
1246
        }
1247
 
1248
        if (nb < m->topsize)  /* Allocate from new or extended top space */
1249
        {
1250
            size_t rsize = m->topsize -= nb;
1251
            mchunkptr p = m->top;
1252
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
1253
            r->head = rsize | PINUSE_BIT;
1254
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1255
            check_top_chunk(m, m->top);
1256
            check_malloced_chunk(m, chunk2mem(p), nb);
1257
            return chunk2mem(p);
1258
        }
1259
    }
1260
 
1261
    MALLOC_FAILURE_ACTION;
1262
    return 0;
1263
}
1264
 
1265
 
1266
/* -----------------------  system deallocation -------------------------- */
1267
 
1268
/* Unmap and unlink any mmapped segments that don't contain used chunks */
1269
static size_t release_unused_segments(mstate m)
1270
{
1271
  size_t released = 0;
1272
  int nsegs = 0;
1273
  msegmentptr pred = &m->seg;
1274
  msegmentptr sp = pred->next;
1275
    while (sp != 0)
1276
    {
1277
    char* base = sp->base;
1278
    size_t size = sp->size;
1279
    msegmentptr next = sp->next;
1280
    ++nsegs;
1281
        if (is_mmapped_segment(sp) && !is_extern_segment(sp))
1282
        {
1283
      mchunkptr p = align_as_chunk(base);
1284
      size_t psize = chunksize(p);
1285
      /* Can unmap if first chunk holds entire segment and not pinned */
1286
            if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE)
1287
            {
1288
        tchunkptr tp = (tchunkptr)p;
1289
        assert(segment_holds(sp, (char*)sp));
1290
        if (p == m->dv) {
1291
          m->dv = 0;
1292
          m->dvsize = 0;
1293
        }
1294
        else {
1295
          unlink_large_chunk(m, tp);
1296
        }
1297
                if (CALL_MUNMAP(base, size) == 0)
1298
                {
1299
          released += size;
1300
          m->footprint -= size;
1301
          /* unlink obsoleted record */
1302
          sp = pred;
1303
          sp->next = next;
1304
        }
1305
        else { /* back out if cannot unmap */
1306
          insert_large_chunk(m, tp, psize);
1307
        }
1308
      }
1309
    }
1310
    if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
1311
      break;
1312
    pred = sp;
1313
    sp = next;
1314
  }
1315
  /* Reset check counter */
1316
  m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
1317
                       nsegs : MAX_RELEASE_CHECK_RATE);
1318
  return released;
1319
}
1320
 
1321
static int sys_trim(mstate m, size_t pad)
1322
{
1323
  size_t released = 0;
1324
  ensure_initialization();
1325
    if (pad < MAX_REQUEST && is_initialized(m))
1326
    {
1327
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
1328
 
1329
        if (m->topsize > pad)
1330
        {
1331
      /* Shrink top space in granularity-size units, keeping at least one */
1332
      size_t unit = mparams.granularity;
1333
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
1334
                      SIZE_T_ONE) * unit;
1335
      msegmentptr sp = segment_holding(m, (char*)m->top);
1336
 
1337
            if (!is_extern_segment(sp))
1338
            {
1339
                if (is_mmapped_segment(sp))
1340
                {
1341
          if (HAVE_MMAP &&
1342
              sp->size >= extra &&
1343
                        !has_segment_link(m, sp))  /* can't shrink if pinned */
1344
                    {
1345
            size_t newsize = sp->size - extra;
1346
            /* Prefer mremap, fall back to munmap */
1347
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
1348
                            (CALL_MUNMAP(sp->base + newsize, extra) == 0))
1349
                        {
1350
              released = extra;
1351
            }
1352
          }
1353
        }
1354
            }
1355
 
1356
            if (released != 0)
1357
            {
1358
        sp->size -= released;
1359
        m->footprint -= released;
1360
        init_top(m, m->top, m->topsize - released);
1361
        check_top_chunk(m, m->top);
1362
      }
1363
    }
1364
 
1365
    /* Unmap any unused mmapped segments */
1366
    if (HAVE_MMAP)
1367
      released += release_unused_segments(m);
1368
 
1369
    /* On failure, disable autotrim to avoid repeated failed future calls */
1370
    if (released == 0 && m->topsize > m->trim_check)
1371
      m->trim_check = MAX_SIZE_T;
1372
  }
1373
 
1374
  return (released != 0)? 1 : 0;
1375
}
1376
 
1377
 
1378
 
1379
/* ---------------------------- malloc support --------------------------- */
1380
 
1381
/* allocate a large request from the best fitting chunk in a treebin */
1382
static void* tmalloc_large(mstate m, size_t nb) {
1383
  tchunkptr v = 0;
1384
  size_t rsize = -nb; /* Unsigned negation */
1385
  tchunkptr t;
1386
  bindex_t idx;
1387
  compute_tree_index(nb, idx);
1388
  if ((t = *treebin_at(m, idx)) != 0) {
1389
    /* Traverse tree for this bin looking for node with size == nb */
1390
    size_t sizebits = nb << leftshift_for_tree_index(idx);
1391
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
1392
    for (;;) {
1393
      tchunkptr rt;
1394
      size_t trem = chunksize(t) - nb;
1395
      if (trem < rsize) {
1396
        v = t;
1397
        if ((rsize = trem) == 0)
1398
          break;
1399
      }
1400
      rt = t->child[1];
1401
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1402
      if (rt != 0 && rt != t)
1403
        rst = rt;
1404
      if (t == 0) {
1405
        t = rst; /* set t to least subtree holding sizes > nb */
1406
        break;
1407
      }
1408
      sizebits <<= 1;
1409
    }
1410
  }
1411
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
1412
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
1413
    if (leftbits != 0) {
1414
      bindex_t i;
1415
      binmap_t leastbit = least_bit(leftbits);
1416
      compute_bit2idx(leastbit, i);
1417
      t = *treebin_at(m, i);
1418
    }
1419
  }
1420
 
1421
  while (t != 0) { /* find smallest of tree or subtree */
1422
    size_t trem = chunksize(t) - nb;
1423
    if (trem < rsize) {
1424
      rsize = trem;
1425
      v = t;
1426
    }
1427
    t = leftmost_child(t);
1428
  }
1429
 
1430
  /*  If dv is a better fit, return 0 so malloc will use it */
1431
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
1432
    if (RTCHECK(ok_address(m, v))) { /* split */
1433
      mchunkptr r = chunk_plus_offset(v, nb);
1434
      assert(chunksize(v) == rsize + nb);
1435
      if (RTCHECK(ok_next(v, r))) {
1436
        unlink_large_chunk(m, v);
1437
        if (rsize < MIN_CHUNK_SIZE)
1438
          set_inuse_and_pinuse(m, v, (rsize + nb));
1439
        else {
1440
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1441
          set_size_and_pinuse_of_free_chunk(r, rsize);
1442
          insert_chunk(m, r, rsize);
1443
        }
1444
        return chunk2mem(v);
1445
      }
1446
    }
1447
    CORRUPTION_ERROR_ACTION(m);
1448
  }
1449
  return 0;
1450
}
1451
 
1452
/* allocate a small request from the best fitting chunk in a treebin */
1453
static void* tmalloc_small(mstate m, size_t nb)
1454
{
1455
  tchunkptr t, v;
1456
  size_t rsize;
1457
  bindex_t i;
1458
  binmap_t leastbit = least_bit(m->treemap);
1459
  compute_bit2idx(leastbit, i);
1460
  v = t = *treebin_at(m, i);
1461
  rsize = chunksize(t) - nb;
1462
 
1463
  while ((t = leftmost_child(t)) != 0) {
1464
    size_t trem = chunksize(t) - nb;
1465
    if (trem < rsize) {
1466
      rsize = trem;
1467
      v = t;
1468
    }
1469
  }
1470
 
1471
  if (RTCHECK(ok_address(m, v))) {
1472
    mchunkptr r = chunk_plus_offset(v, nb);
1473
    assert(chunksize(v) == rsize + nb);
1474
    if (RTCHECK(ok_next(v, r))) {
1475
      unlink_large_chunk(m, v);
1476
      if (rsize < MIN_CHUNK_SIZE)
1477
        set_inuse_and_pinuse(m, v, (rsize + nb));
1478
      else {
1479
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1480
        set_size_and_pinuse_of_free_chunk(r, rsize);
1481
        replace_dv(m, r, rsize);
1482
      }
1483
      return chunk2mem(v);
1484
    }
1485
  }
1486
 
1487
  CORRUPTION_ERROR_ACTION(m);
1488
  return 0;
1489
}
1490
 
1491
/* --------------------------- realloc support --------------------------- */

static void* internal_realloc(struct _reent *reent_ptr, mstate m, void* oldmem, size_t bytes)
{
    if (bytes >= MAX_REQUEST)
    {
        MALLOC_FAILURE_ACTION;
        return 0;
    }

    PREACTION(m);
    {
        mchunkptr oldp = mem2chunk(oldmem);
        size_t oldsize = chunksize(oldp);
        mchunkptr next = chunk_plus_offset(oldp, oldsize);
        mchunkptr newp = 0;
        void* extra = 0;

        /* Try to either shrink or extend into top. Else malloc-copy-free */

        if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
                    ok_next(oldp, next) && ok_pinuse(next)))
        {
            size_t nb = request2size(bytes);
            if (is_mmapped(oldp))
                newp = mmap_resize(m, oldp, nb);
            else if (oldsize >= nb) { /* already big enough */
                size_t rsize = oldsize - nb;
                newp = oldp;
                if (rsize >= MIN_CHUNK_SIZE)
                {
                    mchunkptr remainder = chunk_plus_offset(newp, nb);
                    set_inuse(m, newp, nb);
                    set_inuse_and_pinuse(m, remainder, rsize);
                    extra = chunk2mem(remainder);
                }
            }
            else if (next == m->top && oldsize + m->topsize > nb)
            {
                /* Expand into top */
                size_t newsize = oldsize + m->topsize;
                size_t newtopsize = newsize - nb;
                mchunkptr newtop = chunk_plus_offset(oldp, nb);
                set_inuse(m, oldp, nb);
                newtop->head = newtopsize | PINUSE_BIT;
                m->top = newtop;
                m->topsize = newtopsize;
                newp = oldp;
            }
        }
        else {
            USAGE_ERROR_ACTION(m, oldmem);
            POSTACTION(m);
            return 0;
        }
#if DEBUG
        if (newp != 0) {
            check_inuse_chunk(m, newp); /* Check requires lock */
        }
#endif

        POSTACTION(m);

        if (newp != 0)
        {
            if (extra != 0) {
                _free_r(reent_ptr, extra);
            }
            return chunk2mem(newp);
        }
        else
        {
            void* newmem = _malloc_r(reent_ptr, bytes);
            if (newmem != 0) {
                size_t oc = oldsize - overhead_for(oldp);
                memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
                _free_r(reent_ptr, oldmem);
            }
            return newmem;
        }
    }
    return 0;
}

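/*
  internal_realloc prefers resizing in place: mmapped chunks go through
  mmap_resize, a chunk that is already large enough is split and its
  tail freed as "extra", and a chunk that borders the top chunk grows by
  absorbing space from top.  Only when none of these apply does it fall
  back to allocate-copy-free via _malloc_r and _free_r.
*/
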
/* --------------------------- memalign support -------------------------- */

static void* internal_memalign(mstate m, size_t alignment, size_t bytes)
{
    if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
        return internal_malloc(m, bytes);
    if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
        alignment = MIN_CHUNK_SIZE;
    if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
        size_t a = MALLOC_ALIGNMENT << 1;
        while (a < alignment) a <<= 1;
        alignment = a;
    }

    if (bytes >= MAX_REQUEST - alignment) {
        if (m != 0)  { /* Test isn't needed but avoids compiler warning */
            MALLOC_FAILURE_ACTION;
        }
    }
    else
    {
        size_t nb = request2size(bytes);
        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
        char* mem = (char*)internal_malloc(m, req);
        if (mem != 0)
        {
            void* leader = 0;
            void* trailer = 0;
            mchunkptr p = mem2chunk(mem);

            PREACTION(m);

            if ((((size_t)(mem)) % alignment) != 0)  /* misaligned */
            {
                /*
                  Find an aligned spot inside chunk.  Since we need to give
                  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
                  the first calculation places us at a spot with less than
                  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
                  We've allocated enough total room so that this is always
                  possible.
                */
                char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
                                                       alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
                char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
                             br : br+alignment;
                mchunkptr newp = (mchunkptr)pos;
                size_t leadsize = pos - (char*)(p);
                size_t newsize = chunksize(p) - leadsize;

                if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
                    newp->prev_foot = p->prev_foot + leadsize;
                    newp->head = newsize;
                }
                else { /* Otherwise, give back leader, use the rest */
                    set_inuse(m, newp, newsize);
                    set_inuse(m, p, leadsize);
                    leader = chunk2mem(p);
                }
                p = newp;
            }

            /* Give back spare room at the end */
            if (!is_mmapped(p))
            {
                size_t size = chunksize(p);
                if (size > nb + MIN_CHUNK_SIZE)
                {
                    size_t remainder_size = size - nb;
                    mchunkptr remainder = chunk_plus_offset(p, nb);
                    set_inuse(m, p, nb);
                    set_inuse(m, remainder, remainder_size);
                    trailer = chunk2mem(remainder);
                }
            }

            assert(chunksize(p) >= nb);
            assert((((size_t)(chunk2mem(p))) % alignment) == 0);
            check_inuse_chunk(m, p);
            POSTACTION(m);
            if (leader != 0) {
                internal_free(m, leader);
            }
            if (trailer != 0) {
                internal_free(m, trailer);
            }
            return chunk2mem(p);
        }
    }
    return 0;
}

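/*
  The rounding step above relies on alignment being a power of two:
  (addr + alignment - 1) & -alignment rounds addr up to the next
  multiple of alignment.  For example, with alignment = 0x10 and
  addr = 0x1003, (0x1003 + 0xF) & ~0xF = 0x1010.
*/
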
void* memalign(size_t alignment, size_t bytes)
{
    return internal_memalign(gm, alignment, bytes);
}

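/*
  Illustrative usage sketch (not compiled in): obtaining page-aligned
  storage through memalign.  The 4096-byte alignment is only an example
  value, and free() is assumed to be the usual C-library wrapper around
  _free_r provided elsewhere.
*/
#if 0
static void memalign_example(void)
{
    void* buf = memalign(4096, 64 * 1024);        /* 64 KiB, 4 KiB-aligned */
    if (buf != 0) {
        assert(((size_t)buf & (4096 - 1)) == 0);  /* alignment guaranteed */
        free(buf);                                /* release as usual */
    }
}
#endif
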
void* _malloc_r(struct _reent *reent_ptr, size_t bytes) {
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it

     The ugly goto's here ensure that postaction occurs along all paths.
  */

  ensure_initialization(); /* initialize in sys_alloc if not using locks */

  PREACTION(gm);
  {
    void* mem;
    size_t nb;

    if (bytes <= MAX_SMALL_REQUEST)
    {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) /* Remainderless fit to a smallbin. */
      {
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
      else if (nb > gm->dvsize)
      {
        if (smallbits != 0) /* Use chunk in next nonempty smallbin */
        {
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else
          {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0)
        {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else
    {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0)
      {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }
    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);
    return mem;
  }

  return 0;
}

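/*
  Small-bin bitmap trick used above: smallmap has one bit per small bin.
  Shifting it right by idx puts bin idx at bit 0, so (smallbits & 0x3U)
  tests bins idx and idx+1 at once (a chunk from bin idx+1 is at most
  one granule larger, hence still a remainderless fit), and
  idx += ~smallbits & 1 steps to bin idx+1 exactly when bin idx is empty.
*/
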
void _free_r(struct _reent *reent_ptr, void* mem) {
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */

  if (mem != 0)
  {
    mchunkptr p  = mem2chunk(mem);

#define fm gm

    PREACTION(fm);
    {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p)))
      {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p))
        {
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p))
          {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else
          {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev)))  /* consolidate backward */
            {
              if (p != fm->dv)
              {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS)
              {
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next)))
        {
          if (!cinuse(next))    /* consolidate forward */
          {
            if (next == fm->top)
            {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv)
              {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv)
            {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else
            {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv)
              {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          if (is_small(psize))
          {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else
          {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#undef fm
}

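/*
  Freeing walks at most one step in each direction: if the previous
  chunk is free (pinuse clear) it is merged backward, and if the next
  chunk is free it is merged forward, with special handling when the
  neighbor is the top chunk or the dv chunk.  The coalesced chunk then
  goes into a smallbin or a treebin depending on its final size.
*/
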
void* _calloc_r(struct _reent *reent_ptr, size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = _malloc_r(reent_ptr, req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
  return mem;
}

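/*
  Overflow check in _calloc_r: if both n_elements and elem_size fit in
  16 bits, their product cannot overflow a 32-bit size_t, so the cheap
  mask test (n_elements | elem_size) & ~0xffff skips the division
  entirely.  Otherwise req / n_elements != elem_size detects wraparound,
  e.g. n_elements = 0x10000 and elem_size = 0x10001 with a 32-bit size_t.
*/
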
void* _realloc_r (struct _reent *reent_ptr, void* oldmem, size_t bytes){
  if (oldmem == 0)
    return _malloc_r(reent_ptr, bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    _free_r(reent_ptr, oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    mstate m = get_mstate_for(mem2chunk(oldmem));
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    return internal_realloc(reent_ptr, m, oldmem, bytes);
  }
}


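/*
  Illustrative sketch (not compiled in): growing a buffer through the
  reentrant entry points.  The helper name and the way the caller gets
  its struct _reent pointer are assumptions for the example only; real
  callers normally go through the plain malloc/realloc/free wrappers.
*/
#if 0
static char* grow_buffer(struct _reent *r, char* buf, size_t new_size)
{
    /* Keep the old pointer until realloc succeeds, so the data is not
       lost (and can still be freed) if the request fails. */
    char* bigger = (char*)_realloc_r(r, buf, new_size);
    return (bigger != 0) ? bigger : buf;
}
#endif
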
/* -----------------------------------------------------------------------
History:
    V2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)
      * Use zeros instead of prev foot for is_mmapped
      * Add mspace_track_large_chunks; thanks to Jean Brouwers
      * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
      * Fix insufficient sys_alloc padding when using 16byte alignment
      * Fix bad error check in mspace_footprint
      * Adaptations for ptmalloc; thanks to Wolfram Gloger.
      * Reentrant spin locks; thanks to Earl Chew and others
      * Win32 improvements; thanks to Niall Douglas and Earl Chew
      * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
      * Extension hook in malloc_state
      * Various small adjustments to reduce warnings on some compilers
      * Various configuration extensions/changes for more platforms. Thanks
         to all who contributed these.

    V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
      * Add max_footprint functions
      * Ensure all appropriate literals are size_t
      * Fix conditional compilation problem for some #define settings
      * Avoid concatenating segments with the one provided
        in create_mspace_with_base
      * Rename some variables to avoid compiler shadowing warnings
      * Use explicit lock initialization.
      * Better handling of sbrk interference.
      * Simplify and fix segment insertion, trimming and mspace_destroy
      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
      * Thanks especially to Dennis Flanagan for help on these.

    V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
      * Fix memalign brace error.

    V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
      * Fix improper #endif nesting in C++
      * Add explicit casts needed for C++

    V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
      * Use trees for large bins
      * Support mspaces
      * Use segments to unify sbrk-based and mmap-based system allocation,
        removing need for emulation on most platforms without sbrk.
      * Default safety checks
      * Optional footer checks. Thanks to William Robertson for the idea.
      * Internal code refactoring
      * Incorporate suggestions and platform-specific changes.
        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
        Aaron Bachmann, Emery Berger, and others.
      * Speed up non-fastbin processing enough to remove fastbins.
      * Remove useless cfree() to avoid conflicts with other apps.
      * Remove internal memcpy, memset. Compilers handle builtins better.
      * Remove some options that no one ever used and rename others.

    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
      * Fix malloc_state bitmap array misdeclaration

    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
      * Allow tuning of FIRST_SORTED_BIN_SIZE
      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
      * Better detection and support for non-contiguousness of MORECORE.
        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
      * Bypass most of malloc if no frees. Thanks to Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
      * Raised default trim and map thresholds to 256K.
      * Fix mmap-related #defines. Thanks to Lubos Lunak.
      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
      * Branch-free bin calculation
      * Default trim and mmap thresholds now 256K.

    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
      * Introduce independent_comalloc and independent_calloc.
        Thanks to Michael Pachos for motivation and help.
      * Make optional .h file available
      * Allow > 2GB requests on 32bit systems.
      * new WIN32 sbrk, mmap, munmap, lock code from .
        Thanks also to Andreas Mueller ,
        and Anonymous.
      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
        helping test this.)
      * memalign: check alignment arg
      * realloc: don't try to shift chunks backwards, since this
        leads to more fragmentation in some programs and doesn't
        seem to help in any others.
      * Collect all cases in malloc requiring system memory into sysmalloc
      * Use mmap as backup to sbrk
      * Place all internal state in malloc_state
      * Introduce fastbins (although similar to 2.5.1)
      * Many minor tunings and cosmetic improvements
      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
        Thanks to Tony E. Bennett  and others.
      * Include errno.h to support default failure action.

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong 
         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
          (e.g. WIN32 platforms)
         * Cleanup header file inclusion for WIN32 platforms
         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
           memory allocation routines
         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
           usage of 'assert' in non-WIN32 code
         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
           avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from  colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
         (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
          from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
          with gcc & native cc (hp, dec only) allowing
          Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
         structure of old version,  but most details differ.)

*/