/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
  comments, complaints, performance data, etc to dl@cs.oswego.edu

* Version 2.8.6 Wed Aug 29 06:57:58 2012  Doug Lea
   Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that you may already by default be using a C
  library containing a malloc that is based on some version of this
  malloc (for example in linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.
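
  For example, a minimal build and program might look like the sketch
  below (file names and the compile command are illustrative assumptions,
  not requirements; once malloc.o is linked in, ordinary calls are served
  by this allocator):
    // cc -O3 -c malloc.c
    // cc -O3 main.c malloc.o -o main
    #include <stdlib.h>          // standard prototypes are enough here
    int main(void) {
      void* p = malloc(100);     // handled by the malloc in this file
      if (p != 0) {
        p = realloc(p, 200);     // so are realloc and free
        free(p);
      }
      return 0;
    }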

* Vital statistics:

  Supported pointer/size_t representation:       4 or 8 bytes
       size_t MUST be an unsigned type of the same width as
       pointers. (If you are using an ancient system that declares
       size_t as a signed type, or need it to be a different width
       than pointers, you can use a previous release of this malloc
       (e.g. 2.7.2) supporting these.)

  Alignment:                                     8 bytes (minimum)
       This suffices for nearly all current machines and C compilers.
       However, you can define MALLOC_ALIGNMENT to be wider than this
       if necessary (up to 128 bytes), at the expense of using more space.

  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4-byte sizes)
                                          8 or 16 bytes (if 8-byte sizes)
       Each malloced chunk has a hidden word of overhead holding size
       and status information, and an additional cross-check word
       if FOOTERS is defined.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
                          8-byte ptrs:  32 bytes    (including overhead)

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.
       The maximum overhead wastage (i.e., number of extra bytes
       allocated beyond those requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is about
       32 bytes plus the remainder from a system page (the minimal
       mmap unit); typically 4096 or 8192 bytes.

  Security: static-safe; optionally more or less
       The "security" of malloc refers to the ability of malicious
       code to accentuate the effects of errors (for example, freeing
       space that is not currently malloc'ed or overwriting past the
       ends of chunks) in code that calls malloc.  This malloc
       guarantees not to modify any memory locations below the base of
       heap, i.e., static variables, even in the presence of usage
       errors.  The routines additionally detect most improper frees
       and reallocs.  All this holds as long as the static bookkeeping
       for malloc itself is not corrupted by some other means.  This
       is only one aspect of security -- these checks do not, and
       cannot, detect all possible programming errors.

       If FOOTERS is defined nonzero, then each allocated chunk
       carries an additional check word to verify that it was malloced
       from its space.  These check words are the same within each
       execution of a program using malloc, but differ across
       executions, so externally crafted fake chunks cannot be
       freed. This improves security by rejecting frees/reallocs that
       could corrupt heap memory, in addition to the always-on checks
       preventing writes to statics.  This may further improve
       security at the expense of time and space overhead.  (Note that
       FOOTERS may also be worth using with MSPACES.)

       By default detected errors cause the program to abort (calling
       "abort()"). You can override this to instead proceed past
       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
       has no effect, and a malloc that encounters a bad address
       caused by user overwrites will ignore the bad address by
       dropping pointers and indices to all known memory. This may
       be appropriate for programs that should continue if at all
       possible in the face of programming errors, although they may
       run out of memory because dropped memory is never reclaimed.

       If you don't like either of these options, you can define
       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
       else. And if you are sure that your program using malloc has
       no errors or vulnerabilities, you can define INSECURE to 1,
       which might (or might not) provide a small performance improvement.

       It is also possible to limit the maximum total allocatable
       space, using malloc_set_footprint_limit. This is not
       designed as a security feature in itself (calls to set limits
       are not screened or privileged), but may be useful as one
       aspect of a secure implementation.
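
       As a rough sketch (the rounding and return conventions are best
       checked against the declaration of malloc_set_footprint_limit
       later in this file), capping the total footprint at 64MB could
       look like:
         size_t granted = malloc_set_footprint_limit((size_t)64 * 1024 * 1024);
         // 'granted' is the limit actually in effect; requests that would
         // push the footprint past it simply fail (malloc returns 0).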

  Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
       When USE_LOCKS is defined, each public call to malloc, free,
       etc is surrounded with a lock. By default, this uses a plain
       pthread mutex, win32 critical section, or a spin-lock if
       available for the platform and not disabled by setting
       USE_SPIN_LOCKS=0.  However, if USE_RECURSIVE_LOCKS is defined,
       recursive versions are used instead (which are not required for
       base functionality but may be needed in layered extensions).
       Using a global lock is not especially fast, and can be a major
       bottleneck.  It is designed only to provide minimal protection
       in concurrent environments, and to provide a basis for
       extensions.  If you are using malloc in a concurrent program,
       consider instead using nedmalloc
       (http://www.nedprod.com/programs/portable/nedmalloc/) or
       ptmalloc (See http://www.malloc.de), which are derived from
       versions of this malloc.

  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
       This malloc can use unix sbrk or any emulation (invoked using
       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
       memory.  On most unix systems, it tends to work best if both
       MORECORE and MMAP are enabled.  On Win32, it uses emulations
       based on VirtualAlloc. It also uses common C library functions
       like memset.

  Compliance: I believe it is compliant with the Single Unix Specification
       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
       others as well.

* Overview of algorithms

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and
  tunable.  Consistent balance across these factors results in a good
  general-purpose allocator for malloc-intensive programs.

  In most ways, this malloc is a best-fit allocator. Generally, it
  chooses the best-fitting existing chunk for a request, with ties
  broken in approximately least-recently-used order. (This strategy
  normally maintains low fragmentation.) However, for requests less
  than 256 bytes, it deviates from best-fit when there is not an
  exactly fitting available chunk by preferring to use space adjacent
  to that used for the previous small request, as well as by breaking
  ties in approximately most-recently-used order. (These enhance
  locality of series of small allocations.)  And for very large requests
  (>= 256Kb by default), it relies on system memory mapping
  facilities, if supported.  (This helps avoid carrying around and
  possibly fragmenting memory used only for large chunks.)

  All operations (except malloc_stats and mallinfo) have execution
  times that are bounded by a constant factor of the number of bits in
  a size_t, not counting any clearing in calloc or copying in realloc,
  or actions surrounding MORECORE and MMAP that have times
  proportional to the number of non-contiguous regions returned by
  system allocation routines, which is often just 1. In real-time
  applications, you can optionally suppress segment traversals using
  NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
  system allocators return non-contiguous spaces, at the typical
  expense of carrying around more memory and increased fragmentation.

  The implementation is not very modular and seriously overuses
  macros. Perhaps someday all C compilers will do as good a job
  inlining modular code as can now be done by brute-force expansion,
  but for now, enough of them seem not to.

  Some compilers issue a lot of warnings about code that is
  dead/unreachable only on some platforms, and also about intentional
  uses of negation on unsigned types. All known cases of each can be
  ignored.

  For a longer but out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

* MSPACES
  If MSPACES is defined, then in addition to malloc, free, etc.,
  this file also defines mspace_malloc, mspace_free, etc. These
  are versions of malloc routines that take an "mspace" argument
  obtained using create_mspace, to control all internal bookkeeping.
  If ONLY_MSPACES is defined, only these versions are compiled.
  So if you would like to use this allocator for only some allocations,
  and your system malloc for others, you can compile with
  ONLY_MSPACES and then do something like...
    static mspace mymspace = create_mspace(0,0); // for example
    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)

  (Note: If you only need one instance of an mspace, you can instead
  use "USE_DL_PREFIX" to relabel the global malloc.)

  You can similarly create thread-local allocators by storing
  mspaces as thread-locals. For example:
    static __thread mspace tlms = 0;
    void*  tlmalloc(size_t bytes) {
      if (tlms == 0) tlms = create_mspace(0, 0);
      return mspace_malloc(tlms, bytes);
    }
    void  tlfree(void* mem) { mspace_free(tlms, mem); }
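
  When such a space is no longer needed, it can be torn down with
  destroy_mspace, which releases to the system all memory the space
  had obtained (a sketch; wiring this to thread exit is left to the
  application):
    void tldestroy(void) {
      if (tlms != 0) { destroy_mspace(tlms); tlms = 0; }
    }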

  Unless FOOTERS is defined, each mspace is completely independent.
  You cannot allocate from one and free to another (although
  conformance is only weakly checked, so usage errors are not always
  caught). If FOOTERS is defined, then each chunk carries around a tag
  indicating its originating mspace, and frees are directed to their
  originating spaces. Normally, this requires use of locks.

 -------------------------  Compile-time options ---------------------------

Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly cast. You can also
use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.

WIN32                    default: defined if _WIN32 defined
  Defining WIN32 sets up defaults for MS environment and compilers.
  Otherwise defaults are for unix. Beware that there seem to be some
  cases where this malloc might not be a pure drop-in replacement for
  Win32 malloc: Random-looking failures from Win32 GDI API's (e.g.,
  SetDIBits()) may be due to bugs in some video driver implementations
  when pixel buffers are malloc()ed, and the region spans more than
  one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
  default granularity, pixel buffers may straddle virtual allocation
  regions more often than when using the Microsoft allocator.  You can
  avoid this by using VirtualAlloc() and VirtualFree() for all pixel
  buffers rather than using malloc().  If this is not possible,
  recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
  in cases where MSC and gcc (cygwin) are known to differ on WIN32,
  conditions use _MSC_VER to distinguish them.

DLMALLOC_EXPORT       default: extern
  Defines how public APIs are declared. If you want to export via a
  Windows DLL, you might define this as
    #define DLMALLOC_EXPORT extern  __declspec(dllexport)
  If you want a POSIX ELF shared object, you might use
    #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))

MALLOC_ALIGNMENT         default: (size_t)(2 * sizeof(void *))
  Controls the minimum alignment for malloc'ed chunks.  It must be a
  power of two and at least 8, even on machines for which smaller
  alignments would suffice. It may be defined as larger than this
  though. Note however that code and data structures are optimized for
  the case of 8-byte alignment.
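  For example, to get 16-byte-aligned chunks you might build with a
  wider setting (the cast mirrors the size_t caution above):
    #define MALLOC_ALIGNMENT ((size_t)16U)
  or pass the equivalent -D option on the compile line.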

MSPACES                  default: 0 (false)
  If true, compile in support for independent allocation spaces.
  This is only supported if HAVE_MMAP is true.

ONLY_MSPACES             default: 0 (false)
  If true, only compile in mspace versions, not regular versions.

USE_LOCKS                default: 0 (false)
  Causes each call to each public routine to be surrounded with
  pthread or WIN32 mutex lock/unlock. (If set true, this can be
  overridden on a per-mspace basis for mspace versions.) If set to a
  non-zero value other than 1, locks are used, but their
  implementation is left out, so lock functions must be supplied manually,
  as described below.

USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and spin locks available
  If true, uses custom spin locks for locking. This is currently
  supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
  MS compilers.  Otherwise, posix locks or win32 critical sections are
  used.

USE_RECURSIVE_LOCKS      default: not defined
  If defined nonzero, uses recursive (aka reentrant) locks, otherwise
  uses plain mutexes. This is not required for malloc proper, but may
  be needed for layered allocators such as nedmalloc.

LOCK_AT_FORK            default: not defined
  If defined nonzero, performs pthread_atfork upon initialization
  to initialize child lock while holding parent lock. The implementation
  assumes that pthread locks (not custom locks) are being used. In other
  cases, you may need to customize the implementation.

FOOTERS                  default: 0
  If true, provide extra checking and dispatching by placing
  information in the footers of allocated chunks. This adds
  space and time overhead.

INSECURE                 default: 0
  If true, omit checks for usage errors and heap space overwrites.

USE_DL_PREFIX            default: NOT defined
  Causes compiler to prefix all public routines with the string 'dl'.
  This can be useful when you only want to use this malloc in one part
  of a program, using your regular system malloc elsewhere.
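  With the prefix in place the entry points are named dlmalloc, dlfree,
  dlcalloc, dlrealloc, and so on, so opted-in code calls them explicitly,
  e.g.:
    void* p = dlmalloc(100);   // this allocator
    void* q = malloc(100);     // untouched system malloc
    dlfree(p);
    free(q);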

MALLOC_INSPECT_ALL       default: NOT defined
  If defined, compiles malloc_inspect_all and mspace_inspect_all, that
  perform traversal of all heap space.  Unless access to these
  functions is otherwise restricted, you probably do not want to
  include them in secure implementations.
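  As a sketch of the intended use (check the handler signature against
  the declaration later in this file), a traversal that totals in-use
  bytes might look like:
    static void count_used(void* start, void* end,
                           size_t used_bytes, void* arg) {
      *(size_t*)arg += used_bytes;   // 0 for free chunks, else bytes in use
    }
    ...
    size_t total = 0;
    malloc_inspect_all(count_used, &total);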

ABORT                    default: defined as abort()
  Defines how to abort on failed checks.  On most systems, a failed
  check cannot die with an "assert" or even print an informative
  message, because the underlying print routines in turn call malloc,
  which will fail again.  Generally, the best policy is to simply call
  abort(). It's not very useful to do more than this because many
  errors due to overwriting will show up as address faults (null, odd
  addresses etc) rather than malloc-triggered checks, so will also
  abort.  Also, most compilers know that abort() does not return, so
  can better optimize code conditionally calling it.

PROCEED_ON_ERROR           default: defined as 0 (false)
  Controls whether detected bad addresses cause them to be bypassed
  rather than aborting. If set, detected bad arguments to free and
  realloc are ignored. And all bookkeeping information is zeroed out
  upon a detected overwrite of freed heap space, thus losing the
  ability to ever return it from malloc again, but enabling the
  application to proceed. If PROCEED_ON_ERROR is defined, the
  static variable malloc_corruption_error_count is compiled in
  and can be examined to see if errors have occurred. This option
  generates slower code than the default abort policy.

DEBUG                    default: NOT defined
  The DEBUG setting is mainly intended for people trying to modify
  this code or diagnose problems when porting to new platforms.
  However, it may also be able to better isolate user errors than just
  using runtime checks.  The assertions in the check routines spell
  out in more detail the assumptions and invariants underlying the
  algorithms.  The checking is fairly extensive, and will slow down
  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
  set will attempt to check every non-mmapped allocated and free chunk
  in the course of computing the summaries.

ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
  Debugging assertion failures can be nearly impossible if your
  version of the assert macro causes malloc to be called, which will
  lead to a cascade of further failures, blowing the runtime stack.
  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
  which will usually make debugging easier.

MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
  The action to take before "return 0" when malloc fails to be able to
  return memory because there is none available.
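  For example, to route failures through your own reporting hook while
  keeping the errno behavior (my_oom_report is a hypothetical function):
    #define MALLOC_FAILURE_ACTION  { errno = ENOMEM; my_oom_report(); }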

HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
  True if this system supports sbrk or an emulation of it.

MORECORE                  default: sbrk
  The name of the sbrk-style system routine to call to obtain more
  memory.  See below for guidance on writing custom MORECORE
  functions. The type of the argument to sbrk/MORECORE varies across
  systems.  It cannot be size_t, because it supports negative
  arguments, so it is normally the signed type of the same width as
  size_t (sometimes declared as "intptr_t").  It doesn't much matter
  though. Internally, we only call it with arguments less than half
  the max value of a size_t, which should work across all reasonable
  possibilities, although sometimes generating compiler warnings.
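  As a rough sketch of a custom MORECORE that doles out a fixed static
  arena (names and sizes are illustrative; a real version should also
  set MORECORE_CONTIGUOUS and MORECORE_CANNOT_TRIM to match its
  behavior, as done here):
    static char arena[4 * 1024 * 1024];   // all memory malloc will ever get
    static char* cur_brk = arena;
    void* my_morecore(intptr_t increment) {   // intptr_t from <stdint.h>
      if (increment >= 0 &&
          (size_t)(arena + sizeof(arena) - cur_brk) >= (size_t)increment) {
        char* old = cur_brk;
        cur_brk += increment;     // contiguous, sbrk-like growth
        return old;               // my_morecore(0) returns the current break
      }
      return (void*)(-1);         // failure convention, as with sbrk
    }
    #define MORECORE my_morecore
    #define MORECORE_CONTIGUOUS 1
    #define MORECORE_CANNOT_TRIM 1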

MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
  If true, take advantage of fact that consecutive calls to MORECORE
  with positive arguments always return contiguous increasing
  addresses.  This is true of unix sbrk. It does not hurt too much to
  set it true anyway, since malloc copes with non-contiguities.
  Setting it false when definitely non-contiguous saves time
  and possibly wasted space it would take to discover this though.

MORECORE_CANNOT_TRIM      default: NOT defined
  True if MORECORE cannot release space back to the system when given
  negative arguments. This is generally necessary only if you are
  using a hand-crafted MORECORE function that cannot handle negative
  arguments.

NO_SEGMENT_TRAVERSAL       default: 0
  If non-zero, suppresses traversals of memory segments
  returned by either MORECORE or CALL_MMAP. This disables
  merging of segments that are contiguous, and selectively
  releasing them to the OS if unused, but bounds execution times.

HAVE_MMAP                 default: 1 (true)
  True if this system supports mmap or an emulation of it.  If so, and
  HAVE_MORECORE is not true, MMAP is used for all system
  allocation. If set and HAVE_MORECORE is true as well, MMAP is
  primarily used to directly allocate very large blocks. It is also
  used as a backup strategy in cases where MORECORE fails to provide
  space from system. Note: A single call to MUNMAP is assumed to be
  able to unmap memory that may have been allocated using multiple calls
  to MMAP, so long as they are adjacent.

HAVE_MREMAP               default: 1 on linux, else 0
  If true realloc() uses mremap() to re-allocate large blocks and
  extend or shrink allocation spaces.

MMAP_CLEARS               default: 1 except on WINCE.
  True if mmap clears memory so calloc doesn't need to. This is true
  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.

USE_BUILTIN_FFS            default: 0 (i.e., not used)
  Causes malloc to use the builtin ffs() function to compute indices.
  Some compilers may recognize and intrinsify ffs to be faster than the
  supplied C version. Also, the case of x86 using gcc is special-cased
  to an asm instruction, so is already as fast as it can be, and so
  this setting has no effect. Similarly for Win32 under recent MS compilers.
  (On most x86s, the asm version is only slightly faster than the C version.)

malloc_getpagesize         default: derive from system includes, or 4096.
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units.  This may be (and
  usually is) a function rather than a constant. This is ignored
  if WIN32, where page size is determined using getSystemInfo during
  initialization.

USE_DEV_RANDOM             default: 0 (i.e., not used)
  Causes malloc to use /dev/random to initialize secure magic seed for
  stamping footers. Otherwise, the current time is used.

NO_MALLINFO                default: 0
  If defined, don't compile "mallinfo". This can be a simple way
  of dealing with mismatches between system declarations and
  those in this file.

MALLINFO_FIELD_TYPE        default: size_t
  The type of the fields in the mallinfo struct. This was originally
  defined as "int" in SVID etc, but is more usefully defined as
  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
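  When mallinfo is compiled in, a quick footprint check might look like
  this sketch (uordblks is total allocated space, fordblks total free
  space, matching the struct mallinfo fields declared later in this file):
    struct mallinfo mi = mallinfo();
    printf("in use: %lu  free: %lu\n",
           (unsigned long)mi.uordblks, (unsigned long)mi.fordblks);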

NO_MALLOC_STATS            default: 0
  If defined, don't compile "malloc_stats". This avoids calls to
  fprintf and bringing in stdio dependencies you might not want.

REALLOC_ZERO_BYTES_FREES    default: not defined
  This should be set if a call to realloc with zero bytes should
  be the same as a call to free. Some people think it should. Otherwise,
  since this malloc returns a unique pointer for malloc(0), so does
  realloc(p, 0).

LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H  default: NOT defined unless on WIN32
  Define these if your system does not have these header files.
  You might need to manually insert some of the declarations they provide.

DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
                                system_info.dwAllocationGranularity in WIN32,
                                otherwise 64K.
      Also settable using mallopt(M_GRANULARITY, x)
  The unit for allocating and deallocating memory from the system.  On
  most systems with contiguous MORECORE, there is no reason to
  make this more than a page. However, systems with MMAP tend to
  either require or encourage larger granularities.  You can increase
  this value to prevent system allocation functions from being called so
  often, especially if they are slow.  The value must be at least one
  page and must be a power of two.  Setting to 0 causes initialization
  to either page size or win32 region size.  (Note: In previous
  versions of malloc, the equivalent of this option was called
  "TOP_PAD")

DEFAULT_TRIM_THRESHOLD    default: 2MB
      Also settable using mallopt(M_TRIM_THRESHOLD, x)
  The maximum amount of unused top-most memory to keep before
  releasing via malloc_trim in free().  Automatic trimming is mainly
  useful in long-lived programs using contiguous MORECORE.  Because
  trimming via sbrk can be slow on some systems, and can sometimes be
  wasteful (in cases where programs immediately afterward allocate
  more large chunks) the value should be high enough so that your
  overall system performance would improve by releasing this much
  memory.  As a rough guide, you might set to a value close to the
  average size of a process (program) running on your system.
  Releasing this much memory would allow such a process to run in
  memory.  Generally, it is worth tuning trim thresholds when a
  program undergoes phases where several large chunks are allocated
  and released in ways that can reuse each other's storage, perhaps
  mixed with phases where there are no such chunks at all. The trim
  value must be greater than page size to have any useful effect.  To
  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
  some people use of mallocing a huge space and then freeing it at
  program startup, in an attempt to reserve system memory, doesn't
  have the intended effect under automatic trimming, since that memory
  will immediately be returned to the system.

DEFAULT_MMAP_THRESHOLD       default: 256K
      Also settable using mallopt(M_MMAP_THRESHOLD, x)
  The request size threshold for using MMAP to directly service a
  request. Requests of at least this size that cannot be allocated
  using already-existing space will be serviced via mmap.  (If enough
  normal freed space already exists it is used instead.)  Using mmap
  segregates relatively large chunks of memory so that they can be
  individually obtained and released from the host system. A request
  serviced through mmap is never reused by any other request (at least
  not directly; the system may just so happen to remap successive
  requests to the same locations).  Segregating space in this way has
  the benefits that: Mmapped space can always be individually released
  back to the system, which helps keep the system level memory demands
  of a long-lived program low.  Also, mapped memory doesn't become
  `locked' between other chunks, as can happen with normally allocated
  chunks, which means that even trimming via malloc_trim would not
  release them.  However, it has the disadvantage that the space
  cannot be reclaimed, consolidated, and then used to service later
  requests, as happens with normal chunks.  The advantages of mmap
  nearly always outweigh disadvantages for "large" chunks, but the
  value of "large" may vary across systems.  The default is an
  empirically derived value that works well in most systems. You can
  disable mmap by setting to MAX_SIZE_T.
508
 
508
 
509
MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
509
MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
510
  The number of consolidated frees between checks to release
510
  The number of consolidated frees between checks to release
511
  unused segments when freeing. When using non-contiguous segments,
511
  unused segments when freeing. When using non-contiguous segments,
512
  especially with multiple mspaces, checking only for topmost space
512
  especially with multiple mspaces, checking only for topmost space
513
  doesn't always suffice to trigger trimming. To compensate for this,
513
  doesn't always suffice to trigger trimming. To compensate for this,
514
  free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
514
  free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
515
  current number of segments, if greater), try to release unused
515
  current number of segments, if greater), try to release unused
516
  segments to the OS when freeing chunks that result in
516
  segments to the OS when freeing chunks that result in
517
  consolidation. The best value for this parameter is a compromise
517
  consolidation. The best value for this parameter is a compromise
518
  between slowing down frees with relatively costly checks that
518
  between slowing down frees with relatively costly checks that
519
  rarely trigger versus holding on to unused memory. To effectively
519
  rarely trigger versus holding on to unused memory. To effectively
520
  disable, set to MAX_SIZE_T. This may lead to a very slight speed
520
  disable, set to MAX_SIZE_T. This may lead to a very slight speed
521
  improvement at the expense of carrying around more memory.
521
  improvement at the expense of carrying around more memory.
522
*/
522
*/
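/*
  Illustrative tuning sketch (not part of the original source).  The
  thresholds documented above can also be changed at run time through
  mallopt(); the M_GRANULARITY parameter is assumed to be available in
  this build.
*/
#if 0
  mallopt(M_TRIM_THRESHOLD, 1024 * 1024);  /* trim top when 1MB is unused    */
  mallopt(M_MMAP_THRESHOLD,  256 * 1024);  /* mmap requests of 256KB and up  */
  mallopt(M_GRANULARITY,      64 * 1024);  /* grow the heap in 64KB units    */
#endif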
523
 
523
 
524
#include 
524
#include 
525
#include 
525
#include 
526
#include 
526
#include 
527
 
527
 
528
/* Version identifier to allow people to support multiple versions */
528
/* Version identifier to allow people to support multiple versions */
529
#ifndef DLMALLOC_VERSION
529
#ifndef DLMALLOC_VERSION
530
#define DLMALLOC_VERSION 20806
530
#define DLMALLOC_VERSION 20806
531
#endif /* DLMALLOC_VERSION */
531
#endif /* DLMALLOC_VERSION */
532
 
532
 
533
 
533
 
534
/*
534
/*
535
  malloc(size_t n)
535
  malloc(size_t n)
536
  Returns a pointer to a newly allocated chunk of at least n bytes, or
536
  Returns a pointer to a newly allocated chunk of at least n bytes, or
537
  null if no space is available, in which case errno is set to ENOMEM
537
  null if no space is available, in which case errno is set to ENOMEM
538
  on ANSI C systems.
538
  on ANSI C systems.
539
 
539
 
540
  If n is zero, malloc returns a minimum-sized chunk. (The minimum
540
  If n is zero, malloc returns a minimum-sized chunk. (The minimum
541
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
541
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
542
  systems.)  Note that size_t is an unsigned type, so calls with
542
  systems.)  Note that size_t is an unsigned type, so calls with
543
  arguments that would be negative if signed are interpreted as
543
  arguments that would be negative if signed are interpreted as
544
  requests for huge amounts of space, which will often fail. The
544
  requests for huge amounts of space, which will often fail. The
545
  maximum supported value of n differs across systems, but is in all
545
  maximum supported value of n differs across systems, but is in all
546
  cases less than the maximum representable value of a size_t.
546
  cases less than the maximum representable value of a size_t.
547
*/
547
*/
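/*
  Usage sketch (not part of the original source): a failed allocation
  returns null and, on ANSI systems, sets errno to ENOMEM as noted above.
*/
#if 0
  void* mem = malloc(100);
  if (mem == 0) {
      /* out of memory: errno == ENOMEM here on ANSI C systems */
  }
#endif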
548
 
548
 
549
/*
549
/*
550
  free(void* p)
550
  free(void* p)
551
  Releases the chunk of memory pointed to by p, which had been previously
551
  Releases the chunk of memory pointed to by p, which had been previously
552
  allocated using malloc or a related routine such as realloc.
552
  allocated using malloc or a related routine such as realloc.
553
  It has no effect if p is null. If p was not malloced or already
553
  It has no effect if p is null. If p was not malloced or already
554
  freed, free(p) will by default cause the current program to abort.
554
  freed, free(p) will by default cause the current program to abort.
555
*/
555
*/
556
 
556
 
557
/*
557
/*
558
  calloc(size_t n_elements, size_t element_size);
558
  calloc(size_t n_elements, size_t element_size);
559
  Returns a pointer to n_elements * element_size bytes, with all locations
559
  Returns a pointer to n_elements * element_size bytes, with all locations
560
  set to zero.
560
  set to zero.
561
*/
561
*/
562
 
562
 
563
/*
563
/*
564
  realloc(void* p, size_t n)
564
  realloc(void* p, size_t n)
565
  Returns a pointer to a chunk of size n that contains the same data
565
  Returns a pointer to a chunk of size n that contains the same data
566
  as does chunk p up to the minimum of (n, p's size) bytes, or null
566
  as does chunk p up to the minimum of (n, p's size) bytes, or null
567
  if no space is available.
567
  if no space is available.
568
 
568
 
569
  The returned pointer may or may not be the same as p. The algorithm
569
  The returned pointer may or may not be the same as p. The algorithm
570
  prefers extending p in most cases when possible, otherwise it
570
  prefers extending p in most cases when possible, otherwise it
571
  employs the equivalent of a malloc-copy-free sequence.
571
  employs the equivalent of a malloc-copy-free sequence.
572
 
572
 
573
  If p is null, realloc is equivalent to malloc.
573
  If p is null, realloc is equivalent to malloc.
574
 
574
 
575
  If space is not available, realloc returns null, errno is set (if on
575
  If space is not available, realloc returns null, errno is set (if on
576
  ANSI) and p is NOT freed.
576
  ANSI) and p is NOT freed.
577
 
577
 
578
  If n is for fewer bytes than already held by p, the newly unused
578
  If n is for fewer bytes than already held by p, the newly unused
579
  space is lopped off and freed if possible.  realloc with a size
579
  space is lopped off and freed if possible.  realloc with a size
580
  argument of zero (re)allocates a minimum-sized chunk.
580
  argument of zero (re)allocates a minimum-sized chunk.
581
 
581
 
582
  The old unix realloc convention of allowing the last-free'd chunk
582
  The old unix realloc convention of allowing the last-free'd chunk
583
  to be used as an argument to realloc is not supported.
583
  to be used as an argument to realloc is not supported.
584
*/
584
*/
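/*
  Usage sketch (not part of the original source): since a failed realloc
  leaves the old block allocated, grow through a temporary so it is not
  leaked on failure.  `p' and `new_size' are placeholders.
*/
#if 0
  void* tmp = realloc(p, new_size);
  if (tmp != 0)
      p = tmp;   /* success: contents preserved, pointer may have moved */
  /* else: p is still valid and must eventually be freed by the caller */
#endif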
585
/*
585
/*
586
  memalign(size_t alignment, size_t n);
586
  memalign(size_t alignment, size_t n);
587
  Returns a pointer to a newly allocated chunk of n bytes, aligned
587
  Returns a pointer to a newly allocated chunk of n bytes, aligned
588
  in accord with the alignment argument.
588
  in accord with the alignment argument.
589
 
589
 
590
  The alignment argument should be a power of two. If the argument is
590
  The alignment argument should be a power of two. If the argument is
591
  not a power of two, the nearest greater power is used.
591
  not a power of two, the nearest greater power is used.
592
  8-byte alignment is guaranteed by normal malloc calls, so don't
592
  8-byte alignment is guaranteed by normal malloc calls, so don't
593
  bother calling memalign with an argument of 8 or less.
593
  bother calling memalign with an argument of 8 or less.
594
 
594
 
595
  Overreliance on memalign is a sure way to fragment space.
595
  Overreliance on memalign is a sure way to fragment space.
596
*/
596
*/
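/*
  Usage sketch (not part of the original source): a 4KB buffer aligned to
  a 64-byte boundary; in this allocator blocks obtained from memalign are
  released with free.
*/
#if 0
  void* buf = memalign(64, 4096);
  if (buf != 0) {
      /* ... use buf ... */
      free(buf);
  }
#endif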
597
 
597
 
598
 
598
 
599
#define DEBUG   1
599
#define DEBUG   1
600
 
600
 
601
 
601
 
602
 
602
 
603
 
603
 
604
 
604
 
605
 
605
 
606
 
606
 
607
 
607
 
608
 
608
 
609
 
609
 
610
 
610
 
611
 
611
 
612
#define assert(x)
612
#define assert(x)
613
 
613
 
614
#define MAX_SIZE_T           (~(size_t)0)
614
#define MAX_SIZE_T           (~(size_t)0)
615
 
615
 
616
#define CALL_DIRECT_MMAP(s) MMAP_DEFAULT(s)
616
#define CALL_DIRECT_MMAP(s) MMAP_DEFAULT(s)
617
 
617
 
618
/* ------------------- size_t and alignment properties -------------------- */
618
/* ------------------- size_t and alignment properties -------------------- */
619
 
619
 
620
/* The byte and bit size of a size_t */
620
/* The byte and bit size of a size_t */
621
#define SIZE_T_SIZE             (sizeof(size_t))
621
#define SIZE_T_SIZE             (sizeof(size_t))
622
#define SIZE_T_BITSIZE          (sizeof(size_t) << 3)
622
#define SIZE_T_BITSIZE          (sizeof(size_t) << 3)
623
 
623
 
624
/* Some constants coerced to size_t */
624
/* Some constants coerced to size_t */
625
/* Annoying but necessary to avoid errors on some platforms */
625
/* Annoying but necessary to avoid errors on some platforms */
626
#define SIZE_T_ZERO             ((size_t)0)
626
#define SIZE_T_ZERO             ((size_t)0)
627
#define SIZE_T_ONE              ((size_t)1)
627
#define SIZE_T_ONE              ((size_t)1)
628
#define SIZE_T_TWO              ((size_t)2)
628
#define SIZE_T_TWO              ((size_t)2)
629
#define SIZE_T_FOUR             ((size_t)4)
629
#define SIZE_T_FOUR             ((size_t)4)
630
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
630
#define TWO_SIZE_T_SIZES        (SIZE_T_SIZE<<1)
631
#define FOUR_SIZE_T_SIZES       (SIZE_T_SIZE<<2)
631
#define FOUR_SIZE_T_SIZES       (SIZE_T_SIZE<<2)
632
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
632
#define SIX_SIZE_T_SIZES        (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
633
#define HALF_MAX_SIZE_T         (MAX_SIZE_T / 2U)
633
#define HALF_MAX_SIZE_T         (MAX_SIZE_T / 2U)
634
 
634
 
635
#define USE_LOCK_BIT            (2U)
635
#define USE_LOCK_BIT            (2U)
636
#define USE_MMAP_BIT            (SIZE_T_ONE)
636
#define USE_MMAP_BIT            (SIZE_T_ONE)
637
#define USE_NONCONTIGUOUS_BIT   (4U)
637
#define USE_NONCONTIGUOUS_BIT   (4U)
638
 
638
 
639
/* segment bit set in create_mspace_with_base */
639
/* segment bit set in create_mspace_with_base */
640
#define EXTERN_BIT              (8U)
640
#define EXTERN_BIT              (8U)
641
 
641
 
642
#define HAVE_MMAP               1
642
#define HAVE_MMAP               1
643
#define HAVE_MORECORE           0
643
#define HAVE_MORECORE           0
644
#define MORECORE_CANNOT_TRIM    1
644
#define MORECORE_CANNOT_TRIM    1
645
#define CALL_MMAP(s)            MMAP_DEFAULT(s)
645
#define CALL_MMAP(s)            MMAP_DEFAULT(s)
646
#define CALL_MUNMAP(a, s)       MUNMAP_DEFAULT((a), (s))
646
#define CALL_MUNMAP(a, s)       MUNMAP_DEFAULT((a), (s))
647
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
647
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
648
#define MAX_RELEASE_CHECK_RATE  4095
648
#define MAX_RELEASE_CHECK_RATE  4095
649
#define NO_SEGMENT_TRAVERSAL    1
649
#define NO_SEGMENT_TRAVERSAL    1
650
#define MALLOC_ALIGNMENT        ((size_t)8U)
650
#define MALLOC_ALIGNMENT        ((size_t)8U)
651
#define CHUNK_OVERHEAD          (SIZE_T_SIZE)
651
#define CHUNK_OVERHEAD          (SIZE_T_SIZE)
652
#define DEFAULT_GRANULARITY     ((size_t)128U * (size_t)1024U)
652
#define DEFAULT_GRANULARITY     ((size_t)256U * (size_t)1024U)
653
#define DEFAULT_MMAP_THRESHOLD  ((size_t)512U * (size_t)1024U)
653
#define DEFAULT_MMAP_THRESHOLD  ((size_t)1024U * (size_t)1024U)
654
#define DEFAULT_TRIM_THRESHOLD  ((size_t)1024U * (size_t)1024U)
654
#define DEFAULT_TRIM_THRESHOLD  ((size_t)2048U * (size_t)1024U)
655
 
655
 
656
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
656
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
657
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
657
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
658
 
658
 
659
/* True if address a has acceptable alignment */
659
/* True if address a has acceptable alignment */
660
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
660
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
661
 
661
 
662
/* the number of bytes to offset an address to align it */
662
/* the number of bytes to offset an address to align it */
663
#define align_offset(A)\
663
#define align_offset(A)\
664
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
664
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
665
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
665
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
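/*
  Worked example (not part of the original text), with MALLOC_ALIGNMENT == 8:
    align_offset((void*)0x1005) == (8 - 5) & 7 == 3, so 0x1005 + 3 == 0x1008
    align_offset((void*)0x1008) == 0  (already aligned)
*/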
666
 
666
 
667
/* -------------------------- MMAP preliminaries ------------------------- */
667
/* -------------------------- MMAP preliminaries ------------------------- */
668
 
668
 
669
/*
669
/*
670
   If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
670
   If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
671
   checks to fail so the compiler optimizer can delete code rather than
671
   checks to fail so the compiler optimizer can delete code rather than
672
   using so many "#if"s.
672
   using so many "#if"s.
673
*/
673
*/
674
 
674
 
675
 
675
 
676
/* MORECORE and MMAP must return MFAIL on failure */
676
/* MORECORE and MMAP must return MFAIL on failure */
677
#define MFAIL                ((void*)(MAX_SIZE_T))
677
#define MFAIL                ((void*)(MAX_SIZE_T))
678
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
678
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
679
 
679
 
680
#define should_trim(M,s)  (0)
680
#define should_trim(M,s)  (0)
681
 
681
 
682
 
682
 
683
 
683
 
684
/* --------------------------- Lock preliminaries ------------------------ */
684
/* --------------------------- Lock preliminaries ------------------------ */
685
 
685
 
686
/*
686
/*
687
  When locks are defined, there is one global lock, plus
687
  When locks are defined, there is one global lock, plus
688
  one per-mspace lock.
688
  one per-mspace lock.
689
 
689
 
690
  The global lock ensures that mparams.magic and other unique
690
  The global lock ensures that mparams.magic and other unique
691
  mparams values are initialized only once. It also protects
691
  mparams values are initialized only once. It also protects
692
  sequences of calls to MORECORE.  In many cases sys_alloc requires
692
  sequences of calls to MORECORE.  In many cases sys_alloc requires
693
  two calls, that should not be interleaved with calls by other
693
  two calls, that should not be interleaved with calls by other
694
  threads.  This does not protect against direct calls to MORECORE
694
  threads.  This does not protect against direct calls to MORECORE
695
  by other threads not using this lock, so there is still code to
695
  by other threads not using this lock, so there is still code to
696
  cope as best we can with interference.
696
  cope as best we can with interference.
697
 
697
 
698
  Per-mspace locks surround calls to malloc, free, etc.
698
  Per-mspace locks surround calls to malloc, free, etc.
699
  By default, locks are simple non-reentrant mutexes.
699
  By default, locks are simple non-reentrant mutexes.
700
 
700
 
701
  Because lock-protected regions generally have bounded times, it is
701
  Because lock-protected regions generally have bounded times, it is
702
  OK to use the supplied simple spinlocks. Spinlocks are likely to
702
  OK to use the supplied simple spinlocks. Spinlocks are likely to
703
  improve performance for lightly contended applications, but worsen
703
  improve performance for lightly contended applications, but worsen
704
  performance under heavy contention.
704
  performance under heavy contention.
705
 
705
 
706
  If USE_LOCKS is > 1, the definitions of lock routines here are
706
  If USE_LOCKS is > 1, the definitions of lock routines here are
707
  bypassed, in which case you will need to define the type MLOCK_T,
707
  bypassed, in which case you will need to define the type MLOCK_T,
708
  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
708
  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
709
  and TRY_LOCK.  You must also declare a
709
  and TRY_LOCK.  You must also declare a
710
    static MLOCK_T malloc_global_mutex = { initialization values };.
710
    static MLOCK_T malloc_global_mutex = { initialization values };.
711
 
711
 
712
*/
712
*/
713
 
713
 
714
static DEFINE_MUTEX(malloc_global_mutex);
714
static DEFINE_MUTEX(malloc_global_mutex);
715
 
715
 
716
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  MutexLock(&malloc_global_mutex);
716
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  MutexLock(&malloc_global_mutex);
717
#define RELEASE_MALLOC_GLOBAL_LOCK()  MutexUnlock(&malloc_global_mutex);
717
#define RELEASE_MALLOC_GLOBAL_LOCK()  MutexUnlock(&malloc_global_mutex);
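/*
  Sketch (not part of the original source) of the USE_LOCKS > 1 hooks
  described above.  The MLOCK_T type and the MutexInit/MutexTryLock
  primitives are hypothetical; only MutexLock/MutexUnlock appear in this
  file.
*/
#if 0
#define MLOCK_T              struct mutex
#define INITIAL_LOCK(lk)     (MutexInit(lk), 0)
#define DESTROY_LOCK(lk)     (0)
#define ACQUIRE_LOCK(lk)     (MutexLock(lk), 0)
#define RELEASE_LOCK(lk)     (MutexUnlock(lk), 0)
#define TRY_LOCK(lk)         MutexTryLock(lk)
#endif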
718
 
718
 
719
 
719
 
720
/* -----------------------  Chunk representations ------------------------ */
720
/* -----------------------  Chunk representations ------------------------ */
721
 
721
 
722
/*
722
/*
723
  (The following includes lightly edited explanations by Colin Plumb.)
723
  (The following includes lightly edited explanations by Colin Plumb.)
724
 
724
 
725
  The malloc_chunk declaration below is misleading (but accurate and
725
  The malloc_chunk declaration below is misleading (but accurate and
726
  necessary).  It declares a "view" into memory allowing access to
726
  necessary).  It declares a "view" into memory allowing access to
727
  necessary fields at known offsets from a given base.
727
  necessary fields at known offsets from a given base.
728
 
728
 
729
  Chunks of memory are maintained using a `boundary tag' method as
729
  Chunks of memory are maintained using a `boundary tag' method as
730
  originally described by Knuth.  (See the paper by Paul Wilson
730
  originally described by Knuth.  (See the paper by Paul Wilson
731
  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
731
  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
732
  techniques.)  Sizes of free chunks are stored both in the front of
732
  techniques.)  Sizes of free chunks are stored both in the front of
733
  each chunk and at the end.  This makes consolidating fragmented
733
  each chunk and at the end.  This makes consolidating fragmented
734
  chunks into bigger chunks fast.  The head fields also hold bits
734
  chunks into bigger chunks fast.  The head fields also hold bits
735
  representing whether chunks are free or in use.
735
  representing whether chunks are free or in use.
736
 
736
 
737
  Here are some pictures to make it clearer.  They are "exploded" to
737
  Here are some pictures to make it clearer.  They are "exploded" to
738
  show that the state of a chunk can be thought of as extending from
738
  show that the state of a chunk can be thought of as extending from
739
  the high 31 bits of the head field of its header through the
739
  the high 31 bits of the head field of its header through the
740
  prev_foot and PINUSE_BIT bit of the following chunk header.
740
  prev_foot and PINUSE_BIT bit of the following chunk header.
741
 
741
 
742
  A chunk that's in use looks like:
742
  A chunk that's in use looks like:
743
 
743
 
744
   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
744
   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
745
           | Size of previous chunk (if P = 0)                             |
745
           | Size of previous chunk (if P = 0)                             |
746
           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
746
           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
747
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
747
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
748
         | Size of this chunk                                         1| +-+
748
         | Size of this chunk                                         1| +-+
749
   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
749
   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
750
         |                                                               |
750
         |                                                               |
751
         +-                                                             -+
751
         +-                                                             -+
752
         |                                                               |
752
         |                                                               |
753
         +-                                                             -+
753
         +-                                                             -+
754
         |                                                               :
754
         |                                                               :
755
         +-      size - sizeof(size_t) available payload bytes          -+
755
         +-      size - sizeof(size_t) available payload bytes          -+
756
         :                                                               |
756
         :                                                               |
757
 chunk-> +-                                                             -+
757
 chunk-> +-                                                             -+
758
         |                                                               |
758
         |                                                               |
759
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
759
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
760
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
760
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
761
       | Size of next chunk (may or may not be in use)               | +-+
761
       | Size of next chunk (may or may not be in use)               | +-+
762
 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
762
 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
763
 
763
 
764
    And if it's free, it looks like this:
764
    And if it's free, it looks like this:
765
 
765
 
766
   chunk-> +-                                                             -+
766
   chunk-> +-                                                             -+
767
           | User payload (must be in use, or we would have merged!)       |
767
           | User payload (must be in use, or we would have merged!)       |
768
           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
768
           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
769
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
769
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
770
         | Size of this chunk                                         0| +-+
770
         | Size of this chunk                                         0| +-+
771
   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
771
   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
772
         | Next pointer                                                  |
772
         | Next pointer                                                  |
773
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
773
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
774
         | Prev pointer                                                  |
774
         | Prev pointer                                                  |
775
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
775
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
776
         |                                                               :
776
         |                                                               :
777
         +-      size - sizeof(struct chunk) unused bytes               -+
777
         +-      size - sizeof(struct chunk) unused bytes               -+
778
         :                                                               |
778
         :                                                               |
779
 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
779
 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
780
         | Size of this chunk                                            |
780
         | Size of this chunk                                            |
781
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
781
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
782
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
782
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
783
       | Size of next chunk (must be in use, or we would have merged)| +-+
783
       | Size of next chunk (must be in use, or we would have merged)| +-+
784
 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
784
 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
785
       |                                                               :
785
       |                                                               :
786
       +- User payload                                                -+
786
       +- User payload                                                -+
787
       :                                                               |
787
       :                                                               |
788
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
788
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
789
                                                                     |0|
789
                                                                     |0|
790
                                                                     +-+
790
                                                                     +-+
791
  Note that since we always merge adjacent free chunks, the chunks
791
  Note that since we always merge adjacent free chunks, the chunks
792
  adjacent to a free chunk must be in use.
792
  adjacent to a free chunk must be in use.
793
 
793
 
794
  Given a pointer to a chunk (which can be derived trivially from the
794
  Given a pointer to a chunk (which can be derived trivially from the
795
  payload pointer) we can, in O(1) time, find out whether the adjacent
795
  payload pointer) we can, in O(1) time, find out whether the adjacent
796
  chunks are free, and if so, unlink them from the lists that they
796
  chunks are free, and if so, unlink them from the lists that they
797
  are on and merge them with the current chunk.
797
  are on and merge them with the current chunk.
798
 
798
 
799
  Chunks always begin on even word boundaries, so the mem portion
799
  Chunks always begin on even word boundaries, so the mem portion
800
  (which is returned to the user) is also on an even word boundary, and
800
  (which is returned to the user) is also on an even word boundary, and
801
  thus at least double-word aligned.
801
  thus at least double-word aligned.
802
 
802
 
803
  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
803
  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
804
  chunk size (which is always a multiple of two words), is an in-use
804
  chunk size (which is always a multiple of two words), is an in-use
805
  bit for the *previous* chunk.  If that bit is *clear*, then the
805
  bit for the *previous* chunk.  If that bit is *clear*, then the
806
  word before the current chunk size contains the previous chunk
806
  word before the current chunk size contains the previous chunk
807
  size, and can be used to find the front of the previous chunk.
807
  size, and can be used to find the front of the previous chunk.
808
  The very first chunk allocated always has this bit set, preventing
808
  The very first chunk allocated always has this bit set, preventing
809
  access to non-existent (or non-owned) memory. If pinuse is set for
809
  access to non-existent (or non-owned) memory. If pinuse is set for
810
  any given chunk, then you CANNOT determine the size of the
810
  any given chunk, then you CANNOT determine the size of the
811
  previous chunk, and might even get a memory addressing fault when
811
  previous chunk, and might even get a memory addressing fault when
812
  trying to do so.
812
  trying to do so.
813
 
813
 
814
  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
814
  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
815
  the chunk size redundantly records whether the current chunk is
815
  the chunk size redundantly records whether the current chunk is
816
  inuse (unless the chunk is mmapped). This redundancy enables usage
816
  inuse (unless the chunk is mmapped). This redundancy enables usage
817
  checks within free and realloc, and reduces indirection when freeing
817
  checks within free and realloc, and reduces indirection when freeing
818
  and consolidating chunks.
818
  and consolidating chunks.
819
 
819
 
820
  Each freshly allocated chunk must have both cinuse and pinuse set.
820
  Each freshly allocated chunk must have both cinuse and pinuse set.
821
  That is, each allocated chunk borders either a previously allocated
821
  That is, each allocated chunk borders either a previously allocated
822
  and still in-use chunk, or the base of its memory arena. This is
822
  and still in-use chunk, or the base of its memory arena. This is
823
  ensured by making all allocations from the `lowest' part of any
823
  ensured by making all allocations from the `lowest' part of any
824
  found chunk.  Further, no free chunk physically borders another one,
824
  found chunk.  Further, no free chunk physically borders another one,
825
  so each free chunk is known to be preceded and followed by either
825
  so each free chunk is known to be preceded and followed by either
826
  inuse chunks or the ends of memory.
826
  inuse chunks or the ends of memory.
827
 
827
 
828
  Note that the `foot' of the current chunk is actually represented
828
  Note that the `foot' of the current chunk is actually represented
829
  as the prev_foot of the NEXT chunk. This makes it easier to
829
  as the prev_foot of the NEXT chunk. This makes it easier to
830
  deal with alignments etc but can be very confusing when trying
830
  deal with alignments etc but can be very confusing when trying
831
  to extend or adapt this code.
831
  to extend or adapt this code.
832
 
832
 
833
  The exceptions to all this are
833
  The exceptions to all this are
834
 
834
 
835
     1. The special chunk `top' is the top-most available chunk (i.e.,
835
     1. The special chunk `top' is the top-most available chunk (i.e.,
836
        the one bordering the end of available memory). It is treated
836
        the one bordering the end of available memory). It is treated
837
        specially.  Top is never included in any bin, is used only if
837
        specially.  Top is never included in any bin, is used only if
838
        no other chunk is available, and is released back to the
838
        no other chunk is available, and is released back to the
839
        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
839
        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
840
        the top chunk is treated as larger (and thus less well
840
        the top chunk is treated as larger (and thus less well
841
        fitting) than any other available chunk.  The top chunk
841
        fitting) than any other available chunk.  The top chunk
842
        doesn't update its trailing size field since there is no next
842
        doesn't update its trailing size field since there is no next
843
        contiguous chunk that would have to index off it. However,
843
        contiguous chunk that would have to index off it. However,
844
        space is still allocated for it (TOP_FOOT_SIZE) to enable
844
        space is still allocated for it (TOP_FOOT_SIZE) to enable
845
        separation or merging when space is extended.
845
        separation or merging when space is extended.
846
 
846
 
847
     2. Chunks allocated via mmap have both cinuse and pinuse bits
847
     2. Chunks allocated via mmap have both cinuse and pinuse bits
848
        cleared in their head fields.  Because they are allocated
848
        cleared in their head fields.  Because they are allocated
849
        one-by-one, each must carry its own prev_foot field, which is
849
        one-by-one, each must carry its own prev_foot field, which is
850
        also used to hold the offset this chunk has within its mmapped
850
        also used to hold the offset this chunk has within its mmapped
851
        region, which is needed to preserve alignment. Each mmapped
851
        region, which is needed to preserve alignment. Each mmapped
852
        chunk is trailed by the first two fields of a fake next-chunk
852
        chunk is trailed by the first two fields of a fake next-chunk
853
        for sake of usage checks.
853
        for sake of usage checks.
854
 
854
 
855
*/
855
*/
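/*
  Worked example (not part of the original text): on a 32-bit build an
  in-use 24-byte chunk whose previous neighbour is also in use carries
      head == 24 | PINUSE_BIT | CINUSE_BIT == 0x1B
  and chunksize() recovers 24 by masking off the low flag bits.
*/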
856
 
856
 
857
struct malloc_chunk {
857
struct malloc_chunk {
858
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
858
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
859
  size_t               head;       /* Size and inuse bits. */
859
  size_t               head;       /* Size and inuse bits. */
860
  struct malloc_chunk* fd;         /* double links -- used only if free. */
860
  struct malloc_chunk* fd;         /* double links -- used only if free. */
861
  struct malloc_chunk* bk;
861
  struct malloc_chunk* bk;
862
};
862
};
863
 
863
 
864
typedef struct malloc_chunk  mchunk;
864
typedef struct malloc_chunk  mchunk;
865
typedef struct malloc_chunk* mchunkptr;
865
typedef struct malloc_chunk* mchunkptr;
866
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
866
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
867
typedef unsigned int bindex_t;         /* Described below */
867
typedef unsigned int bindex_t;         /* Described below */
868
typedef unsigned int binmap_t;         /* Described below */
868
typedef unsigned int binmap_t;         /* Described below */
869
typedef unsigned int flag_t;           /* The type of various bit flag sets */
869
typedef unsigned int flag_t;           /* The type of various bit flag sets */
870
 
870
 
871
/* ------------------- Chunks sizes and alignments ----------------------- */
871
/* ------------------- Chunks sizes and alignments ----------------------- */
872
 
872
 
873
#define MCHUNK_SIZE         (sizeof(mchunk))
873
#define MCHUNK_SIZE         (sizeof(mchunk))
874
 
874
 
875
#if FOOTERS
875
#if FOOTERS
876
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
876
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
877
#else /* FOOTERS */
877
#else /* FOOTERS */
878
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
878
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
879
#endif /* FOOTERS */
879
#endif /* FOOTERS */
880
 
880
 
881
/* MMapped chunks need a second word of overhead ... */
881
/* MMapped chunks need a second word of overhead ... */
882
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
882
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
883
/* ... and additional padding for fake next-chunk at foot */
883
/* ... and additional padding for fake next-chunk at foot */
884
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
884
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
885
 
885
 
886
/* The smallest size we can malloc is an aligned minimal chunk */
886
/* The smallest size we can malloc is an aligned minimal chunk */
887
#define MIN_CHUNK_SIZE\
887
#define MIN_CHUNK_SIZE\
888
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
888
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
889
 
889
 
890
/* conversion from malloc headers to user pointers, and back */
890
/* conversion from malloc headers to user pointers, and back */
891
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
891
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
892
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
892
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
893
/* chunk associated with aligned address A */
893
/* chunk associated with aligned address A */
894
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
894
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
895
 
895
 
896
/* Bounds on request (not chunk) sizes. */
896
/* Bounds on request (not chunk) sizes. */
897
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
897
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
898
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
898
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
899
 
899
 
900
/* pad request bytes into a usable size */
900
/* pad request bytes into a usable size */
901
#define pad_request(req) \
901
#define pad_request(req) \
902
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
902
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
903
 
903
 
904
/* pad request, checking for minimum (but not maximum) */
904
/* pad request, checking for minimum (but not maximum) */
905
#define request2size(req) \
905
#define request2size(req) \
906
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
906
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
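/*
  Worked example (not part of the original text), for a 32-bit build with
  FOOTERS disabled (SIZE_T_SIZE == 4, CHUNK_ALIGN_MASK == 7):
    request2size(5)  == MIN_CHUNK_SIZE == 16    (5 is below MIN_REQUEST)
    request2size(13) == (13 + 4 + 7) & ~7 == 24
*/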
907
 
907
 
908
 
908
 
909
/* ------------------ Operations on head and foot fields ----------------- */
909
/* ------------------ Operations on head and foot fields ----------------- */
910
 
910
 
911
/*
911
/*
912
  The head field of a chunk is or'ed with PINUSE_BIT when previous
912
  The head field of a chunk is or'ed with PINUSE_BIT when previous
913
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
913
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
914
  use, unless mmapped, in which case both bits are cleared.
914
  use, unless mmapped, in which case both bits are cleared.
915
 
915
 
916
  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
916
  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
917
*/
917
*/
918
 
918
 
919
#define PINUSE_BIT          (SIZE_T_ONE)
919
#define PINUSE_BIT          (SIZE_T_ONE)
920
#define CINUSE_BIT          (SIZE_T_TWO)
920
#define CINUSE_BIT          (SIZE_T_TWO)
921
#define FLAG4_BIT           (SIZE_T_FOUR)
921
#define FLAG4_BIT           (SIZE_T_FOUR)
922
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
922
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
923
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
923
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
924
 
924
 
925
/* Head value for fenceposts */
925
/* Head value for fenceposts */
926
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
926
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
927
 
927
 
928
/* extraction of fields from head words */
928
/* extraction of fields from head words */
929
#define cinuse(p)           ((p)->head & CINUSE_BIT)
929
#define cinuse(p)           ((p)->head & CINUSE_BIT)
930
#define pinuse(p)           ((p)->head & PINUSE_BIT)
930
#define pinuse(p)           ((p)->head & PINUSE_BIT)
931
#define flag4inuse(p)       ((p)->head & FLAG4_BIT)
931
#define flag4inuse(p)       ((p)->head & FLAG4_BIT)
932
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
932
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
933
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
933
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
934
 
934
 
935
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
935
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
936
 
936
 
937
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
937
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
938
#define set_flag4(p)        ((p)->head |= FLAG4_BIT)
938
#define set_flag4(p)        ((p)->head |= FLAG4_BIT)
939
#define clear_flag4(p)      ((p)->head &= ~FLAG4_BIT)
939
#define clear_flag4(p)      ((p)->head &= ~FLAG4_BIT)
940
 
940
 
941
/* Treat space at ptr +/- offset as a chunk */
941
/* Treat space at ptr +/- offset as a chunk */
942
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
942
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
943
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
943
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
944
 
944
 
945
/* Ptr to next or previous physical malloc_chunk. */
945
/* Ptr to next or previous physical malloc_chunk. */
946
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
946
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
947
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
947
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
948
 
948
 
949
/* extract next chunk's pinuse bit */
949
/* extract next chunk's pinuse bit */
950
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
950
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
951
 
951
 
952
/* Get/set size at footer */
952
/* Get/set size at footer */
953
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
953
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
954
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
954
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
955
 
955
 
956
/* Set size, pinuse bit, and foot */
956
/* Set size, pinuse bit, and foot */
957
#define set_size_and_pinuse_of_free_chunk(p, s)\
957
#define set_size_and_pinuse_of_free_chunk(p, s)\
958
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
958
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
959
 
959
 
960
/* Set size, pinuse bit, foot, and clear next pinuse */
960
/* Set size, pinuse bit, foot, and clear next pinuse */
961
#define set_free_with_pinuse(p, s, n)\
961
#define set_free_with_pinuse(p, s, n)\
962
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
962
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
963
 
963
 
964
/* Get the internal overhead associated with chunk p */
964
/* Get the internal overhead associated with chunk p */
965
#define overhead_for(p)\
965
#define overhead_for(p)\
966
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
966
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
967
 
967
 
968
/* Return true if malloced space is not necessarily cleared */
968
/* Return true if malloced space is not necessarily cleared */
969
#if MMAP_CLEARS
969
#if MMAP_CLEARS
970
#define calloc_must_clear(p) (!is_mmapped(p))
970
#define calloc_must_clear(p) (!is_mmapped(p))
971
#else /* MMAP_CLEARS */
971
#else /* MMAP_CLEARS */
972
#define calloc_must_clear(p) (1)
972
#define calloc_must_clear(p) (1)
973
#endif /* MMAP_CLEARS */
973
#endif /* MMAP_CLEARS */
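/*
  Sketch (not part of the original source): navigating physical chunks
  with the macros above.  `mem' stands for a pointer previously returned
  to the user.
*/
#if 0
  mchunkptr p  = mem2chunk(mem);   /* user pointer back to its chunk header   */
  size_t    sz = chunksize(p);     /* size with the flag bits masked off      */
  mchunkptr nx = next_chunk(p);    /* physically adjacent higher chunk        */
  if (!pinuse(p))                  /* previous chunk free: prev_foot is valid */
      p = prev_chunk(p);
#endif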
974
 
974
 
975
/* ---------------------- Overlaid data structures ----------------------- */
975
/* ---------------------- Overlaid data structures ----------------------- */
976
 
976
 
977
/*
977
/*
978
  When chunks are not in use, they are treated as nodes of either
978
  When chunks are not in use, they are treated as nodes of either
979
  lists or trees.
979
  lists or trees.
980
 
980
 
981
  "Small"  chunks are stored in circular doubly-linked lists, and look
981
  "Small"  chunks are stored in circular doubly-linked lists, and look
982
  like this:
982
  like this:
983
 
983
 
984
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
984
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
985
            |             Size of previous chunk                            |
985
            |             Size of previous chunk                            |
986
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
986
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
987
    `head:' |             Size of chunk, in bytes                         |P|
987
    `head:' |             Size of chunk, in bytes                         |P|
988
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
988
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
989
            |             Forward pointer to next chunk in list             |
989
            |             Forward pointer to next chunk in list             |
990
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
990
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
991
            |             Back pointer to previous chunk in list            |
991
            |             Back pointer to previous chunk in list            |
992
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
992
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
993
            |             Unused space (may be 0 bytes long)                .
993
            |             Unused space (may be 0 bytes long)                .
994
            .                                                               .
994
            .                                                               .
995
            .                                                               |
995
            .                                                               |
996
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
996
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
997
    `foot:' |             Size of chunk, in bytes                           |
997
    `foot:' |             Size of chunk, in bytes                           |
998
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
998
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
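   (Illustrative note, not part of the original text: unlinking a free
    small chunk p from its circular list touches exactly the two links
    pictured above -- F = p->fd; B = p->bk; F->bk = B; B->fd = F; --
    after which p can be merged with a neighbour or handed to the user.)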
999
 
999
 
1000
  Larger chunks are kept in a form of bitwise digital trees (aka
1000
  Larger chunks are kept in a form of bitwise digital trees (aka
1001
  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
1001
  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
1002
  free chunks greater than 256 bytes, their size doesn't impose any
1002
  free chunks greater than 256 bytes, their size doesn't impose any
1003
  constraints on user chunk sizes.  Each node looks like:
1003
  constraints on user chunk sizes.  Each node looks like:
1004
 
1004
 
1005
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1005
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1006
            |             Size of previous chunk                            |
1006
            |             Size of previous chunk                            |
1007
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1007
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1008
    `head:' |             Size of chunk, in bytes                         |P|
1008
    `head:' |             Size of chunk, in bytes                         |P|
1009
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1009
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1010
            |             Forward pointer to next chunk of same size        |
1010
            |             Forward pointer to next chunk of same size        |
1011
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1011
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1012
            |             Back pointer to previous chunk of same size       |
1012
            |             Back pointer to previous chunk of same size       |
1013
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1013
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1014
            |             Pointer to left child (child[0])                  |
1014
            |             Pointer to left child (child[0])                  |
1015
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1015
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1016
            |             Pointer to right child (child[1])                 |
1016
            |             Pointer to right child (child[1])                 |
1017
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1017
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1018
            |             Pointer to parent                                 |
1018
            |             Pointer to parent                                 |
1019
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1019
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1020
            |             bin index of this chunk                           |
1020
            |             bin index of this chunk                           |
1021
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1021
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1022
            |             Unused space                                      .
1022
            |             Unused space                                      .
1023
            .                                                               |
1023
            .                                                               |
1024
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1024
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1025
    `foot:' |             Size of chunk, in bytes                           |
1025
    `foot:' |             Size of chunk, in bytes                           |
1026
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1026
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1027
 
1027
 
1028
  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
1028
  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
1029
  of the same size are arranged in a circularly-linked list, with only
1029
  of the same size are arranged in a circularly-linked list, with only
1030
  the oldest chunk (the next to be used, in our FIFO ordering)
1030
  the oldest chunk (the next to be used, in our FIFO ordering)
1031
  actually in the tree.  (Tree members are distinguished by a non-null
1031
  actually in the tree.  (Tree members are distinguished by a non-null
1032
  parent pointer.)  If a chunk with the same size as an existing node
1032
  parent pointer.)  If a chunk with the same size as an existing node
1033
  is inserted, it is linked off the existing node using pointers that
1033
  is inserted, it is linked off the existing node using pointers that
1034
  work in the same way as fd/bk pointers of small chunks.
1034
  work in the same way as fd/bk pointers of small chunks.
1035
 
1035
 
1036
  Each tree contains a power-of-2-sized range of chunk sizes (the
1036
  Each tree contains a power-of-2-sized range of chunk sizes (the
1037
  smallest is 0x100 <= x < 0x180), which is divided in half at each
1037
  smallest is 0x100 <= x < 0x180), which is divided in half at each
1038
  tree level, with the chunks in the smaller half of the range (0x100
1038
  tree level, with the chunks in the smaller half of the range (0x100
1039
  <= x < 0x140 for the top node) in the left subtree and the larger
1039
  <= x < 0x140 for the top node) in the left subtree and the larger
1040
  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
1040
  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
1041
  done by inspecting individual bits.
1041
  done by inspecting individual bits.
1042
 
1042
 
1043
  Using these rules, each node's left subtree contains all smaller
1043
  Using these rules, each node's left subtree contains all smaller
1044
  sizes than its right subtree.  However, the node at the root of each
1044
  sizes than its right subtree.  However, the node at the root of each
1045
  subtree has no particular ordering relationship to either.  (The
1045
  subtree has no particular ordering relationship to either.  (The
1046
  dividing line between the subtree sizes is based on trie relation.)
1046
  dividing line between the subtree sizes is based on trie relation.)
1047
  If we remove the last chunk of a given size from the interior of the
1047
  If we remove the last chunk of a given size from the interior of the
1048
  tree, we need to replace it with a leaf node.  The tree ordering
1048
  tree, we need to replace it with a leaf node.  The tree ordering
1049
  rules permit a node to be replaced by any leaf below it.
1049
  rules permit a node to be replaced by any leaf below it.
1050
 
1050
 
1051
  The smallest chunk in a tree (a common operation in a best-fit
1051
  The smallest chunk in a tree (a common operation in a best-fit
1052
  allocator) can be found by walking a path to the leftmost leaf in
1052
  allocator) can be found by walking a path to the leftmost leaf in
1053
  the tree.  Unlike a usual binary tree, where we follow left child
1053
  the tree.  Unlike a usual binary tree, where we follow left child
1054
  pointers until we reach a null, here we follow the right child
1054
  pointers until we reach a null, here we follow the right child
1055
  pointer any time the left one is null, until we reach a leaf with
1055
  pointer any time the left one is null, until we reach a leaf with
1056
  both child pointers null. The smallest chunk in the tree will be
1056
  both child pointers null. The smallest chunk in the tree will be
1057
  somewhere along that path.
1057
  somewhere along that path.
1058
 
1058
 
1059
  The worst case number of steps to add, find, or remove a node is
1059
  The worst case number of steps to add, find, or remove a node is
1060
  bounded by the number of bits differentiating chunks within
1060
  bounded by the number of bits differentiating chunks within
1061
  bins. Under current bin calculations, this ranges from 6 up to 21
1061
  bins. Under current bin calculations, this ranges from 6 up to 21
1062
  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1062
  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1063
  is of course much better.
1063
  is of course much better.
1064
*/
1064
*/

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

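/*
  A sketch of the walk described above (not part of dlmalloc proper;
  the helper name is hypothetical and it assumes the tchunkptr,
  chunksize, and leftmost_child definitions in this file): starting
  from a treebin root, keep taking the left child when present, else
  the right child, remembering the smallest chunk seen on the path.
*/
#if 0 /* illustrative only */
static tchunkptr smallest_chunk_in_tree(tchunkptr t) {
  tchunkptr best = t;
  while (t != 0) {
    if (chunksize(t) < chunksize(best))
      best = t;                 /* smallest may be anywhere on the path */
    t = leftmost_child(t);      /* left child if present, else right */
  }
  return best;
}
#endif /* illustrative only */
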
/* ----------------------------- Segments -------------------------------- */

/*
  Each malloc space may include non-contiguous segments, held in a
  list headed by an embedded malloc_segment record representing the
  top-most space. Segments also include flags holding properties of
  the space. Large chunks that are directly allocated by mmap are not
  included in this list. They are instead independently created and
  destroyed without otherwise keeping track of them.

  Segment management mainly comes into play for spaces allocated by
  MMAP.  Any call to MMAP might or might not return memory that is
  adjacent to an existing segment.  MORECORE normally contiguously
  extends the current space, so this space is almost always adjacent,
  which is simpler and faster to deal with. (This is why MORECORE is
  used preferentially to MMAP when both are available -- see
  sys_alloc.)  When allocating using MMAP, we don't use any of the
  hinting mechanisms (inconsistently) supported in various
  implementations of unix mmap, or distinguish reserving from
  committing memory. Instead, we just ask for space, and exploit
  contiguity when we get it.  It is probably possible to do
  better than this on some systems, but no general scheme seems
  to be significantly better.

  Management entails a simpler variant of the consolidation scheme
  used for chunks to reduce fragmentation -- new adjacent memory is
  normally prepended or appended to an existing segment. However,
  there are limitations compared to chunk consolidation that mostly
  reflect the fact that segment processing is relatively infrequent
  (occurring only when getting memory from the system) and that we
  don't expect to have huge numbers of segments:

  * Segments are not indexed, so traversal requires linear scans.  (It
    would be possible to index these, but is not worth the extra
    overhead and complexity for most programs on most platforms.)
  * New segments are only appended to old ones when holding top-most
    memory; if they cannot be prepended to others, they are held in
    different segments.

  Except for the top-most segment of an mstate, each segment record
  is kept at the tail of its segment. Segments are added by pushing
  segment records onto the list headed by &mstate.seg for the
  containing mstate.

  Segment flags control allocation/merge/deallocation policies:
  * If EXTERN_BIT set, then we did not allocate this segment,
    and so should not try to deallocate or merge with others.
    (This currently holds only for the initial segment passed
    into create_mspace_with_base.)
  * If USE_MMAP_BIT set, the segment may be merged with
    other surrounding mmapped segments and trimmed/de-allocated
    using munmap.
  * If neither bit is set, then the segment was obtained using
    MORECORE so can be merged with surrounding MORECORE'd segments
    and deallocated/trimmed using MORECORE with negative arguments.
*/

struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t       sflags;           /* mmap and extern flag */
};

#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/* ---------------------------- malloc_state ----------------------------- */

/*
   A malloc_state holds all of the bookkeeping for a space.
   The main fields are:

  Top
    The topmost chunk of the currently active segment. Its size is
    cached in topsize.  The actual size of topmost space is
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
    fenceposts and segment records if necessary when getting more
    space from the system.  The size at which to autotrim top is
    cached from mparams in trim_check, except that it is disabled if
    an autotrim fails.

  Designated victim (dv)
    This is the preferred chunk for servicing small requests that
    don't have exact fits.  It is normally the chunk split off most
    recently to service another small request.  Its size is cached in
    dvsize. The link fields of this chunk are not maintained since it
    is not kept in a bin.

  SmallBins
    An array of bin headers for free chunks.  These bins hold chunks
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
    chunks of all the same size, spaced 8 bytes apart.  To simplify
    use in double-linked lists, each bin header acts as a malloc_chunk
    pointing to the real first node, if it exists (else pointing to
    itself).  This avoids special-casing for headers.  But to avoid
    waste, we allocate only the fd/bk pointers of bins, and then use
    repositioning tricks to treat these as the fields of a chunk.

  TreeBins
    Treebins are pointers to the roots of trees holding a range of
    sizes. There are 2 equally spaced treebins for each power of two
    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
    larger.

  Bin maps
    There is one bit map for small bins ("smallmap") and one for
    treebins ("treemap").  Each bin sets its bit when non-empty, and
    clears the bit when empty.  Bit operations are then used to avoid
    bin-by-bin searching -- nearly all "search" is done without ever
    looking at bins that won't be selected.  The bit maps
    conservatively use 32 bits per map word, even on a 64-bit system.
    For a good description of some of the bit-based techniques used
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
    supplement at http://hackersdelight.org/). Many of these are
    intended to reduce the branchiness of paths through malloc etc, as
    well as to reduce the number of memory locations read or written.

  Segments
    A list of segments headed by an embedded malloc_segment record
    representing the initial space.

  Address check support
    The least_addr field is the least address ever obtained from
    MORECORE or MMAP. Attempted frees and reallocs of any address less
    than this are trapped (unless INSECURE is defined).

  Magic tag
    A cross-check field that should always hold the same value as
    mparams.magic.

  Max allowed footprint
    The maximum allowed bytes to allocate from the system (zero means no limit)

  Flags
    Bits recording whether to use MMAP, locks, or contiguous MORECORE

  Statistics
    Each space keeps track of current and maximum system memory
    obtained via MORECORE or MMAP.

  Trim support
    Fields holding the amount of unused topmost memory that should trigger
    trimming, and a counter to force periodic scanning to release unused
    non-topmost segments.

  Locking
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
    around every public call using this mspace.

  Extension support
    A void* pointer and a size_t field that can be used to help implement
    extensions to this malloc.
*/

/* Bin types, widths and sizes */
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

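/*
  With the defaults above, and assuming a typical 32-bit build where
  SIZE_T_ONE is 1, CHUNK_ALIGN_MASK is 7 and CHUNK_OVERHEAD is 4 (a
  sketch, not normative for every configuration):
    SMALLBIN_WIDTH    = 1 << 3 = 8    (spacing between small bin sizes)
    MIN_LARGE_SIZE    = 1 << 8 = 256  (smallest size kept in tree bins)
    MAX_SMALL_SIZE    = 255
    MAX_SMALL_REQUEST = 255 - 7 - 4 = 244 (largest request served from
                        small bins)
*/
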
struct malloc_state {
  binmap_t   smallmap;
  binmap_t   treemap;
  size_t     dvsize;
  size_t     topsize;
  char*      least_addr;
  mchunkptr  dv;
  mchunkptr  top;
  size_t     trim_check;
  size_t     release_checks;
  size_t     magic;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  size_t     footprint;
  size_t     max_footprint;
  size_t     footprint_limit; /* zero means no limit */
  flag_t     mflags;
  struct mutex  lock;     /* locate lock among fields that rarely change */
  msegment   seg;
  void*      extp;      /* Unused but available for extensions */
  size_t     exts;
};

typedef struct malloc_state*    mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams. Note that the non-zeroness of "magic"
  also serves as an initialization flag.
*/

struct malloc_params {
  size_t  magic;
  size_t  page_size;
  size_t  granularity;
  size_t  mmap_threshold;
  size_t  trim_threshold;
  flag_t  default_mflags;
};

static struct malloc_params mparams;

/* Ensure mparams initialized */
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())

static struct malloc_state _gm_;
#define gm                 (&_gm_)
#define is_global(M)       ((M) == &_gm_)

#define is_initialized(M)  ((M)->top != 0)

/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#if USE_LOCKS
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
#else
#define disable_lock(M)
#endif

#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#if HAVE_MMAP
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
#else
#define disable_mmap(M)
#endif

#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))


/* For mmap, use granularity alignment on windows, else page-align */
#ifdef WIN32
#define mmap_align(S) granularity_align(S)
#else
#define mmap_align(S) page_align(S)
#endif

/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

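/*
  For example, with mparams.page_size == 4096:
    page_align(4096) == 4096
    page_align(4097) == 8192
  since (S + 4095) & ~4095 rounds up and then clears the low 12 bits.
  granularity_align behaves the same way using mparams.granularity.
*/
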
/*  True if segment S holds address A */
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}


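/*
  A usage sketch (the helper name is hypothetical): given an address
  believed to belong to this mspace, segment_holding locates the
  containing segment, and segment_holds re-confirms membership.
*/
#if 0 /* illustrative only */
static int address_in_space(mstate m, void* mem) {
  msegmentptr sp = segment_holding(m, (char*)mem);
  return (sp != 0) && segment_holds(sp, mem);
}
#endif /* illustrative only */
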
/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)


/* -------------------------------  Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#define PREACTION(M)  ( MutexLock(&(M)->lock))
#define POSTACTION(M) { MutexUnlock(&(M)->lock); }

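/*
  The usual calling pattern around these hooks, sketched with a
  hypothetical entry point (the public entry points later in this
  file typically follow this shape):
*/
#if 0 /* illustrative only */
void* example_entry_point(mstate m, size_t bytes) {
  void* mem = 0;
  if (!PREACTION(m)) {   /* acquire the mspace lock; zero means success */
    /* ... allocation work protected by the lock ... */
    POSTACTION(m);       /* release before returning */
  }
  return mem;
}
#endif /* illustrative only */
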
/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#else /* DEBUG */
#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
#define check_malloc_state(M)       do_check_malloc_state(M)

static void   do_check_any_chunk(mstate m, mchunkptr p);
static void   do_check_top_chunk(mstate m, mchunkptr p);
static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
static void   do_check_inuse_chunk(mstate m, mchunkptr p);
static void   do_check_free_chunk(mstate m, mchunkptr p);
static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void   do_check_tree(mstate m, tchunkptr t);
static void   do_check_treebin(mstate m, bindex_t i);
static void   do_check_smallbin(mstate m, bindex_t i);
static void   do_check_malloc_state(mstate m);
static int    bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */

/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      (bindex_t)((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))

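/*
  Example: a chunk of size 40 maps to small bin index 40 >> 3 == 5,
  and small_index2size(5) recovers 40.  smallbin_at(M,5) points into
  M->smallbins at pointer slot 10; on typical builds where pointers
  and size_t have the same width, that address is treated as a
  malloc_chunk whose fd/bk fields fall on the following pair of array
  slots, so only two pointers per bin are ever really stored (the
  "repositioning trick" mentioned in the malloc_state comment above).
*/
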
#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}



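/*
  Worked example: for a chunk size S = 0x500 (1280 bytes),
    X = S >> TREEBIN_SHIFT = 5, so K = 2 (highest set bit of X),
    I = (K << 1) + ((S >> (K + 7)) & 1) = 4 + 0 = 4.
  Treebin 4 holds sizes in [0x400, 0x600); sizes at or above 0x600 in
  the same power-of-two range select bin 5.  Sizes with X > 0xFFFF all
  fall into the last bin, NTREEBINS-1.
*/
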
/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))


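/*
  Continuing the example above: minsize_for_tree_index(4) == 0x400 and
  minsize_for_tree_index(5) == 0x600, so the two bins for that power
  of two split [0x400, 0x800) into equal halves, agreeing with
  compute_tree_index.  bit_for_tree_index(4) == 8, meaning sizes
  within bin 4 differ only in bits 8 and below, which is what the
  successive tree levels resolve.
*/
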
/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))

#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = __builtin_ctz(X); \
  I = (bindex_t)J;\
}

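/*
  A sketch of how these combine (mirroring what the allocation paths
  later in this file do; the helper name is hypothetical): to find the
  first non-empty small bin at or above index i, mask the map down to
  bins >= i, isolate the lowest remaining bit, then convert it back to
  an index.  The caller must ensure at least one such bin exists.
*/
#if 0 /* illustrative only */
static bindex_t first_nonempty_smallbin_from(mstate m, bindex_t i) {
  binmap_t eligible = m->smallmap & same_or_left_bits(idx2bit(i));
  binmap_t leastbit = least_bit(eligible);  /* lowest qualifying bin bit */
  bindex_t j;
  compute_bit2idx(leastbit, j);             /* bit -> bin index */
  return j;
}
#endif /* illustrative only */
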
/* ----------------------- Runtime Check Support ------------------------- */

/*
  For security, the main invariant is that malloc/free/etc never
  writes to a static address other than malloc_state, unless static
  malloc_state itself has been corrupted, which cannot occur via
  malloc (because of these checks). In essence this means that we
  believe all pointers, sizes, maps etc held in malloc_state, but
  check all of those linked or offsetted from other embedded data
  structures.  These checks are interspersed with main code in a way
  that tends to minimize their run-time cost.

  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact.  This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  of an inuse chunk holds the xor of its mstate and a random seed,
  which is checked upon calls to free() and realloc().  This is
  (probabilistically) unguessable from outside the program, but can be
  computed by any code successfully malloc'ing any chunk, so does not
  itself provide protection against code that has already broken
  security through some other means.  Unlike Robertson et al, we
  always dynamically check addresses of all offset chunks (previous,
  next, etc). This turns out to be cheaper than relying on hashes.
*/

#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p)     is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#else /* !INSECURE */
#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_inuse(p)      (1)
#define ok_pinuse(p)     (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M)      ((M)->magic == mparams.magic)
#else  /* (FOOTERS && !INSECURE) */
#define ok_magic(M)      (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e)  __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e)  (e)
#endif /* GNUC */
#else /* !INSECURE */
#define RTCHECK(e)  (1)
#endif /* !INSECURE */

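/*
  Typical use, sketched with a hypothetical helper (the free and
  realloc paths later in this file inline similar tests): pointers
  derived from user arguments are validated before being trusted, and
  RTCHECK marks the successful outcome as the expected one for gcc.
*/
#if 0 /* illustrative only */
static int plausible_inuse_chunk(mstate m, mchunkptr p) {
  return RTCHECK(ok_address(m, p) && ok_inuse(p));
}
#endif /* illustrative only */
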
/* macros to set up inuse chunks with or without footers */

#if !FOOTERS

#define mark_inuse_foot(M,p,s)

/* Macros for setting head/foot of non-mmapped chunks */

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else /* FOOTERS */

/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
 mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

#endif /* !FOOTERS */

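/*
  The footer round trip under FOOTERS, sketched (the helper name is
  hypothetical; mem2chunk is defined earlier in this file):
  mark_inuse_foot stores ((size_t)m ^ mparams.magic) just past the
  chunk, get_mstate_for xors the stored value with mparams.magic to
  recover the owning mstate, and ok_magic rejects footers that do not
  decode to a plausible mstate.
*/
#if 0 /* illustrative only, FOOTERS builds */
static mstate owning_mstate(void* mem) {
  mchunkptr p = mem2chunk(mem);    /* user pointer -> chunk header */
  mstate fm = get_mstate_for(p);   /* footer ^ magic == original mstate */
  return ok_magic(fm) ? fm : 0;    /* 0 if the footer looks corrupted */
}
#endif /* illustrative only */
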
/* ---------------------------- setting mparams -------------------------- */

#if LOCK_AT_FORK
static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
#endif /* LOCK_AT_FORK */

/* Initialize mparams */
static int init_mparams(void) {

  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    size_t magic;
    size_t psize;
    size_t gsize;

    psize = 4096;
    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);

    mparams.granularity = gsize;
    mparams.page_size = psize;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;

    /* Set up lock for main malloc area */
    gm->mflags = mparams.default_mflags;
    MutexInit(&gm->lock);

    {
      magic = (size_t)&magic ^ (size_t)0x55555555U;
      magic |= (size_t)8U;    /* ensure nonzero */
      magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
      /* Until memory modes commonly available, use volatile-write */
      (*(volatile size_t *)(&(mparams.magic))) = magic;
    }
  }

  RELEASE_MALLOC_GLOBAL_LOCK();
  return 1;
}


#if DEBUG
/* ------------------------- Debugging Support --------------------------- */

/* Check properties of any chunk, whether free, inuse, mmapped etc  */
static void do_check_any_chunk(mstate m, mchunkptr p) {
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
}

/* Check properties of top chunk */
static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t  sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
  assert(sp != 0);
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(sz == m->topsize);
  assert(sz > 0);
  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(pinuse(p));
  assert(!pinuse(chunk_plus_offset(p, sz)));
}

/* Check properties of (inuse) mmapped chunks */
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  size_t  sz = chunksize(p);
  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
  assert(is_mmapped(p));
  assert(use_mmap(m));
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(!is_small(sz));
  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
}

/* Check properties of inuse chunks */
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(is_inuse(p));
  assert(next_pinuse(p));
  /* If not pinuse and not mmapped, previous chunk has OK offset */
  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  if (is_mmapped(p))
    do_check_mmapped_chunk(m, p);
}

/* Check properties of free chunks */
static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  assert(!is_inuse(p));
  assert(!next_pinuse(p));
  assert (!is_mmapped(p));
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert((sz & CHUNK_ALIGN_MASK) == 0);
      assert(is_aligned(chunk2mem(p)));
      assert(next->prev_foot == sz);
      assert(pinuse(p));
      assert (next == m->top || is_inuse(next));
      assert(p->fd->bk == p);
      assert(p->bk->fd == p);
    }
    else  /* markers are always of size SIZE_T_SIZE */
      assert(sz == SIZE_T_SIZE);
  }
}

/* Check properties of malloced chunks at the point they are malloced */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    size_t sz = p->head & ~INUSE_BITS;
    do_check_inuse_chunk(m, p);
    assert((sz & CHUNK_ALIGN_MASK) == 0);
    assert(sz >= MIN_CHUNK_SIZE);
    assert(sz >= s);
    /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  }
}

/* Check a tree and its subtrees.  */
static void do_check_tree(mstate m, tchunkptr t) {
  tchunkptr head = 0;
  tchunkptr u = t;
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  bindex_t idx;
  compute_tree_index(tsize, idx);
  assert(tindex == idx);
  assert(tsize >= MIN_LARGE_SIZE);
  assert(tsize >= minsize_for_tree_index(idx));
  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));

  do { /* traverse through chain of same-sized nodes */
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->index == tindex);
    assert(chunksize(u) == tsize);
    assert(!is_inuse(u));
    assert(!next_pinuse(u));
    assert(u->fd->bk == u);
    assert(u->bk->fd == u);
    if (u->parent == 0) {
      assert(u->child[0] == 0);
      assert(u->child[1] == 0);
    }
    else {
      assert(head == 0); /* only one node on chain has parent */
      head = u;
      assert(u->parent != u);
      assert (u->parent->child[0] == u ||
              u->parent->child[1] == u ||
              *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(u->child[0]->parent == u);
        assert(u->child[0] != u);
        do_check_tree(m, u->child[0]);
      }
      if (u->child[1] != 0) {
        assert(u->child[1]->parent == u);
        assert(u->child[1] != u);
        do_check_tree(m, u->child[1]);
      }
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
      }
    }
    u = u->fd;
  } while (u != t);
  assert(head != 0);
}

/*  Check all the chunks in a treebin.  */
static void do_check_treebin(mstate m, bindex_t i) {
  tbinptr* tb = treebin_at(m, i);
1803
  tbinptr* tb = treebin_at(m, i);
1804
  tchunkptr t = *tb;
1804
  tchunkptr t = *tb;
1805
  int empty = (m->treemap & (1U << i)) == 0;
1805
  int empty = (m->treemap & (1U << i)) == 0;
1806
  if (t == 0)
1806
  if (t == 0)
1807
    assert(empty);
1807
    assert(empty);
1808
  if (!empty)
1808
  if (!empty)
1809
    do_check_tree(m, t);
1809
    do_check_tree(m, t);
1810
}
1810
}
1811
 
1811
 
1812
/*  Check all the chunks in a smallbin.  */
1812
/*  Check all the chunks in a smallbin.  */
1813
static void do_check_smallbin(mstate m, bindex_t i) {
1813
static void do_check_smallbin(mstate m, bindex_t i) {
1814
  sbinptr b = smallbin_at(m, i);
1814
  sbinptr b = smallbin_at(m, i);
1815
  mchunkptr p = b->bk;
1815
  mchunkptr p = b->bk;
1816
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
1816
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
1817
  if (p == b)
1817
  if (p == b)
1818
    assert(empty);
1818
    assert(empty);
1819
  if (!empty) {
1819
  if (!empty) {
1820
    for (; p != b; p = p->bk) {
1820
    for (; p != b; p = p->bk) {
1821
      size_t size = chunksize(p);
1821
      size_t size = chunksize(p);
1822
      mchunkptr q;
1822
      mchunkptr q;
1823
      /* each chunk claims to be free */
1823
      /* each chunk claims to be free */
1824
      do_check_free_chunk(m, p);
1824
      do_check_free_chunk(m, p);
1825
      /* chunk belongs in bin */
1825
      /* chunk belongs in bin */
1826
      assert(small_index(size) == i);
1826
      assert(small_index(size) == i);
1827
      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1827
      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1828
      /* chunk is followed by an inuse chunk */
1828
      /* chunk is followed by an inuse chunk */
1829
      q = next_chunk(p);
1829
      q = next_chunk(p);
1830
      if (q->head != FENCEPOST_HEAD)
1830
      if (q->head != FENCEPOST_HEAD)
1831
        do_check_inuse_chunk(m, q);
1831
        do_check_inuse_chunk(m, q);
1832
    }
1832
    }
1833
  }
1833
  }
1834
}
1834
}
1835
 
1835
 
1836
/* Find x in a bin. Used in other check functions. */
1836
/* Find x in a bin. Used in other check functions. */
1837
static int bin_find(mstate m, mchunkptr x) {
1837
static int bin_find(mstate m, mchunkptr x) {
1838
  size_t size = chunksize(x);
1838
  size_t size = chunksize(x);
1839
  if (is_small(size)) {
1839
  if (is_small(size)) {
1840
    bindex_t sidx = small_index(size);
1840
    bindex_t sidx = small_index(size);
1841
    sbinptr b = smallbin_at(m, sidx);
1841
    sbinptr b = smallbin_at(m, sidx);
1842
    if (smallmap_is_marked(m, sidx)) {
1842
    if (smallmap_is_marked(m, sidx)) {
1843
      mchunkptr p = b;
1843
      mchunkptr p = b;
1844
      do {
1844
      do {
1845
        if (p == x)
1845
        if (p == x)
1846
          return 1;
1846
          return 1;
1847
      } while ((p = p->fd) != b);
1847
      } while ((p = p->fd) != b);
1848
    }
1848
    }
1849
  }
1849
  }
1850
  else {
1850
  else {
1851
    bindex_t tidx;
1851
    bindex_t tidx;
1852
    compute_tree_index(size, tidx);
1852
    compute_tree_index(size, tidx);
1853
    if (treemap_is_marked(m, tidx)) {
1853
    if (treemap_is_marked(m, tidx)) {
1854
      tchunkptr t = *treebin_at(m, tidx);
1854
      tchunkptr t = *treebin_at(m, tidx);
1855
      size_t sizebits = size << leftshift_for_tree_index(tidx);
1855
      size_t sizebits = size << leftshift_for_tree_index(tidx);
1856
      while (t != 0 && chunksize(t) != size) {
1856
      while (t != 0 && chunksize(t) != size) {
1857
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1857
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1858
        sizebits <<= 1;
1858
        sizebits <<= 1;
1859
      }
1859
      }
1860
      if (t != 0) {
1860
      if (t != 0) {
1861
        tchunkptr u = t;
1861
        tchunkptr u = t;
1862
        do {
1862
        do {
1863
          if (u == (tchunkptr)x)
1863
          if (u == (tchunkptr)x)
1864
            return 1;
1864
            return 1;
1865
        } while ((u = u->fd) != t);
1865
        } while ((u = u->fd) != t);
1866
      }
1866
      }
1867
    }
1867
    }
1868
  }
1868
  }
1869
  return 0;
1869
  return 0;
1870
}
1870
}
1871
 
1871
 
1872
/* Traverse each chunk and check it; return total */
1872
/* Traverse each chunk and check it; return total */
1873
static size_t traverse_and_check(mstate m) {
1873
static size_t traverse_and_check(mstate m) {
1874
  size_t sum = 0;
1874
  size_t sum = 0;
1875
  if (is_initialized(m)) {
1875
  if (is_initialized(m)) {
1876
    msegmentptr s = &m->seg;
1876
    msegmentptr s = &m->seg;
1877
    sum += m->topsize + TOP_FOOT_SIZE;
1877
    sum += m->topsize + TOP_FOOT_SIZE;
1878
    while (s != 0) {
1878
    while (s != 0) {
1879
      mchunkptr q = align_as_chunk(s->base);
1879
      mchunkptr q = align_as_chunk(s->base);
1880
      mchunkptr lastq = 0;
1880
      mchunkptr lastq = 0;
1881
      assert(pinuse(q));
1881
      assert(pinuse(q));
1882
      while (segment_holds(s, q) &&
1882
      while (segment_holds(s, q) &&
1883
             q != m->top && q->head != FENCEPOST_HEAD) {
1883
             q != m->top && q->head != FENCEPOST_HEAD) {
1884
        sum += chunksize(q);
1884
        sum += chunksize(q);
1885
        if (is_inuse(q)) {
1885
        if (is_inuse(q)) {
1886
          assert(!bin_find(m, q));
1886
          assert(!bin_find(m, q));
1887
          do_check_inuse_chunk(m, q);
1887
          do_check_inuse_chunk(m, q);
1888
        }
1888
        }
1889
        else {
1889
        else {
1890
          assert(q == m->dv || bin_find(m, q));
1890
          assert(q == m->dv || bin_find(m, q));
1891
          assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
1891
          assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
1892
          do_check_free_chunk(m, q);
1892
          do_check_free_chunk(m, q);
1893
        }
1893
        }
1894
        lastq = q;
1894
        lastq = q;
1895
        q = next_chunk(q);
1895
        q = next_chunk(q);
1896
      }
1896
      }
1897
      s = s->next;
1897
      s = s->next;
1898
    }
1898
    }
1899
  }
1899
  }
1900
  return sum;
1900
  return sum;
1901
}
1901
}
1902
 
1902
 
1903
 
1903
 
1904
/* Check all properties of malloc_state. */
1904
/* Check all properties of malloc_state. */
1905
static void do_check_malloc_state(mstate m) {
1905
static void do_check_malloc_state(mstate m) {
1906
  bindex_t i;
1906
  bindex_t i;
1907
  size_t total;
1907
  size_t total;
1908
  /* check bins */
1908
  /* check bins */
1909
  for (i = 0; i < NSMALLBINS; ++i)
1909
  for (i = 0; i < NSMALLBINS; ++i)
1910
    do_check_smallbin(m, i);
1910
    do_check_smallbin(m, i);
1911
  for (i = 0; i < NTREEBINS; ++i)
1911
  for (i = 0; i < NTREEBINS; ++i)
1912
    do_check_treebin(m, i);
1912
    do_check_treebin(m, i);
1913
 
1913
 
1914
  if (m->dvsize != 0) { /* check dv chunk */
1914
  if (m->dvsize != 0) { /* check dv chunk */
1915
    do_check_any_chunk(m, m->dv);
1915
    do_check_any_chunk(m, m->dv);
1916
    assert(m->dvsize == chunksize(m->dv));
1916
    assert(m->dvsize == chunksize(m->dv));
1917
    assert(m->dvsize >= MIN_CHUNK_SIZE);
1917
    assert(m->dvsize >= MIN_CHUNK_SIZE);
1918
    assert(bin_find(m, m->dv) == 0);
1918
    assert(bin_find(m, m->dv) == 0);
1919
  }
1919
  }
1920
 
1920
 
1921
  if (m->top != 0) {   /* check top chunk */
1921
  if (m->top != 0) {   /* check top chunk */
1922
    do_check_top_chunk(m, m->top);
1922
    do_check_top_chunk(m, m->top);
1923
    /*assert(m->topsize == chunksize(m->top)); redundant */
1923
    /*assert(m->topsize == chunksize(m->top)); redundant */
1924
    assert(m->topsize > 0);
1924
    assert(m->topsize > 0);
1925
    assert(bin_find(m, m->top) == 0);
1925
    assert(bin_find(m, m->top) == 0);
1926
  }
1926
  }
1927
 
1927
 
1928
  total = traverse_and_check(m);
1928
  total = traverse_and_check(m);
1929
  assert(total <= m->footprint);
1929
  assert(total <= m->footprint);
1930
  assert(m->footprint <= m->max_footprint);
1930
  assert(m->footprint <= m->max_footprint);
1931
}
1931
}
1932
#endif /* DEBUG */
1932
#endif /* DEBUG */
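
/*
  Note: the do_check_* routines above are compiled only when DEBUG is
  defined; in non-debug builds the corresponding check_* macros used in
  the allocation paths below are defined away to no-ops.
*/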

#define CORRUPTION_ERROR_ACTION(m)                             \
        do {                                                   \
            printf("%s malloc heap corrupted\n",__FUNCTION__); \
            while(1)                                           \
            {                                                  \
                delay(100);                                    \
            }                                                  \
        }while(0)                                              \


#define USAGE_ERROR_ACTION(m, p)                               \
        do {                                                   \
            printf("%s malloc usage error\n",__FUNCTION__);    \
            while(1)                                           \
            {                                                  \
                delay(100);                                    \
            }                                                  \
        }while(0)                                              \

/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros.  Even
  the ones for trees, which are very long but have very short typical
  paths.  This is ugly but reduces reliance on inlining support of
  compilers.
*/

/* Link a free chunk into a smallbin  */
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk from a smallbin  */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
    if (B == F) {\
      clear_smallmap(M, I);\
    }\
    else if (RTCHECK(B == smallbin_at(M,I) ||\
                     (ok_address(M, B) && B->fd == P))) {\
      F->bk = B;\
      B->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F) {\
    clear_smallmap(M, I);\
  }\
  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  assert(is_small(DVS));\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
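
/*
  The "dv" (designated victim) chunk is the preferred chunk for servicing
  small requests; replace_dv above re-bins the old victim (if any) and
  installs P, of size S, as the new one.
*/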

/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}

/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendents
     correspond properly to bit masks.  We use the rightmost descendent
     of x.  We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed.  This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}

/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
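
/*
  Illustrative sketch only (kept out of the build with #if 0): a minimal
  example of how a free chunk is typically handed to the relay macros.
  The helper name and its locals are hypothetical, not part of dlmalloc.
*/
#if 0
static void example_bin_and_unbin(mstate m, mchunkptr q, size_t qsize) {
  insert_chunk(m, q, qsize);   /* place q into a smallbin or treebin */
  unlink_chunk(m, q, qsize);   /* take it out again before reusing it */
}
#endif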


/* Relays to internal calls to malloc/free from realloc, memalign etc */

#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
#define internal_malloc(m, b)\
  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem)\
   if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) malloc(b)
#define internal_free(m, mem) free(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */


static inline void* os_mmap(size_t size)
{
  void* ptr = KernelAlloc(size);
  printf("%s %x %d bytes\n",__FUNCTION__, ptr, size);
  return (ptr != 0)? ptr: MFAIL;
}

static inline int os_munmap(void* ptr, size_t size)
{
    return (KernelFree(ptr) != 0) ? 0 : -1;
}


#define MMAP_DEFAULT(s)        os_mmap(s)
#define MUNMAP_DEFAULT(a, s)   os_munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s)
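
/*
  In this KolibriOS port the mmap-style hooks are backed by the kernel
  allocator: os_mmap obtains pages from KernelAlloc and os_munmap releases
  them with KernelFree.  KernelFree only needs the base pointer, so the
  size argument of os_munmap is ignored.
*/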


/* -----------------------  Direct-mmapping chunks ----------------------- */

/*
  Directly mmapped chunks are set up with an offset to the start of
  the mmapped region stored in the prev_foot field of the chunk. This
  allows reconstruction of the required argument to MUNMAP when freed,
  and also allows adjustment of the returned chunk to meet alignment
  requirements (especially in memalign).
*/
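
/*
  Sketch of the bookkeeping (assumed from the layout set up in mmap_alloc
  below): for a directly mmapped chunk p, the original mapping base should
  be recoverable as (char*)p - p->prev_foot and its total length as
  chunksize(p) + p->prev_foot + MMAP_FOOT_PAD, which is what a matching
  munmap call needs.
*/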

/* Malloc using mmap */
static void* mmap_alloc(mstate m, size_t nb) {
    size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }
  if (mmsize > nb) {     /* Check for wrap around 0 */
    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
            size_t offset = align_offset(chunk2mem(mm));
            size_t psize = mmsize - offset - MMAP_FOOT_PAD;
            mchunkptr p = (mchunkptr)(mm + offset);
            p->prev_foot = offset;
            p->head = psize;
            mark_inuse_foot(m, p, psize);
            chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

            if (m->least_addr == 0 || mm < m->least_addr)
                m->least_addr = mm;
            if ((m->footprint += mmsize) > m->max_footprint)
                m->max_footprint = m->footprint;
            assert(is_aligned(chunk2mem(p)));
            check_mmapped_chunk(m, p);
            return chunk2mem(p);
        }
    }
    return 0;
}

/* Realloc using mmap */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
    size_t oldsize = chunksize(oldp);
  (void)flags; /* placate people compiling -Wunused */
    if (is_small(nb)) /* Can't shrink mmap regions below small size */
        return 0;
  /* Keep old chunk if big enough but not too big */
    if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
        return oldp;
  else {
        size_t offset = oldp->prev_foot;
        size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
        size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
        char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
    if (cp != CMFAIL) {
            mchunkptr newp = (mchunkptr)(cp + offset);
            size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
            newp->head = psize;
            mark_inuse_foot(m, newp, psize);
            chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
            chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

      if (cp < m->least_addr)
        m->least_addr = cp;
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
      return newp;
    }
    }
  return 0;
}


/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize) {
  /* Ensure alignment */
    size_t offset = align_offset(chunk2mem(p));
    p = (mchunkptr)((char*)p + offset);
    psize -= offset;

    m->top = p;
    m->topsize = psize;
    p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
    chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
    m->trim_check = mparams.trim_threshold; /* reset on each update */
}

/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m) {
  /* Establish circular links for smallbins */
    bindex_t i;
    for (i = 0; i < NSMALLBINS; ++i) {
        sbinptr bin = smallbin_at(m,i);
        bin->fd = bin->bk = bin;
    }
}
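
/*
  An empty smallbin is simply a bin header linked to itself, so the
  emptiness tests elsewhere in this file (for example in do_check_smallbin
  above) reduce to checking that the bin's fd/bk point back to the bin.
*/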

/* Allocate chunk and prepend remainder with chunk in successor base. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
    mchunkptr p = align_as_chunk(newbase);
    mchunkptr oldfirst = align_as_chunk(oldbase);
    size_t psize = (char*)oldfirst - (char*)p;
    mchunkptr q = chunk_plus_offset(p, nb);
    size_t qsize = psize - nb;
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);

    assert((char*)oldfirst > (char*)q);
    assert(pinuse(oldfirst));
    assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
    if (oldfirst == m->top) {
        size_t tsize = m->topsize += qsize;
        m->top = q;
        q->head = tsize | PINUSE_BIT;
        check_top_chunk(m, q);
    }
    else if (oldfirst == m->dv) {
        size_t dsize = m->dvsize += qsize;
        m->dv = q;
        set_size_and_pinuse_of_free_chunk(q, dsize);
    }
    else {
        if (!is_inuse(oldfirst)) {
            size_t nsize = chunksize(oldfirst);
            unlink_chunk(m, oldfirst, nsize);
            oldfirst = chunk_plus_offset(oldfirst, nsize);
            qsize += nsize;
        }
        set_free_with_pinuse(q, qsize, oldfirst);
        insert_chunk(m, q, qsize);
        check_free_chunk(m, q);
    }

    check_malloced_chunk(m, chunk2mem(p), nb);
    return chunk2mem(p);
}

/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
    char* old_top = (char*)m->top;
    msegmentptr oldsp = segment_holding(m, old_top);
    char* old_end = oldsp->base + oldsp->size;
    size_t ssize = pad_request(sizeof(struct malloc_segment));
    char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    size_t offset = align_offset(chunk2mem(rawsp));
    char* asp = rawsp + offset;
    char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
    mchunkptr sp = (mchunkptr)csp;
    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
    mchunkptr tnext = chunk_plus_offset(sp, ssize);
    mchunkptr p = tnext;
    int nfences = 0;

  /* reset top to new space */
    init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
    assert(is_aligned(ss));
    set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
    *ss = m->seg; /* Push current record */
    m->seg.base = tbase;
    m->seg.size = tsize;
    m->seg.sflags = mmapped;
    m->seg.next = ss;

  /* Insert trailing fenceposts */
    for (;;) {
        mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
        p->head = FENCEPOST_HEAD;
        ++nfences;
        if ((char*)(&(nextp->head)) < old_end)
            p = nextp;
        else
            break;
    }
    assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
    if (csp != old_top) {
        mchunkptr q = (mchunkptr)old_top;
        size_t psize = csp - old_top;
        mchunkptr tn = chunk_plus_offset(q, psize);
        set_free_with_pinuse(q, psize, tn);
        insert_chunk(m, q, psize);
    }

  check_top_chunk(m, m->top);
}

/* -------------------------- System allocation -------------------------- */

/* Get memory from system using MORECORE or MMAP */
static void* sys_alloc(mstate m, size_t nb) {
    char* tbase = CMFAIL;
    size_t tsize = 0;
    flag_t mmap_flag = 0;
  size_t asize; /* allocation size */

    ensure_initialization();

    printf("%s %d bytes\n", __FUNCTION__, nb);

  /* Directly map large chunks, but only if already initialized */
  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
        void* mem = mmap_alloc(m, nb);
        if (mem != 0)
            return mem;
    }

  asize = granularity_align(nb + SYS_ALLOC_PADDING);
  if (asize <= nb)
    return 0; /* wraparound */
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + asize;
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }

  /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)

   In all cases, we need to request enough bytes from system to ensure
   we can malloc nb bytes upon success, so pad with enough space for
   top_foot, plus alignment-pad to make sure we don't lose bytes if
   not on boundary, and round this up to a granularity unit.
  */
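
  /*
    In this port only strategy 2 is actually live: the MORECORE-based
    paths below are compiled out with #if 0, so new space is obtained
    through CALL_MMAP, which is routed to os_mmap/KernelAlloc above.
  */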

#if 0
  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    char* br = CMFAIL;
    size_t ssize = asize; /* sbrk call size */
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    ACQUIRE_MALLOC_GLOBAL_LOCK();

    if (ss == 0) {  /* First time through or recovery */
      char* base = (char*)CALL_MORECORE(0);
      if (base != CMFAIL) {
        size_t fp;
        /* Adjust to end on a page boundary */
        if (!is_page_aligned(base))
          ssize += (page_align((size_t)base) - (size_t)base);
        fp = m->footprint + ssize; /* recheck limits */
        if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
            (m->footprint_limit == 0 ||
             (fp > m->footprint && fp <= m->footprint_limit)) &&
            (br = (char*)(CALL_MORECORE(ssize))) == base) {
          tbase = base;
          tsize = ssize;
        }
      }
    }
    else {
      /* Subtract out existing available top space from MORECORE request. */
      ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
      /* Use mem here only if it did continuously extend old space */
      if (ssize < HALF_MAX_SIZE_T &&
          (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
        tbase = br;
        tsize = ssize;
      }
    }

    if (tbase == CMFAIL) {    /* Cope with partial failure */
      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
        if (ssize < HALF_MAX_SIZE_T &&
            ssize < nb + SYS_ALLOC_PADDING) {
          size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
          if (esize < HALF_MAX_SIZE_T) {
            char* end = (char*)CALL_MORECORE(esize);
            if (end != CMFAIL)
              ssize += esize;
            else {            /* Can't use; try to release */
              (void) CALL_MORECORE(-ssize);
              br = CMFAIL;
            }
          }
        }
      }
      if (br != CMFAIL) {    /* Use the space we did get */
        tbase = br;
        tsize = ssize;
      }
      else
        disable_contiguous(m); /* Don't try contiguous path in the future */
    }

    RELEASE_MALLOC_GLOBAL_LOCK();
  }
#endif

  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
    char* mp = (char*)(CALL_MMAP(asize));
    if (mp != CMFAIL) {
      tbase = mp;
      tsize = asize;
      mmap_flag = USE_MMAP_BIT;
    }
  }

#if 0
  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
    if (asize < HALF_MAX_SIZE_T) {
      char* br = CMFAIL;
      char* end = CMFAIL;
      ACQUIRE_MALLOC_GLOBAL_LOCK();
      br = (char*)(CALL_MORECORE(asize));
      end = (char*)(CALL_MORECORE(0));
      RELEASE_MALLOC_GLOBAL_LOCK();
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {
          tbase = br;
          tsize = ssize;
        }
      }
    }
  }
#endif

  if (tbase != CMFAIL) {

        if ((m->footprint += tsize) > m->max_footprint)
            m->max_footprint = m->footprint;
2601
 
2601
 
2602
    if (!is_initialized(m)) { /* first-time initialization */
2602
    if (!is_initialized(m)) { /* first-time initialization */
2603
            if (m->least_addr == 0 || tbase < m->least_addr)
2603
            if (m->least_addr == 0 || tbase < m->least_addr)
2604
                m->least_addr = tbase;
2604
                m->least_addr = tbase;
2605
            m->seg.base = tbase;
2605
            m->seg.base = tbase;
2606
             m->seg.size = tsize;
2606
             m->seg.size = tsize;
2607
            m->seg.sflags = mmap_flag;
2607
            m->seg.sflags = mmap_flag;
2608
            m->magic = mparams.magic;
2608
            m->magic = mparams.magic;
2609
            m->release_checks = MAX_RELEASE_CHECK_RATE;
2609
            m->release_checks = MAX_RELEASE_CHECK_RATE;
2610
            init_bins(m);
2610
            init_bins(m);
2611
 
2611
 
2612
            if (is_global(m))
2612
            if (is_global(m))
2613
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2613
                init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2614
            else
2614
            else
2615
            {
2615
            {
2616
        /* Offset top by embedded malloc_state */
2616
        /* Offset top by embedded malloc_state */
2617
                mchunkptr mn = next_chunk(mem2chunk(m));
2617
                mchunkptr mn = next_chunk(mem2chunk(m));
2618
                init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2618
                init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2619
            }
2619
            }
2620
        }
2620
        }
2621
 
2621
 
2622
    else {
2622
    else {
2623
      /* Try to merge with an existing segment */
2623
      /* Try to merge with an existing segment */
2624
            msegmentptr sp = &m->seg;
2624
            msegmentptr sp = &m->seg;
2625
      /* Only consider most recent segment if traversal suppressed */
2625
      /* Only consider most recent segment if traversal suppressed */
2626
            while (sp != 0 && tbase != sp->base + sp->size)
2626
            while (sp != 0 && tbase != sp->base + sp->size)
2627
                sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2627
                sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2628
      if (sp != 0 &&
2628
      if (sp != 0 &&
2629
          !is_extern_segment(sp) &&
2629
          !is_extern_segment(sp) &&
2630
               (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2630
               (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2631
          segment_holds(sp, m->top)) { /* append */
2631
          segment_holds(sp, m->top)) { /* append */
2632
                sp->size += tsize;
2632
                sp->size += tsize;
2633
                init_top(m, m->top, m->topsize + tsize);
2633
                init_top(m, m->top, m->topsize + tsize);
2634
            }
2634
            }
2635
      else {
2635
      else {
2636
                if (tbase < m->least_addr)
2636
                if (tbase < m->least_addr)
2637
                    m->least_addr = tbase;
2637
                    m->least_addr = tbase;
2638
                sp = &m->seg;
2638
                sp = &m->seg;
2639
                while (sp != 0 && sp->base != tbase + tsize)
2639
                while (sp != 0 && sp->base != tbase + tsize)
2640
                    sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2640
                    sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2641
                if (sp != 0 &&
2641
                if (sp != 0 &&
2642
                    !is_extern_segment(sp) &&
2642
                    !is_extern_segment(sp) &&
2643
            (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2643
            (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2644
                    char* oldbase = sp->base;
2644
                    char* oldbase = sp->base;
2645
                    sp->base = tbase;
2645
                    sp->base = tbase;
2646
                    sp->size += tsize;
2646
                    sp->size += tsize;
2647
                    return prepend_alloc(m, tbase, oldbase, nb);
2647
                    return prepend_alloc(m, tbase, oldbase, nb);
2648
                }
2648
                }
2649
                else
2649
                else
2650
                    add_segment(m, tbase, tsize, mmap_flag);
2650
                    add_segment(m, tbase, tsize, mmap_flag);
2651
            }
2651
            }
2652
        }
2652
        }
2653
 
2653
 
2654
    if (nb < m->topsize) { /* Allocate from new or extended top space */
2654
    if (nb < m->topsize) { /* Allocate from new or extended top space */
2655
            size_t rsize = m->topsize -= nb;
2655
            size_t rsize = m->topsize -= nb;
2656
            mchunkptr p = m->top;
2656
            mchunkptr p = m->top;
2657
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
2657
            mchunkptr r = m->top = chunk_plus_offset(p, nb);
2658
            r->head = rsize | PINUSE_BIT;
2658
            r->head = rsize | PINUSE_BIT;
2659
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2659
            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2660
            check_top_chunk(m, m->top);
2660
            check_top_chunk(m, m->top);
2661
            check_malloced_chunk(m, chunk2mem(p), nb);
2661
            check_malloced_chunk(m, chunk2mem(p), nb);
2662
            return chunk2mem(p);
2662
            return chunk2mem(p);
2663
        }
2663
        }
2664
    }
2664
    }
2665
 
2665
 
2666
//    MALLOC_FAILURE_ACTION;
2666
//    MALLOC_FAILURE_ACTION;
2667
    return 0;
2667
    return 0;
2668
}
2668
}
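
/* In this port the two MORECORE-based paths above are compiled out with
   "#if 0"; sys_alloc obtains new space only through CALL_MMAP and then
   either performs first-time initialization of the malloc_state, merges
   the new block into an adjacent existing segment (appending to top or
   prepending via prepend_alloc), or records it with add_segment before
   carving the request out of the (possibly extended) top chunk. */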

/* -----------------------  system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  int nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    ++nsegs;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          m->footprint -= size;
          /* unlink obsoleted record */
          sp = pred;
          sp->next = next;
        }
        else { /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
      break;
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
  return released;
}
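
/* free() counts down m->release_checks each time it bins a large chunk and
   calls release_unused_segments() when the counter reaches zero; the reset
   above (to at least MAX_RELEASE_CHECK_RATE, or to the segment count when
   that is larger) keeps this periodic scan cheap even when many segments
   exist. */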

static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  ensure_initialization();
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char*)m->top);

      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
          if (HAVE_MMAP &&
              sp->size >= extra &&
              !has_segment_link(m, sp)) { /* can't shrink if pinned */
            size_t newsize = sp->size - extra;
            (void)newsize; /* placate people compiling -Wunused-variable */
            /* Prefer mremap, fall back to munmap */
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
              released = extra;
            }
          }
        }
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MALLOC_GLOBAL_LOCK();
          {
            /* Make sure end of memory is where we last set it. */
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
            }
          }
          RELEASE_MALLOC_GLOBAL_LOCK();
        }
      }

      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      }
    }

    /* Unmap any unused mmapped segments */
    if (HAVE_MMAP)
      released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
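
#if 0
/* Illustrative sketch only (not part of this file): upstream dlmalloc
   exposes trimming to callers through a malloc_trim()-style wrapper that
   simply brackets sys_trim() with the same lock macros used elsewhere in
   this port. */
int malloc_trim(size_t pad)
{
  int result;
  ensure_initialization();
  PREACTION(gm);
  result = sys_trim(gm, pad);   /* returns 1 if any memory was released */
  POSTACTION(gm);
  return result;
}
#endif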

/* Consolidate and bin a chunk. Differs from exported versions
   of free mainly in that the chunk need not be marked as inuse.
*/
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
  mchunkptr next = chunk_plus_offset(p, psize);
  if (!pinuse(p)) {
    mchunkptr prev;
    size_t prevsize = p->prev_foot;
    if (is_mmapped(p)) {
      psize += prevsize + MMAP_FOOT_PAD;
      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
        m->footprint -= psize;
      return;
    }
    prev = chunk_minus_offset(p, prevsize);
    psize += prevsize;
    p = prev;
    if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
      if (p != m->dv) {
        unlink_chunk(m, p, prevsize);
      }
      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
        m->dvsize = psize;
        set_free_with_pinuse(p, psize, next);
        return;
      }
    }
    else {
      CORRUPTION_ERROR_ACTION(m);
      return;
    }
  }
  if (RTCHECK(ok_address(m, next))) {
    if (!cinuse(next)) {  /* consolidate forward */
      if (next == m->top) {
        size_t tsize = m->topsize += psize;
        m->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        return;
      }
      else if (next == m->dv) {
        size_t dsize = m->dvsize += psize;
        m->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return;
      }
      else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(m, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == m->dv) {
          m->dvsize = psize;
          return;
        }
      }
    }
    else {
      set_free_with_pinuse(p, psize, next);
    }
    insert_chunk(m, p, psize);
  }
  else {
    CORRUPTION_ERROR_ACTION(m);
  }
}
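
/* dispose_chunk() is the internal sibling of free(): try_realloc_chunk()
   below hands it the split-off remainder of a resized block so it can be
   coalesced with its neighbors and rebinned without the exported free()
   bookkeeping. */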

/* ---------------------------- malloc --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }

  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /*  If dv is a better fit, return 0 so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}
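
/* The descent in tmalloc_large() walks the treebin as a bitwise trie:
   sizebits holds nb shifted so that the bit distinguishing chunks at the
   current level sits in the top position, (sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1
   selects child[0] or child[1] for that level, and sizebits <<= 1 exposes
   the next bit for the level below.  rst remembers the deepest untaken
   right subtree so the search can fall back to the least subtree holding
   sizes greater than nb when the exact path runs out. */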

/* allocate a small request from the best fitting chunk in a treebin */
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);
  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE)
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);
      }
      return chunk2mem(v);
    }
  }

  CORRUPTION_ERROR_ACTION(m);
  return 0;
}
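
/* tmalloc_small() is only reached (from malloc below) after checking that
   gm->treemap is nonzero, so least_bit(m->treemap) always names an occupied
   treebin and the unchecked chunksize(t) dereference is safe. */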


void* malloc(size_t bytes)
{
  /*
     Basic algorithm:
     If a small request (< 256 bytes minus per-chunk overhead):
       1. If one exists, use a remainderless chunk in associated smallbin.
          (Remainderless means that there are too few excess bytes to
          represent as a chunk.)
       2. If it is big enough, use the dv chunk, which is normally the
          chunk adjacent to the one used for the most recent small request.
       3. If one exists, split the smallest available chunk in a bin,
          saving remainder in dv.
       4. If it is big enough, use the top chunk.
       5. If available, get memory from system and use it
     Otherwise, for a large request:
       1. Find the smallest available binned chunk that fits, and use it
          if it is better fitting than dv chunk, splitting if necessary.
       2. If better fitting than any binned chunk, use the dv chunk.
       3. If it is big enough, use the top chunk.
       4. If request size >= mmap threshold, try to directly mmap this chunk.
       5. If available, get memory from system and use it

     The ugly goto's here ensure that postaction occurs along all paths.
  */

  ensure_initialization(); /* initialize in sys_alloc if not using locks */

  PREACTION(gm);
  {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
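      /* The "(smallbits & 0x3U)" test above inspects this bin and the next
         one in a single check, and "idx += ~smallbits & 1" switches to the
         next bin when the exact one is empty.  Adjacent smallbins are one
         alignment granule apart (8 bytes in the default configuration),
         which is below MIN_CHUNK_SIZE, so a chunk taken from the next bin
         cannot be split and is handed out whole; that is the
         "remainderless" fit. */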

      else if (nb > gm->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);
//        printf("%s %p %d\n", __FUNCTION__, mem, bytes);
    return mem;
  }
//    FAIL();
  return 0;
}

/* ---------------------------- free --------------------------- */

void free(void* mem){
  /*
     Consolidate freed chunks with preceding or succeeding bordering
     free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
  */

//  dbgprintf("%s %p\n", __FUNCTION__, mem);

  if (mem != 0) {
    mchunkptr p  = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */
    PREACTION(fm);
    {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
  erroraction:
      USAGE_ERROR_ACTION(fm, p);
  postaction:
      POSTACTION(fm);
    }
  }

//    LEAVE();

#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}
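
/* Two trimming mechanisms are visible in free() above: when a freed chunk
   merges into the top chunk and should_trim() reports that top has grown
   past the trim threshold, sys_trim() runs immediately; independently,
   every insertion into a treebin counts down release_checks so that
   completely unused mmapped segments are eventually returned by
   release_unused_segments(). */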

void* calloc(size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = malloc(req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
  return mem;
}
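
/* Overflow check in calloc(), worked through on a 32-bit size_t: with
   n_elements == elem_size == 0x10000 the product wraps to 0.  Both operands
   have bits above 0xffff, so the guard does not short-circuit, and
   req / n_elements == 0 != elem_size exposes the overflow, forcing
   req = MAX_SIZE_T and a clean malloc failure.  When both operands are
   below 0x10000 the product cannot exceed 32 bits, so the division is
   skipped entirely. */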
3236
 
3236
 
3237
/* ------------ Internal support for realloc, memalign, etc -------------- */
3237
/* ------------ Internal support for realloc, memalign, etc -------------- */
3238
 
3238
 
3239
/* Try to realloc; only in-place unless can_move true */
3239
/* Try to realloc; only in-place unless can_move true */
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
                                   int can_move) {
  mchunkptr newp = 0;
  size_t oldsize = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, oldsize);
  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
              ok_next(p, next) && ok_pinuse(next))) {
    if (is_mmapped(p)) {
      newp = mmap_resize(m, p, nb, can_move);
    }
    else if (oldsize >= nb) {             /* already big enough */
      size_t rsize = oldsize - nb;
      if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
        mchunkptr r = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        set_inuse(m, r, rsize);
        dispose_chunk(m, r, rsize);
      }
      newp = p;
    }
    else if (next == m->top) {  /* extend into top */
      if (oldsize + m->topsize > nb) {
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = p;
      }
    }
    else if (next == m->dv) { /* extend into dv */
      size_t dvs = m->dvsize;
      if (oldsize + dvs >= nb) {
        size_t dsize = oldsize + dvs - nb;
        if (dsize >= MIN_CHUNK_SIZE) {
          mchunkptr r = chunk_plus_offset(p, nb);
          mchunkptr n = chunk_plus_offset(r, dsize);
          set_inuse(m, p, nb);
          set_size_and_pinuse_of_free_chunk(r, dsize);
          clear_pinuse(n);
          m->dvsize = dsize;
          m->dv = r;
        }
        else { /* exhaust dv */
          size_t newsize = oldsize + dvs;
          set_inuse(m, p, newsize);
          m->dvsize = 0;
          m->dv = 0;
        }
        newp = p;
      }
    }
    else if (!cinuse(next)) { /* extend into next free chunk */
      size_t nextsize = chunksize(next);
      if (oldsize + nextsize >= nb) {
        size_t rsize = oldsize + nextsize - nb;
        unlink_chunk(m, next, nextsize);
        if (rsize < MIN_CHUNK_SIZE) {
          size_t newsize = oldsize + nextsize;
          set_inuse(m, p, newsize);
        }
        else {
          mchunkptr r = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, r, rsize);
          dispose_chunk(m, r, rsize);
        }
        newp = p;
      }
    }
  }
  else {
    USAGE_ERROR_ACTION(m, chunk2mem(p));
  }
  return newp;
}
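
/*
  realloc(oldmem, bytes):
    - oldmem == 0 behaves like malloc(bytes);
    - bytes >= MAX_REQUEST yields 0 (MALLOC_FAILURE_ACTION is commented
      out here, so no failure action such as setting errno runs);
    - with REALLOC_ZERO_BYTES_FREES defined, bytes == 0 frees oldmem;
    - otherwise try_realloc_chunk is tried first, and only if it cannot
      resize does realloc fall back to malloc + memcpy + free.
*/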
void* realloc(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = malloc(bytes);
  }
  else if (bytes >= MAX_REQUEST) {
//    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    free(oldmem);
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    PREACTION(m); {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {
        mem = internal_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          internal_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
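
/*
  Minimal usage sketch (illustrative only, not part of this file): a
  grow that try_realloc_chunk can satisfy in place returns the original
  pointer without copying; otherwise the malloc/memcpy/free fallback
  above runs.

      char* buf = (char*)malloc(100);
      if (buf != 0) {
          char* bigger = (char*)realloc(buf, 200);
          if (bigger != 0)
              buf = bigger;   /* may equal the old pointer */
          /* on failure buf is still valid and must still be freed */
      }
*/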