/* |
This is a version (aka dlmalloc) of malloc/free/realloc written by |
Doug Lea and released to the public domain, as explained at |
http://creativecommons.org/publicdomain/zero/1.0/ Send questions, |
comments, complaints, performance data, etc to dl@cs.oswego.edu |
|
* Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea |
Note: There may be an updated version of this malloc obtainable at |
ftp://gee.cs.oswego.edu/pub/misc/malloc.c |
Check before installing! |
compile-time and dynamic tuning options. |
|
For convenience, an include file for code using this malloc is at: |
ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h |
You don't really need this .h file unless you call functions not |
defined in your system include files. The .h file contains only the |
excerpts from this file needed for using this malloc on ANSI C/C++ |
in this file to customize settings or to avoid overheads associated |
with library versions. |
|
* Vital statistics: |
|
Supported pointer/size_t representation: 4 or 8 bytes |
size_t MUST be an unsigned type of the same width as |
pointers. (If you are using an ancient system that declares |
size_t as a signed type, or need it to be a different width |
than pointers, you can use a previous release of this malloc |
(e.g. 2.7.2) supporting these.) |
|
Alignment: 8 bytes (minimum) |
This suffices for nearly all current machines and C compilers. |
However, you can define MALLOC_ALIGNMENT to be wider than this |
if necessary (up to 128 bytes), at the expense of using more space.
|
Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) |
8 or 16 bytes (if 8byte sizes) |
Each malloced chunk has a hidden word of overhead holding size |
and status information, and additional cross-check word |
if FOOTERS is defined. |
|
Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) |
8-byte ptrs: 32 bytes (including overhead) |
|
Even a request for zero bytes (i.e., malloc(0)) returns a |
pointer to something of the minimum allocatable size. |
The maximum overhead wastage (i.e., number of extra bytes |
allocated than were requested in malloc) is less than or equal |
to the minimum size, except for requests >= mmap_threshold that |
are serviced via mmap(), where the worst case wastage is about |
32 bytes plus the remainder from a system page (the minimal |
mmap unit); typically 4096 or 8192 bytes. |
|
Security: static-safe; optionally more or less |
The "security" of malloc refers to the ability of malicious |
code to accentuate the effects of errors (for example, freeing |
space that is not currently malloc'ed or overwriting past the |
ends of chunks) in code that calls malloc. This malloc |
guarantees not to modify any memory locations below the base of |
heap, i.e., static variables, even in the presence of usage |
errors. The routines additionally detect most improper frees |
and reallocs. All this holds as long as the static bookkeeping |
for malloc itself is not corrupted by some other means. This |
is only one aspect of security -- these checks do not, and |
cannot, detect all possible programming errors. |
|
If FOOTERS is defined nonzero, then each allocated chunk |
carries an additional check word to verify that it was malloced |
from its space. These check words are the same within each |
execution of a program using malloc, but differ across |
executions, so externally crafted fake chunks cannot be |
freed. This improves security by rejecting frees/reallocs that |
could corrupt heap memory, in addition to the checks preventing |
writes to statics that are always on. This may further improve |
security at the expense of time and space overhead. (Note that |
FOOTERS may also be worth using with MSPACES.) |
|
By default detected errors cause the program to abort (calling |
"abort()"). You can override this to instead proceed past |
errors by defining PROCEED_ON_ERROR. In this case, a bad free |
has no effect, and a malloc that encounters a bad address |
caused by user overwrites will ignore the bad address by |
dropping pointers and indices to all known memory. This may |
be appropriate for programs that should continue if at all |
possible in the face of programming errors, although they may |
run out of memory because dropped memory is never reclaimed. |
|
If you don't like either of these options, you can define |
CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything |
else. And if you are sure that your program using malloc has
no errors or vulnerabilities, you can define INSECURE to 1, |
which might (or might not) provide a small performance improvement. |
|
It is also possible to limit the maximum total allocatable |
space, using malloc_set_footprint_limit. This is not |
designed as a security feature in itself (calls to set limits |
are not screened or privileged), but may be useful as one |
aspect of a secure implementation. |
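
  For example (illustrative; the argument is a byte count):
    malloc_set_footprint_limit((size_t)64 << 20); // cap footprint at 64MB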
|
Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero |
When USE_LOCKS is defined, each public call to malloc, free, |
etc is surrounded with a lock. By default, this uses a plain |
pthread mutex, win32 critical section, or a spin-lock if
available for the platform and not disabled by setting |
USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined, |
recursive versions are used instead (which are not required for |
base functionality but may be needed in layered extensions). |
Using a global lock is not especially fast, and can be a major |
bottleneck. It is designed only to provide minimal protection |
in concurrent environments, and to provide a basis for |
extensions. If you are using malloc in a concurrent program, |
consider instead using nedmalloc |
(http://www.nedprod.com/programs/portable/nedmalloc/) or |
ptmalloc (See http://www.malloc.de), which are derived from |
versions of this malloc. |
|
System requirements: Any combination of MORECORE and/or MMAP/MUNMAP |
This malloc can use unix sbrk or any emulation (invoked using |
the CALL_MORECORE macro) and/or mmap/munmap or any emulation |
(invoked using CALL_MMAP/CALL_MUNMAP) to get and release system |
memory. On most unix systems, it tends to work best if both |
MORECORE and MMAP are enabled. On Win32, it uses emulations |
based on VirtualAlloc. It also uses common C library functions |
like memset. |
|
Compliance: I believe it is compliant with the Single Unix Specification |
(See http://www.unix.org). Also SVID/XPG, ANSI C, and probably |
others as well. |
|
* Overview of algorithms |
|
This is not the fastest, most space-conserving, most portable, or |
most tunable malloc ever written. However it is among the fastest |
while also being among the most space-conserving, portable and |
tunable. Consistent balance across these factors results in a good |
general-purpose allocator for malloc-intensive programs. |
|
In most ways, this malloc is a best-fit allocator. Generally, it |
chooses the best-fitting existing chunk for a request, with ties |
broken in approximately least-recently-used order. (This strategy |
normally maintains low fragmentation.) However, for requests less |
than 256 bytes, it deviates from best-fit when there is not an
exactly fitting available chunk by preferring to use space adjacent |
to that used for the previous small request, as well as by breaking |
ties in approximately most-recently-used order. (These enhance |
locality of series of small allocations.) And for very large requests |
(>= 256Kb by default), it relies on system memory mapping |
facilities, if supported. (This helps avoid carrying around and |
possibly fragmenting memory used only for large chunks.) |
|
All operations (except malloc_stats and mallinfo) have execution |
times that are bounded by a constant factor of the number of bits in |
a size_t, not counting any clearing in calloc or copying in realloc, |
or actions surrounding MORECORE and MMAP that have times |
proportional to the number of non-contiguous regions returned by |
system allocation routines, which is often just 1. In real-time |
applications, you can optionally suppress segment traversals using |
NO_SEGMENT_TRAVERSAL, which assures bounded execution even when |
system allocators return non-contiguous spaces, at the typical |
expense of carrying around more memory and increased fragmentation. |
|
The implementation is not very modular and seriously overuses |
macros. Perhaps someday all C compilers will do as good a job |
inlining modular code as can now be done by brute-force expansion, |
but now, enough of them seem not to. |
|
Some compilers issue a lot of warnings about code that is |
dead/unreachable only on some platforms, and also about intentional |
uses of negation on unsigned types. All known cases of each can be |
ignored. |
|
For a longer but out of date high-level description, see |
http://gee.cs.oswego.edu/dl/html/malloc.html |
|
* MSPACES |
If MSPACES is defined, then in addition to malloc, free, etc., |
this file also defines mspace_malloc, mspace_free, etc. These |
are versions of malloc routines that take an "mspace" argument |
obtained using create_mspace, to control all internal bookkeeping. |
If ONLY_MSPACES is defined, only these versions are compiled. |
So if you would like to use this allocator for only some allocations, |
and your system malloc for others, you can compile with |
ONLY_MSPACES and then do something like... |
static mspace mymspace = create_mspace(0,0); // for example |
#define mymalloc(bytes) mspace_malloc(mymspace, bytes) |
|
(Note: If you only need one instance of an mspace, you can instead |
use "USE_DL_PREFIX" to relabel the global malloc.) |
|
You can similarly create thread-local allocators by storing |
mspaces as thread-locals. For example: |
static __thread mspace tlms = 0; |
void* tlmalloc(size_t bytes) { |
if (tlms == 0) tlms = create_mspace(0, 0); |
return mspace_malloc(tlms, bytes); |
} |
void tlfree(void* mem) { mspace_free(tlms, mem); } |
|
Unless FOOTERS is defined, each mspace is completely independent. |
You cannot allocate from one and free to another (although |
conformance is only weakly checked, so usage errors are not always |
caught). If FOOTERS is defined, then each chunk carries around a tag |
indicating its originating mspace, and frees are directed to their |
originating spaces. Normally, this requires use of locks. |
|
------------------------- Compile-time options --------------------------- |
|
Be careful in setting #define values for numerical constants of type |
size_t. On some systems, literal values are not automatically extended |
to size_t precision unless they are explicitly casted. You can also |
use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. |
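
  For example, on a 64-bit system the literal (1 << 32) is undefined
  (the shift exceeds the width of int), while ((size_t)1 << 32) and
  (SIZE_T_ONE << 32) are well-defined.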
|
WIN32 default: defined if _WIN32 defined |
Defining WIN32 sets up defaults for MS environment and compilers. |
Otherwise defaults are for unix. Beware that there seem to be some |
cases where this malloc might not be a pure drop-in replacement for |
Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.
SetDIBits()) may be due to bugs in some video driver implementations |
when pixel buffers are malloc()ed, and the region spans more than |
one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) |
default granularity, pixel buffers may straddle virtual allocation |
regions more often than when using the Microsoft allocator. You can |
avoid this by using VirtualAlloc() and VirtualFree() for all pixel |
buffers rather than using malloc(). If this is not possible, |
recompile this malloc with a larger DEFAULT_GRANULARITY. Note: |
in cases where MSC and gcc (cygwin) are known to differ on WIN32, |
conditions use _MSC_VER to distinguish them. |
|
DLMALLOC_EXPORT default: extern |
Defines how public APIs are declared. If you want to export via a |
Windows DLL, you might define this as |
#define DLMALLOC_EXPORT extern __declspec(dllexport) |
If you want a POSIX ELF shared object, you might use |
#define DLMALLOC_EXPORT extern __attribute__((visibility("default"))) |
|
MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *)) |
Controls the minimum alignment for malloc'ed chunks. It must be a |
power of two and at least 8, even on machines for which smaller |
alignments would suffice. It may be defined as larger than this |
though. Note however that code and data structures are optimized for |
the case of 8-byte alignment. |
|
MSPACES default: 0 (false) |
If true, compile in support for independent allocation spaces. |
This is only supported if HAVE_MMAP is true. |
|
ONLY_MSPACES default: 0 (false) |
If true, only compile in mspace versions, not regular versions. |
|
USE_LOCKS default: 0 (false) |
Causes each call to each public routine to be surrounded with |
pthread or WIN32 mutex lock/unlock. (If set true, this can be |
overridden on a per-mspace basis for mspace versions.) If set to a |
non-zero value other than 1, locks are used, but their |
implementation is left out, so lock functions must be supplied manually, |
as described below. |
|
USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available |
If true, uses custom spin locks for locking. This is currently |
supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
MS compilers. Otherwise, posix locks or win32 critical sections are |
used. |
|
USE_RECURSIVE_LOCKS default: not defined |
If defined nonzero, uses recursive (aka reentrant) locks, otherwise |
uses plain mutexes. This is not required for malloc proper, but may |
be needed for layered allocators such as nedmalloc. |
|
LOCK_AT_FORK default: not defined |
If defined nonzero, performs pthread_atfork upon initialization |
to initialize child lock while holding parent lock. The implementation |
assumes that pthread locks (not custom locks) are being used. In other |
cases, you may need to customize the implementation. |
|
FOOTERS default: 0 |
If true, provide extra checking and dispatching by placing |
information in the footers of allocated chunks. This adds |
space and time overhead. |
|
INSECURE default: 0 |
If true, omit checks for usage errors and heap space overwrites. |
|
USE_DL_PREFIX default: NOT defined |
Causes compiler to prefix all public routines with the string 'dl'. |
This can be useful when you only want to use this malloc in one part |
of a program, using your regular system malloc elsewhere. |
|
MALLOC_INSPECT_ALL default: NOT defined |
If defined, compiles malloc_inspect_all and mspace_inspect_all, that |
perform traversal of all heap space. Unless access to these |
functions is otherwise restricted, you probably do not want to |
include them in secure implementations. |
|
ABORT default: defined as abort() |
Defines how to abort on failed checks. On most systems, a failed |
check cannot die with an "assert" or even print an informative |
message, because the underlying print routines in turn call malloc, |
which will fail again. Generally, the best policy is to simply call |
abort(). It's not very useful to do more than this because many |
errors due to overwriting will show up as address faults (null, odd |
addresses etc) rather than malloc-triggered checks, so will also |
abort. Also, most compilers know that abort() does not return, so |
can better optimize code conditionally calling it. |
|
PROCEED_ON_ERROR default: defined as 0 (false) |
Controls whether detected bad addresses cause them to be bypassed
rather than aborting. If set, detected bad arguments to free and |
realloc are ignored. And all bookkeeping information is zeroed out |
upon a detected overwrite of freed heap space, thus losing the |
ability to ever return it from malloc again, but enabling the |
application to proceed. If PROCEED_ON_ERROR is defined, the |
static variable malloc_corruption_error_count is compiled in |
and can be examined to see if errors have occurred. This option |
generates slower code than the default abort policy. |
|
DEBUG default: NOT defined |
The DEBUG setting is mainly intended for people trying to modify |
this code or diagnose problems when porting to new platforms. |
However, it may also be able to better isolate user errors than just |
using runtime checks. The assertions in the check routines spell |
out in more detail the assumptions and invariants underlying the |
algorithms. The checking is fairly extensive, and will slow down |
execution noticeably. Calling malloc_stats or mallinfo with DEBUG |
set will attempt to check every non-mmapped allocated and free chunk |
in the course of computing the summaries. |
|
ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) |
Debugging assertion failures can be nearly impossible if your |
version of the assert macro causes malloc to be called, which will |
lead to a cascade of further failures, blowing the runtime stack. |
ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
which will usually make debugging easier. |
|
MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 |
The action to take before "return 0" when malloc fails to be able to |
return memory because there is none available. |
|
HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES |
True if this system supports sbrk or an emulation of it. |
|
MORECORE default: sbrk |
The name of the sbrk-style system routine to call to obtain more |
memory. See below for guidance on writing custom MORECORE |
functions. The type of the argument to sbrk/MORECORE varies across |
systems. It cannot be size_t, because it supports negative |
arguments, so it is normally the signed type of the same width as |
size_t (sometimes declared as "intptr_t"). It doesn't much matter |
though. Internally, we only call it with arguments less than half |
the max value of a size_t, which should work across all reasonable |
possibilities, although sometimes generating compiler warnings. |
|
MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE |
If true, take advantage of fact that consecutive calls to MORECORE |
with positive arguments always return contiguous increasing |
addresses. This is true of unix sbrk. It does not hurt too much to |
set it true anyway, since malloc copes with non-contiguities. |
Setting it false when definitely non-contiguous saves time |
and possibly wasted space it would take to discover this though. |
|
MORECORE_CANNOT_TRIM default: NOT defined |
True if MORECORE cannot release space back to the system when given |
negative arguments. This is generally necessary only if you are |
using a hand-crafted MORECORE function that cannot handle negative |
arguments. |
|
NO_SEGMENT_TRAVERSAL default: 0 |
If non-zero, suppresses traversals of memory segments |
returned by either MORECORE or CALL_MMAP. This disables |
merging of segments that are contiguous, and selectively |
releasing them to the OS if unused, but bounds execution times. |
|
HAVE_MMAP default: 1 (true) |
True if this system supports mmap or an emulation of it. If so, and |
HAVE_MORECORE is not true, MMAP is used for all system |
allocation. If set and HAVE_MORECORE is true as well, MMAP is |
primarily used to directly allocate very large blocks. It is also |
used as a backup strategy in cases where MORECORE fails to provide |
space from system. Note: A single call to MUNMAP is assumed to be |
able to unmap memory that may have been allocated using multiple calls
to MMAP, so long as they are adjacent. |
|
HAVE_MREMAP default: 1 on linux, else 0 |
If true realloc() uses mremap() to re-allocate large blocks and |
extend or shrink allocation spaces. |
|
MMAP_CLEARS default: 1 except on WINCE. |
True if mmap clears memory so calloc doesn't need to. This is true |
for standard unix mmap using /dev/zero and on WIN32 except for WINCE. |
|
USE_BUILTIN_FFS default: 0 (i.e., not used) |
Causes malloc to use the builtin ffs() function to compute indices. |
Some compilers may recognize and intrinsify ffs to be faster than the |
supplied C version. Also, the case of x86 using gcc is special-cased |
to an asm instruction, so is already as fast as it can be, and so |
this setting has no effect. Similarly for Win32 under recent MS compilers. |
(On most x86s, the asm version is only slightly faster than the C version.) |
|
malloc_getpagesize default: derive from system includes, or 4096. |
The system page size. To the extent possible, this malloc manages |
memory from the system in page-size units. This may be (and |
usually is) a function rather than a constant. This is ignored |
if WIN32, where page size is determined using GetSystemInfo during
initialization. |
|
USE_DEV_RANDOM default: 0 (i.e., not used) |
Causes malloc to use /dev/random to initialize secure magic seed for |
stamping footers. Otherwise, the current time is used. |
|
NO_MALLINFO default: 0 |
If defined, don't compile "mallinfo". This can be a simple way |
of dealing with mismatches between system declarations and |
those in this file. |
|
MALLINFO_FIELD_TYPE default: size_t |
The type of the fields in the mallinfo struct. This was originally |
defined as "int" in SVID etc, but is more usefully defined as |
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
|
NO_MALLOC_STATS default: 0 |
If defined, don't compile "malloc_stats". This avoids calls to |
fprintf and bringing in stdio dependencies you might not want. |
|
REALLOC_ZERO_BYTES_FREES default: not defined |
This should be set if a call to realloc with zero bytes should |
be the same as a call to free. Some people think it should. Otherwise, |
since this malloc returns a unique pointer for malloc(0), so does |
realloc(p, 0). |
|
LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H |
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H |
LACKS_STDLIB_H, LACKS_SCHED_H, LACKS_TIME_H default: NOT defined unless on WIN32
Define these if your system does not have these header files. |
You might need to manually insert some of the declarations they provide. |
|
DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, |
system_info.dwAllocationGranularity in WIN32, |
otherwise 64K. |
Also settable using mallopt(M_GRANULARITY, x) |
The unit for allocating and deallocating memory from the system. On |
most systems with contiguous MORECORE, there is no reason to |
make this more than a page. However, systems with MMAP tend to |
either require or encourage larger granularities. You can increase |
this value to keep system allocation functions from being called so
often, especially if they are slow. The value must be at least one |
page and must be a power of two. Setting to 0 causes initialization |
to either page size or win32 region size. (Note: In previous |
versions of malloc, the equivalent of this option was called |
"TOP_PAD") |
|
DEFAULT_TRIM_THRESHOLD default: 2MB |
Also settable using mallopt(M_TRIM_THRESHOLD, x) |
The maximum amount of unused top-most memory to keep before |
releasing via malloc_trim in free(). Automatic trimming is mainly |
useful in long-lived programs using contiguous MORECORE. Because |
trimming via sbrk can be slow on some systems, and can sometimes be |
wasteful (in cases where programs immediately afterward allocate |
more large chunks) the value should be high enough so that your |
overall system performance would improve by releasing this much |
memory. As a rough guide, you might set to a value close to the |
average size of a process (program) running on your system. |
Releasing this much memory would allow such a process to run in |
memory. Generally, it is worth tuning trim thresholds when a |
program undergoes phases where several large chunks are allocated |
and released in ways that can reuse each other's storage, perhaps |
mixed with phases where there are no such chunks at all. The trim |
value must be greater than page size to have any useful effect. To |
disable trimming completely, you can set to MAX_SIZE_T. Note that the trick |
some people use of mallocing a huge space and then freeing it at |
program startup, in an attempt to reserve system memory, doesn't |
have the intended effect under automatic trimming, since that memory |
will immediately be returned to the system. |
|
DEFAULT_MMAP_THRESHOLD default: 256K |
Also settable using mallopt(M_MMAP_THRESHOLD, x) |
The request size threshold for using MMAP to directly service a |
request. Requests of at least this size that cannot be allocated |
using already-existing space will be serviced via mmap. (If enough |
normal freed space already exists it is used instead.) Using mmap |
segregates relatively large chunks of memory so that they can be |
individually obtained and released from the host system. A request |
serviced through mmap is never reused by any other request (at least |
not directly; the system may just so happen to remap successive |
requests to the same locations). Segregating space in this way has |
the benefits that: Mmapped space can always be individually released |
back to the system, which helps keep the system level memory demands |
of a long-lived program low. Also, mapped memory doesn't become |
`locked' between other chunks, as can happen with normally allocated |
chunks, which means that even trimming via malloc_trim would not |
release them. However, it has the disadvantage that the space |
cannot be reclaimed, consolidated, and then used to service later |
requests, as happens with normal chunks. The advantages of mmap |
nearly always outweigh disadvantages for "large" chunks, but the |
value of "large" may vary across systems. The default is an |
empirically derived value that works well in most systems. You can |
disable mmap by setting to MAX_SIZE_T. |
|
MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP |
The number of consolidated frees between checks to release |
unused segments when freeing. When using non-contiguous segments, |
especially with multiple mspaces, checking only for topmost space |
doesn't always suffice to trigger trimming. To compensate for this, |
free() will, with a period of MAX_RELEASE_CHECK_RATE (or the |
current number of segments, if greater) try to release unused |
segments to the OS when freeing chunks that result in |
consolidation. The best value for this parameter is a compromise |
between slowing down frees with relatively costly checks that |
rarely trigger versus holding on to unused memory. To effectively |
disable, set to MAX_SIZE_T. This may lead to a very slight speed |
improvement at the expense of carrying around more memory. |
*/ |
|
#include <ddk.h> |
#include <mutex.h> |
#include <syscall.h> |
|
/* Version identifier to allow people to support multiple versions */ |
#ifndef DLMALLOC_VERSION |
#define DLMALLOC_VERSION 20806 |
#endif /* DLMALLOC_VERSION */ |
|
/* |
malloc(size_t n) |
Returns a pointer to a newly allocated chunk of at least n bytes, or |
null if no space is available, in which case errno is set to ENOMEM |
on ANSI C systems. |
|
If n is zero, malloc returns a minimum-sized chunk. (The minimum |
size is 16 bytes on most 32bit systems, and 32 bytes on 64bit |
systems.) Note that size_t is an unsigned type, so calls with |
arguments that would be negative if signed are interpreted as |
requests for huge amounts of space, which will often fail. The |
maximum supported value of n differs across systems, but is in all |
cases less than the maximum representable value of a size_t. |
*/ |
|
/* |
free(void* p) |
Releases the chunk of memory pointed to by p, that had been previously |
allocated using malloc or a related routine such as realloc. |
It has no effect if p is null. If p was not malloced or already |
freed, free(p) will by default cause the current program to abort. |
*/ |
|
/* |
calloc(size_t n_elements, size_t element_size); |
Returns a pointer to n_elements * element_size bytes, with all locations |
set to zero. |
*/ |
|
/* |
realloc(void* p, size_t n) |
Returns a pointer to a chunk of size n that contains the same data |
as does chunk p up to the minimum of (n, p's size) bytes, or null |
if no space is available. |
|
The returned pointer may or may not be the same as p. The algorithm |
prefers extending p in most cases when possible, otherwise it |
employs the equivalent of a malloc-copy-free sequence. |
|
If p is null, realloc is equivalent to malloc. |
|
If space is not available, realloc returns null, errno is set (if on |
ANSI) and p is NOT freed. |
|
If n is for fewer bytes than already held by p, the newly unused
space is lopped off and freed if possible. realloc with a size |
argument of zero (re)allocates a minimum-sized chunk. |
|
The old unix realloc convention of allowing the last-free'd chunk |
to be used as an argument to realloc is not supported. |
*/ |
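
/*
  An illustrative calling pattern: since a failed realloc leaves p
  intact (see above), assigning the result directly to p can leak it.

    void* q = realloc(p, n);
    if (q != 0)
      p = q;        // success; the data may have moved
    else
      free(p);      // failure; p is still valid, reclaim or recover
*/
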
/* |
memalign(size_t alignment, size_t n); |
Returns a pointer to a newly allocated chunk of n bytes, aligned |
in accord with the alignment argument. |
|
The alignment argument should be a power of two. If the argument is |
not a power of two, the nearest greater power is used. |
8-byte alignment is guaranteed by normal malloc calls, so don't |
bother calling memalign with an argument of 8 or less. |
|
Overreliance on memalign is a sure way to fragment space. |
*/ |
|
|
#define DEBUG 1

#define assert(x) |
|
#define MAX_SIZE_T (~(size_t)0) |
|
#define CALL_DIRECT_MMAP(s) MMAP_DEFAULT(s) |
|
/* ------------------- size_t and alignment properties -------------------- */ |
|
/* The byte and bit size of a size_t */ |
#define SIZE_T_SIZE (sizeof(size_t)) |
#define SIZE_T_BITSIZE (sizeof(size_t) << 3) |
/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define SIZE_T_FOUR         ((size_t)4)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

#define EXTERN_BIT (8U) |
|
#define HAVE_MMAP 1 |
#define HAVE_MORECORE 0 |
#define MORECORE_CANNOT_TRIM 1 |
#define CALL_MMAP(s) MMAP_DEFAULT(s) |
#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) |
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL |
/* the number of bytes to offset an address to align it */
#define align_offset(A)\
((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ |
((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) |
|
/* -------------------------- MMAP preliminaries ------------------------- */ |
|
/* |
If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and |
checks to fail so compiler optimizer can delete code rather than |
using so many "#if"s. |
*/ |
|
|
/* MORECORE and MMAP must return MFAIL on failure */ |
#define MFAIL ((void*)(MAX_SIZE_T)) |
#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ |
|
#define should_trim(M,s) (0) |
|
|
|
/* --------------------------- Lock preliminaries ------------------------ */ |
|
/* |
When locks are defined, there is one global lock, plus |
one per-mspace lock. |
|
The global lock ensures that mparams.magic and other unique
mparams values are initialized only once. It also protects |
sequences of calls to MORECORE. In many cases sys_alloc requires |
two calls, that should not be interleaved with calls by other |
threads. This does not protect against direct calls to MORECORE |
by other threads not using this lock, so there is still code to |
cope as best we can with interference.
|
Per-mspace locks surround calls to malloc, free, etc. |
By default, locks are simple non-reentrant mutexes. |
|
Because lock-protected regions generally have bounded times, it is |
OK to use the supplied simple spinlocks. Spinlocks are likely to |
improve performance for lightly contended applications, but worsen |
performance under heavy contention. |
|
If USE_LOCKS is > 1, the definitions of lock routines here are |
bypassed, in which case you will need to define the type MLOCK_T, |
and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK |
and TRY_LOCK. You must also declare a |
static MLOCK_T malloc_global_mutex = { initialization values };. |
|
*/ |
|
static DEFINE_MUTEX(malloc_global_mutex); |
|
#define ACQUIRE_MALLOC_GLOBAL_LOCK() MutexLock(&malloc_global_mutex); |
#define RELEASE_MALLOC_GLOBAL_LOCK() MutexUnlock(&malloc_global_mutex); |
|
|
/* ----------------------- Chunk representations ------------------------ */ |
|
/* |
(The following includes lightly edited explanations by Colin Plumb.) |
|
The malloc_chunk declaration below is misleading (but accurate and |
necessary). It declares a "view" into memory allowing access to |
necessary fields at known offsets from a given base. |
|
Chunks of memory are maintained using a `boundary tag' method as |
originally described by Knuth. (See the paper by Paul Wilson |
ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such |
techniques.) Sizes of free chunks are stored both in the front of |
each chunk and at the end. This makes consolidating fragmented |
chunks into bigger chunks fast. The head fields also hold bits |
representing whether chunks are free or in use. |
|
Here are some pictures to make it clearer. They are "exploded" to |
show that the state of a chunk can be thought of as extending from |
the high 31 bits of the head field of its header through the |
prev_foot and PINUSE_BIT bit of the following chunk header. |
|
A chunk that's in use looks like: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk (if P = 0) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
| Size of this chunk 1| +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| | |
+- -+ |
| | |
+- -+ |
| : |
+- size - sizeof(size_t) available payload bytes -+ |
: | |
chunk-> +- -+ |
| | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| |
| Size of next chunk (may or may not be in use) | +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
And if it's free, it looks like this: |
|
chunk-> +- -+ |
| User payload (must be in use, or we would have merged!) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
| Size of this chunk 0| +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Next pointer | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Prev pointer | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| : |
+- size - sizeof(struct chunk) unused bytes -+ |
: | |
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of this chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| |
| Size of next chunk (must be in use, or we would have merged)| +-+ |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| : |
+- User payload -+ |
: | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|0| |
+-+ |
Note that since we always merge adjacent free chunks, the chunks |
adjacent to a free chunk must be in use. |
|
Given a pointer to a chunk (which can be derived trivially from the |
payload pointer) we can, in O(1) time, find out whether the adjacent |
chunks are free, and if so, unlink them from the lists that they |
are on and merge them with the current chunk. |
|
Chunks always begin on even word boundaries, so the mem portion |
(which is returned to the user) is also on an even word boundary, and |
thus at least double-word aligned. |
|
The P (PINUSE_BIT) bit, stored in the unused low-order bit of the |
chunk size (which is always a multiple of two words), is an in-use |
bit for the *previous* chunk. If that bit is *clear*, then the |
word before the current chunk size contains the previous chunk |
size, and can be used to find the front of the previous chunk. |
The very first chunk allocated always has this bit set, preventing |
access to non-existent (or non-owned) memory. If pinuse is set for |
any given chunk, then you CANNOT determine the size of the |
previous chunk, and might even get a memory addressing fault when |
trying to do so. |
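
  For example (a sketch using the accessor macros defined below;
  mem2chunk, which maps a payload pointer to its chunk, appears in the
  full source):

    mchunkptr p = mem2chunk(mem);     // header preceding the user pointer
    if (!pinuse(p)) {                 // previous chunk is free, so ...
      size_t prevsize = p->prev_foot; // ... this field is valid
      mchunkptr prev = chunk_minus_offset(p, prevsize);
    }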
|
The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of |
the chunk size redundantly records whether the current chunk is |
inuse (unless the chunk is mmapped). This redundancy enables usage |
checks within free and realloc, and reduces indirection when freeing |
and consolidating chunks. |
|
Each freshly allocated chunk must have both cinuse and pinuse set. |
That is, each allocated chunk borders either a previously allocated |
and still in-use chunk, or the base of its memory arena. This is |
ensured by making all allocations from the `lowest' part of any |
found chunk. Further, no free chunk physically borders another one, |
so each free chunk is known to be preceded and followed by either |
inuse chunks or the ends of memory. |
|
Note that the `foot' of the current chunk is actually represented |
as the prev_foot of the NEXT chunk. This makes it easier to |
deal with alignments etc but can be very confusing when trying |
to extend or adapt this code. |
|
The exceptions to all this are |
|
1. The special chunk `top' is the top-most available chunk (i.e., |
the one bordering the end of available memory). It is treated |
specially. Top is never included in any bin, is used only if |
no other chunk is available, and is released back to the |
system if it is very large (see M_TRIM_THRESHOLD). In effect, |
the top chunk is treated as larger (and thus less well |
fitting) than any other available chunk. The top chunk |
doesn't update its trailing size field since there is no next |
contiguous chunk that would have to index off it. However, |
space is still allocated for it (TOP_FOOT_SIZE) to enable |
separation or merging when space is extended. |
|
2. Chunks allocated via mmap have both cinuse and pinuse bits
cleared in their head fields. Because they are allocated |
one-by-one, each must carry its own prev_foot field, which is |
also used to hold the offset this chunk has within its mmapped |
region, which is needed to preserve alignment. Each mmapped |
chunk is trailed by the first two fields of a fake next-chunk |
for sake of usage checks. |
|
*/ |
|
struct malloc_chunk { |
size_t prev_foot; /* Size of previous chunk (if free). */ |
size_t head; /* Size and inuse bits. */ |
struct malloc_chunk* fd; /* double links -- used only if free. */ |
struct malloc_chunk* bk; |
}; |
|
typedef struct malloc_chunk mchunk; |
typedef struct malloc_chunk* mchunkptr; |
typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ |
typedef unsigned int bindex_t; /* Described below */ |
typedef unsigned int binmap_t; /* Described below */ |
typedef unsigned int flag_t; /* The type of various bit flag sets */ |
|
/* ------------------- Chunks sizes and alignments ----------------------- */ |
|
#define MCHUNK_SIZE (sizeof(mchunk)) |
|
#if FOOTERS |
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
#else /* FOOTERS */ |
#define CHUNK_OVERHEAD (SIZE_T_SIZE) |
#endif /* FOOTERS */ |
|
/* MMapped chunks need a second word of overhead ... */ |
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
/* ... and additional padding for fake next-chunk at foot */ |
#define request2size(req) \ |
(((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) |
|
|
/* ------------------ Operations on head and foot fields ----------------- */ |
|
/* |
  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
  use, unless mmapped, in which case both bits are cleared.

  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/

#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG4_BIT           (SIZE_T_FOUR)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

/* extraction of fields from head words */ |
#define cinuse(p) ((p)->head & CINUSE_BIT) |
#define pinuse(p) ((p)->head & PINUSE_BIT) |
#define flag4inuse(p) ((p)->head & FLAG4_BIT) |
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) |
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) |
|
#define chunksize(p) ((p)->head & ~(FLAG_BITS)) |
|
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) |
#define set_flag4(p) ((p)->head |= FLAG4_BIT) |
#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) |
|
/* Treat space at ptr +/- offset as a chunk */ |
#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next and previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */ |
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) |
|
/* Get/set size at footer */ |
#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) |
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) |
|
/* Set size, pinuse bit, and foot */ |
#define set_size_and_pinuse_of_free_chunk(p, s)\ |
((p)->head = (s|PINUSE_BIT), set_foot(p, s)) |
/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\ |
(is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) |
|
/* Return true if malloced space is not necessarily cleared */ |
#if MMAP_CLEARS |
#define calloc_must_clear(p) (!is_mmapped(p)) |
#else /* MMAP_CLEARS */ |
#define calloc_must_clear(p) (1) |
#endif /* MMAP_CLEARS */ |
|
/* ---------------------- Overlaid data structures ----------------------- */ |
|
/* |
When chunks are not in use, they are treated as nodes of either |
lists or trees. |
|
"Small" chunks are stored in circular doubly-linked lists, and look |
like this: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk in list | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space (may be 0 bytes long) . |
. . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
Larger chunks are kept in a form of bitwise digital trees (aka |
tries) keyed on chunksizes. Because malloc_tree_chunks are only for |
free chunks greater than 256 bytes, their size doesn't impose any |
constraints on user chunk sizes. Each node looks like: |
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Size of previous chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`head:' | Size of chunk, in bytes |P| |
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Forward pointer to next chunk of same size | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Back pointer to previous chunk of same size | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Pointer to left child (child[0]) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Pointer to right child (child[1]) | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Pointer to parent | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| bin index of this chunk | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
| Unused space . |
. | |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
`foot:' | Size of chunk, in bytes | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
|
Each tree holding treenodes is a tree of unique chunk sizes. Chunks |
of the same size are arranged in a circularly-linked list, with only |
the oldest chunk (the next to be used, in our FIFO ordering) |
actually in the tree. (Tree members are distinguished by a non-null |
parent pointer.) If a chunk with the same size as an existing node
is inserted, it is linked off the existing node using pointers that |
work in the same way as fd/bk pointers of small chunks. |
|
Each tree contains a power of 2 sized range of chunk sizes (the |
smallest is 0x100 <= x < 0x180), which is divided in half at each
tree level, with the chunks in the smaller half of the range (0x100
<= x < 0x140 for the top node) in the left subtree and the larger
half (0x140 <= x < 0x180) in the right subtree. This is, of course, |
done by inspecting individual bits. |
|
Using these rules, each node's left subtree contains all smaller |
sizes than its right subtree. However, the node at the root of each |
subtree has no particular ordering relationship to either. (The |
dividing line between the subtree sizes is based on trie relation.) |
If we remove the last chunk of a given size from the interior of the |
tree, we need to replace it with a leaf node. The tree ordering |
rules permit a node to be replaced by any leaf below it. |
|
The smallest chunk in a tree (a common operation in a best-fit |
allocator) can be found by walking a path to the leftmost leaf in |
the tree. Unlike a usual binary tree, where we follow left child |
pointers until we reach a null, here we follow the right child |
pointer any time the left one is null, until we reach a leaf with |
both child pointers null. The smallest chunk in the tree will be |
somewhere along that path. |
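
  That walk, sketched with the leftmost_child macro defined below
  (illustrative; root is the root node of some treebin):

    tchunkptr t = root;
    while (leftmost_child(t) != 0)
      t = leftmost_child(t);     // prefer left, else go right
    // the smallest chunk lies along the path just traversed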
|
The worst case number of steps to add, find, or remove a node is |
bounded by the number of bits differentiating chunks within |
bins. Under current bin calculations, this ranges from 6 up to 21 |
(for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case |
is of course much better. |
*/ |
|
struct malloc_tree_chunk { |
/* The first four fields must be compatible with malloc_chunk */ |
size_t prev_foot; |
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr;  /* The type of bins of trees */

/* A little helper macro for trees */ |
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) |
|
/* ----------------------------- Segments -------------------------------- */ |
|
/* |
Each malloc space may include non-contiguous segments, held in a |
list headed by an embedded malloc_segment record representing the |
top-most space. Segments also include flags holding properties of |
the space. Large chunks that are directly allocated by mmap are not |
included in this list. They are instead independently created and |
destroyed without otherwise keeping track of them. |
|
Segment management mainly comes into play for spaces allocated by |
MMAP. Any call to MMAP might or might not return memory that is |
adjacent to an existing segment. MORECORE normally contiguously |
extends the current space, so this space is almost always adjacent, |
which is simpler and faster to deal with. (This is why MORECORE is |
used preferentially to MMAP when both are available -- see |
sys_alloc.) When allocating using MMAP, we don't use any of the |
hinting mechanisms (inconsistently) supported in various |
implementations of unix mmap, or distinguish reserving from |
committing memory. Instead, we just ask for space, and exploit |
contiguity when we get it. It is probably possible to do |
better than this on some systems, but no general scheme seems |
to be significantly better. |
|
Management entails a simpler variant of the consolidation scheme |
used for chunks to reduce fragmentation -- new adjacent memory is |
normally prepended or appended to an existing segment. However, |
there are limitations compared to chunk consolidation that mostly |
reflect the fact that segment processing is relatively infrequent |
(occurring only when getting memory from system) and that we |
don't expect to have huge numbers of segments: |
|
* Segments are not indexed, so traversal requires linear scans. (It |
would be possible to index these, but is not worth the extra |
overhead and complexity for most programs on most platforms.) |
* New segments are only appended to old ones when holding top-most |
memory; if they cannot be prepended to others, they are held in |
different segments. |
|
Except for the top-most segment of an mstate, each segment record |
is kept at the tail of its segment. Segments are added by pushing |
segment records onto the list headed by &mstate.seg for the |
containing mstate. |
|
Segment flags control allocation/merge/deallocation policies: |
* If EXTERN_BIT set, then we did not allocate this segment, |
and so should not try to deallocate or merge with others. |
(This currently holds only for the initial segment passed |
into create_mspace_with_base.) |
* If USE_MMAP_BIT set, the segment may be merged with |
other surrounding mmapped segments and trimmed/de-allocated |
using munmap. |
* If neither bit is set, then the segment was obtained using |
MORECORE so can be merged with surrounding MORECORE'd segments |
and deallocated/trimmed using MORECORE with negative arguments. |
*/ |
|
struct malloc_segment { |
char* base; /* base address */ |
size_t size; /* allocated size */ |
  struct malloc_segment* next;   /* ptr to next segment */
  flag_t sflags;                 /* mmap and extern flag */
};

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/* ---------------------------- malloc_state ----------------------------- */

/*
   A malloc_state holds all of the bookkeeping for a space.
   Its main fields include:

Magic tag |
A cross-check field that should always hold same value as mparams.magic. |
|
Max allowed footprint |
The maximum allowed bytes to allocate from system (zero means no limit) |
|
Flags |
Bits recording whether to use MMAP, locks, or contiguous MORECORE |
|
|
Trim support |
Fields holding the amount of unused topmost memory that should trigger |
trimming, and a counter to force periodic scanning to release unused |
non-topmost segments. |
|
Locking |
    A lock acquired and released around every public call using this
    mspace, when locks are in use.
*/

struct malloc_state {
  /* ... earlier bookkeeping fields as in the full source ... */
tbinptr treebins[NTREEBINS]; |
size_t footprint; |
size_t max_footprint; |
size_t footprint_limit; /* zero means no limit */ |
flag_t mflags; |
struct mutex lock; /* locate lock among fields that rarely change */ |
msegment seg; |
};

typedef struct malloc_state* mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams. Note that the non-zeroness of "magic"
also serves as an initialization flag. |
*/ |
|
struct malloc_params { |
size_t magic; |
size_t page_size; |
size_t granularity; |
size_t mmap_threshold; |
  size_t trim_threshold;
  flag_t default_mflags;
};
|
static struct malloc_params mparams; |
|
/* Ensure mparams initialized */ |
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) |
|
static struct malloc_state _gm_; |
#define gm (&_gm_)
#define is_global(M) ((M) == &_gm_)
|
#define is_initialized(M) ((M)->top != 0) |
|
/* -------------------------- system alloc setup ------------------------- */ |
|
/* Operations on mflags */ |
|
#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) |
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) |
#if USE_LOCKS |
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) |
#else |
#define disable_lock(M) |
#endif |
|
#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) |
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) |
#if HAVE_MMAP |
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) |
#else |
#define disable_mmap(M) |
#endif |
|
#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) |
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) |
|
#define set_lock(M,L)\ |
((M)->mflags = (L)?\ |
((M)->mflags | USE_LOCK_BIT) :\ |
((M)->mflags & ~USE_LOCK_BIT)) |
|
/* page-align a size */ |
#define page_align(S)\ |
(((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) |
|
/* granularity-align a size */ |
#define granularity_align(S)\ |
(((S) + (mparams.granularity - SIZE_T_ONE))\ |
& ~(mparams.granularity - SIZE_T_ONE)) |
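
/* Example: with a page_size of 4096, page_align(5000) yields 8192 */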
|
|
/* For mmap, use granularity alignment on windows, else page-align */ |
#ifdef WIN32 |
#define mmap_align(S) granularity_align(S) |
#else |
#define mmap_align(S) page_align(S) |
#endif |
|
/* For sys_alloc, enough padding to ensure can malloc request on success */ |
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) |
|
#define is_page_aligned(S)\ |
(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) |
#define is_granularity_aligned(S)\ |
(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) |
|
/* True if segment S holds address A */ |
#define segment_holds(S, A)\ |
((char*)(A) >= S->base && (char*)(A) < S->base + S->size) |
|
/* Return segment holding given address */ |
static msegmentptr segment_holding(mstate m, char* addr) { |
msegmentptr sp = &m->seg; |
for (;;) { |
if (addr >= sp->base && addr < sp->base + sp->size) |
return sp; |
if ((sp = sp->next) == 0) |
return 0; |
} |
} |
|
/* Return true if segment contains a segment link */ |
static int has_segment_link(mstate m, msegmentptr ss) { |
msegmentptr sp = &m->seg; |
for (;;) { |
if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) |
return 1; |
if ((sp = sp->next) == 0) |
return 0; |
} |
} |
|
|
/* |
TOP_FOOT_SIZE is padding at the end of a segment, including space |
that may be needed to place segment records and fenceposts when new |
noncontiguous segments are added. |
*/ |
#define TOP_FOOT_SIZE\ |
(align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) |
|
|
/* ------------------------------- Hooks -------------------------------- */ |
|
/* |
PREACTION should be defined to return 0 on success, and nonzero on |
failure. If you are not using locking, you can redefine these to do |
anything you like. |
*/ |
|
#define PREACTION(M) ( MutexLock(&(M)->lock)) |
#define POSTACTION(M) { MutexUnlock(&(M)->lock); } |
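
/* Typical use (sketch), per the contract above:
     if (!PREACTION(m)) {
       ... operate on mstate m ...
       POSTACTION(m);
     }
*/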
|
/* -------------------------- Debugging setup ---------------------------- */ |
|
#if ! DEBUG |
|
#define check_free_chunk(M,P) |
#define check_inuse_chunk(M,P) |
#define check_malloced_chunk(M,P,N) |
#define check_mmapped_chunk(M,P) |
#define check_malloc_state(M) |
#define check_top_chunk(M,P) |
|
#else /* DEBUG */ |
#define check_free_chunk(M,P) do_check_free_chunk(M,P) |
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) |
#define check_top_chunk(M,P) do_check_top_chunk(M,P) |
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) |
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) |
#define check_malloc_state(M) do_check_malloc_state(M) |
|
static void do_check_any_chunk(mstate m, mchunkptr p); |
static void do_check_top_chunk(mstate m, mchunkptr p); |
static void do_check_mmapped_chunk(mstate m, mchunkptr p); |
static void do_check_inuse_chunk(mstate m, mchunkptr p); |
static void do_check_free_chunk(mstate m, mchunkptr p); |
static void do_check_malloced_chunk(mstate m, void* mem, size_t s); |
static void do_check_tree(mstate m, tchunkptr t); |
static void do_check_treebin(mstate m, bindex_t i); |
static void do_check_smallbin(mstate m, bindex_t i); |
static void do_check_malloc_state(mstate m); |
static int bin_find(mstate m, mchunkptr x); |
static size_t traverse_and_check(mstate m); |
#endif /* DEBUG */ |
|
/* ---------------------------- Indexing Bins ---------------------------- */ |
|
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) |
#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) |
#define small_index2size(i) ((i) << SMALLBIN_SHIFT) |
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) |
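
/* Example: with the stock SMALLBIN_SHIFT of 3, small_index(40) == 5
   and small_index2size(5) == 40 */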
|
#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) |
#define treebin_at(M,i) (&((M)->treebins[i])) |
|
|
#define compute_tree_index(S, I)\ |
{\ |
unsigned int X = S >> TREEBIN_SHIFT;\ |
if (X == 0)\
  I = 0;\
else if (X > 0xFFFF)\ |
I = NTREEBINS-1;\ |
else {\ |
unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ |
I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
}\ |
} |
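
/* Example: S = 0x300 gives X = 3, K = 1, so I = (1 << 1) + 1 = 3 */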
|
|
|
/* Bit representing maximum resolved size in a treebin at i */ |
#define bit_for_tree_index(i) \ |
(i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) |
/* mask with all bits to left of or equal to least bit of x on */ |
#define same_or_left_bits(x) ((x) | -(x)) |
|
|
/* index corresponding to given bit */
|
#define compute_bit2idx(X, I)\ |
{\ |
unsigned int J;\ |
__asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\ |
J = __builtin_ctz(X); \ |
I = (bindex_t)J;\ |
} |
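
/*
  Example: for X == 0x28 (binary 101000) the least set bit is bit 3,
  so __builtin_ctz yields J == 3 and thus I == 3.  Callers pass a
  nonzero map (usually an isolated least bit), so the X == 0 case,
  undefined for ctz, does not arise.
*/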
|
/* ----------------------- Runtime Check Support ------------------------- */ |
|
/* |
For security, the main invariant is that malloc/free/etc never |
writes to a static address other than malloc_state, unless static |
malloc_state itself has been corrupted, which cannot occur via |
malloc (because of these checks). In essence this means that we |
believe all pointers, sizes, maps etc held in malloc_state, but |
check all of those linked or offsetted from other embedded data |
structures. These checks are interspersed with main code in a way |
that tends to minimize their run-time cost. |
|
  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact.  This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  of an inuse chunk holds the xor of its mstate and a random seed,
  which is checked upon calls to free() and realloc().  This is
  (probabilistically) unguessable from outside the program, but can be
computed by any code successfully malloc'ing any chunk, so does not |
itself provide protection against code that has already broken |
security through some other means. Unlike Robertson et al, we |
always dynamically check addresses of all offset chunks (previous, |
next, etc). This turns out to be cheaper than relying on hashes. |
*/ |
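
/*
  Sketch of the FOOTERS check in action (illustrative; the real code
  is in free() below): mark_inuse_foot() stores ((size_t)m ^
  mparams.magic) in the prev_foot of the chunk following p, so the
  owning mstate can be recovered and vetted before trusting p:

    mstate fm = get_mstate_for(p);   // xor with magic undoes encoding
    if (!ok_magic(fm)) {             // corrupted or foreign chunk
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
*/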
|
#if !INSECURE |
/* Check if address a is at least as high as any from MORECORE or MMAP */ |
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) |
/* Check if address of next chunk n is higher than base chunk p */ |
#define ok_next(p, n) ((char*)(p) < (char*)(n)) |
/* Check if p has inuse status */ |
#define ok_inuse(p) is_inuse(p) |
/* Check if p has its pinuse bit on */ |
#define ok_pinuse(p) pinuse(p) |
|
#else /* !INSECURE */ |
#define ok_address(M, a) (1) |
#define ok_next(b, n) (1) |
#define ok_inuse(p) (1) |
#define ok_pinuse(p) (1) |
#endif /* !INSECURE */ |
|
#if (FOOTERS && !INSECURE) |
/* Check if (alleged) mstate m has expected magic field */ |
#define ok_magic(M) ((M)->magic == mparams.magic) |
#else /* (FOOTERS && !INSECURE) */ |
#define ok_magic(M) (1) |
#endif /* (FOOTERS && !INSECURE) */ |
|
/* In gcc, use __builtin_expect to minimize impact of checks */ |
#if !INSECURE |
#if defined(__GNUC__) && __GNUC__ >= 3 |
#define RTCHECK(e) __builtin_expect(e, 1) |
#else /* GNUC */ |
#define RTCHECK(e) (e) |
#endif /* GNUC */ |
#else /* !INSECURE */ |
#define RTCHECK(e) (1) |
#endif /* !INSECURE */ |
|
/* macros to set up inuse chunks with or without footers */ |
|
#if !FOOTERS |
|
#define mark_inuse_foot(M,p,s) |
|
/* Get/set size at footer */ |
#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) |
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) |
|
/* Macros for setting head/foot of non-mmapped chunks */ |
|
/* Set cinuse bit and pinuse bit of next chunk */ |
492,26 → 1592,345 |
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) |
|
#else /* FOOTERS */ |
|
/* Set foot of inuse chunk to be xor of mstate and seed */ |
#define mark_inuse_foot(M,p,s)\ |
(((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) |
|
#define get_mstate_for(p)\ |
((mstate)(((mchunkptr)((char*)(p) +\ |
(chunksize(p))))->prev_foot ^ mparams.magic)) |
|
#define set_inuse(M,p,s)\ |
((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ |
mark_inuse_foot(M,p,s)) |
|
#define set_inuse_and_pinuse(M,p,s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ |
mark_inuse_foot(M,p,s)) |
|
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
mark_inuse_foot(M, p, s)) |
|
#endif /* !FOOTERS */ |
|
/* ---------------------------- setting mparams -------------------------- */ |
|
#if LOCK_AT_FORK |
static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } |
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } |
static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } |
#endif /* LOCK_AT_FORK */ |
|
/* Initialize mparams */ |
static int init_mparams(void) { |
|
ACQUIRE_MALLOC_GLOBAL_LOCK(); |
if (mparams.magic == 0) { |
size_t magic; |
size_t psize; |
size_t gsize; |
|
psize = 4096; |
gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); |
|
mparams.granularity = gsize; |
mparams.page_size = psize; |
mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; |
mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; |
|
/* Set up lock for main malloc area */ |
gm->mflags = mparams.default_mflags; |
MutexInit(&gm->lock); |
|
{ |
magic = (size_t)&magic ^ (size_t)0x55555555U; |
magic |= (size_t)8U; /* ensure nonzero */ |
magic &= ~(size_t)7U; /* improve chances of fault for bad values */ |
/* Until memory modes commonly available, use volatile-write */ |
(*(volatile size_t *)(&(mparams.magic))) = magic; |
} |
} |
|
RELEASE_MALLOC_GLOBAL_LOCK(); |
return 1; |
} |
|
|
#if DEBUG |
/* ------------------------- Debugging Support --------------------------- */ |
|
/* Check properties of any chunk, whether free, inuse, mmapped etc */ |
static void do_check_any_chunk(mstate m, mchunkptr p) { |
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
assert(ok_address(m, p)); |
} |
|
/* Check properties of top chunk */ |
static void do_check_top_chunk(mstate m, mchunkptr p) { |
msegmentptr sp = segment_holding(m, (char*)p); |
size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ |
assert(sp != 0); |
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
assert(ok_address(m, p)); |
assert(sz == m->topsize); |
assert(sz > 0); |
assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); |
assert(pinuse(p)); |
assert(!pinuse(chunk_plus_offset(p, sz))); |
} |
|
/* Check properties of (inuse) mmapped chunks */ |
static void do_check_mmapped_chunk(mstate m, mchunkptr p) { |
size_t sz = chunksize(p); |
size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); |
assert(is_mmapped(p)); |
assert(use_mmap(m)); |
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
assert(ok_address(m, p)); |
assert(!is_small(sz)); |
assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); |
assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); |
assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); |
} |
|
/* Check properties of inuse chunks */ |
static void do_check_inuse_chunk(mstate m, mchunkptr p) { |
do_check_any_chunk(m, p); |
assert(is_inuse(p)); |
assert(next_pinuse(p)); |
/* If not pinuse and not mmapped, previous chunk has OK offset */ |
assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); |
if (is_mmapped(p)) |
do_check_mmapped_chunk(m, p); |
} |
|
/* Check properties of free chunks */ |
static void do_check_free_chunk(mstate m, mchunkptr p) { |
size_t sz = chunksize(p); |
mchunkptr next = chunk_plus_offset(p, sz); |
do_check_any_chunk(m, p); |
assert(!is_inuse(p)); |
assert(!next_pinuse(p)); |
assert (!is_mmapped(p)); |
if (p != m->dv && p != m->top) { |
if (sz >= MIN_CHUNK_SIZE) { |
assert((sz & CHUNK_ALIGN_MASK) == 0); |
assert(is_aligned(chunk2mem(p))); |
assert(next->prev_foot == sz); |
assert(pinuse(p)); |
assert (next == m->top || is_inuse(next)); |
assert(p->fd->bk == p); |
assert(p->bk->fd == p); |
} |
else /* markers are always of size SIZE_T_SIZE */ |
assert(sz == SIZE_T_SIZE); |
} |
} |
|
/* Check properties of malloced chunks at the point they are malloced */ |
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { |
if (mem != 0) { |
mchunkptr p = mem2chunk(mem); |
size_t sz = p->head & ~INUSE_BITS; |
do_check_inuse_chunk(m, p); |
assert((sz & CHUNK_ALIGN_MASK) == 0); |
assert(sz >= MIN_CHUNK_SIZE); |
assert(sz >= s); |
/* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ |
assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); |
} |
} |
|
/* Check a tree and its subtrees. */ |
static void do_check_tree(mstate m, tchunkptr t) { |
tchunkptr head = 0; |
tchunkptr u = t; |
bindex_t tindex = t->index; |
size_t tsize = chunksize(t); |
bindex_t idx; |
compute_tree_index(tsize, idx); |
assert(tindex == idx); |
assert(tsize >= MIN_LARGE_SIZE); |
assert(tsize >= minsize_for_tree_index(idx)); |
assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); |
|
do { /* traverse through chain of same-sized nodes */ |
do_check_any_chunk(m, ((mchunkptr)u)); |
assert(u->index == tindex); |
assert(chunksize(u) == tsize); |
assert(!is_inuse(u)); |
assert(!next_pinuse(u)); |
assert(u->fd->bk == u); |
assert(u->bk->fd == u); |
if (u->parent == 0) { |
assert(u->child[0] == 0); |
assert(u->child[1] == 0); |
} |
else { |
assert(head == 0); /* only one node on chain has parent */ |
head = u; |
assert(u->parent != u); |
assert (u->parent->child[0] == u || |
u->parent->child[1] == u || |
*((tbinptr*)(u->parent)) == u); |
if (u->child[0] != 0) { |
assert(u->child[0]->parent == u); |
assert(u->child[0] != u); |
do_check_tree(m, u->child[0]); |
} |
if (u->child[1] != 0) { |
assert(u->child[1]->parent == u); |
assert(u->child[1] != u); |
do_check_tree(m, u->child[1]); |
} |
if (u->child[0] != 0 && u->child[1] != 0) { |
assert(chunksize(u->child[0]) < chunksize(u->child[1])); |
} |
} |
u = u->fd; |
} while (u != t); |
assert(head != 0); |
} |
|
/* Check all the chunks in a treebin. */ |
static void do_check_treebin(mstate m, bindex_t i) { |
tbinptr* tb = treebin_at(m, i); |
tchunkptr t = *tb; |
int empty = (m->treemap & (1U << i)) == 0; |
if (t == 0) |
assert(empty); |
if (!empty) |
do_check_tree(m, t); |
} |
|
/* Check all the chunks in a smallbin. */ |
static void do_check_smallbin(mstate m, bindex_t i) { |
sbinptr b = smallbin_at(m, i); |
mchunkptr p = b->bk; |
unsigned int empty = (m->smallmap & (1U << i)) == 0; |
if (p == b) |
assert(empty); |
if (!empty) { |
for (; p != b; p = p->bk) { |
size_t size = chunksize(p); |
mchunkptr q; |
/* each chunk claims to be free */ |
do_check_free_chunk(m, p); |
/* chunk belongs in bin */ |
assert(small_index(size) == i); |
assert(p->bk == b || chunksize(p->bk) == chunksize(p)); |
/* chunk is followed by an inuse chunk */ |
q = next_chunk(p); |
if (q->head != FENCEPOST_HEAD) |
do_check_inuse_chunk(m, q); |
} |
} |
} |
|
/* Find x in a bin. Used in other check functions. */ |
static int bin_find(mstate m, mchunkptr x) { |
size_t size = chunksize(x); |
if (is_small(size)) { |
bindex_t sidx = small_index(size); |
sbinptr b = smallbin_at(m, sidx); |
if (smallmap_is_marked(m, sidx)) { |
mchunkptr p = b; |
do { |
if (p == x) |
return 1; |
} while ((p = p->fd) != b); |
} |
} |
else { |
bindex_t tidx; |
compute_tree_index(size, tidx); |
if (treemap_is_marked(m, tidx)) { |
tchunkptr t = *treebin_at(m, tidx); |
size_t sizebits = size << leftshift_for_tree_index(tidx); |
while (t != 0 && chunksize(t) != size) { |
t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; |
sizebits <<= 1; |
} |
if (t != 0) { |
tchunkptr u = t; |
do { |
if (u == (tchunkptr)x) |
return 1; |
} while ((u = u->fd) != t); |
} |
} |
} |
return 0; |
} |
|
/* Traverse each chunk and check it; return total */ |
static size_t traverse_and_check(mstate m) { |
size_t sum = 0; |
if (is_initialized(m)) { |
msegmentptr s = &m->seg; |
sum += m->topsize + TOP_FOOT_SIZE; |
while (s != 0) { |
mchunkptr q = align_as_chunk(s->base); |
mchunkptr lastq = 0; |
assert(pinuse(q)); |
while (segment_holds(s, q) && |
q != m->top && q->head != FENCEPOST_HEAD) { |
sum += chunksize(q); |
if (is_inuse(q)) { |
assert(!bin_find(m, q)); |
do_check_inuse_chunk(m, q); |
} |
else { |
assert(q == m->dv || bin_find(m, q)); |
assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ |
do_check_free_chunk(m, q); |
} |
lastq = q; |
q = next_chunk(q); |
} |
s = s->next; |
} |
} |
return sum; |
} |
|
|
/* Check all properties of malloc_state. */ |
static void do_check_malloc_state(mstate m) { |
bindex_t i; |
size_t total; |
/* check bins */ |
for (i = 0; i < NSMALLBINS; ++i) |
do_check_smallbin(m, i); |
for (i = 0; i < NTREEBINS; ++i) |
do_check_treebin(m, i); |
|
if (m->dvsize != 0) { /* check dv chunk */ |
do_check_any_chunk(m, m->dv); |
assert(m->dvsize == chunksize(m->dv)); |
assert(m->dvsize >= MIN_CHUNK_SIZE); |
assert(bin_find(m, m->dv) == 0); |
} |
|
if (m->top != 0) { /* check top chunk */ |
do_check_top_chunk(m, m->top); |
/*assert(m->topsize == chunksize(m->top)); redundant */ |
assert(m->topsize > 0); |
assert(bin_find(m, m->top) == 0); |
} |
|
total = traverse_and_check(m); |
assert(total <= m->footprint); |
assert(m->footprint <= m->max_footprint); |
} |
#endif /* DEBUG */ |
|
#define CORRUPTION_ERROR_ACTION(m) \ |
do { \ |
printf("%s malloc heap corrupted\n",__FUNCTION__); \ |
558,7 → 1977,34 |
P->fd = F;\ |
P->bk = B;\ |
} |
/* ----------------------- Operations on smallbins ----------------------- */ |
|
/* |
Various forms of linking and unlinking are defined as macros. Even |
the ones for trees, which are very long but have very short typical |
paths. This is ugly but reduces reliance on inlining support of |
compilers. |
*/ |
|
/* Link a free chunk into a smallbin */ |
#define insert_small_chunk(M, P, S) {\ |
bindex_t I = small_index(S);\ |
mchunkptr B = smallbin_at(M, I);\ |
mchunkptr F = B;\ |
assert(S >= MIN_CHUNK_SIZE);\ |
if (!smallmap_is_marked(M, I))\ |
mark_smallmap(M, I);\ |
else if (RTCHECK(ok_address(M, B->fd)))\ |
F = B->fd;\ |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
}\ |
B->fd = P;\ |
F->bk = P;\ |
P->fd = F;\ |
P->bk = B;\ |
} |
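
/*
  Illustration: if bin head B currently points at F, inserting P
  produces the circular list B <-> P <-> F <-> ... <-> B.  Chunks are
  pushed at the front, and unlink_first_small_chunk (below) pops from
  the front, so same-sized small chunks are reused in roughly LIFO
  order.
*/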
|
/* Unlink a chunk from a smallbin */ |
#define unlink_small_chunk(M, P, S) {\ |
mchunkptr F = P->fd;\ |
567,10 → 2013,12 |
assert(P != B);\ |
assert(P != F);\ |
assert(chunksize(P) == small_index2size(I));\ |
if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ |
if (B == F) {\ |
clear_smallmap(M, I);\ |
}\ |
else if (RTCHECK(B == smallbin_at(M,I) ||\ |
(ok_address(M, B) && B->fd == P))) {\ |
F->bk = B;\ |
B->fd = F;\ |
}\ |
577,6 → 2025,10 |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
}\ |
}\ |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
}\ |
} |
|
/* Unlink the first chunk from a smallbin */ |
585,11 → 2037,12 |
assert(P != B);\ |
assert(P != F);\ |
assert(chunksize(P) == small_index2size(I));\ |
if (B == F) {\ |
clear_smallmap(M, I);\ |
}\ |
else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ |
F->bk = B;\ |
B->fd = F;\ |
}\ |
else {\ |
CORRUPTION_ERROR_ACTION(M);\ |
600,9 → 2053,9 |
/* Used only when dvsize known to be small */ |
#define replace_dv(M, P, S) {\ |
size_t DVS = M->dvsize;\ |
if (DVS != 0) {\ |
mchunkptr DV = M->dv;\ |
assert(is_small(DVS));\ |
insert_small_chunk(M, DV, DVS);\ |
}\ |
M->dvsize = S;\ |
609,7 → 2062,6 |
M->dv = P;\ |
} |
|
|
/* ------------------------- Operations on trees ------------------------- */ |
|
/* Insert chunk into tree */ |
687,7 → 2139,7 |
if (X->bk != X) {\ |
tchunkptr F = X->fd;\ |
R = X->bk;\ |
if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ |
F->bk = R;\ |
R->fd = F;\ |
}\ |
763,75 → 2215,9 |
else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } |
|
|
/* -------------------------- system alloc setup ------------------------- */ |
|
|
static inline void* os_mmap(size_t size) |
{ |
void* ptr = KernelAlloc(size); |
844,7 → 2230,6 |
return (KernelFree(ptr) != 0) ? 0 : -1; |
} |
|
#define should_trim(M,s) ((s) > (M)->trim_check) |
|
|
#define MMAP_DEFAULT(s) os_mmap(s) |
851,8 → 2236,6 |
#define MUNMAP_DEFAULT(a, s) os_munmap((a), (s)) |
#define DIRECT_MMAP_DEFAULT(s) os_mmap(s) |
|
#define internal_malloc(m, b) malloc(b) |
#define internal_free(m, mem) free(mem) |
|
/* ----------------------- Direct-mmapping chunks ----------------------- */ |
|
865,14 → 2248,16 |
*/ |
|
/* Malloc using mmap */ |
static void* mmap_alloc(mstate m, size_t nb) { |
size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
if (m->footprint_limit != 0) { |
size_t fp = m->footprint + mmsize; |
if (fp <= m->footprint || fp > m->footprint_limit) |
return 0; |
} |
if (mmsize > nb) { /* Check for wrap around 0 */ |
char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); |
if (mm != CMFAIL) { |
size_t offset = align_offset(chunk2mem(mm)); |
size_t psize = mmsize - offset - MMAP_FOOT_PAD; |
mchunkptr p = (mchunkptr)(mm + offset); |
895,9 → 2280,9 |
} |
|
/* Realloc using mmap */ |
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { |
size_t oldsize = chunksize(oldp); |
(void)flags; /* placate people compiling -Wunused */ |
if (is_small(nb)) /* Can't shrink mmap regions below small size */ |
return 0; |
/* Keep old chunk if big enough but not too big */ |
904,15 → 2289,13 |
if (oldsize >= nb + SIZE_T_SIZE && |
(oldsize - nb) <= (mparams.granularity << 1)) |
return oldp; |
else { |
size_t offset = oldp->prev_foot; |
size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; |
size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
char* cp = (char*)CALL_MREMAP((char*)oldp - offset, |
oldmmsize, newmmsize, flags); |
if (cp != CMFAIL) { |
mchunkptr newp = (mchunkptr)(cp + offset); |
size_t psize = newmmsize - offset - MMAP_FOOT_PAD; |
newp->head = psize; |
931,54 → 2314,11 |
return 0; |
} |
|
|
/* -------------------------- mspace management -------------------------- */ |
|
/* Initialize top chunk and its size */ |
static void init_top(mstate m, mchunkptr p, size_t psize) { |
/* Ensure alignment */ |
size_t offset = align_offset(chunk2mem(p)); |
p = (mchunkptr)((char*)p + offset); |
993,8 → 2333,7 |
} |
|
/* Initialize bins for a new mstate that is otherwise zeroed out */ |
static void init_bins(mstate m) { |
/* Establish circular links for smallbins */ |
bindex_t i; |
for (i = 0; i < NSMALLBINS; ++i) { |
1005,8 → 2344,7 |
|
/* Allocate chunk and prepend remainder with chunk in successor base. */ |
static void* prepend_alloc(mstate m, char* newbase, char* oldbase, |
size_t nb) { |
mchunkptr p = align_as_chunk(newbase); |
mchunkptr oldfirst = align_as_chunk(oldbase); |
size_t psize = (char*)oldfirst - (char*)p; |
1047,8 → 2385,7 |
} |
|
/* Add a segment to hold a new noncontiguous region */ |
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { |
/* Determine locations and sizes of segment, fenceposts, old top */ |
char* old_top = (char*)m->top; |
msegmentptr oldsp = segment_holding(m, old_top); |
1103,11 → 2440,11 |
/* -------------------------- System allocation -------------------------- */ |
|
/* Get memory from system using MORECORE or MMAP */ |
static void* sys_alloc(mstate m, size_t nb) { |
char* tbase = CMFAIL; |
size_t tsize = 0; |
flag_t mmap_flag = 0; |
size_t asize; /* allocation size */ |
|
ensure_initialization(); |
|
1114,13 → 2451,21 |
printf("%s %d bytes\n", __FUNCTION__, nb); |
|
/* Directly map large chunks, but only if already initialized */ |
if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { |
void* mem = mmap_alloc(m, nb); |
if (mem != 0) |
return mem; |
} |
|
asize = granularity_align(nb + SYS_ALLOC_PADDING); |
if (asize <= nb) |
return 0; /* wraparound */ |
if (m->footprint_limit != 0) { |
size_t fp = m->footprint + asize; |
if (fp <= m->footprint || fp > m->footprint_limit) |
return 0; |
} |
|
/* |
Try getting memory in any of three ways (in most-preferred to |
least-preferred order): |
1143,30 → 2488,104 |
not on boundary, and round this up to a granularity unit. |
*/ |
|
#if 0 |
if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { |
char* br = CMFAIL; |
size_t ssize = asize; /* sbrk call size */ |
msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); |
ACQUIRE_MALLOC_GLOBAL_LOCK(); |
|
if (ss == 0) { /* First time through or recovery */ |
char* base = (char*)CALL_MORECORE(0); |
if (base != CMFAIL) { |
size_t fp; |
/* Adjust to end on a page boundary */ |
if (!is_page_aligned(base)) |
ssize += (page_align((size_t)base) - (size_t)base); |
fp = m->footprint + ssize; /* recheck limits */ |
if (ssize > nb && ssize < HALF_MAX_SIZE_T && |
(m->footprint_limit == 0 || |
(fp > m->footprint && fp <= m->footprint_limit)) && |
(br = (char*)(CALL_MORECORE(ssize))) == base) { |
tbase = base; |
tsize = ssize; |
} |
} |
} |
else { |
/* Subtract out existing available top space from MORECORE request. */ |
ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); |
/* Use mem here only if it did continuously extend old space */ |
if (ssize < HALF_MAX_SIZE_T && |
(br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { |
tbase = br; |
tsize = ssize; |
} |
} |
|
if (tbase == CMFAIL) { /* Cope with partial failure */ |
if (br != CMFAIL) { /* Try to use/extend the space we did get */ |
if (ssize < HALF_MAX_SIZE_T && |
ssize < nb + SYS_ALLOC_PADDING) { |
size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); |
if (esize < HALF_MAX_SIZE_T) { |
char* end = (char*)CALL_MORECORE(esize); |
if (end != CMFAIL) |
ssize += esize; |
else { /* Can't use; try to release */ |
(void) CALL_MORECORE(-ssize); |
br = CMFAIL; |
} |
} |
} |
} |
if (br != CMFAIL) { /* Use the space we did get */ |
tbase = br; |
tsize = ssize; |
} |
else |
disable_contiguous(m); /* Don't try contiguous path in the future */ |
} |
|
RELEASE_MALLOC_GLOBAL_LOCK(); |
} |
#endif |
|
if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ |
char* mp = (char*)(CALL_MMAP(asize)); |
if (mp != CMFAIL) { |
tbase = mp; |
tsize = asize; |
mmap_flag = USE_MMAP_BIT; |
} |
} |
|
#if 0 |
if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ |
if (asize < HALF_MAX_SIZE_T) { |
char* br = CMFAIL; |
char* end = CMFAIL; |
ACQUIRE_MALLOC_GLOBAL_LOCK(); |
br = (char*)(CALL_MORECORE(asize)); |
end = (char*)(CALL_MORECORE(0)); |
RELEASE_MALLOC_GLOBAL_LOCK(); |
if (br != CMFAIL && end != CMFAIL && br < end) { |
size_t ssize = end - br; |
if (ssize > nb + TOP_FOOT_SIZE) { |
tbase = br; |
tsize = ssize; |
} |
} |
} |
} |
#endif |
|
  if (tbase != CMFAIL) {
if ((m->footprint += tsize) > m->max_footprint) |
m->max_footprint = m->footprint; |
|
if (!is_initialized(m)) { /* first-time initialization */ |
if (m->least_addr == 0 || tbase < m->least_addr) |
m->least_addr = tbase; |
m->seg.base = tbase; |
1185,22 → 2604,21 |
init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); |
} |
} |
else { |
/* Try to merge with an existing segment */ |
msegmentptr sp = &m->seg; |
/* Only consider most recent segment if traversal suppressed */ |
while (sp != 0 && tbase != sp->base + sp->size) |
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
if (sp != 0 && |
!is_extern_segment(sp) && |
(sp->sflags & USE_MMAP_BIT) == mmap_flag && |
segment_holds(sp, m->top)) { /* append */ |
sp->size += tsize; |
init_top(m, m->top, m->topsize + tsize); |
} |
else { |
if (tbase < m->least_addr) |
m->least_addr = tbase; |
sp = &m->seg; |
1208,8 → 2626,7 |
sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
if (sp != 0 && |
!is_extern_segment(sp) && |
(sp->sflags & USE_MMAP_BIT) == mmap_flag) { |
char* oldbase = sp->base; |
sp->base = tbase; |
sp->size += tsize; |
1220,8 → 2637,7 |
} |
} |
|
if (nb < m->topsize) { /* Allocate from new or extended top space */ |
size_t rsize = m->topsize -= nb; |
mchunkptr p = m->top; |
mchunkptr r = m->top = chunk_plus_offset(p, nb); |
1237,29 → 2653,24 |
return 0; |
} |
|
|
/* ----------------------- system deallocation -------------------------- */ |
|
/* Unmap and unlink any mmapped segments that don't contain used chunks */ |
static size_t release_unused_segments(mstate m) { |
size_t released = 0; |
int nsegs = 0; |
msegmentptr pred = &m->seg; |
msegmentptr sp = pred->next; |
while (sp != 0) { |
char* base = sp->base; |
size_t size = sp->size; |
msegmentptr next = sp->next; |
++nsegs; |
if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { |
mchunkptr p = align_as_chunk(base); |
size_t psize = chunksize(p); |
/* Can unmap if first chunk holds entire segment and not pinned */ |
if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { |
tchunkptr tp = (tchunkptr)p; |
assert(segment_holds(sp, (char*)sp)); |
if (p == m->dv) { |
1269,8 → 2680,7 |
else { |
unlink_large_chunk(m, tp); |
} |
if (CALL_MUNMAP(base, size) == 0) { |
released += size; |
m->footprint -= size; |
/* unlink obsoleted record */ |
1288,21 → 2698,18 |
sp = next; |
} |
/* Reset check counter */ |
m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? |
(size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); |
return released; |
} |
|
static int sys_trim(mstate m, size_t pad) { |
size_t released = 0; |
ensure_initialization(); |
if (pad < MAX_REQUEST && is_initialized(m)) { |
pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ |
|
if (m->topsize > pad) { |
/* Shrink top space in granularity-size units, keeping at least one */ |
size_t unit = mparams.granularity; |
size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - |
1309,27 → 2716,39 |
SIZE_T_ONE) * unit; |
msegmentptr sp = segment_holding(m, (char*)m->top); |
|
if (!is_extern_segment(sp)) { |
if (is_mmapped_segment(sp)) { |
if (HAVE_MMAP && |
sp->size >= extra && |
!has_segment_link(m, sp)) { /* can't shrink if pinned */ |
size_t newsize = sp->size - extra; |
(void)newsize; /* placate people compiling -Wunused-variable */ |
/* Prefer mremap, fall back to munmap */ |
if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || |
(CALL_MUNMAP(sp->base + newsize, extra) == 0)) { |
released = extra; |
} |
} |
} |
else if (HAVE_MORECORE) { |
if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ |
extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; |
ACQUIRE_MALLOC_GLOBAL_LOCK(); |
{ |
/* Make sure end of memory is where we last set it. */ |
char* old_br = (char*)(CALL_MORECORE(0)); |
if (old_br == sp->base + sp->size) { |
char* rel_br = (char*)(CALL_MORECORE(-extra)); |
char* new_br = (char*)(CALL_MORECORE(0)); |
if (rel_br != CMFAIL && new_br < old_br) |
released = old_br - new_br; |
} |
} |
RELEASE_MALLOC_GLOBAL_LOCK(); |
} |
} |
|
if (released != 0) { |
sp->size -= released; |
m->footprint -= released; |
init_top(m, m->top, m->topsize - released); |
1349,10 → 2768,79 |
return (released != 0)? 1 : 0; |
} |
|
/* Consolidate and bin a chunk. Differs from exported versions |
of free mainly in that the chunk need not be marked as inuse. |
*/ |
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { |
mchunkptr next = chunk_plus_offset(p, psize); |
if (!pinuse(p)) { |
mchunkptr prev; |
size_t prevsize = p->prev_foot; |
if (is_mmapped(p)) { |
psize += prevsize + MMAP_FOOT_PAD; |
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
m->footprint -= psize; |
return; |
} |
prev = chunk_minus_offset(p, prevsize); |
psize += prevsize; |
p = prev; |
if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ |
if (p != m->dv) { |
unlink_chunk(m, p, prevsize); |
} |
else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
m->dvsize = psize; |
set_free_with_pinuse(p, psize, next); |
return; |
} |
} |
else { |
CORRUPTION_ERROR_ACTION(m); |
return; |
} |
} |
if (RTCHECK(ok_address(m, next))) { |
if (!cinuse(next)) { /* consolidate forward */ |
if (next == m->top) { |
size_t tsize = m->topsize += psize; |
m->top = p; |
p->head = tsize | PINUSE_BIT; |
if (p == m->dv) { |
m->dv = 0; |
m->dvsize = 0; |
} |
return; |
} |
else if (next == m->dv) { |
size_t dsize = m->dvsize += psize; |
m->dv = p; |
set_size_and_pinuse_of_free_chunk(p, dsize); |
return; |
} |
else { |
size_t nsize = chunksize(next); |
psize += nsize; |
unlink_chunk(m, next, nsize); |
set_size_and_pinuse_of_free_chunk(p, psize); |
if (p == m->dv) { |
m->dvsize = psize; |
return; |
} |
} |
} |
else { |
set_free_with_pinuse(p, psize, next); |
} |
insert_chunk(m, p, psize); |
} |
else { |
CORRUPTION_ERROR_ACTION(m); |
} |
} |
|
/* ---------------------------- malloc --------------------------- */ |
|
|
/* allocate a large request from the best fitting chunk in a treebin */ |
static void* tmalloc_large(mstate m, size_t nb) { |
tchunkptr v = 0; |
1425,8 → 2913,7 |
} |
|
/* allocate a small request from the best fitting chunk in a treebin */ |
static void* tmalloc_small(mstate m, size_t nb) { |
tchunkptr t, v; |
size_t rsize; |
bindex_t i; |
1463,106 → 2950,7 |
return 0; |
} |
|
/* --------------------------- memalign support -------------------------- */ |
|
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) |
{ |
if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ |
return internal_malloc(m, bytes); |
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ |
alignment = MIN_CHUNK_SIZE; |
if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ |
size_t a = MALLOC_ALIGNMENT << 1; |
while (a < alignment) a <<= 1; |
alignment = a; |
} |
|
if (bytes >= MAX_REQUEST - alignment) { |
if (m != 0) { /* Test isn't needed but avoids compiler warning */ |
// MALLOC_FAILURE_ACTION; |
} |
} |
else |
{ |
size_t nb = request2size(bytes); |
size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; |
char* mem = (char*)internal_malloc(m, req); |
if (mem != 0) |
{ |
void* leader = 0; |
void* trailer = 0; |
mchunkptr p = mem2chunk(mem); |
|
PREACTION(m); |
|
if ((((size_t)(mem)) % alignment) != 0) /* misaligned */ |
{ |
/* |
Find an aligned spot inside chunk. Since we need to give |
back leading space in a chunk of at least MIN_CHUNK_SIZE, if |
the first calculation places us at a spot with less than |
MIN_CHUNK_SIZE leader, we can move to the next aligned spot. |
We've allocated enough total room so that this is always |
possible. |
*/ |
char* br = (char*)mem2chunk((size_t)(((size_t)(mem + |
alignment - |
SIZE_T_ONE)) & |
-alignment)); |
char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? |
br : br+alignment; |
mchunkptr newp = (mchunkptr)pos; |
size_t leadsize = pos - (char*)(p); |
size_t newsize = chunksize(p) - leadsize; |
|
if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ |
newp->prev_foot = p->prev_foot + leadsize; |
newp->head = newsize; |
} |
else { /* Otherwise, give back leader, use the rest */ |
set_inuse(m, newp, newsize); |
set_inuse(m, p, leadsize); |
leader = chunk2mem(p); |
} |
p = newp; |
} |
|
/* Give back spare room at the end */ |
if (!is_mmapped(p)) |
{ |
size_t size = chunksize(p); |
if (size > nb + MIN_CHUNK_SIZE) |
{ |
size_t remainder_size = size - nb; |
mchunkptr remainder = chunk_plus_offset(p, nb); |
set_inuse(m, p, nb); |
set_inuse(m, remainder, remainder_size); |
trailer = chunk2mem(remainder); |
} |
} |
|
assert (chunksize(p) >= nb); |
assert((((size_t)(chunk2mem(p))) % alignment) == 0); |
check_inuse_chunk(m, p); |
POSTACTION(m); |
if (leader != 0) { |
internal_free(m, leader); |
} |
if (trailer != 0) { |
internal_free(m, trailer); |
} |
return chunk2mem(p); |
} |
} |
return 0; |
} |
|
void* memalign(size_t alignment, size_t bytes) |
{ |
return internal_memalign(gm, alignment, bytes); |
} |
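
/*
  Usage sketch (illustrative): request 100 bytes on a 64-byte
  boundary.  On success the result satisfies ((size_t)p % 64) == 0 and
  is released with ordinary free():

    void* p = memalign(64, 100);
    if (p != 0) {
      ... use p ...
      free(p);
    }
*/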
|
|
void* malloc(size_t bytes) |
{ |
/* |
1594,9 → 2982,7 |
{ |
void* mem; |
size_t nb; |
|
if (bytes <= MAX_SMALL_REQUEST) { |
bindex_t idx; |
binmap_t smallbits; |
nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); |
1603,8 → 2989,7 |
idx = small_index(nb); |
smallbits = gm->smallmap >> idx; |
|
if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ |
mchunkptr b, p; |
idx += ~smallbits & 1; /* Uses next bin if idx empty */ |
b = smallbin_at(gm, idx); |
1616,10 → 3001,9 |
check_malloced_chunk(gm, mem, nb); |
goto postaction; |
} |
|
else if (nb > gm->dvsize) { |
if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ |
mchunkptr b, p, r; |
size_t rsize; |
bindex_t i; |
1634,8 → 3018,7 |
/* Fit here cannot be remainderless if 4byte sizes */ |
if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) |
set_inuse_and_pinuse(gm, p, small_index2size(i)); |
else { |
set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
r = chunk_plus_offset(p, nb); |
set_size_and_pinuse_of_free_chunk(r, rsize); |
1645,8 → 3028,8 |
check_malloced_chunk(gm, mem, nb); |
goto postaction; |
} |
|
else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { |
check_malloced_chunk(gm, mem, nb); |
goto postaction; |
} |
1654,11 → 3037,9 |
} |
else if (bytes >= MAX_REQUEST) |
nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ |
else { |
nb = pad_request(bytes); |
if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { |
check_malloced_chunk(gm, mem, nb); |
goto postaction; |
} |
1683,6 → 3064,7 |
check_malloced_chunk(gm, mem, nb); |
goto postaction; |
} |
|
else if (nb < gm->topsize) { /* Split top */ |
size_t rsize = gm->topsize -= nb; |
mchunkptr p = gm->top; |
1699,12 → 3081,14 |
|
postaction: |
POSTACTION(gm); |
// printf("%s %p %d\n", __FUNCTION__, mem, bytes); |
return mem; |
} |
|
// FAIL(); |
return 0; |
} |
|
/* ---------------------------- free --------------------------- */ |
|
void free(void* mem) |
{ |
1714,42 → 3098,42 |
with special cases for top, dv, mmapped chunks, and usage errors. |
*/ |
|
if (mem != 0) { |
mchunkptr p = mem2chunk(mem); |
|
#if FOOTERS |
mstate fm = get_mstate_for(p); |
if (!ok_magic(fm)) { |
USAGE_ERROR_ACTION(fm, p); |
return; |
} |
#else /* FOOTERS */ |
#define fm gm |
|
#endif /* FOOTERS */ |
PREACTION(fm); |
{ |
check_inuse_chunk(fm, p); |
if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { |
size_t psize = chunksize(p); |
mchunkptr next = chunk_plus_offset(p, psize); |
if (!pinuse(p)) { |
size_t prevsize = p->prev_foot; |
if (is_mmapped(p)) { |
psize += prevsize + MMAP_FOOT_PAD; |
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
fm->footprint -= psize; |
goto postaction; |
} |
else { |
mchunkptr prev = chunk_minus_offset(p, prevsize); |
psize += prevsize; |
p = prev; |
if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ |
if (p != fm->dv) { |
unlink_chunk(fm, p, prevsize); |
} |
else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
fm->dvsize = psize; |
set_free_with_pinuse(p, psize, next); |
goto postaction; |
1760,17 → 3144,13 |
} |
} |
|
if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { |
if (!cinuse(next)) { /* consolidate forward */ |
if (next == fm->top) { |
size_t tsize = fm->topsize += psize; |
fm->top = p; |
p->head = tsize | PINUSE_BIT; |
if (p == fm->dv) { |
fm->dv = 0; |
fm->dvsize = 0; |
} |
1778,21 → 3158,18 |
sys_trim(fm, 0); |
goto postaction; |
} |
else if (next == fm->dv) { |
size_t dsize = fm->dvsize += psize; |
fm->dv = p; |
set_size_and_pinuse_of_free_chunk(p, dsize); |
goto postaction; |
} |
else { |
size_t nsize = chunksize(next); |
psize += nsize; |
unlink_chunk(fm, next, nsize); |
set_size_and_pinuse_of_free_chunk(p, psize); |
if (p == fm->dv) { |
fm->dvsize = psize; |
goto postaction; |
} |
1801,13 → 3178,11 |
else |
set_free_with_pinuse(p, psize, next); |
|
if (is_small(psize)) { |
insert_small_chunk(fm, p, psize); |
check_free_chunk(fm, p); |
} |
else { |
tchunkptr tp = (tchunkptr)p; |
insert_large_chunk(fm, tp, psize); |
check_free_chunk(fm, p); |
1823,7 → 3198,12 |
POSTACTION(fm); |
} |
} |
|
|
#if !FOOTERS |
#undef fm |
#endif /* FOOTERS */ |
} |
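
#if 0
/* Minimal smoke test (illustrative sketch, not built): the first
   request is served from the smallbins/top; the second is above the
   default mmap threshold and is therefore likely mmapped directly. */
static void malloc_smoke_test(void) {
  char* p = (char*)malloc(100);
  char* q = (char*)malloc(512 * 1024);
  if (p != 0) { p[0] = 1; free(p); }
  if (q != 0) { q[0] = 1; free(q); }
}
#endif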
|
|