path: root/common/dlmalloc.c
Diffstat (limited to 'common/dlmalloc.c')
-rw-r--r--  common/dlmalloc.c  |  563
1 file changed, 274 insertions, 289 deletions
diff --git a/common/dlmalloc.c b/common/dlmalloc.c
index 9261507..0c04872 100644
--- a/common/dlmalloc.c
+++ b/common/dlmalloc.c
@@ -9,8 +9,8 @@
* VERSION 2.6.6 Sun Mar 5 19:10:03 2000 Doug Lea (dl at gee)
Note: There may be an updated version of this malloc obtainable at
- ftp://g.oswego.edu/pub/misc/malloc.c
- Check before installing!
+ ftp://g.oswego.edu/pub/misc/malloc.c
+ Check before installing!
* Why use this malloc?
@@ -87,7 +87,7 @@
and status information.
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
- 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
+ 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
@@ -99,7 +99,7 @@
pointer to something of the minimum allocatable size.
Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes
- 8-byte size_t: 2^63 - 16 bytes
+ 8-byte size_t: 2^63 - 16 bytes
It is assumed that (possibly signed) size_t bit values suffice to
represent chunk sizes. `Possibly signed' is due to the fact
@@ -115,11 +115,11 @@
make the normal worst-case wastage 15 bytes (i.e., up to 15
more bytes will be allocated than were requested in malloc), with
two exceptions:
- 1. Because requests for zero bytes allocate non-zero space,
- the worst case wastage for a request of zero bytes is 24 bytes.
- 2. For requests >= mmap_threshold that are serviced via
- mmap(), the worst case wastage is 8 bytes plus the remainder
- from a system page (the minimal mmap unit); typically 4096 bytes.
+ 1. Because requests for zero bytes allocate non-zero space,
+ the worst case wastage for a request of zero bytes is 24 bytes.
+ 2. For requests >= mmap_threshold that are serviced via
+ mmap(), the worst case wastage is 8 bytes plus the remainder
+ from a system page (the minimal mmap unit); typically 4096 bytes.
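
To make the rounding above concrete, here is a small self-contained sketch of the arithmetic (the names are illustrative; the file's own request2size macro folds MINSIZE and MALLOC_ALIGN_MASK into a single expression):

    #include <stddef.h>

    #define SIZE_SZ_DEMO  sizeof(size_t)  /* stand-in for SIZE_SZ */
    #define MINSIZE_DEMO  16              /* smallest chunk with 4-byte ptrs */

    /* Pad the request with one size field and round up to a multiple
       of 8.  A zero-byte request still yields a full minimum-size
       chunk, which is where its larger worst-case wastage comes from. */
    static size_t request2size_sketch(size_t req)
    {
        size_t sz = (req + SIZE_SZ_DEMO + 7) & ~(size_t)7;
        return (sz < MINSIZE_DEMO) ? MINSIZE_DEMO : sz;
    }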
* Limitations
@@ -372,8 +372,8 @@ void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
#else
#ifdef WIN32
-// On Win32 platforms, 'memset()' and 'memcpy()' are already declared in
-// 'windows.h'
+/* On Win32 platforms, 'memset()' and 'memcpy()' are already declared in */
+/* 'windows.h' */
#else
Void_t* memset();
Void_t* memcpy();
@@ -393,14 +393,14 @@ do { \
if(mzsz <= 9*sizeof(mzsz)) { \
INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp); \
if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \
- *mz++ = 0; \
+ *mz++ = 0; \
if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \
- *mz++ = 0; \
- if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \
- *mz++ = 0; }}} \
- *mz++ = 0; \
- *mz++ = 0; \
- *mz = 0; \
+ *mz++ = 0; \
+ if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \
+ *mz++ = 0; }}} \
+ *mz++ = 0; \
+ *mz++ = 0; \
+ *mz = 0; \
} else memset((charp), 0, mzsz); \
} while(0)
@@ -411,14 +411,14 @@ do { \
INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src); \
INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest); \
if(mcsz >= 5*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
- *mcdst++ = *mcsrc++; \
+ *mcdst++ = *mcsrc++; \
if(mcsz >= 7*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
- *mcdst++ = *mcsrc++; \
- if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
- *mcdst++ = *mcsrc++; }}} \
- *mcdst++ = *mcsrc++; \
- *mcdst++ = *mcsrc++; \
- *mcdst = *mcsrc ; \
+ *mcdst++ = *mcsrc++; \
+ if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++; \
+ *mcdst++ = *mcsrc++; }}} \
+ *mcdst++ = *mcsrc++; \
+ *mcdst++ = *mcsrc++; \
+ *mcdst = *mcsrc ; \
} else memcpy(dest, src, mcsz); \
} while(0)
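
The MALLOC_ZERO and MALLOC_COPY macros above are simply unrolled word loops: for chunks of at most nine size words they inline the stores and loads to avoid memset/memcpy call overhead. An un-unrolled equivalent of the zeroing branch, for illustration (INTERNAL_SIZE_T modeled here as size_t):

    #include <stddef.h>

    /* Same effect as the small-size branch of MALLOC_ZERO, without
       the hand unrolling. */
    static void word_zero(size_t *mz, size_t mzsz)
    {
        size_t n = mzsz / sizeof(size_t);
        while (n--)
            *mz++ = 0;
    }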
@@ -558,7 +558,6 @@ do { \
#endif
-
/*
This version of malloc supports the standard SVID/XPG mallinfo
@@ -622,7 +621,6 @@ struct mallinfo {
#define M_MMAP_MAX -4
-
#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif
@@ -686,11 +684,11 @@ struct mallinfo {
retain whenever sbrk is called. It is used in two ways internally:
* When sbrk is called to extend the top of the arena to satisfy
- a new malloc request, this much padding is added to the sbrk
- request.
+ a new malloc request, this much padding is added to the sbrk
+ request.
* When malloc_trim is called automatically from free(),
- it is used as the `pad' argument.
+ it is used as the `pad' argument.
In both cases, the actual amount of padding is rounded
so that the end of the arena is always a system page boundary.
@@ -736,15 +734,15 @@ struct mallinfo {
However, it has the disadvantages that:
- 1. The space cannot be reclaimed, consolidated, and then
- used to service later requests, as happens with normal chunks.
- 2. It can lead to more wastage because of mmap page alignment
- requirements
- 3. It causes malloc performance to be more dependent on host
- system memory management support routines which may vary in
- implementation quality and may impose arbitrary
- limitations. Generally, servicing a request via normal
- malloc steps is faster than going through a system's mmap.
+ 1. The space cannot be reclaimed, consolidated, and then
+ used to service later requests, as happens with normal chunks.
+ 2. It can lead to more wastage because of mmap page alignment
+ requirements
+ 3. It causes malloc performance to be more dependent on host
+ system memory management support routines which may vary in
+ implementation quality and may impose arbitrary
+ limitations. Generally, servicing a request via normal
+ malloc steps is faster than going through a system's mmap.
All together, these considerations should lead you to use mmap
only for relatively large requests.
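
Both the trim and mmap behaviors can be tuned at run time through mallopt() with the M_* parameters defined above. A minimal sketch (the threshold values are arbitrary examples; on glibc the same parameter names come from malloc.h):

    #include <malloc.h>

    int main(void)
    {
        /* Service requests of 1 MiB and up via mmap, allow at most 32
           simultaneous mmapped regions, and trim the top of the arena
           once 256 KiB of it is unused. */
        mallopt(M_MMAP_THRESHOLD, 1024 * 1024);
        mallopt(M_MMAP_MAX, 32);
        mallopt(M_TRIM_THRESHOLD, 256 * 1024);
        return 0;
    }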
@@ -753,7 +751,6 @@ struct mallinfo {
*/
-
#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX (64)
@@ -766,15 +763,15 @@ struct mallinfo {
M_MMAP_MAX is the maximum number of requests to simultaneously
service using mmap. This parameter exists because:
- 1. Some systems have a limited number of internal tables for
- use by mmap.
- 2. In most systems, overreliance on mmap can degrade overall
- performance.
- 3. If a program allocates many large regions, it is probably
- better off using normal sbrk-based allocation routines that
- can reclaim and reallocate normal heap memory. Using a
- small value allows transition into this mode after the
- first few allocations.
+ 1. Some systems have a limited number of internal tables for
+ use by mmap.
+ 2. In most systems, overreliance on mmap can degrade overall
+ performance.
+ 3. If a program allocates many large regions, it is probably
+ better off using normal sbrk-based allocation routines that
+ can reclaim and reallocate normal heap memory. Using a
+ small value allows transition into this mode after the
+ first few allocations.
Setting to 0 disables all use of mmap. If HAVE_MMAP is not set,
the default value is 0, and attempts to set it to non-zero values
@@ -782,8 +779,6 @@ struct mallinfo {
*/
-
-
/*
USE_DL_PREFIX will prefix all public routines with the string 'dl'.
Useful to quickly avoid procedure declaration conflicts and linker
@@ -794,8 +789,6 @@ struct mallinfo {
/* #define USE_DL_PREFIX */
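
With USE_DL_PREFIX defined, the public entry points become dlmalloc(), dlfree(), dlrealloc(), and so on, so this allocator can coexist with the platform's native one. A minimal usage sketch (the extern declarations are shown here only for illustration; the file provides the real prototypes):

    #include <stddef.h>

    extern void *dlmalloc(size_t);
    extern void  dlfree(void *);

    void demo(void)
    {
        void *p = dlmalloc(128);  /* served from dlmalloc's arena */
        dlfree(p);
    }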
-
-
/*
Special defines for linux libc
@@ -1013,7 +1006,7 @@ void gcleanup ()
rval = VirtualFree ((void*)gAddressBase,
gNextAddress - gAddressBase,
MEM_DECOMMIT);
- assert (rval);
+ assert (rval);
}
while (head)
{
@@ -1038,24 +1031,24 @@ void* findRegion (void* start_address, unsigned long size)
return start_address;
else
{
- // Requested region is not available so see if the
- // next region is available. Set 'start_address'
- // to the next region and call 'VirtualQuery()'
- // again.
+ /* Requested region is not available so see if the */
+ /* next region is available. Set 'start_address' */
+ /* to the next region and call 'VirtualQuery()' */
+ /* again. */
start_address = (char*)info.BaseAddress + info.RegionSize;
- // Make sure we start looking for the next region
- // on the *next* 64K boundary. Otherwise, even if
- // the new region is free according to
- // 'VirtualQuery()', the subsequent call to
- // 'VirtualAlloc()' (which follows the call to
- // this routine in 'wsbrk()') will round *down*
- // the requested address to a 64K boundary which
- // we already know is an address in the
- // unavailable region. Thus, the subsequent call
- // to 'VirtualAlloc()' will fail and bring us back
- // here, causing us to go into an infinite loop.
+ /* Make sure we start looking for the next region */
+ /* on the *next* 64K boundary. Otherwise, even if */
+ /* the new region is free according to */
+ /* 'VirtualQuery()', the subsequent call to */
+ /* 'VirtualAlloc()' (which follows the call to */
+ /* this routine in 'wsbrk()') will round *down* */
+ /* the requested address to a 64K boundary which */
+ /* we already know is an address in the */
+ /* unavailable region. Thus, the subsequent call */
+ /* to 'VirtualAlloc()' will fail and bring us back */
+ /* here, causing us to go into an infinite loop. */
start_address =
(void *) AlignPage64K((unsigned long) start_address);
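
The 64K rounding described in the comment is plain mask arithmetic; an illustrative equivalent of AlignPage64K:

    /* Round an address up to the next 64K boundary so the subsequent
       VirtualAlloc() cannot round back down into a region already
       known to be unavailable. */
    #define ALIGN_PAGE_64K(addr) \
        (((unsigned long)(addr) + 0xFFFFUL) & ~0xFFFFUL)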
@@ -1092,9 +1085,9 @@ gAllocatedSize))
gAddressBase = gNextAddress =
(unsigned int)VirtualAlloc (new_address, new_size,
MEM_RESERVE, PAGE_NOACCESS);
- // repeat in case of race condition
- // The region that we found has been snagged
- // by another thread
+ /* repeat in case of race condition */
+ /* The region that we found has been snagged */
+ /* by another thread */
}
while (gAddressBase == 0);
@@ -1182,17 +1175,17 @@ typedef struct malloc_chunk* mchunkptr;
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if allocated | |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |P|
+ | Size of previous chunk, if allocated | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of chunk, in bytes |P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | User data starts here... .
- . .
- . (malloc_usable_space() bytes) .
- . |
+ | User data starts here... .
+ . .
+ . (malloc_usable_space() bytes) .
+ . |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
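
The "mem" arrow in the diagram sits exactly two size fields past the chunk header, which is what the conversion macros defined elsewhere in this file encode:

    /* Convert between chunk headers and user pointers. */
    #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
    #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))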
Where "chunk" is the front of the chunk for the purpose of most of
@@ -1206,20 +1199,20 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Free chunks are stored in circular doubly-linked lists, and look like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`head:' | Size of chunk, in bytes |P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Forward pointer to next chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Back pointer to previous chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Unused space (may be 0 bytes long) .
- . .
- . |
+ | Forward pointer to next chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space (may be 0 bytes long) .
+ . .
+ . |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
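
Both diagrams correspond to the boundary-tag struct declared in this file; the fd/bk links are meaningful only while the chunk is free:

    struct malloc_chunk
    {
      INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
      INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
      struct malloc_chunk* fd;   /* double links -- used only if free. */
      struct malloc_chunk* bk;
    };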
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
chunk size (which is always a multiple of two words), is an in-use
@@ -1236,16 +1229,16 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The two exceptions to all this are
1. The special chunk `top', which doesn't bother using the
- trailing size field since there is no
- next contiguous chunk that would have to index off it. (After
- initialization, `top' is forced to always exist. If it would
- become less than MINSIZE bytes long, it is replenished via
- malloc_extend_top.)
+ trailing size field since there is no
+ next contiguous chunk that would have to index off it. (After
+ initialization, `top' is forced to always exist. If it would
+ become less than MINSIZE bytes long, it is replenished via
+ malloc_extend_top.)
2. Chunks allocated via mmap, which have the second-lowest-order
- bit (IS_MMAPPED) set in their size fields. Because they are
- never merged or traversed from any other chunk, they have no
- foot size or inuse information.
+ bit (IS_MMAPPED) set in their size fields. Because they are
+ never merged or traversed from any other chunk, they have no
+ foot size or inuse information.
Available chunks are kept in any of several places (all declared below):
@@ -1286,12 +1279,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
serviced via calls to mmap, and then later released via munmap.
*/
-
-
-
-
-
/* sizes, alignments */
#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
@@ -1531,7 +1519,7 @@ void malloc_bin_reloc (void)
((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \
((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
- 126)
+ 126)
/*
bins for chunks < 512 are all spaced 8 bytes apart, and hold
identically sized chunks. This is exploited in malloc.
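
For those small chunks the bin index is just the size shifted right by three, matching the 8-byte spacing; for example, a 64-byte chunk maps to bin 8:

    /* Small-chunk bin index (the smallbin_index macro in the source). */
    #define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)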
@@ -1829,7 +1817,6 @@ static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
(last_remainder->fd = last_remainder->bk = last_remainder)
-
@@ -2030,7 +2017,7 @@ static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
/* Guarantee the next brk will be at a page boundary */
correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
- ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
+ ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
/* Allocate correction */
new_brk = (char*)(MORECORE (correction));
@@ -2051,20 +2038,20 @@ static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
/* If not enough space to do this, then user did something very wrong */
if (old_top_size < MINSIZE)
{
- set_head(top, PREV_INUSE); /* will force null return from malloc */
- return;
+ set_head(top, PREV_INUSE); /* will force null return from malloc */
+ return;
}
/* Also keep size a multiple of MALLOC_ALIGNMENT */
old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
set_head_size(old_top, old_top_size);
chunk_at_offset(old_top, old_top_size )->size =
- SIZE_SZ|PREV_INUSE;
+ SIZE_SZ|PREV_INUSE;
chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
- SIZE_SZ|PREV_INUSE;
+ SIZE_SZ|PREV_INUSE;
/* If possible, release the rest. */
if (old_top_size >= MINSIZE)
- fREe(chunk2mem(old_top));
+ fREe(chunk2mem(old_top));
}
}
@@ -2095,43 +2082,43 @@ static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
From there, the first successful of the following steps is taken:
1. The bin corresponding to the request size is scanned, and if
- a chunk of exactly the right size is found, it is taken.
+ a chunk of exactly the right size is found, it is taken.
2. The most recently remaindered chunk is used if it is big
- enough. This is a form of (roving) first fit, used only in
- the absence of exact fits. Runs of consecutive requests use
- the remainder of the chunk used for the previous such request
- whenever possible. This limited use of a first-fit style
- allocation strategy tends to give contiguous chunks
- coextensive lifetimes, which improves locality and can reduce
- fragmentation in the long run.
+ enough. This is a form of (roving) first fit, used only in
+ the absence of exact fits. Runs of consecutive requests use
+ the remainder of the chunk used for the previous such request
+ whenever possible. This limited use of a first-fit style
+ allocation strategy tends to give contiguous chunks
+ coextensive lifetimes, which improves locality and can reduce
+ fragmentation in the long run.
3. Other bins are scanned in increasing size order, using a
- chunk big enough to fulfill the request, and splitting off
- any remainder. This search is strictly by best-fit; i.e.,
- the smallest (with ties going to approximately the least
- recently used) chunk that fits is selected.
+ chunk big enough to fulfill the request, and splitting off
+ any remainder. This search is strictly by best-fit; i.e.,
+ the smallest (with ties going to approximately the least
+ recently used) chunk that fits is selected.
4. If large enough, the chunk bordering the end of memory
- (`top') is split off. (This use of `top' is in accord with
- the best-fit search rule. In effect, `top' is treated as
- larger (and thus less well fitting) than any other available
- chunk since it can be extended to be as large as necessary
- (up to system limitations).
+ (`top') is split off. (This use of `top' is in accord with
+ the best-fit search rule. In effect, `top' is treated as
+ larger (and thus less well fitting) than any other available
+ chunk since it can be extended to be as large as necessary
+ (up to system limitations).
5. If the request size meets the mmap threshold and the
- system supports mmap, and there are few enough currently
- allocated mmapped regions, and a call to mmap succeeds,
- the request is allocated via direct memory mapping.
+ system supports mmap, and there are few enough currently
+ allocated mmapped regions, and a call to mmap succeeds,
+ the request is allocated via direct memory mapping.
6. Otherwise, the top of memory is extended by
- obtaining more space from the system (normally using sbrk,
- but definable to anything else via the MORECORE macro).
- Memory is gathered from the system (in system page-sized
- units) in a way that allows chunks obtained across different
- sbrk calls to be consolidated, but does not require
- contiguous memory. Thus, it should be safe to intersperse
- mallocs with other sbrk calls.
+ obtaining more space from the system (normally using sbrk,
+ but definable to anything else via the MORECORE macro).
+ Memory is gathered from the system (in system page-sized
+ units) in a way that allows chunks obtained across different
+ sbrk calls to be consolidated, but does not require
+ contiguous memory. Thus, it should be safe to intersperse
+ mallocs with other sbrk calls.
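
A consequence of the exact-fit rule in step 1: freeing a block and immediately re-requesting the same size will typically return the very same chunk. The probe below illustrates the policy (comparing a pointer after free() is not sanctioned by the C standard, so treat this as a demonstration, not a contract):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *a = malloc(100);
        free(a);                /* chunk goes into its size bin      */
        void *b = malloc(100);  /* step 1: exact fit, likely chunk a */
        printf("reused: %s\n", (a == b) ? "yes" : "no");
        free(b);
        return 0;
    }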
All allocations are made from the `lowest' part of any found
@@ -2208,16 +2195,16 @@ Void_t* mALLOc(bytes) size_t bytes;
if (remainder_size >= (long)MINSIZE) /* too big */
{
- --idx; /* adjust to rescan below after checking last remainder */
- break;
+ --idx; /* adjust to rescan below after checking last remainder */
+ break;
}
else if (remainder_size >= 0) /* exact fit */
{
- unlink(victim, bck, fwd);
- set_inuse_bit_at_offset(victim, victim_size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
+ unlink(victim, bck, fwd);
+ set_inuse_bit_at_offset(victim, victim_size);
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
}
}
@@ -2274,8 +2261,8 @@ Void_t* mALLOc(bytes) size_t bytes;
block <<= 1;
while ((block & binblocks) == 0)
{
- idx += BINBLOCKWIDTH;
- block <<= 1;
+ idx += BINBLOCKWIDTH;
+ block <<= 1;
}
}
@@ -2288,34 +2275,34 @@ Void_t* mALLOc(bytes) size_t bytes;
/* For each bin in this block ... */
do
{
- /* Find and use first big enough chunk ... */
-
- for (victim = last(bin); victim != bin; victim = victim->bk)
- {
- victim_size = chunksize(victim);
- remainder_size = victim_size - nb;
-
- if (remainder_size >= (long)MINSIZE) /* split */
- {
- remainder = chunk_at_offset(victim, nb);
- set_head(victim, nb | PREV_INUSE);
- unlink(victim, bck, fwd);
- link_last_remainder(remainder);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- else if (remainder_size >= 0) /* take */
- {
- set_inuse_bit_at_offset(victim, victim_size);
- unlink(victim, bck, fwd);
- check_malloced_chunk(victim, nb);
- return chunk2mem(victim);
- }
-
- }
+ /* Find and use first big enough chunk ... */
+
+ for (victim = last(bin); victim != bin; victim = victim->bk)
+ {
+ victim_size = chunksize(victim);
+ remainder_size = victim_size - nb;
+
+ if (remainder_size >= (long)MINSIZE) /* split */
+ {
+ remainder = chunk_at_offset(victim, nb);
+ set_head(victim, nb | PREV_INUSE);
+ unlink(victim, bck, fwd);
+ link_last_remainder(remainder);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_foot(remainder, remainder_size);
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+
+ else if (remainder_size >= 0) /* take */
+ {
+ set_inuse_bit_at_offset(victim, victim_size);
+ unlink(victim, bck, fwd);
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+
+ }
bin = next_bin(bin);
@@ -2325,12 +2312,12 @@ Void_t* mALLOc(bytes) size_t bytes;
do /* Possibly backtrack to try to clear a partial block */
{
- if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
- {
- binblocks &= ~block;
- break;
- }
- --startidx;
+ if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
+ {
+ binblocks &= ~block;
+ break;
+ }
+ --startidx;
q = prev_bin(q);
} while (first(q) == q);
@@ -2338,14 +2325,14 @@ Void_t* mALLOc(bytes) size_t bytes;
if ( (block <<= 1) <= binblocks && (block != 0) )
{
- while ((block & binblocks) == 0)
- {
- idx += BINBLOCKWIDTH;
- block <<= 1;
- }
+ while ((block & binblocks) == 0)
+ {
+ idx += BINBLOCKWIDTH;
+ block <<= 1;
+ }
}
else
- break;
+ break;
}
}
@@ -2359,7 +2346,7 @@ Void_t* mALLOc(bytes) size_t bytes;
#if HAVE_MMAP
/* If big and would otherwise need to extend, try to use mmap instead */
if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
- (victim = mmap_chunk(nb)) != 0)
+ (victim = mmap_chunk(nb)) != 0)
return chunk2mem(victim);
#endif
@@ -2392,13 +2379,13 @@ Void_t* mALLOc(bytes) size_t bytes;
2. If the chunk was allocated via mmap, it is release via munmap().
3. If a returned chunk borders the current high end of memory,
- it is consolidated into the top, and if the total unused
- topmost memory exceeds the trim threshold, malloc_trim is
- called.
+ it is consolidated into the top, and if the total unused
+ topmost memory exceeds the trim threshold, malloc_trim is
+ called.
4. Other chunks are consolidated as they arrive, and
- placed in corresponding bins. (This includes the case of
- consolidating with the current `last_remainder').
+ placed in corresponding bins. (This includes the case of
+ consolidating with the current `last_remainder').
*/
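
The trimming in step 3 can also be requested directly: malloc_trim(pad) attempts to return unused top-of-arena memory to the system via a negative sbrk. A sketch (on glibc the prototype comes from malloc.h; this file exports the same entry point):

    #include <malloc.h>

    void shrink_heap(void)
    {
        /* pad = bytes of slack to keep at the top; 0 releases as much
           as page granularity allows.  Returns 1 if memory was given
           back to the system, 0 otherwise. */
        int released = malloc_trim(0);
        (void)released;
    }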
@@ -2610,22 +2597,22 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
/* Forward into top only if a remainder */
if (next == top)
{
- if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
- {
- newsize += nextsize;
- top = chunk_at_offset(oldp, nb);
- set_head(top, (newsize - nb) | PREV_INUSE);
- set_head_size(oldp, nb);
- return chunk2mem(oldp);
- }
+ if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
+ {
+ newsize += nextsize;
+ top = chunk_at_offset(oldp, nb);
+ set_head(top, (newsize - nb) | PREV_INUSE);
+ set_head_size(oldp, nb);
+ return chunk2mem(oldp);
+ }
}
/* Forward into next chunk */
else if (((long)(nextsize + newsize) >= (long)(nb)))
{
- unlink(next, bck, fwd);
- newsize += nextsize;
- goto split;
+ unlink(next, bck, fwd);
+ newsize += nextsize;
+ goto split;
}
}
else
@@ -2645,45 +2632,45 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
if (next != 0)
{
- /* into top */
- if (next == top)
- {
- if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
- {
- unlink(prev, bck, fwd);
- newp = prev;
- newsize += prevsize + nextsize;
- newmem = chunk2mem(newp);
- MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
- top = chunk_at_offset(newp, nb);
- set_head(top, (newsize - nb) | PREV_INUSE);
- set_head_size(newp, nb);
- return newmem;
- }
- }
-
- /* into next chunk */
- else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
- {
- unlink(next, bck, fwd);
- unlink(prev, bck, fwd);
- newp = prev;
- newsize += nextsize + prevsize;
- newmem = chunk2mem(newp);
- MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
- goto split;
- }
+ /* into top */
+ if (next == top)
+ {
+ if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
+ {
+ unlink(prev, bck, fwd);
+ newp = prev;
+ newsize += prevsize + nextsize;
+ newmem = chunk2mem(newp);
+ MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
+ top = chunk_at_offset(newp, nb);
+ set_head(top, (newsize - nb) | PREV_INUSE);
+ set_head_size(newp, nb);
+ return newmem;
+ }
+ }
+
+ /* into next chunk */
+ else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
+ {
+ unlink(next, bck, fwd);
+ unlink(prev, bck, fwd);
+ newp = prev;
+ newsize += nextsize + prevsize;
+ newmem = chunk2mem(newp);
+ MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
+ goto split;
+ }
}
/* backward only */
if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
{
- unlink(prev, bck, fwd);
- newp = prev;
- newsize += prevsize;
- newmem = chunk2mem(newp);
- MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
- goto split;
+ unlink(prev, bck, fwd);
+ newp = prev;
+ newsize += prevsize;
+ newmem = chunk2mem(newp);
+ MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
+ goto split;
}
}
@@ -3015,25 +3002,25 @@ int malloc_trim(pad) size_t pad;
if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
{
- /* Try to figure out what we have */
- current_brk = (char*)(MORECORE (0));
- top_size = current_brk - (char*)top;
- if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
- {
- sbrked_mem = current_brk - sbrk_base;
- set_head(top, top_size | PREV_INUSE);
- }
- check_chunk(top);
- return 0;
+ /* Try to figure out what we have */
+ current_brk = (char*)(MORECORE (0));
+ top_size = current_brk - (char*)top;
+ if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
+ {
+ sbrked_mem = current_brk - sbrk_base;
+ set_head(top, top_size | PREV_INUSE);
+ }
+ check_chunk(top);
+ return 0;
}
else
{
- /* Success. Adjust top accordingly. */
- set_head(top, (top_size - extra) | PREV_INUSE);
- sbrked_mem -= extra;
- check_chunk(top);
- return 1;
+ /* Success. Adjust top accordingly. */
+ set_head(top, (top_size - extra) | PREV_INUSE);
+ sbrked_mem -= extra;
+ check_chunk(top);
+ return 1;
}
}
}
@@ -3100,9 +3087,9 @@ static void malloc_update_mallinfo()
#ifdef DEBUG
check_free_chunk(p);
for (q = next_chunk(p);
- q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
- q = next_chunk(q))
- check_inuse_chunk(q);
+ q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
+ q = next_chunk(q))
+ check_inuse_chunk(q);
#endif
avail += chunksize(p);
navail++;
@@ -3141,14 +3128,14 @@ void malloc_stats()
{
malloc_update_mallinfo();
printf("max system bytes = %10u\n",
- (unsigned int)(max_total_mem));
+ (unsigned int)(max_total_mem));
printf("system bytes = %10u\n",
- (unsigned int)(sbrked_mem + mmapped_mem));
+ (unsigned int)(sbrked_mem + mmapped_mem));
printf("in use bytes = %10u\n",
- (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
+ (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
printf("max mmap regions = %10u\n",
- (unsigned int)max_n_mmaps);
+ (unsigned int)max_n_mmaps);
#endif
}
#endif /* 0 */
@@ -3214,17 +3201,17 @@ History:
V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
* return null for negative arguments
* Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
- * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
- (e.g. WIN32 platforms)
- * Cleanup up header file inclusion for WIN32 platforms
- * Cleanup code to avoid Microsoft Visual C++ compiler complaints
- * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
- memory allocation routines
- * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
- * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+ * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+ (e.g. WIN32 platforms)
+ * Cleanup up header file inclusion for WIN32 platforms
+ * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+ * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+ memory allocation routines
+ * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+ * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
usage of 'assert' in non-WIN32 code
- * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
- avoid infinite loop
+ * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+ avoid infinite loop
* Always call 'fREe()' rather than 'free()'
V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
@@ -3236,13 +3223,13 @@ History:
* Added anonymously donated WIN32 sbrk emulation
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
* malloc_extend_top: fix mask error that caused wastage after
- foreign sbrks
+ foreign sbrks
* Add linux mremap support code from HJ Liu
V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
* Integrated most documentation with the code.
* Add support for mmap, with help from
- Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
* Use last_remainder in more cases.
* Pack bins using idea from colin@nyx10.cs.du.edu
* Use ordered bins instead of best-fit threshold
@@ -3250,34 +3237,34 @@ History:
* Support another case of realloc via move into top
* Fix error occurring when initial sbrk_base not word-aligned.
* Rely on page size for units instead of SBRK_UNIT to
- avoid surprises about sbrk alignment conventions.
+ avoid surprises about sbrk alignment conventions.
* Add mallinfo, mallopt. Thanks to Raymond Nijssen
- (raymond@es.ele.tue.nl) for the suggestion.
+ (raymond@es.ele.tue.nl) for the suggestion.
* Add `pad' argument to malloc_trim and top_pad mallopt parameter.
* More precautions for cases where other routines call sbrk,
- courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
* Added macros etc., allowing use in linux libc from
- H.J. Lu (hjl@gnu.ai.mit.edu)
+ H.J. Lu (hjl@gnu.ai.mit.edu)
* Inverted this history list
V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
* Re-tuned and fixed to behave more nicely with V2.6.0 changes.
* Removed all preallocation code since under current scheme
- the work required to undo bad preallocations exceeds
- the work saved in good cases for most test programs.
+ the work required to undo bad preallocations exceeds
+ the work saved in good cases for most test programs.
* No longer use return list or unconsolidated bins since
- no scheme using them consistently outperforms those that don't
- given above changes.
+ no scheme using them consistently outperforms those that don't
+ given above changes.
* Use best fit for very large chunks to prevent some worst-cases.
* Added some support for debugging
V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
* Removed footers when chunks are in use. Thanks to
- Paul Wilson (wilson@cs.texas.edu) for the suggestion.
+ Paul Wilson (wilson@cs.texas.edu) for the suggestion.
V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
* Added malloc_trim, with help from Wolfram Gloger
- (wmglo@Dent.MED.Uni-Muenchen.DE).
+ (wmglo@Dent.MED.Uni-Muenchen.DE).
V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
@@ -3293,11 +3280,11 @@ History:
V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
* faster bin computation & slightly different binning
* merged all consolidations to one part of malloc proper
- (eliminating old malloc_find_space & malloc_clean_bin)
+ (eliminating old malloc_find_space & malloc_clean_bin)
* Scan 2 returns chunks (not just 1)
* Propagate failure in realloc if malloc returns 0
* Add stuff to allow compilation on non-ANSI compilers
- from kpv@research.att.com
+ from kpv@research.att.com
V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
* removed potential for odd address access in prev_chunk
@@ -3305,13 +3292,11 @@ History:
* misc cosmetics and a bit more internal documentation
* anticosmetics: mangled names in macros to evade debugger strangeness
* tested on sparc, hp-700, dec-mips, rs6000
- with gcc & native cc (hp, dec only) allowing
- Detlefs & Zorn comparison study (in SIGPLAN Notices.)
+ with gcc & native cc (hp, dec only) allowing
+ Detlefs & Zorn comparison study (in SIGPLAN Notices.)
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
* Based loosely on libg++-1.2X malloc. (It retains some of the overall
- structure of old version, but most details differ.)
+ structure of old version, but most details differ.)
*/
-
-