author    Andreas Schwab <schwab@redhat.com>  2009-09-01 10:08:05 +0200
committer Andreas Schwab <schwab@redhat.com>  2009-09-01 10:08:05 +0200
commit    a562fe3cdbb82fae5868e245de31031c19432550 (patch)
tree      542c921e7b94c7c35b433f0dce140adbbaf3ec24 /malloc
parent    62f946ade44beeabffb8e91c989ff2bd47293e4c (diff)
parent    c2735e958acc6a69daab71fdc463564860f20794 (diff)
Merge commit 'origin/master' into fedora/master
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/malloc.c  900
1 file changed, 461 insertions(+), 439 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index a459a2b89d..79ba6b6f06 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -107,7 +107,7 @@
and status information.
Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
- 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
+ 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
@@ -126,7 +126,7 @@
minimal mmap unit); typically 4096 or 8192 bytes.
Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
- 8-byte size_t: 2^64 minus about two pages
+ 8-byte size_t: 2^64 minus about two pages
It is assumed that (possibly signed) size_t values suffice to
represent chunk sizes. `Possibly signed' is due to the fact
@@ -329,7 +329,29 @@ extern "C" {
or other mallocs available that do this.
*/
-#include <assert.h>
+#ifdef NDEBUG
+# define assert(expr) ((void) 0)
+#else
+# define assert(expr) \
+ ((expr) \
+ ? ((void) 0) \
+ : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
+
+extern const char *__progname;
+
+static void
+__malloc_assert (const char *assertion, const char *file, unsigned int line,
+ const char *function)
+{
+ (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
+ __progname, __progname[0] ? ": " : "",
+ file, line,
+ function ? function : "", function ? ": " : "",
+ assertion);
+ fflush (stderr);
+ abort ();
+}
+#endif
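The block above replaces the plain <assert.h> include so that a failed consistency check inside malloc reports the program name and the failing function through __fxprintf before aborting. Purely as an illustration of the pattern (not the glibc build itself), the sketch below shows how such an expression-form assert macro compiles to a no-op under -DNDEBUG; my_assert and demo are hypothetical names.

#include <stdio.h>
#include <stdlib.h>

#ifdef NDEBUG
# define my_assert(expr) ((void) 0)
#else
# define my_assert(expr) \
  ((expr) ? (void) 0 \
          : (fprintf (stderr, "%s:%d: %s: Assertion `%s' failed.\n", \
                      __FILE__, __LINE__, __func__, #expr), abort ()))
#endif

static void demo (int n)
{
  my_assert (n > 0);   /* with -DNDEBUG this check disappears entirely */
  printf ("n = %d\n", n);
}

int main (void)
{
  demo (1);
  return 0;
}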
/*
@@ -1000,17 +1022,17 @@ int public_mALLOPt();
arena: current total non-mmapped bytes allocated from system
ordblks: the number of free chunks
smblks: the number of fastbin blocks (i.e., small chunks that
- have been freed but not use resused or consolidated)
+ have been freed but not use resused or consolidated)
hblks: current number of mmapped regions
hblkhd: total bytes held in mmapped regions
usmblks: the maximum total allocated space. This will be greater
- than current total if trimming has occurred.
+ than current total if trimming has occurred.
fsmblks: total bytes held in fastbin blocks
uordblks: current total allocated space (normal or mmapped)
fordblks: total free space
keepcost: the maximum number of bytes that could ideally be released
- back to system via malloc_trim. ("ideally" means that
- it ignores page restrictions etc.)
+ back to system via malloc_trim. ("ideally" means that
+ it ignores page restrictions etc.)
Because these fields are ints, but internal bookkeeping may
be kept as longs, the reported values may wrap around zero and
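The statistics listed above are the fields of struct mallinfo. As a small usage sketch (independent of this change), the program below queries them through the public mallinfo() call; on current glibc, mallinfo2() exists precisely because of the int-wraparound caveat the comment describes.

/* Query the allocator statistics documented above via mallinfo(). */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (1000);      /* make sure the main arena is initialized */
  struct mallinfo mi = mallinfo ();

  printf ("arena (non-mmapped bytes from system): %d\n", mi.arena);
  printf ("ordblks (free chunks):                 %d\n", mi.ordblks);
  printf ("hblkhd (bytes in mmapped regions):     %d\n", mi.hblkhd);
  printf ("uordblks (total allocated space):      %d\n", mi.uordblks);
  printf ("fordblks (total free space):           %d\n", mi.fordblks);
  printf ("keepcost (trimmable top-most space):   %d\n", mi.keepcost);

  free (p);
  return 0;
}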
@@ -1817,17 +1839,17 @@ struct malloc_chunk {
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if allocated | |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |M|P|
+ | Size of previous chunk, if allocated | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of chunk, in bytes |M|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | User data starts here... .
- . .
- . (malloc_usable_size() bytes) .
- . |
+ | User data starts here... .
+ . .
+ . (malloc_usable_size() bytes) .
+ . |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Where "chunk" is the front of the chunk for the purpose of most of
@@ -1841,20 +1863,20 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Free chunks are stored in circular doubly-linked lists, and look like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of previous chunk |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`head:' | Size of chunk, in bytes |P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Forward pointer to next chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Back pointer to previous chunk in list |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Unused space (may be 0 bytes long) .
- . .
- . |
+ | Forward pointer to next chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Back pointer to previous chunk in list |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Unused space (may be 0 bytes long) .
+ . .
+ . |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
chunk size (which is always a multiple of two words), is an in-use
@@ -1875,14 +1897,14 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The two exceptions to all this are
1. The special chunk `top' doesn't bother using the
- trailing size field since there is no next contiguous chunk
- that would have to index off it. After initialization, `top'
- is forced to always exist. If it would become less than
- MINSIZE bytes long, it is replenished.
+ trailing size field since there is no next contiguous chunk
+ that would have to index off it. After initialization, `top'
+ is forced to always exist. If it would become less than
+ MINSIZE bytes long, it is replenished.
2. Chunks allocated via mmap, which have the second-lowest-order
- bit M (IS_MMAPPED) set in their size fields. Because they are
- allocated one-by-one, each must contain its own trailing size field.
+ bit M (IS_MMAPPED) set in their size fields. Because they are
+ allocated one-by-one, each must contain its own trailing size field.
*/
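For readers following the box diagrams above, here is a deliberately simplified rendering of the chunk header and the two low-order status bits. It is a sketch, not the in-tree definitions: it uses size_t in place of INTERNAL_SIZE_T, omits the fd_nextsize/bk_nextsize links used by large bins, and renames the macros so they are not mistaken for the exact glibc ones.

#include <stddef.h>

struct malloc_chunk_sketch {
  size_t prev_size;                  /* size of previous chunk, if it is free */
  size_t size;                       /* chunk size with status bits or'ed in  */
  struct malloc_chunk_sketch *fd;    /* list links, only used while free      */
  struct malloc_chunk_sketch *bk;
};

#define PREV_INUSE_BIT  0x1          /* the P bit from the diagrams */
#define IS_MMAPPED_BIT  0x2          /* the M bit from the diagrams */

/* Size with the low status bits masked off, and the two bit tests. */
#define sketch_chunksize(p)   ((p)->size & ~(size_t) 0x7)
#define sketch_prev_inuse(p)  ((p)->size & PREV_INUSE_BIT)
#define sketch_is_mmapped(p)  ((p)->size & IS_MMAPPED_BIT)

/* "mem" (what malloc returns) sits two size fields past the chunk head. */
#define sketch_chunk2mem(p)   ((void *) ((char *) (p) + 2 * sizeof (size_t)))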
@@ -2155,7 +2177,7 @@ typedef struct malloc_chunk* mbinptr;
((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
- 126)
+ 126)
// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
@@ -2166,7 +2188,7 @@ typedef struct malloc_chunk* mbinptr;
((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
- 126)
+ 126)
#define largebin_index(sz) \
(SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
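To make the shift/threshold chain concrete, the standalone sketch below models only the >>12, >>15 and >>18 branches visible in this hunk (the real macros have earlier >>6 and >>9 branches that catch smaller large chunks first) and evaluates it for two example sizes.

#include <stdio.h>

static int
bin_tail_sketch (unsigned long sz)
{
  if ((sz >> 12) <= 10) return 110 + (int) (sz >> 12);
  if ((sz >> 15) <= 4)  return 119 + (int) (sz >> 15);
  if ((sz >> 18) <= 2)  return 124 + (int) (sz >> 18);
  return 126;
}

int main (void)
{
  /* 98304-byte chunk: 98304 >> 12 = 24 (> 10), >> 15 = 3 (<= 4) -> bin 122 */
  printf ("96 KiB chunk -> bin %d\n", bin_tail_sketch (96UL * 1024));
  /* anything that falls through every test shares the last bin, 126 */
  printf ("8 MiB chunk  -> bin %d\n", bin_tail_sketch (8UL * 1024 * 1024));
  return 0;
}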
@@ -2592,8 +2614,8 @@ static void do_check_chunk(av, p) mstate av; mchunkptr p;
/* Has legal address ... */
if (p != av->top) {
if (contiguous(av)) {
- assert(((char*)p) >= min_address);
- assert(((char*)p + sz) <= ((char*)(av->top)));
+ assert(((char*)p) >= min_address);
+ assert(((char*)p + sz) <= ((char*)(av->top)));
}
}
else {
@@ -2850,9 +2872,9 @@ static void do_check_malloc_state(mstate av)
unsigned int binbit = get_binmap(av,i);
int empty = last(b) == b;
if (!binbit)
- assert(empty);
+ assert(empty);
else if (!empty)
- assert(binbit);
+ assert(binbit);
}
for (p = last(b); p != b; p = p->bk) {
@@ -2861,12 +2883,12 @@ static void do_check_malloc_state(mstate av)
size = chunksize(p);
total += size;
if (i >= 2) {
- /* chunk belongs in bin */
- idx = bin_index(size);
- assert(idx == i);
- /* lists are sorted */
- assert(p->bk == b ||
- (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
+ /* chunk belongs in bin */
+ idx = bin_index(size);
+ assert(idx == i);
+ /* lists are sorted */
+ assert(p->bk == b ||
+ (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
if (!in_smallbin_range(size))
{
@@ -2894,10 +2916,10 @@ static void do_check_malloc_state(mstate av)
assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
/* chunk is followed by a legal chain of inuse chunks */
for (q = next_chunk(p);
- (q != av->top && inuse(q) &&
- (unsigned long)(chunksize(q)) >= MINSIZE);
- q = next_chunk(q))
- do_check_inuse_chunk(av, q);
+ (q != av->top && inuse(q) &&
+ (unsigned long)(chunksize(q)) >= MINSIZE);
+ q = next_chunk(q))
+ do_check_inuse_chunk(av, q);
}
}
@@ -2913,14 +2935,14 @@ static void do_check_malloc_state(mstate av)
assert(mp_.n_mmaps <= mp_.max_n_mmaps);
assert((unsigned long)(av->system_mem) <=
- (unsigned long)(av->max_system_mem));
+ (unsigned long)(av->max_system_mem));
assert((unsigned long)(mp_.mmapped_mem) <=
- (unsigned long)(mp_.max_mmapped_mem));
+ (unsigned long)(mp_.max_mmapped_mem));
#ifdef NO_THREADS
assert((unsigned long)(mp_.max_total_mem) >=
- (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
+ (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
#endif
}
#endif
@@ -3005,51 +3027,51 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
if (mm != MAP_FAILED) {
- /*
- The offset to the start of the mmapped region is stored
- in the prev_size field of the chunk. This allows us to adjust
- returned start address to meet alignment requirements here
- and in memalign(), and still be able to compute proper
- address argument for later munmap in free() and realloc().
- */
+ /*
+ The offset to the start of the mmapped region is stored
+ in the prev_size field of the chunk. This allows us to adjust
+ returned start address to meet alignment requirements here
+ and in memalign(), and still be able to compute proper
+ address argument for later munmap in free() and realloc().
+ */
#if 1
/* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
+ assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
#else
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0) {
- correction = MALLOC_ALIGNMENT - front_misalign;
- p = (mchunkptr)(mm + correction);
- p->prev_size = correction;
- set_head(p, (size - correction) |IS_MMAPPED);
- }
- else
+ front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0) {
+ correction = MALLOC_ALIGNMENT - front_misalign;
+ p = (mchunkptr)(mm + correction);
+ p->prev_size = correction;
+ set_head(p, (size - correction) |IS_MMAPPED);
+ }
+ else
#endif
{
p = (mchunkptr)mm;
set_head(p, size|IS_MMAPPED);
}
- /* update statistics */
+ /* update statistics */
- if (++mp_.n_mmaps > mp_.max_n_mmaps)
- mp_.max_n_mmaps = mp_.n_mmaps;
+ if (++mp_.n_mmaps > mp_.max_n_mmaps)
+ mp_.max_n_mmaps = mp_.n_mmaps;
- sum = mp_.mmapped_mem += size;
- if (sum > (unsigned long)(mp_.max_mmapped_mem))
- mp_.max_mmapped_mem = sum;
+ sum = mp_.mmapped_mem += size;
+ if (sum > (unsigned long)(mp_.max_mmapped_mem))
+ mp_.max_mmapped_mem = sum;
#ifdef NO_THREADS
- sum += av->system_mem;
- if (sum > (unsigned long)(mp_.max_total_mem))
- mp_.max_total_mem = sum;
+ sum += av->system_mem;
+ if (sum > (unsigned long)(mp_.max_total_mem))
+ mp_.max_total_mem = sum;
#endif
- check_chunk(av, p);
+ check_chunk(av, p);
- return chunk2mem(p);
+ return chunk2mem(p);
}
}
}
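The comment in the hunk above explains that, for an mmap'ed chunk, the gap between the start of the mapping and the chunk header is stashed in prev_size. A hedged sketch of how free() can then rebuild the exact munmap() arguments follows; munmap_mmapped_chunk_sketch and chunk_hdr are illustrative names, not glibc internals.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

struct chunk_hdr { size_t prev_size; size_t size; };

static void
munmap_mmapped_chunk_sketch (struct chunk_hdr *p)
{
  size_t size = p->size & ~(size_t) 0x7;            /* strip status bits   */
  uintptr_t block = (uintptr_t) p - p->prev_size;   /* true mapping start  */
  size_t total = p->prev_size + size;               /* true mapping length */
  munmap ((void *) block, total);
}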
@@ -3069,8 +3091,8 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
*/
assert((old_top == initial_top(av) && old_size == 0) ||
- ((unsigned long) (old_size) >= MINSIZE &&
- prev_inuse(old_top) &&
+ ((unsigned long) (old_size) >= MINSIZE &&
+ prev_inuse(old_top) &&
((unsigned long)old_end & pagemask) == 0));
/* Precondition: not enough current space to satisfy nb request */
@@ -3096,7 +3118,7 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
arena_mem += old_heap->size - old_heap_size;
#if 0
if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
- max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
+ max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
| PREV_INUSE);
@@ -3205,17 +3227,17 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
if (mbrk != MAP_FAILED) {
- /* We do not need, and cannot use, another sbrk call to find end */
- brk = mbrk;
- snd_brk = brk + size;
-
- /*
- Record that we no longer have a contiguous sbrk region.
- After the first time mmap is used as backup, we do not
- ever rely on contiguous space since this could incorrectly
- bridge regions.
- */
- set_noncontiguous(av);
+ /* We do not need, and cannot use, another sbrk call to find end */
+ brk = mbrk;
+ snd_brk = brk + size;
+
+ /*
+ Record that we no longer have a contiguous sbrk region.
+ After the first time mmap is used as backup, we do not
+ ever rely on contiguous space since this could incorrectly
+ bridge regions.
+ */
+ set_noncontiguous(av);
}
}
#endif
@@ -3242,19 +3264,19 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
Otherwise, make adjustments:
* If the first time through or noncontiguous, we need to call sbrk
- just to find out where the end of memory lies.
+ just to find out where the end of memory lies.
* We need to ensure that all returned chunks from malloc will meet
- MALLOC_ALIGNMENT
+ MALLOC_ALIGNMENT
* If there was an intervening foreign sbrk, we need to adjust sbrk
- request size to account for fact that we will not be able to
- combine new space with existing space in old_top.
+ request size to account for fact that we will not be able to
+ combine new space with existing space in old_top.
* Almost all systems internally allocate whole pages at a time, in
- which case we might as well use the whole last page of request.
- So we allocate enough more memory to hit a page boundary now,
- which in turn causes future contiguous calls to page-align.
+ which case we might as well use the whole last page of request.
+ So we allocate enough more memory to hit a page boundary now,
+ which in turn causes future contiguous calls to page-align.
*/
else {
@@ -3270,51 +3292,51 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
if (old_size)
av->system_mem += brk - old_end;
- /* Guarantee alignment of first new chunk made from this space */
+ /* Guarantee alignment of first new chunk made from this space */
- front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0) {
+ front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0) {
- /*
- Skip over some bytes to arrive at an aligned position.
- We don't need to specially mark these wasted front bytes.
- They will never be accessed anyway because
- prev_inuse of av->top (and any chunk created from its start)
- is always true after initialization.
- */
+ /*
+ Skip over some bytes to arrive at an aligned position.
+ We don't need to specially mark these wasted front bytes.
+ They will never be accessed anyway because
+ prev_inuse of av->top (and any chunk created from its start)
+ is always true after initialization.
+ */
- correction = MALLOC_ALIGNMENT - front_misalign;
- aligned_brk += correction;
- }
+ correction = MALLOC_ALIGNMENT - front_misalign;
+ aligned_brk += correction;
+ }
- /*
- If this isn't adjacent to existing space, then we will not
- be able to merge with old_top space, so must add to 2nd request.
- */
+ /*
+ If this isn't adjacent to existing space, then we will not
+ be able to merge with old_top space, so must add to 2nd request.
+ */
- correction += old_size;
+ correction += old_size;
- /* Extend the end address to hit a page boundary */
- end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
- correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
+ /* Extend the end address to hit a page boundary */
+ end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
+ correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
- assert(correction >= 0);
- snd_brk = (char*)(MORECORE(correction));
+ assert(correction >= 0);
+ snd_brk = (char*)(MORECORE(correction));
- /*
- If can't allocate correction, try to at least find out current
- brk. It might be enough to proceed without failing.
+ /*
+ If can't allocate correction, try to at least find out current
+ brk. It might be enough to proceed without failing.
- Note that if second sbrk did NOT fail, we assume that space
- is contiguous with first sbrk. This is a safe assumption unless
- program is multithreaded but doesn't use locks and a foreign sbrk
- occurred between our first and second calls.
- */
+ Note that if second sbrk did NOT fail, we assume that space
+ is contiguous with first sbrk. This is a safe assumption unless
+ program is multithreaded but doesn't use locks and a foreign sbrk
+ occurred between our first and second calls.
+ */
- if (snd_brk == (char*)(MORECORE_FAILURE)) {
- correction = 0;
- snd_brk = (char*)(MORECORE(0));
- } else {
+ if (snd_brk == (char*)(MORECORE_FAILURE)) {
+ correction = 0;
+ snd_brk = (char*)(MORECORE(0));
+ } else {
/* Call the `morecore' hook if necessary. */
void (*hook) (void) = force_reg (__after_morecore_hook);
if (__builtin_expect (hook != NULL, 0))
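The correction arithmetic in the hunk above does two things: it pushes the first new chunk up to a MALLOC_ALIGNMENT boundary, and it pads the request so the new break lands on a page boundary. The numeric sketch below walks through both steps, assuming a 64-bit build (MALLOC_ALIGNMENT = 16, 4 KiB pages) and entirely made-up addresses and sizes.

#include <stdio.h>
#include <stdint.h>

int main (void)
{
  const uintptr_t alignment = 16, align_mask = alignment - 1;
  const uintptr_t pagemask  = 4096 - 1;

  uintptr_t brk = 0x602008;                  /* hypothetical sbrk result   */
  uintptr_t mem = brk + 16;                  /* chunk2mem: skip the header */

  /* front correction: push mem up to the next 16-byte boundary */
  uintptr_t front_misalign = mem & align_mask;
  uintptr_t correction = front_misalign ? alignment - front_misalign : 0;

  /* end correction: extend the request so the break ends on a page boundary */
  uintptr_t size = 0x21000;                  /* hypothetical request size  */
  uintptr_t end_misalign = brk + size + correction;
  correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

  printf ("front correction + page padding = %lu bytes\n",
          (unsigned long) correction);
  return 0;
}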
@@ -3324,61 +3346,61 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
/* handle non-contiguous cases */
else {
- /* MORECORE/mmap must correctly align */
- assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
+ /* MORECORE/mmap must correctly align */
+ assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
- /* Find out current end of memory */
- if (snd_brk == (char*)(MORECORE_FAILURE)) {
- snd_brk = (char*)(MORECORE(0));
- }
+ /* Find out current end of memory */
+ if (snd_brk == (char*)(MORECORE_FAILURE)) {
+ snd_brk = (char*)(MORECORE(0));
+ }
}
/* Adjust top based on results of second sbrk */
if (snd_brk != (char*)(MORECORE_FAILURE)) {
- av->top = (mchunkptr)aligned_brk;
- set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
- av->system_mem += correction;
-
- /*
- If not the first time through, we either have a
- gap due to foreign sbrk or a non-contiguous region. Insert a
- double fencepost at old_top to prevent consolidation with space
- we don't own. These fenceposts are artificial chunks that are
- marked as inuse and are in any case too small to use. We need
- two to make sizes and alignments work out.
- */
-
- if (old_size != 0) {
- /*
- Shrink old_top to insert fenceposts, keeping size a
- multiple of MALLOC_ALIGNMENT. We know there is at least
- enough space in old_top to do this.
- */
- old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
- set_head(old_top, old_size | PREV_INUSE);
-
- /*
- Note that the following assignments completely overwrite
- old_top when old_size was previously MINSIZE. This is
- intentional. We need the fencepost, even if old_top otherwise gets
- lost.
- */
- chunk_at_offset(old_top, old_size )->size =
- (2*SIZE_SZ)|PREV_INUSE;
-
- chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
- (2*SIZE_SZ)|PREV_INUSE;
-
- /* If possible, release the rest. */
- if (old_size >= MINSIZE) {
+ av->top = (mchunkptr)aligned_brk;
+ set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
+ av->system_mem += correction;
+
+ /*
+ If not the first time through, we either have a
+ gap due to foreign sbrk or a non-contiguous region. Insert a
+ double fencepost at old_top to prevent consolidation with space
+ we don't own. These fenceposts are artificial chunks that are
+ marked as inuse and are in any case too small to use. We need
+ two to make sizes and alignments work out.
+ */
+
+ if (old_size != 0) {
+ /*
+ Shrink old_top to insert fenceposts, keeping size a
+ multiple of MALLOC_ALIGNMENT. We know there is at least
+ enough space in old_top to do this.
+ */
+ old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
+ set_head(old_top, old_size | PREV_INUSE);
+
+ /*
+ Note that the following assignments completely overwrite
+ old_top when old_size was previously MINSIZE. This is
+ intentional. We need the fencepost, even if old_top otherwise gets
+ lost.
+ */
+ chunk_at_offset(old_top, old_size )->size =
+ (2*SIZE_SZ)|PREV_INUSE;
+
+ chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
+ (2*SIZE_SZ)|PREV_INUSE;
+
+ /* If possible, release the rest. */
+ if (old_size >= MINSIZE) {
#ifdef ATOMIC_FASTBINS
- _int_free(av, old_top, 1);
+ _int_free(av, old_top, 1);
#else
- _int_free(av, old_top);
+ _int_free(av, old_top);
#endif
- }
+ }
- }
+ }
}
}
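The fencepost logic in the hunk above shrinks old_top by 4*SIZE_SZ and writes two minimal chunk headers of 2*SIZE_SZ each, so the new top can never be consolidated back across memory the arena no longer owns. The arithmetic is easy to check with a small sketch, again assuming a 64-bit build (SIZE_SZ = 8, MALLOC_ALIGN_MASK = 15) and a made-up old_size.

#include <stdio.h>

int main (void)
{
  unsigned long SIZE_SZ = 8, ALIGN_MASK = 15;
  unsigned long old_size = 200;                      /* hypothetical        */

  unsigned long shrunk = (old_size - 4 * SIZE_SZ) & ~ALIGN_MASK;
  unsigned long fence1_off = shrunk;                 /* first fencepost     */
  unsigned long fence2_off = shrunk + 2 * SIZE_SZ;   /* second fencepost    */

  printf ("old_top keeps %lu bytes; fenceposts at +%lu and +%lu, "
          "each sized 2*SIZE_SZ = %lu\n",
          shrunk, fence1_off, fence2_off, 2 * SIZE_SZ);
  return 0;
}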
@@ -3456,13 +3478,13 @@ static int sYS