From 6c8dbf00f536d78b1937b5af6f57be47fd376344 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20B=C3=ADlka?= Date: Thu, 2 Jan 2014 09:38:18 +0100 Subject: Reformat malloc to gnu style. --- malloc/malloc.c | 4283 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 2214 insertions(+), 2069 deletions(-) (limited to 'malloc/malloc.c') diff --git a/malloc/malloc.c b/malloc/malloc.c index 63d1d152ab..813e94eea3 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -353,10 +353,10 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line, malloc_set_state than will returning blocks not adequately aligned for long double objects under -mlong-double-128. */ -# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \ - ? __alignof__ (long double) : 2 * SIZE_SZ) +# define MALLOC_ALIGNMENT (2 *SIZE_SZ < __alignof__ (long double) \ + ? __alignof__ (long double) : 2 *SIZE_SZ) # else -# define MALLOC_ALIGNMENT (2 * SIZE_SZ) +# define MALLOC_ALIGNMENT (2 *SIZE_SZ) # endif #endif @@ -463,10 +463,10 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore; some systems, if the application first decrements and then increments the break value, the contents of the reallocated space are unspecified. -*/ + */ #ifndef MORECORE_CLEARS -#define MORECORE_CLEARS 1 +# define MORECORE_CLEARS 1 #endif @@ -1232,11 +1232,11 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Check if a request is so large that it would wrap around zero when padded and aligned. To simplify some other code, the bound is made low enough so that adding MINSIZE will also not wrap around zero. -*/ + */ #define REQUEST_OUT_OF_RANGE(req) \ - ((unsigned long)(req) >= \ - (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE)) + ((unsigned long) (req) >= \ + (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE)) /* pad request bytes into a usable size -- internal version */ @@ -1248,15 +1248,15 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /* Same, except also perform argument check */ #define checked_request2size(req, sz) \ - if (REQUEST_OUT_OF_RANGE(req)) { \ - __set_errno (ENOMEM); \ - return 0; \ - } \ - (sz) = request2size(req); + if (REQUEST_OUT_OF_RANGE (req)) { \ + __set_errno (ENOMEM); \ + return 0; \ + } \ + (sz) = request2size (req); /* - --------------- Physical chunk operations --------------- -*/ + --------------- Physical chunk operations --------------- + */ /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ @@ -1283,49 +1283,49 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /* - Bits to mask off when extracting size + Bits to mask off when extracting size - Note: IS_MMAPPED is intentionally not masked off from size field in - macros for which mmapped chunks should never be seen. This should - cause helpful core dumps to occur if it is tried by accident by - people extending or adapting this malloc. -*/ -#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA) + Note: IS_MMAPPED is intentionally not masked off from size field in + macros for which mmapped chunks should never be seen. This should + cause helpful core dumps to occur if it is tried by accident by + people extending or adapting this malloc. + */ +#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA) /* Get size, ignoring use bits */ #define chunksize(p) ((p)->size & ~(SIZE_BITS)) /* Ptr to next physical malloc_chunk. 
*/ -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) )) +#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS))) /* Ptr to previous physical malloc_chunk */ -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) +#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size))) /* Treat space at ptr + offset as a chunk */ -#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s))) /* extract p's inuse bit */ -#define inuse(p)\ -((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE) +#define inuse(p) \ + ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE) /* set/clear chunk as being inuse without otherwise disturbing */ -#define set_inuse(p)\ -((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE +#define set_inuse(p) \ + ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE -#define clear_inuse(p)\ -((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE) +#define clear_inuse(p) \ + ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE) /* check/set/clear inuse bits in known places */ -#define inuse_bit_at_offset(p, s)\ - (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) +#define inuse_bit_at_offset(p, s) \ + (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE) -#define set_inuse_bit_at_offset(p, s)\ - (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) +#define set_inuse_bit_at_offset(p, s) \ + (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE) -#define clear_inuse_bit_at_offset(p, s)\ - (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) +#define clear_inuse_bit_at_offset(p, s) \ + (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE)) /* Set size at head, without disturbing its use bit */ @@ -1335,26 +1335,26 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ #define set_head(p, s) ((p)->size = (s)) /* Set size at footer (only when chunk is not in use) */ -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) +#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s)) /* - -------------------- Internal data structures -------------------- + -------------------- Internal data structures -------------------- All internal state is held in an instance of malloc_state defined below. There are no other static variables, except in two optional cases: - * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. - * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor + * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. + * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor for mmap. Beware of lots of tricks that minimize the total bookkeeping space requirements. The result is a little over 1K bytes (for 4byte pointers and size_t.) -*/ + */ /* - Bins + Bins An array of bin headers for free chunks. Each bin is doubly linked. The bins are approximately proportionally (log) spaced. @@ -1387,17 +1387,17 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ But to conserve space and improve locality, we allocate only the fd/bk pointers of bins, and then use repositioning tricks to treat these as the fields of a malloc_chunk*. 
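   For illustration (not part of the patch), a minimal standalone sketch of
   the repositioning trick described above; toy_chunk, toy_bins and
   toy_bin_at are hypothetical stand-ins for malloc_chunk, bins and bin_at,
   and only the two pointer slots per bin are backed by real storage.

     #include <stddef.h>
     #include <stdio.h>

     struct toy_chunk
     {
       size_t prev_size;
       size_t size;
       struct toy_chunk *fd;
       struct toy_chunk *bk;
     };

     static struct toy_chunk *toy_bins[2 * 4];   /* fd/bk pairs for 4 bins */

     /* Back up from the bin's first pointer slot by offsetof (fd) so the
        pair can be addressed as the fd/bk fields of a pretend chunk.  */
     #define toy_bin_at(i) \
       ((struct toy_chunk *) ((char *) &toy_bins[((i) - 1) * 2] \
                              - offsetof (struct toy_chunk, fd)))

     int
     main (void)
     {
       struct toy_chunk *bin = toy_bin_at (1);
       bin->fd = bin->bk = bin;  /* an empty, circularly linked bin */
       /* The "fields" land exactly on the array slots; the pretend
          prev_size/size members are never read or written.  */
       printf ("%d %d\n",
               (void *) &bin->fd == (void *) &toy_bins[0],
               (void *) &bin->bk == (void *) &toy_bins[1]);
       return 0;
     }

   The space saving comes from never materializing the header words of the
   pretend chunk: each bin costs two pointers instead of a full malloc_chunk.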
-*/ + */ -typedef struct malloc_chunk* mbinptr; +typedef struct malloc_chunk *mbinptr; /* addressing -- note that bin_at(0) does not exist */ #define bin_at(m, i) \ (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \ - - offsetof (struct malloc_chunk, fd)) + - offsetof (struct malloc_chunk, fd)) /* analog of ++bin */ -#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1))) +#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1))) /* Reminders about list directionality within bins */ #define first(b) ((b)->fd) @@ -1405,36 +1405,36 @@ typedef struct malloc_chunk* mbinptr; /* Take a chunk off a bin list */ #define unlink(P, BK, FD) { \ - FD = P->fd; \ - BK = P->bk; \ - if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \ - malloc_printerr (check_action, "corrupted double-linked list", P); \ - else { \ - FD->bk = BK; \ - BK->fd = FD; \ - if (!in_smallbin_range (P->size) \ - && __builtin_expect (P->fd_nextsize != NULL, 0)) { \ - assert (P->fd_nextsize->bk_nextsize == P); \ - assert (P->bk_nextsize->fd_nextsize == P); \ - if (FD->fd_nextsize == NULL) { \ - if (P->fd_nextsize == P) \ - FD->fd_nextsize = FD->bk_nextsize = FD; \ - else { \ - FD->fd_nextsize = P->fd_nextsize; \ - FD->bk_nextsize = P->bk_nextsize; \ - P->fd_nextsize->bk_nextsize = FD; \ - P->bk_nextsize->fd_nextsize = FD; \ - } \ - } else { \ - P->fd_nextsize->bk_nextsize = P->bk_nextsize; \ - P->bk_nextsize->fd_nextsize = P->fd_nextsize; \ - } \ - } \ - } \ + FD = P->fd; \ + BK = P->bk; \ + if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \ + malloc_printerr (check_action, "corrupted double-linked list", P); \ + else { \ + FD->bk = BK; \ + BK->fd = FD; \ + if (!in_smallbin_range (P->size) \ + && __builtin_expect (P->fd_nextsize != NULL, 0)) { \ + assert (P->fd_nextsize->bk_nextsize == P); \ + assert (P->bk_nextsize->fd_nextsize == P); \ + if (FD->fd_nextsize == NULL) { \ + if (P->fd_nextsize == P) \ + FD->fd_nextsize = FD->bk_nextsize = FD; \ + else { \ + FD->fd_nextsize = P->fd_nextsize; \ + FD->bk_nextsize = P->bk_nextsize; \ + P->fd_nextsize->bk_nextsize = FD; \ + P->bk_nextsize->fd_nextsize = FD; \ + } \ + } else { \ + P->fd_nextsize->bk_nextsize = P->bk_nextsize; \ + P->bk_nextsize->fd_nextsize = P->fd_nextsize; \ + } \ + } \ + } \ } /* - Indexing + Indexing Bins for sizes < 512 bytes contain chunks of all the same size, spaced 8 bytes apart. Larger bins are approximately logarithmically spaced: @@ -1455,7 +1455,7 @@ typedef struct malloc_chunk* mbinptr; Bin 0 does not exist. Bin 1 is the unordered list; if that would be a valid chunk size the small bins are bumped up one. -*/ + */ #define NBINS 128 #define NSMALLBINS 64 @@ -1464,38 +1464,38 @@ typedef struct malloc_chunk* mbinptr; #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH) #define in_smallbin_range(sz) \ - ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE) + ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE) #define smallbin_index(sz) \ - ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \ + ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\ + SMALLBIN_CORRECTION) #define largebin_index_32(sz) \ -(((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \ - ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ - ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ - ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ - ((((unsigned long)(sz)) >> 18) <= 2)? 
124 + (((unsigned long)(sz)) >> 18): \ - 126) + (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\ + ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ + ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ + ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ + ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ + 126) #define largebin_index_32_big(sz) \ -(((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \ - ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ - ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ - ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ - ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ - 126) + (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\ + ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ + ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ + ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ + ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ + 126) // XXX It remains to be seen whether it is good to keep the widths of // XXX the buckets the same or whether it should be scaled by a factor // XXX of two as well. #define largebin_index_64(sz) \ -(((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \ - ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \ - ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \ - ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \ - ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \ - 126) + (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\ + ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ + ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ + ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ + ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ + 126) #define largebin_index(sz) \ (SIZE_SZ == 8 ? largebin_index_64 (sz) \ @@ -1503,11 +1503,11 @@ typedef struct malloc_chunk* mbinptr; : largebin_index_32 (sz)) #define bin_index(sz) \ - ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz)) + ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz)) /* - Unsorted chunks + Unsorted chunks All remainders from chunk splits, as well as all returned chunks, are first placed in the "unsorted" bin. They are then placed @@ -1518,13 +1518,13 @@ typedef struct malloc_chunk* mbinptr; The NON_MAIN_ARENA flag is never set for unsorted chunks, so it does not have to be taken into account in size comparisons. -*/ + */ /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ -#define unsorted_chunks(M) (bin_at(M, 1)) +#define unsorted_chunks(M) (bin_at (M, 1)) /* - Top + Top The top-most available chunk (i.e., the one bordering the end of available memory) is treated specially. It is never included in @@ -1539,13 +1539,13 @@ typedef struct malloc_chunk* mbinptr; interval between initialization and the first call to sysmalloc. (This is somewhat delicate, since it relies on the 2 preceding words to be zero during this interval as well.) 
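   As a worked check of the smallbin_index / largebin_index_64 macros shown
   above -- assuming SIZE_SZ == 8, SMALLBIN_WIDTH == 16 and
   SMALLBIN_CORRECTION == 0, so MIN_LARGE_SIZE is 1024 -- the hypothetical
   toy_bin_index below reproduces the same mapping for a few sample sizes.

     #include <stdio.h>

     static unsigned int
     toy_bin_index (unsigned long sz)
     {
       if (sz < 1024)                        /* in_smallbin_range */
         return (unsigned int) (sz >> 4);    /* smallbin_index */
       if ((sz >> 6) <= 48)                  /* largebin_index_64 */
         return 48 + (unsigned int) (sz >> 6);
       if ((sz >> 9) <= 20)
         return 91 + (unsigned int) (sz >> 9);
       if ((sz >> 12) <= 10)
         return 110 + (unsigned int) (sz >> 12);
       if ((sz >> 15) <= 4)
         return 119 + (unsigned int) (sz >> 15);
       if ((sz >> 18) <= 2)
         return 124 + (unsigned int) (sz >> 18);
       return 126;
     }

     int
     main (void)
     {
       /* 32 -> small bin 2; 1024 -> first large bin 64 (48 + 16);
          300000 -> 300000 >> 18 == 1, so bin 124 + 1 == 125.  */
       printf ("%u %u %u\n",
               toy_bin_index (32), toy_bin_index (1024),
               toy_bin_index (300000));
       return 0;
     }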
-*/ + */ /* Conveniently, the unsorted bin can be used as dummy top on first call */ -#define initial_top(M) (unsorted_chunks(M)) +#define initial_top(M) (unsorted_chunks (M)) /* - Binmap + Binmap To help compensate for the large number of bins, a one-level index structure is used for bin-by-bin searching. `binmap' is a @@ -1553,7 +1553,7 @@ typedef struct malloc_chunk* mbinptr; be skipped over during during traversals. The bits are NOT always cleared as soon as bins are empty, but instead only when they are noticed to be empty during traversal in malloc. -*/ + */ /* Conservatively use 32 bits per map word, even if on 64bit system */ #define BINMAPSHIFT 5 @@ -1561,14 +1561,14 @@ typedef struct malloc_chunk* mbinptr; #define BINMAPSIZE (NBINS / BITSPERMAP) #define idx2block(i) ((i) >> BINMAPSHIFT) -#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1)))) +#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1)))) -#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i)) -#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i))) -#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i)) +#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i)) +#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i))) +#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i)) /* - Fastbins + Fastbins An array of lists holding recently freed small chunks. Fastbins are not doubly linked. It is faster to single-link them, and @@ -1582,69 +1582,69 @@ typedef struct malloc_chunk* mbinptr; be consolidated with other free chunks. malloc_consolidate releases all chunks in fastbins and consolidates them with other free chunks. -*/ + */ -typedef struct malloc_chunk* mfastbinptr; +typedef struct malloc_chunk *mfastbinptr; #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx]) /* offset 2 to use otherwise unindexable first 2 bins */ #define fastbin_index(sz) \ - ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) + ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) /* The maximum fastbin request size we support */ #define MAX_FAST_SIZE (80 * SIZE_SZ / 4) -#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1) +#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1) /* - FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() - that triggers automatic consolidation of possibly-surrounding - fastbin chunks. This is a heuristic, so the exact value should not - matter too much. It is defined at half the default trim threshold as a - compromise heuristic to only attempt consolidation if it is likely - to lead to trimming. However, it is not dynamically tunable, since - consolidation reduces fragmentation surrounding large chunks even - if trimming is not used. -*/ + FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() + that triggers automatic consolidation of possibly-surrounding + fastbin chunks. This is a heuristic, so the exact value should not + matter too much. It is defined at half the default trim threshold as a + compromise heuristic to only attempt consolidation if it is likely + to lead to trimming. However, it is not dynamically tunable, since + consolidation reduces fragmentation surrounding large chunks even + if trimming is not used. + */ #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL) /* - Since the lowest 2 bits in max_fast don't matter in size comparisons, - they are used as flags. -*/ + Since the lowest 2 bits in max_fast don't matter in size comparisons, + they are used as flags. 
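   A small standalone sketch of the inverted-flag convention spelled out in
   the comments that follow (FASTCHUNKS_BIT, NONCONTIGUOUS_BIT); the toy_*
   names are stand-ins, and in the real code the bits live in av->flags.

     #include <stdio.h>

     #define TOY_FASTCHUNKS_BIT    (1U)
     #define TOY_NONCONTIGUOUS_BIT (2U)

     /* Inverted sense: a clear bit means "may have fast chunks" and
        "contiguous", so a zero-filled static arena needs no setup.  */
     #define toy_have_fastchunks(f) (((f) & TOY_FASTCHUNKS_BIT) == 0)
     #define toy_contiguous(f)      (((f) & TOY_NONCONTIGUOUS_BIT) == 0)

     int
     main (void)
     {
       unsigned int flags = 0;      /* what a zero-filled arena starts with */

       printf ("%d %d\n",
               toy_have_fastchunks (flags), toy_contiguous (flags));

       flags |= TOY_FASTCHUNKS_BIT; /* what clear_fastchunks does after
                                       malloc_consolidate empties the bins */
       printf ("%d\n", toy_have_fastchunks (flags));
       return 0;
     }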
+ */ /* - FASTCHUNKS_BIT held in max_fast indicates that there are probably - some fastbin chunks. It is set true on entering a chunk into any - fastbin, and cleared only in malloc_consolidate. + FASTCHUNKS_BIT held in max_fast indicates that there are probably + some fastbin chunks. It is set true on entering a chunk into any + fastbin, and cleared only in malloc_consolidate. - The truth value is inverted so that have_fastchunks will be true - upon startup (since statics are zero-filled), simplifying - initialization checks. -*/ + The truth value is inverted so that have_fastchunks will be true + upon startup (since statics are zero-filled), simplifying + initialization checks. + */ #define FASTCHUNKS_BIT (1U) -#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0) +#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0) #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT) #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT) /* - NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous - regions. Otherwise, contiguity is exploited in merging together, - when possible, results from consecutive MORECORE calls. + NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous + regions. Otherwise, contiguity is exploited in merging together, + when possible, results from consecutive MORECORE calls. - The initial value comes from MORECORE_CONTIGUOUS, but is - changed dynamically if mmap is ever used as an sbrk substitute. -*/ + The initial value comes from MORECORE_CONTIGUOUS, but is + changed dynamically if mmap is ever used as an sbrk substitute. + */ #define NONCONTIGUOUS_BIT (2U) -#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0) -#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0) -#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT) +#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0) +#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0) +#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT) #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT) /* @@ -1652,19 +1652,20 @@ typedef struct malloc_chunk* mfastbinptr; Use impossibly small value if 0. Precondition: there are no existing fastbin chunks. Setting the value clears fastchunk bit but preserves noncontiguous bit. -*/ + */ #define set_max_fast(s) \ global_max_fast = (((s) == 0) \ - ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK)) + ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK)) #define get_max_fast() global_max_fast /* ----------- Internal state representation and initialization ----------- -*/ + */ -struct malloc_state { +struct malloc_state +{ /* Serialize access. 
*/ mutex_t mutex; @@ -1677,19 +1678,19 @@ struct malloc_state { #endif /* Fastbins */ - mfastbinptr fastbinsY[NFASTBINS]; + mfastbinptr fastbinsY[NFASTBINS]; /* Base of the topmost chunk -- not otherwise kept in a bin */ - mchunkptr top; + mchunkptr top; /* The remainder from the most recent split of a small request */ - mchunkptr last_remainder; + mchunkptr last_remainder; /* Normal bins packed as described above */ - mchunkptr bins[NBINS * 2 - 2]; + mchunkptr bins[NBINS * 2 - 2]; /* Bitmap of bins */ - unsigned int binmap[BINMAPSIZE]; + unsigned int binmap[BINMAPSIZE]; /* Linked list */ struct malloc_state *next; @@ -1702,32 +1703,33 @@ struct malloc_state { INTERNAL_SIZE_T max_system_mem; }; -struct malloc_par { +struct malloc_par +{ /* Tunable parameters */ - unsigned long trim_threshold; - INTERNAL_SIZE_T top_pad; - INTERNAL_SIZE_T mmap_threshold; - INTERNAL_SIZE_T arena_test; - INTERNAL_SIZE_T arena_max; + unsigned long trim_threshold; + INTERNAL_SIZE_T top_pad; + INTERNAL_SIZE_T mmap_threshold; + INTERNAL_SIZE_T arena_test; + INTERNAL_SIZE_T arena_max; /* Memory map support */ - int n_mmaps; - int n_mmaps_max; - int max_n_mmaps; + int n_mmaps; + int n_mmaps_max; + int max_n_mmaps; /* the mmap_threshold is dynamic, until the user sets it manually, at which point we need to disable any dynamic behavior. */ - int no_dyn_threshold; + int no_dyn_threshold; /* Statistics */ - INTERNAL_SIZE_T mmapped_mem; + INTERNAL_SIZE_T mmapped_mem; /*INTERNAL_SIZE_T sbrked_mem;*/ /*INTERNAL_SIZE_T max_sbrked_mem;*/ - INTERNAL_SIZE_T max_mmapped_mem; - INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */ + INTERNAL_SIZE_T max_mmapped_mem; + INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */ /* First address handed out by MORECORE/sbrk. */ - char* sbrk_base; + char *sbrk_base; }; /* There are several instances of this struct ("arenas") in this @@ -1737,22 +1739,22 @@ struct malloc_par { is initialized to all zeroes (as is true of C statics). */ static struct malloc_state main_arena = - { - .mutex = MUTEX_INITIALIZER, - .next = &main_arena - }; +{ + .mutex = MUTEX_INITIALIZER, + .next = &main_arena +}; /* There is only one instance of the malloc parameters. */ static struct malloc_par mp_ = - { - .top_pad = DEFAULT_TOP_PAD, - .n_mmaps_max = DEFAULT_MMAP_MAX, - .mmap_threshold = DEFAULT_MMAP_THRESHOLD, - .trim_threshold = DEFAULT_TRIM_THRESHOLD, -# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8)) - .arena_test = NARENAS_FROM_NCORES (1) - }; +{ + .top_pad = DEFAULT_TOP_PAD, + .n_mmaps_max = DEFAULT_MMAP_MAX, + .mmap_threshold = DEFAULT_MMAP_THRESHOLD, + .trim_threshold = DEFAULT_TRIM_THRESHOLD, +#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8)) + .arena_test = NARENAS_FROM_NCORES (1) +}; /* Non public mallopt parameters. */ @@ -1764,44 +1766,46 @@ static struct malloc_par mp_ = static INTERNAL_SIZE_T global_max_fast; /* - Initialize a malloc_state struct. + Initialize a malloc_state struct. - This is called only from within malloc_consolidate, which needs - be called in the same contexts anyway. It is never called directly - outside of malloc_consolidate because some optimizing compilers try - to inline it at all call points, which turns out not to be an - optimization at all. (Inlining it in malloc_consolidate is fine though.) -*/ + This is called only from within malloc_consolidate, which needs + be called in the same contexts anyway. 
It is never called directly + outside of malloc_consolidate because some optimizing compilers try + to inline it at all call points, which turns out not to be an + optimization at all. (Inlining it in malloc_consolidate is fine though.) + */ -static void malloc_init_state(mstate av) +static void +malloc_init_state (mstate av) { - int i; + int i; mbinptr bin; /* Establish circular links for normal bins */ - for (i = 1; i < NBINS; ++i) { - bin = bin_at(av,i); - bin->fd = bin->bk = bin; - } + for (i = 1; i < NBINS; ++i) + { + bin = bin_at (av, i); + bin->fd = bin->bk = bin; + } #if MORECORE_CONTIGUOUS if (av != &main_arena) #endif - set_noncontiguous(av); + set_noncontiguous (av); if (av == &main_arena) - set_max_fast(DEFAULT_MXFAST); + set_max_fast (DEFAULT_MXFAST); av->flags |= FASTCHUNKS_BIT; - av->top = initial_top(av); + av->top = initial_top (av); } /* Other internal utilities operating on mstates -*/ + */ -static void* sysmalloc(INTERNAL_SIZE_T, mstate); -static int systrim(size_t, mstate); -static void malloc_consolidate(mstate); +static void *sysmalloc (INTERNAL_SIZE_T, mstate); +static int systrim (size_t, mstate); +static void malloc_consolidate (mstate); /* -------------- Early definitions for debugging hooks ---------------- */ @@ -1815,31 +1819,31 @@ static void malloc_consolidate(mstate); #endif /* Forward declarations. */ -static void* malloc_hook_ini (size_t sz, - const void *caller) __THROW; -static void* realloc_hook_ini (void* ptr, size_t sz, - const void *caller) __THROW; -static void* memalign_hook_ini (size_t alignment, size_t sz, - const void *caller) __THROW; +static void *malloc_hook_ini (size_t sz, + const void *caller) __THROW; +static void *realloc_hook_ini (void *ptr, size_t sz, + const void *caller) __THROW; +static void *memalign_hook_ini (size_t alignment, size_t sz, + const void *caller) __THROW; void weak_variable (*__malloc_initialize_hook) (void) = NULL; void weak_variable (*__free_hook) (void *__ptr, - const void *) = NULL; + const void *) = NULL; void *weak_variable (*__malloc_hook) - (size_t __size, const void *) = malloc_hook_ini; + (size_t __size, const void *) = malloc_hook_ini; void *weak_variable (*__realloc_hook) - (void *__ptr, size_t __size, const void *) - = realloc_hook_ini; + (void *__ptr, size_t __size, const void *) + = realloc_hook_ini; void *weak_variable (*__memalign_hook) - (size_t __alignment, size_t __size, const void *) - = memalign_hook_ini; + (size_t __alignment, size_t __size, const void *) + = memalign_hook_ini; void weak_variable (*__after_morecore_hook) (void) = NULL; /* ---------------- Error behavior ------------------------------------ */ #ifndef DEFAULT_CHECK_ACTION -#define DEFAULT_CHECK_ACTION 3 +# define DEFAULT_CHECK_ACTION 3 #endif static int check_action = DEFAULT_CHECK_ACTION; @@ -1871,207 +1875,220 @@ free_perturb (char *p, size_t n) #include "arena.c" /* - Debugging support + Debugging support - These routines make a number of assertions about the states - of data structures that should be true at all times. If any - are not true, it's very likely that a user program has somehow - trashed memory. (It's also possible that there is a coding error - in malloc. In which case, please report it!) -*/ + These routines make a number of assertions about the states + of data structures that should be true at all times. If any + are not true, it's very likely that a user program has somehow + trashed memory. (It's also possible that there is a coding error + in malloc. In which case, please report it!) + */ -#if ! 
MALLOC_DEBUG +#if !MALLOC_DEBUG -#define check_chunk(A,P) -#define check_free_chunk(A,P) -#define check_inuse_chunk(A,P) -#define check_remalloced_chunk(A,P,N) -#define check_malloced_chunk(A,P,N) -#define check_malloc_state(A) +# define check_chunk(A, P) +# define check_free_chunk(A, P) +# define check_inuse_chunk(A, P) +# define check_remalloced_chunk(A, P, N) +# define check_malloced_chunk(A, P, N) +# define check_malloc_state(A) #else -#define check_chunk(A,P) do_check_chunk(A,P) -#define check_free_chunk(A,P) do_check_free_chunk(A,P) -#define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P) -#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N) -#define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N) -#define check_malloc_state(A) do_check_malloc_state(A) +# define check_chunk(A, P) do_check_chunk (A, P) +# define check_free_chunk(A, P) do_check_free_chunk (A, P) +# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P) +# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N) +# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N) +# define check_malloc_state(A) do_check_malloc_state (A) /* - Properties of all chunks -*/ + Properties of all chunks + */ -static void do_check_chunk(mstate av, mchunkptr p) +static void +do_check_chunk (mstate av, mchunkptr p) { - unsigned long sz = chunksize(p); + unsigned long sz = chunksize (p); /* min and max possible addresses assuming contiguous allocation */ - char* max_address = (char*)(av->top) + chunksize(av->top); - char* min_address = max_address - av->system_mem; + char *max_address = (char *) (av->top) + chunksize (av->top); + char *min_address = max_address - av->system_mem; - if (!chunk_is_mmapped(p)) { - - /* Has legal address ... */ - if (p != av->top) { - if (contiguous(av)) { - assert(((char*)p) >= min_address); - assert(((char*)p + sz) <= ((char*)(av->top))); - } - } - else { - /* top size is always at least MINSIZE */ - assert((unsigned long)(sz) >= MINSIZE); - /* top predecessor always marked inuse */ - assert(prev_inuse(p)); + if (!chunk_is_mmapped (p)) + { + /* Has legal address ... 
*/ + if (p != av->top) + { + if (contiguous (av)) + { + assert (((char *) p) >= min_address); + assert (((char *) p + sz) <= ((char *) (av->top))); + } + } + else + { + /* top size is always at least MINSIZE */ + assert ((unsigned long) (sz) >= MINSIZE); + /* top predecessor always marked inuse */ + assert (prev_inuse (p)); + } } - - } - else { - /* address is outside main heap */ - if (contiguous(av) && av->top != initial_top(av)) { - assert(((char*)p) < min_address || ((char*)p) >= max_address); + else + { + /* address is outside main heap */ + if (contiguous (av) && av->top != initial_top (av)) + { + assert (((char *) p) < min_address || ((char *) p) >= max_address); + } + /* chunk is page-aligned */ + assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0); + /* mem is aligned */ + assert (aligned_OK (chunk2mem (p))); } - /* chunk is page-aligned */ - assert(((p->prev_size + sz) & (GLRO(dl_pagesize)-1)) == 0); - /* mem is aligned */ - assert(aligned_OK(chunk2mem(p))); - } } /* - Properties of free chunks -*/ + Properties of free chunks + */ -static void do_check_free_chunk(mstate av, mchunkptr p) +static void +do_check_free_chunk (mstate av, mchunkptr p) { - INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); - mchunkptr next = chunk_at_offset(p, sz); + INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA); + mchunkptr next = chunk_at_offset (p, sz); - do_check_chunk(av, p); + do_check_chunk (av, p); /* Chunk must claim to be free ... */ - assert(!inuse(p)); - assert (!chunk_is_mmapped(p)); + assert (!inuse (p)); + assert (!chunk_is_mmapped (p)); /* Unless a special marker, must have OK fields */ - if ((unsigned long)(sz) >= MINSIZE) - { - assert((sz & MALLOC_ALIGN_MASK) == 0); - assert(aligned_OK(chunk2mem(p))); - /* ... matching footer field */ - assert(next->prev_size == sz); - /* ... and is fully consolidated */ - assert(prev_inuse(p)); - assert (next == av->top || inuse(next)); - - /* ... and has minimally sane links */ - assert(p->fd->bk == p); - assert(p->bk->fd == p); - } + if ((unsigned long) (sz) >= MINSIZE) + { + assert ((sz & MALLOC_ALIGN_MASK) == 0); + assert (aligned_OK (chunk2mem (p))); + /* ... matching footer field */ + assert (next->prev_size == sz); + /* ... and is fully consolidated */ + assert (prev_inuse (p)); + assert (next == av->top || inuse (next)); + + /* ... and has minimally sane links */ + assert (p->fd->bk == p); + assert (p->bk->fd == p); + } else /* markers are always of size SIZE_SZ */ - assert(sz == SIZE_SZ); + assert (sz == SIZE_SZ); } /* - Properties of inuse chunks -*/ + Properties of inuse chunks + */ -static void do_check_inuse_chunk(mstate av, mchunkptr p) +static void +do_check_inuse_chunk (mstate av, mchunkptr p) { mchunkptr next; - do_check_chunk(av, p); + do_check_chunk (av, p); - if (chunk_is_mmapped(p)) + if (chunk_is_mmapped (p)) return; /* mmapped chunks have no next/prev */ /* Check whether it claims to be in use ... */ - assert(inuse(p)); + assert (inuse (p)); - next = next_chunk(p); + next = next_chunk (p); /* ... and is surrounded by OK chunks. - Since more things can be checked with free chunks than inuse ones, - if an inuse chunk borders them and debug is on, it's worth doing them. 
- */ - if (!prev_inuse(p)) { - /* Note that we cannot even look at prev unless it is not inuse */ - mchunkptr prv = prev_chunk(p); - assert(next_chunk(prv) == p); - do_check_free_chunk(av, prv); - } + Since more things can be checked with free chunks than inuse ones, + if an inuse chunk borders them and debug is on, it's worth doing them. + */ + if (!prev_inuse (p)) + { + /* Note that we cannot even look at prev unless it is not inuse */ + mchunkptr prv = prev_chunk (p); + assert (next_chunk (prv) == p); + do_check_free_chunk (av, prv); + } - if (next == av->top) { - assert(prev_inuse(next)); - assert(chunksize(next) >= MINSIZE); - } - else if (!inuse(next)) - do_check_free_chunk(av, next); + if (next == av->top) + { + assert (prev_inuse (next)); + assert (chunksize (next) >= MINSIZE); + } + else if (!inuse (next)) + do_check_free_chunk (av, next); } /* - Properties of chunks recycled from fastbins -*/ + Properties of chunks recycled from fastbins + */ -static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s) +static void +do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) { - INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA); + INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA); - if (!chunk_is_mmapped(p)) { - assert(av == arena_for_chunk(p)); - if (chunk_non_main_arena(p)) - assert(av != &main_arena); - else - assert(av == &main_arena); - } + if (!chunk_is_mmapped (p)) + { + assert (av == arena_for_chunk (p)); + if (chunk_non_main_arena (p)) + assert (av != &main_arena); + else + assert (av == &main_arena); + } - do_check_inuse_chunk(av, p); + do_check_inuse_chunk (av, p); /* Legal size ... */ - assert((sz & MALLOC_ALIGN_MASK) == 0); - assert((unsigned long)(sz) >= MINSIZE); + assert ((sz & MALLOC_ALIGN_MASK) == 0); + assert ((unsigned long) (sz) >= MINSIZE); /* ... and alignment */ - assert(aligned_OK(chunk2mem(p))); + assert (aligned_OK (chunk2mem (p))); /* chunk is less than MINSIZE more than request */ - assert((long)(sz) - (long)(s) >= 0); - assert((long)(sz) - (long)(s + MINSIZE) < 0); + assert ((long) (sz) - (long) (s) >= 0); + assert ((long) (sz) - (long) (s + MINSIZE) < 0); } /* - Properties of nonrecycled chunks at the point they are malloced -*/ + Properties of nonrecycled chunks at the point they are malloced + */ -static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s) +static void +do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) { /* same as recycled case ... */ - do_check_remalloced_chunk(av, p, s); + do_check_remalloced_chunk (av, p, s); /* - ... plus, must obey implementation invariant that prev_inuse is - always true of any allocated chunk; i.e., that each allocated - chunk borders either a previously allocated and still in-use - chunk, or the base of its memory arena. This is ensured - by making all allocations from the `lowest' part of any found - chunk. This does not necessarily hold however for chunks - recycled via fastbins. - */ - - assert(prev_inuse(p)); + ... plus, must obey implementation invariant that prev_inuse is + always true of any allocated chunk; i.e., that each allocated + chunk borders either a previously allocated and still in-use + chunk, or the base of its memory arena. This is ensured + by making all allocations from the `lowest' part of any found + chunk. This does not necessarily hold however for chunks + recycled via fastbins. + */ + + assert (prev_inuse (p)); } /* - Properties of malloc_state. + Properties of malloc_state. 
- This may be useful for debugging malloc, as well as detecting user - programmer errors that somehow write into malloc_state. + This may be useful for debugging malloc, as well as detecting user + programmer errors that somehow write into malloc_state. - If you are extending or experimenting with this malloc, you can - probably figure out how to hack this routine to print out or - display chunk addresses, sizes, bins, and other instrumentation. -*/ + If you are extending or experimenting with this malloc, you can + probably figure out how to hack this routine to print out or + display chunk addresses, sizes, bins, and other instrumentation. + */ -static void do_check_malloc_state(mstate av) +static void +do_check_malloc_state (mstate av) { int i; mchunkptr p; @@ -2083,126 +2100,132 @@ static void do_check_malloc_state(mstate av) int max_fast_bin; /* internal size_t must be no wider than pointer type */ - assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*)); + assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *)); /* alignment is a power of 2 */ - assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0); + assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0); /* cannot run remaining checks until fully initialized */ - if (av->top == 0 || av->top == initial_top(av)) + if (av->top == 0 || av->top == initial_top (av)) return; /* pagesize is a power of 2 */ - assert((GLRO(dl_pagesize) & (GLRO(dl_pagesize)-1)) == 0); + assert ((GLRO (dl_pagesize) & (GLRO (dl_pagesize) - 1)) == 0); /* A contiguous main_arena is consistent with sbrk_base. */ - if (av == &main_arena && contiguous(av)) - assert((char*)mp_.sbrk_base + av->system_mem == - (char*)av->top + chunksize(av->top)); + if (av == &main_arena && contiguous (av)) + assert ((char *) mp_.sbrk_base + av->system_mem == + (char *) av->top + chunksize (av->top)); /* properties of fastbins */ /* max_fast is in allowed range */ - assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE)); - - max_fast_bin = fastbin_index(get_max_fast ()); - - for (i = 0; i < NFASTBINS; ++i) { - p = fastbin (av, i); - - /* The following test can only be performed for the main arena. - While mallopt calls malloc_consolidate to get rid of all fast - bins (especially those larger than the new maximum) this does - only happen for the main arena. Trying to do this for any - other arena would mean those arenas have to be locked and - malloc_consolidate be called for them. This is excessive. And - even if this is acceptable to somebody it still cannot solve - the problem completely since if the arena is locked a - concurrent malloc call might create a new arena which then - could use the newly invalid fast bins. */ - - /* all bins past max_fast are empty */ - if (av == &main_arena && i > max_fast_bin) - assert(p == 0); - - while (p != 0) { - /* each chunk claims to be inuse */ - do_check_inuse_chunk(av, p); - total += chunksize(p); - /* chunk belongs in this bin */ - assert(fastbin_index(chunksize(p)) == i); - p = p->fd; + assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE)); + + max_fast_bin = fastbin_index (get_max_fast ()); + + for (i = 0; i < NFASTBINS; ++i) + { + p = fastbin (av, i); + + /* The following test can only be performed for the main arena. + While mallopt calls malloc_consolidate to get rid of all fast + bins (especially those larger than the new maximum) this does + only happen for the main arena. Trying to do this for any + other arena would mean those arenas have to be locked and + malloc_consolidate be called for them. This is excessive. 
And + even if this is acceptable to somebody it still cannot solve + the problem completely since if the arena is locked a + concurrent malloc call might create a new arena which then + could use the newly invalid fast bins. */ + + /* all bins past max_fast are empty */ + if (av == &main_arena && i > max_fast_bin) + assert (p == 0); + + while (p != 0) + { + /* each chunk claims to be inuse */ + do_check_inuse_chunk (av, p); + total += chunksize (p); + /* chunk belongs in this bin */ + assert (fastbin_index (chunksize (p)) == i); + p = p->fd; + } } - } if (total != 0) - assert(have_fastchunks(av)); - else if (!have_fastchunks(av)) - assert(total == 0); + assert (have_fastchunks (av)); + else if (!have_fastchunks (av)) + assert (total == 0); /* check normal bins */ - for (i = 1; i < NBINS; ++i) { - b = bin_at(av,i); - - /* binmap is accurate (except for bin 1 == unsorted_chunks) */ - if (i >= 2) { - unsigned int binbit = get_binmap(av,i); - int empty = last(b) == b; - if (!binbit) - assert(empty); - else if (!empty) - assert(binbit); - } - - for (p = last(b); p != b; p = p->bk) { - /* each chunk claims to be free */ - do_check_free_chunk(av, p); - size = chunksize(p); - total += size; - if (i >= 2) { - /* chunk belongs in bin */ - idx = bin_index(size); - assert(idx == i); - /* lists are sorted */ - assert(p->bk == b || - (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p)); - - if (!in_smallbin_range(size)) - { - if (p->fd_nextsize != NULL) - { - if (p->fd_nextsize == p) - assert (p->bk_nextsize == p); - else - { - if (p->fd_nextsize == first (b)) - assert (chunksize (p) < chunksize (p->fd_nextsize)); - else - assert (chunksize (p) > chunksize (p->fd_nextsize)); - - if (p == first (b)) - assert (chunksize (p) > chunksize (p->bk_nextsize)); - else - assert (chunksize (p) < chunksize (p->bk_nextsize)); - } - } - else - assert (p->bk_nextsize == NULL); - } - } else if (!in_smallbin_range(size)) - assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL); - /* chunk is followed by a legal chain of inuse chunks */ - for (q = next_chunk(p); - (q != av->top && inuse(q) && - (unsigned long)(chunksize(q)) >= MINSIZE); - q = next_chunk(q)) - do_check_inuse_chunk(av, q); + for (i = 1; i < NBINS; ++i) + { + b = bin_at (av, i); + + /* binmap is accurate (except for bin 1 == unsorted_chunks) */ + if (i >= 2) + { + unsigned int binbit = get_binmap (av, i); + int empty = last (b) == b; + if (!binbit) + assert (empty); + else if (!empty) + assert (binbit); + } + + for (p = last (b); p != b; p = p->bk) + { + /* each chunk claims to be free */ + do_check_free_chunk (av, p); + size = chunksize (p); + total += size; + if (i >= 2) + { + /* chunk belongs in bin */ + idx = bin_index (size); + assert (idx == i); + /* lists are sorted */ + assert (p->bk == b || + (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p)); + + if (!in_smallbin_range (size)) + { + if (p->fd_nextsize != NULL) + { + if (p->fd_nextsize == p) + assert (p->bk_nextsize == p); + else + { + if (p->fd_nextsize == first (b)) + assert (chunksize (p) < chunksize (p->fd_nextsize)); + else + assert (chunksize (p) > chunksize (p->fd_nextsize)); + + if (p == first (b)) + assert (chunksize (p) > chunksize (p->bk_nextsize)); + else + assert (chunksize (p) < chunksize (p->bk_nextsize)); + } + } + else + assert (p->bk_nextsize == NULL); + } + } + else if (!in_smallbin_range (size)) + assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL); + /* chunk is followed by a legal chain of inuse chunks */ + for (q = next_chunk (p); + (q != 
av->top && inuse (q) && + (unsigned long) (chunksize (q)) >= MINSIZE); + q = next_chunk (q)) + do_check_inuse_chunk (av, q); + } } - } /* top chunk is OK */ - check_chunk(av, av->top); - + check_chunk (av, av->top); } #endif @@ -2214,461 +2237,482 @@ static void do_check_malloc_state(mstate av) /* ----------- Routines dealing with system allocation -------------- */ /* - sysmalloc handles malloc cases requiring more memory from the system. - On entry, it is assumed that av->top does not have enough - space to service request for nb bytes, thus requiring that av->top - be extended or replaced. -*/ + sysmalloc handles malloc cases requiring more memory from the system. + On entry, it is assumed that av->top does not have enough + space to service request for nb bytes, thus requiring that av->top + be extended or replaced. + */ -static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av) +static void * +sysmalloc (INTERNAL_SIZE_T nb, mstate av) { - mchunkptr old_top; /* incoming value of av->top */ + mchunkptr old_top; /* incoming value of av->top */ INTERNAL_SIZE_T old_size; /* its size */ - char* old_end; /* its end address */ + char *old_end; /* its end address */ - long size; /* arg to first MORECORE or mmap call */ - char* brk; /* return value from MORECORE */ + long size; /* arg to first MORECORE or mmap call */ + char *brk; /* return value from MORECORE */ - long correction; /* arg to 2nd MORECORE call */ - char* snd_brk; /* 2nd return val */ + long correction; /* arg to 2nd MORECORE call */ + char *snd_brk; /* 2nd return val */ INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ - char* aligned_brk; /* aligned offset into brk */ + char *aligned_brk; /* aligned offset into brk */ - mchunkptr p; /* the allocated/returned chunk */ - mchunkptr remainder; /* remainder from allocation */ - unsigned long remainder_size; /* its size */ + mchunkptr p; /* the allocated/returned chunk */ + mchunkptr remainder; /* remainder from allocation */ + unsigned long remainder_size; /* its size */ - size_t pagemask = GLRO(dl_pagesize) - 1; - bool tried_mmap = false; + size_t pagemask = GLRO (dl_pagesize) - 1; + bool tried_mmap = false; /* - If have mmap, and the request size meets the mmap threshold, and - the system supports mmap, and there are few enough currently - allocated mmapped regions, try to directly map this request - rather than expanding top. - */ - - if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) && - (mp_.n_mmaps < mp_.n_mmaps_max)) { - - char* mm; /* return value from mmap call*/ - - try_mmap: - /* - Round up size to nearest page. For mmapped chunks, the overhead - is one SIZE_SZ unit larger than for normal chunks, because there - is no following chunk whose prev_size field could be used. - - See the front_misalign handling below, for glibc there is no - need for further alignments unless we have have high alignment. - */ - if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) - size = (nb + SIZE_SZ + pagemask) & ~pagemask; - else - size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; - tried_mmap = true; - - /* Don't try if size wraps around 0 */ - if ((unsigned long)(size) > (unsigned long)(nb)) { - - mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0)); - - if (mm != MAP_FAILED) { - - /* - The offset to the start of the mmapped region is stored - in the prev_size field of the chunk. 
This allows us to adjust - returned start address to meet alignment requirements here - and in memalign(), and still be able to compute proper - address argument for later munmap in free() and realloc(). - */ - - if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) - { - /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and - MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page - aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */ - assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0); - front_misalign = 0; - } - else - front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK; - if (front_misalign > 0) { - correction = MALLOC_ALIGNMENT - front_misalign; - p = (mchunkptr)(mm + correction); - p->prev_size = correction; - set_head(p, (size - correction) |IS_MMAPPED); - } - else - { - p = (mchunkptr)mm; - set_head(p, size|IS_MMAPPED); - } - - /* update statistics */ - - int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1; - atomic_max (&mp_.max_n_mmaps, new); - - unsigned long sum; - sum = atomic_exchange_and_add(&mp_.mmapped_mem, size) + size; - atomic_max (&mp_.max_mmapped_mem, sum); - - check_chunk(av, p); + If have mmap, and the request size meets the mmap threshold, and + the system supports mmap, and there are few enough currently + allocated mmapped regions, try to directly map this request + rather than expanding top. + */ + + if ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) && + (mp_.n_mmaps < mp_.n_mmaps_max)) + { + char *mm; /* return value from mmap call*/ - return chunk2mem(p); - } + try_mmap: + /* + Round up size to nearest page. For mmapped chunks, the overhead + is one SIZE_SZ unit larger than for normal chunks, because there + is no following chunk whose prev_size field could be used. + + See the front_misalign handling below, for glibc there is no + need for further alignments unless we have have high alignment. + */ + if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) + size = (nb + SIZE_SZ + pagemask) & ~pagemask; + else + size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; + tried_mmap = true; + + /* Don't try if size wraps around 0 */ + if ((unsigned long) (size) > (unsigned long) (nb)) + { + mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0)); + + if (mm != MAP_FAILED) + { + /* + The offset to the start of the mmapped region is stored + in the prev_size field of the chunk. This allows us to adjust + returned start address to meet alignment requirements here + and in memalign(), and still be able to compute proper + address argument for later munmap in free() and realloc(). + */ + + if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) + { + /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and + MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page + aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. 
*/ + assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0); + front_misalign = 0; + } + else + front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) + { + correction = MALLOC_ALIGNMENT - front_misalign; + p = (mchunkptr) (mm + correction); + p->prev_size = correction; + set_head (p, (size - correction) | IS_MMAPPED); + } + else + { + p = (mchunkptr) mm; + set_head (p, size | IS_MMAPPED); + } + + /* update statistics */ + + int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1; + atomic_max (&mp_.max_n_mmaps, new); + + unsigned long sum; + sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size; + atomic_max (&mp_.max_mmapped_mem, sum); + + check_chunk (av, p); + + return chunk2mem (p); + } + } } - } /* Record incoming configuration of top */ - old_top = av->top; - old_size = chunksize(old_top); - old_end = (char*)(chunk_at_offset(old_top, old_size)); + old_top = av->top; + old_size = chunksize (old_top); + old_end = (char *) (chunk_at_offset (old_top, old_size)); - brk = snd_brk = (char*)(MORECORE_FAILURE); + brk = snd_brk = (char *) (MORECORE_FAILURE); /* If not the first time through, we require old_size to be at least MINSIZE and to have prev_inuse set. - */ + */ - assert((old_top == initial_top(av) && old_size == 0) || - ((unsigned long) (old_size) >= MINSIZE && - prev_inuse(old_top) && - ((unsigned long)old_end & pagemask) == 0)); + assert ((old_top == initial_top (av) && old_size == 0) || + ((unsigned long) (old_size) >= MINSIZE && + prev_inuse (old_top) && + ((unsigned long) old_end & pagemask) == 0)); /* Precondition: not enough current space to satisfy nb request */ - assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE)); - - - if (av != &main_arena) { - - heap_info *old_heap, *heap; - size_t old_heap_size; - - /* First try to extend the current heap. */ - old_heap = heap_for_ptr(old_top); - old_heap_size = old_heap->size; - if ((long) (MINSIZE + nb - old_size) > 0 - && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) { - av->system_mem += old_heap->size - old_heap_size; - arena_mem += old_heap->size - old_heap_size; - set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top) - | PREV_INUSE); - } - else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) { - /* Use a newly allocated heap. */ - heap->ar_ptr = av; - heap->prev = old_heap; - av->system_mem += heap->size; - arena_mem += heap->size; - /* Set up the new top. */ - top(av) = chunk_at_offset(heap, sizeof(*heap)); - set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE); - - /* Setup fencepost and free the old top chunk with a multiple of - MALLOC_ALIGNMENT in size. */ - /* The fencepost takes at least MINSIZE bytes, because it might - become the top chunk again later. Note that a footer is set - up, too, although the chunk is marked in use. */ - old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK; - set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE); - if (old_size >= MINSIZE) { - set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE); - set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)); - set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA); - _int_free(av, old_top, 1); - } else { - set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE); - set_foot(old_top, (old_size + 2*SIZE_SZ)); - } - } - else if (!tried_mmap) - /* We can at least try to use to mmap memory. 
*/ - goto try_mmap; - - } else { /* av == main_arena */ - - - /* Request enough space for nb + pad + overhead */ - - size = nb + mp_.top_pad + MINSIZE; - - /* - If contiguous, we can subtract out existing space that we hope to - combine with new space. We add it back later only if - we don't actually get contiguous space. - */ - - if (contiguous(av)) - size -= old_size; - - /* - Round to a multiple of page size. - If MORECORE is not contiguous, this ensures that we only call it - with whole-page arguments. And if MORECORE is contiguous and - this is not first time through, this preserves page-alignment of - previous calls. Otherwise, we correct to page-align below. - */ - - size = (size + pagemask) & ~pagemask; - - /* - Don't try to call MORECORE if argument is so big as to appear - negative. Note that since mmap takes size_t arg, it may succeed - below even if we cannot call MORECORE. - */ - - if (size > 0) { - brk = (char*)(MORECORE(size)); - LIBC_PROBE (memory_sbrk_more, 2, brk, size); - } - - if (brk != (char*)(MORECORE_FAILURE)) { - /* Call the `morecore' hook if necessary. */ - void (*hook) (void) = atomic_forced_read (__after_morecore_hook); - if (__builtin_expect (hook != NULL, 0)) - (*hook) (); - } else { - /* - If have mmap, try using it as a backup when MORECORE fails or - cannot be used. This is worth doing on systems that have "holes" in - address space, so sbrk cannot extend to give contiguous space, but - space is available elsewhere. Note that we ignore mmap max count - and threshold limits, since the space will not be used as a - segregated mmap region. - */ - - /* Cannot merge with old top, so add its size back in */ - if (contiguous(av)) - size = (size + old_size + pagemask) & ~pagemask; - - /* If we are relying on mmap as backup, then use larger units */ - if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE)) - size = MMAP_AS_MORECORE_SIZE; - - /* Don't try if size wraps around 0 */ - if ((unsigned long)(size) > (unsigned long)(nb)) { - - char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, 0)); - - if (mbrk != MAP_FAILED) { + assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE)); - /* We do not need, and cannot use, another sbrk call to find end */ - brk = mbrk; - snd_brk = brk + size; - /* - Record that we no longer have a contiguous sbrk region. - After the first time mmap is used as backup, we do not - ever rely on contiguous space since this could incorrectly - bridge regions. - */ - set_noncontiguous(av); - } - } - } - - if (brk != (char*)(MORECORE_FAILURE)) { - if (mp_.sbrk_base == 0) - mp_.sbrk_base = brk; - av->system_mem += size; - - /* - If MORECORE extends previous space, we can likewise extend top size. - */ - - if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) - set_head(old_top, (size + old_size) | PREV_INUSE); - - else if (contiguous(av) && old_size && brk < old_end) { - /* Oops! Someone else killed our space.. Can't touch anything. */ - malloc_printerr (3, "break adjusted to free malloc space", brk); + if (av != &main_arena) + { + heap_info *old_heap, *heap; + size_t old_heap_size; + + /* First try to extend the current heap. 
*/ + old_heap = heap_for_ptr (old_top); + old_heap_size = old_heap->size; + if ((long) (MINSIZE + nb - old_size) > 0 + && grow_heap (old_heap, MINSIZE + nb - old_size) == 0) + { + av->system_mem += old_heap->size - old_heap_size; + arena_mem += old_heap->size - old_heap_size; + set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top) + | PREV_INUSE); + } + else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad))) + { + /* Use a newly allocated heap. */ + heap->ar_ptr = av; + heap->prev = old_heap; + av->system_mem += heap->size; + arena_mem += heap->size; + /* Set up the new top. */ + top (av) = chunk_at_offset (heap, sizeof (*heap)); + set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE); + + /* Setup fencepost and free the old top chunk with a multiple of + MALLOC_ALIGNMENT in size. */ + /* The fencepost takes at least MINSIZE bytes, because it might + become the top chunk again later. Note that a footer is set + up, too, although the chunk is marked in use. */ + old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK; + set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE); + if (old_size >= MINSIZE) +