| field | value | date |
|---|---|---|
| author | Ondřej Bílka <neleai@seznam.cz> | 2014-01-02 09:38:18 +0100 |
| committer | Ondřej Bílka <neleai@seznam.cz> | 2014-01-02 09:40:10 +0100 |
| commit | 6c8dbf00f536d78b1937b5af6f57be47fd376344 | |
| tree | ad86d3e7433a907cac50ebbd9c39ca3402a87c6a (/malloc/malloc.c) | |
| parent | 9a3c6a6ff602c88d7155139a7d7d0000b7b7e946 | |
Reformat malloc to gnu style.
Diffstat (limited to 'malloc/malloc.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | malloc/malloc.c | 4283 |

1 file changed, 2214 insertions, 2069 deletions
```diff
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 63d1d152ab..813e94eea3 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -353,10 +353,10 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
    malloc_set_state than will returning blocks not adequately aligned for
    long double objects under -mlong-double-128.  */
-# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
-                           ? __alignof__ (long double) : 2 * SIZE_SZ)
+# define MALLOC_ALIGNMENT (2 *SIZE_SZ < __alignof__ (long double) \
+                           ? __alignof__ (long double) : 2 *SIZE_SZ)
 # else
-# define MALLOC_ALIGNMENT (2 * SIZE_SZ)
+# define MALLOC_ALIGNMENT (2 *SIZE_SZ)
 # endif
 #endif
@@ -463,10 +463,10 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
    some systems, if the application first decrements and then increments the
    break value, the contents of the reallocated space are unspecified.
-*/
+ */
 
 #ifndef MORECORE_CLEARS
-#define MORECORE_CLEARS 1
+# define MORECORE_CLEARS 1
 #endif
@@ -1232,11 +1232,11 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    Check if a request is so large that it would wrap around zero when
    padded and aligned. To simplify some other code, the bound is made
    low enough so that adding MINSIZE will also not wrap around zero.
-*/
+ */
 
 #define REQUEST_OUT_OF_RANGE(req) \
-  ((unsigned long)(req) >= \
-   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
+  ((unsigned long) (req) >= \
+   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
 
 /* pad request bytes into a usable size -- internal version */
@@ -1248,15 +1248,15 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /*  Same, except also perform argument check */
 
 #define checked_request2size(req, sz) \
-  if (REQUEST_OUT_OF_RANGE(req)) { \
-    __set_errno (ENOMEM); \
-    return 0; \
-  } \
-  (sz) = request2size(req);
+  if (REQUEST_OUT_OF_RANGE (req)) { \
+      __set_errno (ENOMEM); \
+      return 0; \
+    } \
+  (sz) = request2size (req);
 
 /*
-  --------------- Physical chunk operations ---------------
-*/
+   --------------- Physical chunk operations ---------------
+ */
 
 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
@@ -1283,49 +1283,49 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /*
-  Bits to mask off when extracting size
+   Bits to mask off when extracting size
 
-  Note: IS_MMAPPED is intentionally not masked off from size field in
-  macros for which mmapped chunks should never be seen. This should
-  cause helpful core dumps to occur if it is tried by accident by
-  people extending or adapting this malloc.
-*/
-#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
+   Note: IS_MMAPPED is intentionally not masked off from size field in
+   macros for which mmapped chunks should never be seen. This should
+   cause helpful core dumps to occur if it is tried by accident by
+   people extending or adapting this malloc.
+ */
+#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
 
 /* Get size, ignoring use bits */
 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
 
 /* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
+#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
 
 /* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
+#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
 
 /* Treat space at ptr + offset as a chunk */
-#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
 
 /* extract p's inuse bit */
-#define inuse(p)\
-((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
+#define inuse(p) \
+  ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
 
 /* set/clear chunk as being inuse without otherwise disturbing */
-#define set_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
+#define set_inuse(p) \
+  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
 
-#define clear_inuse(p)\
-((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
+#define clear_inuse(p) \
+  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
 
 /* check/set/clear inuse bits in known places */
-#define inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
+#define inuse_bit_at_offset(p, s) \
+  (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
 
-#define set_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
+#define set_inuse_bit_at_offset(p, s) \
+  (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
 
-#define clear_inuse_bit_at_offset(p, s)\
- (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
+#define clear_inuse_bit_at_offset(p, s) \
+  (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
 
 /* Set size at head, without disturbing its use bit */
@@ -1335,26 +1335,26 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define set_head(p, s) ((p)->size = (s))
 
 /* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
+#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
 
 /*
-  -------------------- Internal data structures --------------------
+   -------------------- Internal data structures --------------------
 
    All internal state is held in an instance of malloc_state defined
    below. There are no other static variables, except in two optional
    cases:
-  * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
-  * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
+   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
+   * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
      for mmap.
 
    Beware of lots of tricks that minimize the total bookkeeping space
   requirements. The result is a little over 1K bytes (for 4byte
   pointers and size_t.)
-*/
+ */
 
 /*
-  Bins
+   Bins
 
     An array of bin headers for free chunks. Each bin is doubly
     linked.  The bins are approximately proportionally (log) spaced.
@@ -1387,17 +1387,17 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     But to conserve space and improve locality, we allocate
     only the fd/bk pointers of bins, and then use repositioning tricks
     to treat these as the fields of a malloc_chunk*.
-*/
+ */
 
-typedef struct malloc_chunk* mbinptr;
+typedef struct malloc_chunk *mbinptr;
 
 /* addressing -- note that bin_at(0) does not exist */
 #define bin_at(m, i) \
   (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
-	     - offsetof (struct malloc_chunk, fd))
+             - offsetof (struct malloc_chunk, fd))
 
 /* analog of ++bin */
-#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
+#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
 
 /* Reminders about list directionality within bins */
 #define first(b) ((b)->fd)
@@ -1405,36 +1405,36 @@ typedef struct malloc_chunk* mbinptr;
 /* Take a chunk off a bin list */
 #define unlink(P, BK, FD) { \
-  FD = P->fd; \
-  BK = P->bk; \
-  if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
-    malloc_printerr (check_action, "corrupted double-linked list", P); \
-  else { \
-    FD->bk = BK; \
-    BK->fd = FD; \
-    if (!in_smallbin_range (P->size) \
-        && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
-      assert (P->fd_nextsize->bk_nextsize == P); \
-      assert (P->bk_nextsize->fd_nextsize == P); \
-      if (FD->fd_nextsize == NULL) { \
-        if (P->fd_nextsize == P) \
-          FD->fd_nextsize = FD->bk_nextsize = FD; \
-        else { \
-          FD->fd_nextsize = P->fd_nextsize; \
-          FD->bk_nextsize = P->bk_nextsize; \
-          P->fd_nextsize->bk_nextsize = FD; \
-          P->bk_nextsize->fd_nextsize = FD; \
-        } \
-      } else { \
-        P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
-        P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
-      } \
-    } \
-  } \
+    FD = P->fd; \
+    BK = P->bk; \
+    if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
+      malloc_printerr (check_action, "corrupted double-linked list", P); \
+    else { \
+        FD->bk = BK; \
+        BK->fd = FD; \
+        if (!in_smallbin_range (P->size) \
+            && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
+            assert (P->fd_nextsize->bk_nextsize == P); \
+            assert (P->bk_nextsize->fd_nextsize == P); \
+            if (FD->fd_nextsize == NULL) { \
+                if (P->fd_nextsize == P) \
+                  FD->fd_nextsize = FD->bk_nextsize = FD; \
+                else { \
+                    FD->fd_nextsize = P->fd_nextsize; \
+                    FD->bk_nextsize = P->bk_nextsize; \
+                    P->fd_nextsize->bk_nextsize = FD; \
+                    P->bk_nextsize->fd_nextsize = FD; \
+                  } \
+              } else { \
+                P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
+                P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
+              } \
+          } \
+      } \
 }
 
 /*
-  Indexing
+   Indexing
 
     Bins for sizes < 512 bytes contain chunks of all the same size, spaced
     8 bytes apart. Larger bins are approximately logarithmically spaced:
@@ -1455,7 +1455,7 @@ typedef struct malloc_chunk* mbinptr;
 
     Bin 0 does not exist. Bin 1 is the unordered list; if that would be
     a valid chunk size the small bins are bumped up one.
-*/
+ */
 
 #define NBINS 128
 #define NSMALLBINS 64
@@ -1464,38 +1464,38 @@ typedef struct malloc_chunk* mbinptr;
 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 
 #define in_smallbin_range(sz) \
-  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
+  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
 
 #define smallbin_index(sz) \
-  ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
+  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
    + SMALLBIN_CORRECTION)
 
 #define largebin_index_32(sz) \
-(((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
- ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
- ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
- ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
- ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
- 126)
+  (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
 
 #define largebin_index_32_big(sz) \
-(((((unsigned long)(sz)) >> 6) <= 45)? 49 + (((unsigned long)(sz)) >> 6): \
- ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
- ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
- ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
- ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
- 126)
+  (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
 
 // XXX It remains to be seen whether it is good to keep the widths of
 // XXX the buckets the same or whether it should be scaled by a factor
 // XXX of two as well.
 #define largebin_index_64(sz) \
-(((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \
- ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
- ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
- ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
- ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
- 126)
+  (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
 
 #define largebin_index(sz) \
   (SIZE_SZ == 8 ? largebin_index_64 (sz) \
@@ -1503,11 +1503,11 @@ typedef struct malloc_chunk* mbinptr;
    : largebin_index_32 (sz))
 
 #define bin_index(sz) \
-  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
+  ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
 
 /*
-  Unsorted chunks
+   Unsorted chunks
 
     All remainders from chunk splits, as well as all returned chunks,
     are first placed in the "unsorted" bin. They are then placed
@@ -1518,13 +1518,13 @@ typedef struct malloc_chunk* mbinptr;
     The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
     does not have to be taken into account in size comparisons.
-*/
+ */
 
 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
-#define unsorted_chunks(M) (bin_at(M, 1))
+#define unsorted_chunks(M) (bin_at (M, 1))
 
 /*
-  Top
+   Top
 
     The top-most available chunk (i.e., the one bordering the end of
     available memory) is treated specially. It is never included in
@@ -1539,13 +1539,13 @@ typedef struct malloc_chunk* mbinptr;
     interval between initialization and the first call to
     sysmalloc. (This is somewhat delicate, since it relies on
     the 2 preceding words to be zero during this interval as well.)
-*/
+ */
 
 /* Conveniently, the unsorted bin can be used as dummy top on first call */
-#define initial_top(M) (unsorted_chunks(M))
+#define initial_top(M) (unsorted_chunks (M))
 
 /*
-  Binmap
+   Binmap
 
     To help compensate for the large number of bins, a one-level index
     structure is used for bin-by-bin searching.  `binmap' is a
@@ -1553,7 +1553,7 @@ typedef struct malloc_chunk* mbinptr;
     be skipped over during during traversals.  The bits are NOT always
     cleared as soon as bins are empty, but instead only
     when they are noticed to be empty during traversal in malloc.
-*/
+ */
 
 /* Conservatively use 32 bits per map word, even if on 64bit system */
 #define BINMAPSHIFT 5
@@ -1561,14 +1561,14 @@ typedef struct malloc_chunk* mbinptr;
 #define BINMAPSIZE (NBINS / BITSPERMAP)
 
 #define idx2block(i) ((i) >> BINMAPSHIFT)
-#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
+#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
 
-#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
-#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
-#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
+#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
+#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
+#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
 
 /*
-  Fastbins
+   Fastbins
 
     An array of lists holding recently freed small chunks.  Fastbins
     are not doubly linked.  It is faster to single-link them, and
@@ -1582,69 +1582,69 @@ typedef struct malloc_chunk* mbinptr;
     be consolidated with other free chunks. malloc_consolidate
     releases all chunks in fastbins and consolidates them with
    other free chunks.
-*/
+ */
 
-typedef struct malloc_chunk* mfastbinptr;
+typedef struct malloc_chunk *mfastbinptr;
 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
 
 /* offset 2 to use otherwise unindexable first 2 bins */
 #define fastbin_index(sz) \
-  ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
+  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
 
 /* The maximum fastbin request size we support */
 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
 
-#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
+#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
 
 /*
-  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
-  that triggers automatic consolidation of possibly-surrounding
-  fastbin chunks. This is a heuristic, so the exact value should not
-  matter too much. It is defined at half the default trim threshold as a
-  compromise heuristic to only attempt consolidation if it is likely
-  to lead to trimming. However, it is not dynamically tunable, since
-  consolidation reduces fragmentation surrounding large chunks even
-  if trimming is not used.
-*/
+   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
+   that triggers automatic consolidation of possibly-surrounding
+   fastbin chunks. This is a heuristic, so the exact value should not
+   matter too much. It is defined at half the default trim threshold as a
+   compromise heuristic to only attempt consolidation if it is likely
+   to lead to trimming. However, it is not dynamically tunable, since
+   consolidation reduces fragmentation surrounding large chunks even
+   if trimming is not used.
+ */
 
 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
 
 /*
-  Since the lowest 2 bits in max_fast don't matter in size comparisons,
-  they are used as flags.
-*/
+   Since the lowest 2 bits in max_fast don't matter in size comparisons,
+   they are used as flags.
+ */
 
 /*
-  FASTCHUNKS_BIT held in max_fast indicates that there are probably
-  some fastbin chunks. It is set true on entering a chunk into any
-  fastbin, and cleared only in malloc_consolidate.
+   FASTCHUNKS_BIT held in max_fast indicates that there are probably
+   some fastbin chunks. It is set true on entering a chunk into any
+   fastbin, and cleared only in malloc_consolidate.
 
-  The truth value is inverted so that have_fastchunks will be true
-  upon startup (since statics are zero-filled), simplifying
-  initialization checks.
-*/
+   The truth value is inverted so that have_fastchunks will be true
+   upon startup (since statics are zero-filled), simplifying
+   initialization checks.
+ */
 
 #define FASTCHUNKS_BIT (1U)
 
-#define have_fastchunks(M)    (((M)->flags & FASTCHUNKS_BIT) == 0)
+#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
 #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
 #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
 
 /*
-  NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
-  regions. Otherwise, contiguity is exploited in merging together,
-  when possible, results from consecutive MORECORE calls.
+   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
+   regions. Otherwise, contiguity is exploited in merging together,
+   when possible, results from consecutive MORECORE calls.
 
-  The initial value comes from MORECORE_CONTIGUOUS, but is
-  changed dynamically if mmap is ever used as an sbrk substitute.
-*/
+   The initial value comes from MORECORE_CONTIGUOUS, but is
+   changed dynamically if mmap is ever used as an sbrk substitute.
+ */
 
 #define NONCONTIGUOUS_BIT (2U)
 
-#define contiguous(M)         (((M)->flags & NONCONTIGUOUS_BIT) == 0)
-#define noncontiguous(M)      (((M)->flags & NONCONTIGUOUS_BIT) != 0)
-#define set_noncontiguous(M)  ((M)->flags |= NONCONTIGUOUS_BIT)
+#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
+#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
+#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
 
 /*
@@ -1652,19 +1652,20 @@ typedef struct malloc_chunk* mfastbinptr;
    Use impossibly small value if 0.
    Precondition: there are no existing fastbin chunks.
    Setting the value clears fastchunk bit but preserves noncontiguous bit.
-*/
+ */
 
 #define set_max_fast(s) \
   global_max_fast = (((s) == 0) \
-                     ? SMALLBIN_WIDTH: ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
+                     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
 #define get_max_fast() global_max_fast
 
 /*
    ----------- Internal state representation and initialization -----------
-*/
+ */
 
-struct malloc_state {
+struct malloc_state
+{
   /* Serialize access.  */
   mutex_t mutex;
@@ -1677,19 +1678,19 @@ struct malloc_state {
 #endif
 
   /* Fastbins */
-  mfastbinptr      fastbinsY[NFASTBINS];
+  mfastbinptr fastbinsY[NFASTBINS];
 
   /* Base of the topmost chunk -- not otherwise kept in a bin */
-  mchunkptr        top;
+  mchunkptr top;
 
   /* The remainder from the most recent split of a small request */
-  mchunkptr        last_remainder;
+  mchunkptr last_remainder;
 
   /* Normal bins packed as described above */
-  mchunkptr        bins[NBINS * 2 - 2];
+  mchunkptr bins[NBINS * 2 - 2];
 
   /* Bitmap of bins */
-  unsigned int     binmap[BINMAPSIZE];
+  unsigned int binmap[BINMAPSIZE];
 
   /* Linked list */
   struct malloc_state *next;
@@ -1702,32 +1703,33 @@ struct malloc_state {
   INTERNAL_SIZE_T max_system_mem;
 };
 
-struct malloc_par {
+struct malloc_par
+{
   /* Tunable parameters */
-  unsigned long    trim_threshold;
-  INTERNAL_SIZE_T  top_pad;
-  INTERNAL_SIZE_T  mmap_threshold;
-  INTERNAL_SIZE_T  arena_test;
-  INTERNAL_SIZE_T  arena_max;
+  unsigned long trim_threshold;
+  INTERNAL_SIZE_T top_pad;
+  INTERNAL_SIZE_T mmap_threshold;
+  INTERNAL_SIZE_T arena_test;
+  INTERNAL_SIZE_T arena_max;
 
   /* Memory map support */
-  int              n_mmaps;
-  int              n_mmaps_max;
-  int              max_n_mmaps;
+  int n_mmaps;
+  int n_mmaps_max;
+  int max_n_mmaps;
   /* the mmap_threshold is dynamic, until the user sets
      it manually, at which point we need to disable any
     dynamic behavior. */
-  int              no_dyn_threshold;
+  int no_dyn_threshold;
 
   /* Statistics */
-  INTERNAL_SIZE_T  mmapped_mem;
+  INTERNAL_SIZE_T mmapped_mem;
   /*INTERNAL_SIZE_T  sbrked_mem;*/
   /*INTERNAL_SIZE_T  max_sbrked_mem;*/
-  INTERNAL_SIZE_T  max_mmapped_mem;
-  INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */
+  INTERNAL_SIZE_T max_mmapped_mem;
+  INTERNAL_SIZE_T max_total_mem;  /* only kept for NO_THREADS */
```
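The hunks above are whitespace-only, but the macros they re-indent are a compact tour of malloc's internals, so a few standalone sketches follow. Each compiles on its own; where a macro is copied from the post-reformat text that is noted, and everything else (struct names, sizes, padding) is illustrative scaffolding, not glibc code. First, `chunksize`: every chunk's `size` field carries three flag bits in its low bits, which `SIZE_BITS` masks off. In this sketch the chunk header is reduced to a bare integer; the flag values match the real ones.

```c
/* Standalone sketch (not glibc code) of the chunk-header flag scheme:
   the low bits of `size` are flags, stripped by chunksize().  */
#include <stdio.h>

#define PREV_INUSE      0x1
#define IS_MMAPPED      0x2
#define NON_MAIN_ARENA  0x4
#define SIZE_BITS       (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

/* Like glibc's chunksize(), but applied to a raw value rather than
   a struct malloc_chunk.  */
#define chunksize(sz)   ((sz) & ~(SIZE_BITS))

int main (void)
{
  /* A 96-byte chunk whose previous neighbour is in use, living in a
     non-main arena: the flags ride in the otherwise-unused low bits,
     because chunk sizes are always multiples of MALLOC_ALIGNMENT.  */
  unsigned long size_field = 96 | PREV_INUSE | NON_MAIN_ARENA;

  printf ("raw size field: %#lx\n", size_field);
  printf ("chunksize:      %lu\n", chunksize (size_field));
  printf ("prev in use:    %lu\n", size_field & PREV_INUSE);
  return 0;
}
```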
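The "repositioning trick" mentioned in the Bins comment is what `bin_at` implements: `bins[]` stores only fd/bk pointer pairs, and subtracting `offsetof (fd)` makes each pair look like the tail of a chunk, so list code can use `->fd`/`->bk` uniformly. Below is a hedged sketch: `struct chunk` and `struct state` are simplified stand-ins for glibc's `malloc_chunk` and `malloc_state` (no nextsize links, no fastbins), but the pointer arithmetic is the one from the patch.

```c
#include <stddef.h>
#include <stdio.h>

struct chunk            /* simplified stand-in for struct malloc_chunk */
{
  size_t prev_size;
  size_t size;
  struct chunk *fd;
  struct chunk *bk;
};

struct state            /* simplified stand-in for struct malloc_state */
{
  void *pad[2];              /* stand-in for the fields that precede bins[]
                                in malloc_state; bin_at() reaches back past
                                the start of the pointer pair, so something
                                must be there (2 pointers == offsetof fd) */
  struct chunk *bins[254];   /* NBINS * 2 - 2 with NBINS == 128 */
};

/* Same shape as the patch's bin_at(), with the stand-in types.  */
#define bin_at(m, i) \
  ((struct chunk *) ((char *) &((m)->bins[((i) - 1) * 2]) \
                     - offsetof (struct chunk, fd)))

int main (void)
{
  static struct state m;
  struct chunk *bin = bin_at (&m, 1);   /* bin 1: the unsorted bin */

  /* bin->fd and bin->bk alias m.bins[0] and m.bins[1]; the fields
     before fd must never be touched through this pointer.  */
  bin->fd = bin->bk = bin;              /* an empty bin points to itself */
  printf ("&bin->fd aliases &m.bins[0]: %d\n",
          (void *) &bin->fd == (void *) &m.bins[0]);
  return 0;
}
```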
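The largebin index macros encode the "approximately logarithmically spaced" bins directly: five ranges of increasingly coarse granularity (64-byte steps, then 512, 4096, 32768, 262144), plus one catch-all bin 126. This toy program feeds a few sizes through `largebin_index_64`, copied verbatim from the patch's new text; the scaffolding around it is ours.

```c
#include <stdio.h>

#define largebin_index_64(sz) \
  (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
   ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
   126)

int main (void)
{
  /* Prints bins 64, 80, 99, 121, 126: each range covers a wider span
     of sizes than the last, giving the log-like spacing.  */
  unsigned long sizes[] = { 1024, 2048, 4096, 65536, 1048576 };

  for (unsigned int i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    printf ("size %8lu -> bin %lu\n", sizes[i],
            (unsigned long) largebin_index_64 (sizes[i]));
  return 0;
}
```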
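`binmap` is the one-level index the Binmap comment describes: one bit per bin, packed into 32-bit map words so a traversal can skip whole groups of definitely-empty bins. The macros below are the post-reformat ones verbatim; `struct state` is again a stand-in holding only the bitmap.

```c
#include <stdio.h>

#define NBINS        128
#define BINMAPSHIFT  5
#define BITSPERMAP   (1 << BINMAPSHIFT)
#define BINMAPSIZE   (NBINS / BITSPERMAP)

#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))

struct state { unsigned int binmap[BINMAPSIZE]; };  /* stand-in */

#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))

int main (void)
{
  struct state m = { { 0 } };

  mark_bin (&m, 70);            /* bin 70 lives in map word 2, bit 6 */
  printf ("bin 70 marked: %d\n", get_binmap (&m, 70) != 0);
  unmark_bin (&m, 70);
  printf ("bin 70 marked: %d\n", get_binmap (&m, 70) != 0);
  return 0;
}
```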
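Finally, `fastbin_index` shows why fastbins need no searching at all: the index is a single shift and subtract. The macro is verbatim from the patch; `SIZE_SZ == 8` (a 64-bit build, where fastbin chunk sizes step by 16 bytes) is our assumption.

```c
#include <stdio.h>

#define SIZE_SZ  8   /* assumption: 64-bit INTERNAL_SIZE_T */

#define fastbin_index(sz) \
  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)

int main (void)
{
  /* On 64-bit, the smallest chunk is 32 bytes, so sizes 32, 48, 64,
     ... map to consecutive fastbins 0, 1, 2, ...  */
  for (unsigned int sz = 32; sz <= 160; sz += 16)
    printf ("chunk size %3u -> fastbin %u\n", sz, fastbin_index (sz));
  return 0;
}
```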
