| field | value | date |
|---|---|---|
| author | Jakub Jelinek <jakub@redhat.com> | 2007-07-12 18:26:36 +0000 |
| committer | Jakub Jelinek <jakub@redhat.com> | 2007-07-12 18:26:36 +0000 |
| commit | 0ecb606cb6cf65de1d9fc8a919bceb4be476c602 (patch) | |
| tree | 2ea1f8305970753e4a657acb2ccc15ca3eec8e2c /malloc | |
| parent | 7d58530341304d403a6626d7f7a1913165fe2f32 (diff) | |
| download | glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.tar.xz, glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.zip | |
2.5-18.1
Diffstat (limited to 'malloc')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | malloc/Makefile | 16 |
| -rw-r--r-- | malloc/arena.c | 179 |
| -rw-r--r-- | malloc/hooks.c | 49 |
| -rw-r--r-- | malloc/malloc.c | 374 |
| -rw-r--r-- | malloc/malloc.h | 142 |
| -rw-r--r-- | malloc/mcheck.c | 30 |
| -rw-r--r-- | malloc/memusage.c | 325 |
| -rwxr-xr-x | malloc/memusage.sh | 28 |
| -rw-r--r-- | malloc/memusagestat.c | 51 |
| -rw-r--r-- | malloc/morecore.c | 54 |
| -rw-r--r-- | malloc/mtrace.c | 20 |
| -rw-r--r-- | malloc/mtrace.pl | 4 |
| -rw-r--r-- | malloc/obstack.c | 71 |
| -rw-r--r-- | malloc/obstack.h | 114 |
| -rw-r--r-- | malloc/tst-malloc.c | 12 |
| -rw-r--r-- | malloc/tst-mallocfork.c | 51 |
| -rw-r--r-- | malloc/tst-mcheck.c | 91 |
| -rwxr-xr-x | malloc/tst-mtrace.sh | 5 |
18 files changed, 1109 insertions, 507 deletions
```diff
diff --git a/malloc/Makefile b/malloc/Makefile
index 0512c49839..c39eae5474 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -1,4 +1,5 @@
-# Copyright (C) 1991-1999,2000,2001,2002,2003 Free Software Foundation, Inc.
+# Copyright (C) 1991-1999, 2000, 2001, 2002, 2003, 2005, 2006
+# Free Software Foundation, Inc.
 # This file is part of the GNU C Library.
 
 # The GNU C Library is free software; you can redistribute it and/or
@@ -26,7 +27,7 @@ all:
 dist-headers := malloc.h
 headers := $(dist-headers) obstack.h mcheck.h
 tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
-	 tst-mallocstate
+	 tst-mallocstate tst-mcheck tst-mallocfork
 test-srcs = tst-mtrace
 
 distribute = thread-m.h mtrace.pl mcheck-init.c stackinfo.h memusage.h \
@@ -78,6 +79,7 @@ endif
 ifneq ($(cross-compiling),yes)
 # If the gd library is available we build the `memusagestat' program.
 ifneq ($(LIBGD),no)
+others: $(objpfx)memusage
 install-bin = memusagestat
 install-bin-script += memusage
 generated += memusagestat memusage
@@ -101,6 +103,8 @@ $(objpfx)memusagestat: $(memusagestat-modules:%=$(objpfx)%.o)
 
 include ../Rules
 
+CFLAGS-mcheck-init.c = $(PIC-ccflag)
+
 $(objpfx)libmcheck.a: $(objpfx)mcheck-init.o
 	-rm -f $@
 	$(patsubst %/,cd % &&,$(objpfx)) \
@@ -118,9 +122,13 @@ endif
 endif
 endif
 
+tst-mcheck-ENV = MALLOC_CHECK_=3
+
 # Uncomment this for test releases.  For public releases it is too expensive.
 #CPPFLAGS-malloc.o += -DMALLOC_DEBUG=1
 
+sLIBdir := $(shell echo $(slibdir) | sed 's,lib\(\|64\)$$,\\\\$$LIB,')
+
 $(objpfx)mtrace: mtrace.pl
 	rm -f $@.new
 	sed -e 's|@PERL@|$(PERL)|' -e 's|@XXX@|$(address-width)|' \
@@ -130,12 +138,12 @@ $(objpfx)mtrace: mtrace.pl
 $(objpfx)memusage: memusage.sh
 	rm -f $@.new
 	sed -e 's|@BASH@|$(BASH)|' -e 's|@VERSION@|$(version)|' \
-	    -e 's|@SLIBDIR@|$(slibdir)|' -e 's|@BINDIR@|$(bindir)|' $^ > $@.new \
+	    -e 's|@SLIBDIR@|$(sLIBdir)|' -e 's|@BINDIR@|$(bindir)|' $^ > $@.new \
 	&& rm -f $@ && mv $@.new $@ && chmod +x $@
 
 # The implementation uses `dlsym'
-$(objpfx)libmemusage.so: $(common-objpfx)dlfcn/libdl.so
+$(objpfx)libmemusage.so: $(common-objpfx)dlfcn/libdl.so $(elfobjdir)/ld.so
 
 # Extra dependencies
 $(foreach o,$(all-object-suffixes),$(objpfx)malloc$(o)): arena.c hooks.c
```

```diff
diff --git a/malloc/arena.c b/malloc/arena.c
index 026f2c7822..0dcb7cb9f8 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -1,5 +1,6 @@
 /* Malloc implementation for multiple threads without lock contention.
-   Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2001,2002,2003,2004,2005,2006,2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
 
@@ -18,13 +19,17 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
-/* $Id$ */
+#include <stdbool.h>
 
 /* Compile-time constants.  */
 
 #define HEAP_MIN_SIZE (32*1024)
 #ifndef HEAP_MAX_SIZE
-#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
+# ifdef DEFAULT_MMAP_THRESHOLD_MAX
+#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
+# else
+#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
+# endif
 #endif
 
 /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
@@ -55,9 +60,20 @@ typedef struct _heap_info {
   mstate ar_ptr; /* Arena for this heap. */
   struct _heap_info *prev; /* Previous heap. */
   size_t size;   /* Current size in bytes. */
-  size_t pad;    /* Make sure the following data is properly aligned. */
+  size_t mprotect_size; /* Size in bytes that has been mprotected
+                           PROT_READ|PROT_WRITE.  */
+  /* Make sure the following data is properly aligned, particularly
+     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
+     MALLOC_ALIGNMENT. */
+  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
 } heap_info;
 
+/* Get a compile-time error if the heap_info padding is not correct
+   to make alignment work as expected in sYSMALLOc.  */
+extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
+                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
+                                            ? -1 : 1];
+
 /* Thread specific data */
 
 static tsd_key_t arena_key;
@@ -208,6 +224,10 @@ free_atfork(Void_t* mem, const Void_t *caller)
   (void)mutex_unlock(&ar_ptr->mutex);
 }
 
+
+/* Counter for number of times the list is locked by the same thread.  */
+static unsigned int atfork_recursive_cntr;
+
 /* The following two functions are registered via thread_atfork() to
    make sure that the mutexes remain in a consistent state in the
    fork()ed version of a thread.  Also adapt the malloc and free hooks
@@ -221,7 +241,18 @@ ptmalloc_lock_all (void)
 
   if(__malloc_initialized < 1)
     return;
-  (void)mutex_lock(&list_lock);
+  if (mutex_trylock(&list_lock))
+    {
+      Void_t *my_arena;
+      tsd_getspecific(arena_key, my_arena);
+      if (my_arena == ATFORK_ARENA_PTR)
+        /* This is the same thread which already locks the global list.
+           Just bump the counter.  */
+        goto out;
+
+      /* This thread has to wait its turn.  */
+      (void)mutex_lock(&list_lock);
+    }
   for(ar_ptr = &main_arena;;) {
     (void)mutex_lock(&ar_ptr->mutex);
     ar_ptr = ar_ptr->next;
@@ -234,6 +265,8 @@ ptmalloc_lock_all (void)
   /* Only the current thread may perform malloc/free calls now. */
   tsd_getspecific(arena_key, save_arena);
   tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
+ out:
+  ++atfork_recursive_cntr;
 }
 
 static void
@@ -243,6 +276,8 @@ ptmalloc_unlock_all (void)
 
   if(__malloc_initialized < 1)
     return;
+  if (--atfork_recursive_cntr != 0)
+    return;
   tsd_setspecific(arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
@@ -256,7 +291,7 @@ ptmalloc_unlock_all (void)
 
 #ifdef __linux__
 
-/* In LinuxThreads, unlocking a mutex in the child process after a
+/* In NPTL, unlocking a mutex in the child process after a
    fork() is currently unsafe, whereas re-initializing it is safe and
    does not leak resources.  Therefore, a special atfork handler is
    installed for the child. */
@@ -279,6 +314,7 @@ ptmalloc_unlock_all2 (void)
     if(ar_ptr == &main_arena) break;
   }
   mutex_init(&list_lock);
+  atfork_recursive_cntr = 0;
 }
 
 #else
@@ -353,8 +389,6 @@ libc_hidden_proto (_dl_open_hook);
 # endif
 
 # if defined SHARED && defined USE_TLS && !USE___THREAD
-# include <stdbool.h>
-
 /* This is called by __pthread_initialize_minimal when it needs to use
    malloc to set up the TLS state.  We cannot do the full work of
    ptmalloc_init (below) until __pthread_initialize_minimal has finished,
@@ -482,8 +516,13 @@ ptmalloc_init (void)
 	      s = &envline[7];
 	    break;
 	  case 8:
-	    if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
-	      mALLOPt(M_TOP_PAD, atoi(&envline[9]));
+	    if (! secure)
+	      {
+		if (memcmp (envline, "TOP_PAD_", 8) == 0)
+		  mALLOPt(M_TOP_PAD, atoi(&envline[9]));
+		else if (memcmp (envline, "PERTURB_", 8) == 0)
+		  mALLOPt(M_PERTURB, atoi(&envline[9]));
+	      }
 	    break;
 	  case 9:
 	    if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
@@ -510,6 +549,8 @@ ptmalloc_init (void)
     mALLOPt(M_TRIM_THRESHOLD, atoi(s));
   if((s = getenv("MALLOC_TOP_PAD_")))
     mALLOPt(M_TOP_PAD, atoi(s));
+  if((s = getenv("MALLOC_PERTURB_")))
+    mALLOPt(M_PERTURB, atoi(s));
   if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
     mALLOPt(M_MMAP_THRESHOLD, atoi(s));
   if((s = getenv("MALLOC_MMAP_MAX_")))
@@ -517,8 +558,8 @@
   }
   s = getenv("MALLOC_CHECK_");
 #endif
-  if(s) {
-    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
+  if(s && s[0]) {
+    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
     if (check_action != 0)
       __malloc_check_init();
   }
@@ -654,6 +695,7 @@ new_heap(size, top_pad) size_t size, top_pad;
   }
   h = (heap_info *)p2;
   h->size = size;
+  h->mprotect_size = size;
   THREAD_STAT(stat_n_heaps++);
   return h;
 }
@@ -674,19 +716,36 @@ grow_heap(h, diff) heap_info *h; long diff;
   if(diff >= 0) {
     diff = (diff + page_mask) & ~page_mask;
     new_size = (long)h->size + diff;
-    if(new_size > HEAP_MAX_SIZE)
+    if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
       return -1;
-    if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
-      return -2;
+    if((unsigned long) new_size > h->mprotect_size) {
+      if (mprotect((char *)h + h->mprotect_size,
+                   (unsigned long) new_size - h->mprotect_size,
+                   PROT_READ|PROT_WRITE) != 0)
+        return -2;
+      h->mprotect_size = new_size;
+    }
   } else {
     new_size = (long)h->size + diff;
     if(new_size < (long)sizeof(*h))
       return -1;
     /* Try to re-map the extra heap space freshly to save memory, and
        make it inaccessible. */
-    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
-                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
-      return -2;
+#ifdef _LIBC
+    if (__builtin_expect (__libc_enable_secure, 0))
+#else
+    if (1)
+#endif
+      {
+        if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
+                        MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
+          return -2;
+        h->mprotect_size = new_size;
+      }
+#ifdef _LIBC
+    else
+      madvise ((char *)h + new_size, -diff, MADV_DONTNEED);
+#endif
     /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
   }
   h->size = new_size;
@@ -759,6 +818,48 @@ heap_trim(heap, pad) heap_info *heap; size_t pad;
   return 1;
 }
 
+/* Create a new arena with initial size "size".  */
+
+static mstate
+_int_new_arena(size_t size)
+{
+  mstate a;
+  heap_info *h;
+  char *ptr;
+  unsigned long misalign;
+
+  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
+               mp_.top_pad);
+  if(!h) {
+    /* Maybe size is too large to fit in a single heap.  So, just try
+       to create a minimally-sized arena and let _int_malloc() attempt
+       to deal with the large request via mmap_chunk().  */
+    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+    if(!h)
+      return 0;
+  }
+  a = h->ar_ptr = (mstate)(h+1);
+  malloc_init_state(a);
+  /*a->next = NULL;*/
+  a->system_mem = a->max_system_mem = h->size;
+  arena_mem += h->size;
+#ifdef NO_THREADS
+  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
+     mp_.max_total_mem)
+    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
+#endif
+
+  /* Set up the top chunk, with proper alignment. */
+  ptr = (char *)(a + 1);
+  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+  if (misalign > 0)
+    ptr += MALLOC_ALIGNMENT - misalign;
+  top(a) = (mchunkptr)ptr;
+  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+
+  return a;
+}
+
 static mstate
 internal_function
 #if __STD_C
@@ -829,48 +930,6 @@ arena_get2(a_tsd, size) mstate a_tsd; size_t size;
   return a;
 }
 
-/* Create a new arena with initial size "size".  */
-
-mstate
-_int_new_arena(size_t size)
-{
-  mstate a;
-  heap_info *h;
-  char *ptr;
-  unsigned long misalign;
-
-  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
-               mp_.top_pad);
-  if(!h) {
-    /* Maybe size is too large to fit in a single heap.  So, just try
-       to create a minimally-sized arena and let _int_malloc() attempt
-       to deal with the large request via mmap_chunk().  */
-    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
-    if(!h)
-      return 0;
-  }
-  a = h->ar_ptr = (mstate)(h+1);
-  malloc_init_state(a);
-  /*a->next = NULL;*/
-  a->system_mem = a->max_system_mem = h->size;
-  arena_mem += h->size;
-#ifdef NO_THREADS
-  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
-     mp_.max_total_mem)
-    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
-#endif
-
-  /* Set up the top chunk, with proper alignment. */
-  ptr = (char *)(a + 1);
-  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
-  if (misalign > 0)
-    ptr += MALLOC_ALIGNMENT - misalign;
-  top(a) = (mchunkptr)ptr;
-  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
-
-  return a;
-}
-
 #endif /* USE_ARENAS */
 
 /*
```
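The padding trick in the `heap_info` hunk above is worth unpacking: `-6 * SIZE_SZ & MALLOC_ALIGN_MASK` computes, in unsigned arithmetic, exactly the number of bytes needed to round the struct's six words (four fields plus the `2 * SIZE_SZ` chunk header that follows it in memory) up to a `MALLOC_ALIGNMENT` boundary, and the `sanity_check_heap_info_alignment` declaration turns any mistake into a compile-time error, because an array type may not have negative size. Below is a minimal standalone sketch of the same idiom, with hypothetical `*_demo` names and the stock `2 * SIZE_SZ` alignment assumed:

```c
#include <stddef.h>

#define SIZE_SZ           (sizeof (size_t))
#define MALLOC_ALIGNMENT  (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

typedef struct _heap_info_demo {
  void *ar_ptr;                  /* four word-sized fields ...          */
  struct _heap_info_demo *prev;
  size_t size;
  size_t mprotect_size;
  /* ... plus 2 * SIZE_SZ of chunk header following the struct gives
     6 * SIZE_SZ bytes total; (-6 * SIZE_SZ) & MASK is the padding that
     rounds that up to the next alignment boundary (0 when it already
     fits -- a zero-length array, which is a GNU C extension).  */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info_demo;

/* Hand-rolled static assertion: a negative array size is a compile-time
   error, so this declaration only compiles when the invariant holds.  */
extern int demo_alignment_check[(sizeof (heap_info_demo) + 2 * SIZE_SZ)
                                % MALLOC_ALIGNMENT ? -1 : 1];
```

C had no standard `static_assert` until C11, so this extern-array declaration was the usual way to check such invariants at compile time without emitting any code or data.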
```diff
diff --git a/malloc/hooks.c b/malloc/hooks.c
index a5c97f3133..708f0faf83 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -1,5 +1,5 @@
 /* Malloc implementation for multiple threads without lock contention.
-   Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
 
@@ -18,8 +18,6 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
-/* $Id$ */
-
 /* What to do if the standard debugging hooks are in place and a
    corrupt pointer is detected: do nothing (0), print an error message
    (1), or call abort() (2). */
@@ -146,9 +144,9 @@ mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
 static mchunkptr
 internal_function
 #if __STD_C
-mem2chunk_check(Void_t* mem)
+mem2chunk_check(Void_t* mem, unsigned char **magic_p)
 #else
-mem2chunk_check(mem) Void_t* mem;
+mem2chunk_check(mem, magic_p) Void_t* mem; unsigned char **magic_p;
 #endif
 {
   mchunkptr p;
@@ -173,7 +171,6 @@ mem2chunk_check(mem) Void_t* mem;
     for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
       if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
     }
-    ((unsigned char*)p)[sz] ^= 0xFF;
   } else {
     unsigned long offset, page_mask = malloc_getpagesize-1;
 
@@ -193,8 +190,10 @@ mem2chunk_check(mem) Void_t* mem;
     for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
       if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
     }
-    ((unsigned char*)p)[sz] ^= 0xFF;
   }
+  ((unsigned char*)p)[sz] ^= 0xFF;
+  if (magic_p)
+    *magic_p = (unsigned char *)p + sz;
   return p;
 }
 
@@ -232,7 +231,11 @@ top_check()
   sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
   sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
   new_brk = (char*)(MORECORE (sbrk_size));
-  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
+  if (new_brk == (char*)(MORECORE_FAILURE))
+    {
+      MALLOC_FAILURE_ACTION;
+      return -1;
+    }
   /* Call the `morecore' hook if necessary.  */
   if (__after_morecore_hook)
     (*__after_morecore_hook) ();
@@ -253,6 +256,11 @@ malloc_check(sz, caller) size_t sz; const Void_t *caller;
 {
   Void_t *victim;
 
+  if (sz+1 == 0) {
+    MALLOC_FAILURE_ACTION;
+    return NULL;
+  }
+
   (void)mutex_lock(&main_arena.mutex);
   victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
   (void)mutex_unlock(&main_arena.mutex);
@@ -270,7 +278,7 @@ free_check(mem, caller) Void_t* mem; const Void_t *caller;
 
   if(!mem) return;
   (void)mutex_lock(&main_arena.mutex);
-  p = mem2chunk_check(mem);
+  p = mem2chunk_check(mem, NULL);
   if(!p) {
     (void)mutex_unlock(&main_arena.mutex);
 
@@ -302,10 +310,19 @@ realloc_check(oldmem, bytes, caller)
   mchunkptr oldp;
   INTERNAL_SIZE_T nb, oldsize;
   Void_t* newmem = 0;
+  unsigned char *magic_p;
 
+  if (bytes+1 == 0) {
+    MALLOC_FAILURE_ACTION;
+    return NULL;
+  }
   if (oldmem == 0) return malloc_check(bytes, NULL);
+  if (bytes == 0) {
+    free_check (oldmem, NULL);
+    return NULL;
+  }
   (void)mutex_lock(&main_arena.mutex);
-  oldp = mem2chunk_check(oldmem);
+  oldp = mem2chunk_check(oldmem, &magic_p);
   (void)mutex_unlock(&main_arena.mutex);
   if(!oldp) {
     malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
@@ -357,6 +374,12 @@ realloc_check(oldmem, bytes, caller)
 #if HAVE_MMAP
   }
 #endif
+
+  /* mem2chunk_check changed the magic byte in the old chunk.
+     If newmem is NULL, then the old chunk will still be used though,
+     so we need to invert that change here.  */
+  if (newmem == NULL) *magic_p ^= 0xFF;
+
   (void)mutex_unlock(&main_arena.mutex);
 
   return mem2mem_check(newmem, bytes);
@@ -376,6 +399,10 @@ memalign_check(alignment, bytes, caller)
   if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
   if (alignment <  MINSIZE) alignment = MINSIZE;
 
+  if (bytes+1 == 0) {
+    MALLOC_FAILURE_ACTION;
+    return NULL;
+  }
   checked_request2size(bytes+1, nb);
   (void)mutex_lock(&main_arena.mutex);
   mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
@@ -555,7 +582,7 @@ public_sET_STATe(Void_t* msptr)
   (void)mutex_lock(&main_arena.mutex);
   /* There are no fastchunks.  */
   clear_fastchunks(&main_arena);
-  set_max_fast(&main_arena, DEFAULT_MXFAST);
+  set_max_fast(DEFAULT_MXFAST);
   for (i=0; i<NFASTBINS; ++i)
     main_arena.fastbins[i] = 0;
   for (i=0; i<BINMAPSIZE; ++i)
```
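The `sz+1 == 0` guards added to `malloc_check`, `realloc_check`, and `memalign_check` above close an integer-overflow hole: the MALLOC_CHECK_ layer always requests one extra byte for the trailing magic byte, so a request of `(size_t) -1` would wrap around to a zero-byte allocation and silently succeed. A minimal sketch of the failure mode, using a hypothetical `checked_alloc` with plain `malloc` standing in for `_int_malloc`:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for malloc_check: the checking hooks allocate
   sz+1 bytes so they can append a magic byte after the user data.
   Without the guard, sz == (size_t) -1 makes sz+1 wrap to 0, and a
   tiny block would be handed back for an impossibly large request.  */
static void *checked_alloc(size_t sz)
{
  if (sz + 1 == 0)          /* overflow: sz was SIZE_MAX */
    return NULL;            /* fail cleanly instead */
  return malloc(sz + 1);    /* one extra byte for the magic value */
}

int main(void)
{
  void *p = checked_alloc((size_t) -1);
  printf("huge request -> %p\n", p);   /* prints (nil) */
  return 0;
}
```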
```diff
diff --git a/malloc/malloc.c b/malloc/malloc.c
index e3ccbde7b5..a369001520 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1,5 +1,5 @@
- /* Malloc implementation for multiple threads without lock contention.
-  Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
+/* Malloc implementation for multiple threads without lock contention.
+  Copyright (C) 1996-2002,2003,2004,2005,2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>
   and Doug Lea <dl@cs.oswego.edu>, 2001.
@@ -24,7 +24,6 @@
   Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
 
 * Version ptmalloc2-20011215
-  $Id$
   based on:
   VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
 
@@ -189,7 +188,8 @@ Changing default word sizes:
 
     INTERNAL_SIZE_T            size_t
-    MALLOC_ALIGNMENT           2 * sizeof(INTERNAL_SIZE_T)
+    MALLOC_ALIGNMENT           MAX (2 * sizeof(INTERNAL_SIZE_T),
+                                    __alignof__ (long double))
 
  Configuration and functionality options:
 
@@ -259,6 +259,7 @@
 
 #ifdef _LIBC
 #include <stdio-common/_itoa.h>
+#include <bits/wordsize.h>
 #endif
 
 #ifdef __cplusplus
@@ -381,6 +382,15 @@ extern "C" {
 
 
 #ifndef MALLOC_ALIGNMENT
+/* XXX This is the correct definition.  It differs from 2*SIZE_SZ only on
+   powerpc32.  For the time being, changing this is causing more
+   compatibility problems due to malloc_get_state/malloc_set_state than
+   will returning blocks not adequately aligned for long double objects
+   under -mlong-double-128.
+
+#define MALLOC_ALIGNMENT       (2 * SIZE_SZ < __alignof__ (long double) \
+                                ? __alignof__ (long double) : 2 * SIZE_SZ)
+*/
 #define MALLOC_ALIGNMENT       (2 * SIZE_SZ)
 #endif
@@ -1006,6 +1016,7 @@ struct mallinfo public_mALLINFo(void);
 struct mallinfo public_mALLINFo();
 #endif
 
+#ifndef _LIBC
 /*
   independent_calloc(size_t n_eleme
```
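The malloc.c hunks stop here, but one user-visible feature this commit wires in (parsed in the `ptmalloc_init` hunk of arena.c above) deserves a usage note: the `M_PERTURB` mallopt knob and its `MALLOC_PERTURB_` environment variable. With a nonzero perturb byte, newly allocated blocks are filled with `byte ^ 0xFF` and freed blocks are refilled with `byte`, so reads of uninitialized or stale heap memory show up as conspicuously poisoned values. A hedged sketch, assuming a glibc recent enough to define `M_PERTURB` in `<malloc.h>`:

```c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  /* Same effect as launching the process with MALLOC_PERTURB_=170
     (0xAA) in the environment, but set programmatically here.  */
  mallopt(M_PERTURB, 0xAA);

  unsigned char *p = malloc(16);
  if (p == NULL)
    return 1;

  /* Not zero and not random garbage: the allocator poisoned the fresh
     block with 0xAA ^ 0xFF = 0x55, making uninitialized reads obvious.  */
  printf("fresh byte: 0x%02x\n", p[0]);

  free(p);  /* the block is now refilled with 0xAA; not demonstrated
               here, since reading freed memory is undefined behavior. */
  return 0;
}
```

Setting `MALLOC_PERTURB_` in the environment gives the same debugging aid with no source changes, which is why this commit also adds it to the secure-mode-filtered environment scan in `ptmalloc_init`.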
