author    DJ Delorie <dj@delorie.com>    2016-06-30 16:19:04 -0400
committer DJ Delorie <dj@delorie.com>    2016-06-30 16:19:04 -0400
commit    2d7af358a376603f78ae503036e445699ada346c (patch)
tree      f06e67ccca163ea25e31a51f8914ac263396ea2c /malloc
parent    7328eadb40411e561e6cce905dba331a1739c63a (diff)
parent    aa95fc13f5b02044eadc3af3d9e1c025f2e1edda (diff)
Merge branch 'master' into dj/malloc
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/arena.c             12
-rw-r--r--  malloc/hooks.c            152
-rw-r--r--  malloc/malloc-hooks.h      24
-rw-r--r--  malloc/malloc.c            96
-rw-r--r--  malloc/malloc.h             5
-rw-r--r--  malloc/tst-mallocfork2.c   22
6 files changed, 163 insertions(+), 148 deletions(-)
diff --git a/malloc/arena.c b/malloc/arena.c
index 1dd9deef0e..229783f3b7 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -340,9 +340,11 @@ ptmalloc_init (void)
if (check_action != 0)
__malloc_check_init ();
}
+#if HAVE_MALLOC_INIT_HOOK
void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
if (hook != NULL)
(*hook)();
+#endif
__malloc_initialized = 1;
}
@@ -769,14 +771,12 @@ reused_arena (mstate avoid_arena)
{
result = result->next;
if (result == begin)
- break;
+ /* We looped around the arena list. We could not find any
+ arena that was either not corrupted or not the one we
+ wanted to avoid. */
+ return NULL;
}
- /* We could not find any arena that was either not corrupted or not the one
- we wanted to avoid. */
- if (result == begin || result == avoid_arena)
- return NULL;
-
/* No arena available without contention. Wait for the next in line. */
LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
(void) mutex_lock (&result->mutex);
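For context on the HAVE_MALLOC_INIT_HOOK guard above: __malloc_initialize_hook is leaving the public API, but binaries built against older glibc keep working through the compat symbol added in malloc.c below. A minimal sketch of how a pre-2.24 application registered the hook (assumed legacy usage, not code from this commit; on 2.24 and later the variable declaration must be written out by hand because <malloc.h> no longer provides it):

    #include <unistd.h>

    static void
    my_init_hook (void)
    {
      /* Runs once from ptmalloc_init, before the first allocation;
         avoid calling malloc from here.  */
      write (STDERR_FILENO, "malloc initialized\n", 19);
    }

    /* Picked up via atomic_forced_read in ptmalloc_init above.  */
    void (*__malloc_initialize_hook) (void) = my_init_hook;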
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 0d2bb96c03..caa1e70d13 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -465,7 +465,7 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
then the hooks are reset to 0. */
#define MALLOC_STATE_MAGIC 0x444c4541l
-#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */
+#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
struct malloc_save_state
{
@@ -548,11 +548,7 @@ int
__malloc_set_state (void *msptr)
{
struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
- size_t i;
- mbinptr b;
- disallow_malloc_check = 1;
- ptmalloc_init ();
if (ms->magic != MALLOC_STATE_MAGIC)
return -1;
@@ -560,106 +556,60 @@ __malloc_set_state (void *msptr)
if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
return -2;
- (void) mutex_lock (&main_arena.mutex);
- /* There are no fastchunks. */
- clear_fastchunks (&main_arena);
- if (ms->version >= 4)
- set_max_fast (ms->max_fast);
- else
- set_max_fast (64); /* 64 used to be the value we always used. */
- for (i = 0; i < NFASTBINS; ++i)
- fastbin (&main_arena, i) = 0;
- for (i = 0; i < BINMAPSIZE; ++i)
- main_arena.binmap[i] = 0;
- top (&main_arena) = ms->av[2];
- main_arena.last_remainder = 0;
- for (i = 1; i < NBINS; i++)
- {
- b = bin_at (&main_arena, i);
- if (ms->av[2 * i + 2] == 0)
- {
- assert (ms->av[2 * i + 3] == 0);
- first (b) = last (b) = b;
- }
+ /* We do not need to perform locking here because __malloc_set_state
+ must be called before the first call into the malloc subsystem
+ (usually via __malloc_initialize_hook). pthread_create always
+ calls calloc and thus must be called only afterwards, so there
+ cannot be more than one thread when we reach this point. */
+
+ /* Disable the malloc hooks (and malloc checking). */
+ __malloc_hook = NULL;
+ __realloc_hook = NULL;
+ __free_hook = NULL;
+ __memalign_hook = NULL;
+ using_malloc_checking = 0;
+
+ /* Patch the dumped heap. We no longer try to integrate into the
+ existing heap. Instead, we mark the existing chunks as mmapped.
+ Together with the update to dumped_main_arena_start and
+ dumped_main_arena_end, realloc and free will recognize these
+ chunks as dumped fake mmapped chunks and never free them. */
+
+ /* Find the chunk with the lowest address within the heap. */
+ mchunkptr chunk = NULL;
+ {
+ size_t *candidate = (size_t *) ms->sbrk_base;
+ size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
+ while (candidate < end)
+ if (*candidate != 0)
+ {
+ chunk = mem2chunk ((void *) (candidate + 1));
+ break;
+ }
else
- {
- if (ms->version >= 3 &&
- (i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
- largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
- {
- first (b) = ms->av[2 * i + 2];
- last (b) = ms->av[2 * i + 3];
- /* Make sure the links to the bins within the heap are correct. */
- first (b)->bk = b;
- last (b)->fd = b;
- /* Set bit in binblocks. */
- mark_bin (&main_arena, i);
- }
- else
- {
- /* Oops, index computation from chunksize must have changed.
- Link the whole list into unsorted_chunks. */
- first (b) = last (b) = b;
- b = unsorted_chunks (&main_arena);
- ms->av[2 * i + 2]->bk = b;
- ms->av[2 * i + 3]->fd = b->fd;
- b->fd->bk = ms->av[2 * i + 3];
- b->fd = ms->av[2 * i + 2];
- }
- }
- }
- if (ms->version < 3)
- {
- /* Clear fd_nextsize and bk_nextsize fields. */
- b = unsorted_chunks (&main_arena)->fd;
- while (b != unsorted_chunks (&main_arena))
- {
- if (!in_smallbin_range (chunksize (b)))
- {
- b->fd_nextsize = NULL;
- b->bk_nextsize = NULL;
- }
- b = b->fd;
- }
- }
- mp_.sbrk_base = ms->sbrk_base;
- main_arena.system_mem = ms->sbrked_mem_bytes;
- mp_.trim_threshold = ms->trim_threshold;
- mp_.top_pad = ms->top_pad;
- mp_.n_mmaps_max = ms->n_mmaps_max;
- mp_.mmap_threshold = ms->mmap_threshold;
- check_action = ms->check_action;
- main_arena.max_system_mem = ms->max_sbrked_mem;
- mp_.n_mmaps = ms->n_mmaps;
- mp_.max_n_mmaps = ms->max_n_mmaps;
- mp_.mmapped_mem = ms->mmapped_mem;
- mp_.max_mmapped_mem = ms->max_mmapped_mem;
- /* add version-dependent code here */
- if (ms->version >= 1)
- {
- /* Check whether it is safe to enable malloc checking, or whether
- it is necessary to disable it. */
- if (ms->using_malloc_checking && !using_malloc_checking &&
- !disallow_malloc_check)
- __malloc_check_init ();
- else if (!ms->using_malloc_checking && using_malloc_checking)
- {
- __malloc_hook = NULL;
- __free_hook = NULL;
- __realloc_hook = NULL;
- __memalign_hook = NULL;
- using_malloc_checking = 0;
- }
- }
- if (ms->version >= 4)
+ ++candidate;
+ }
+ if (chunk == NULL)
+ return 0;
+
+ /* Iterate over the dumped heap and patch the chunks so that they
+ are treated as fake mmapped chunks. */
+ mchunkptr top = ms->av[2];
+ while (chunk < top)
{
- mp_.arena_test = ms->arena_test;
- mp_.arena_max = ms->arena_max;
- narenas = ms->narenas;
+ if (inuse (chunk))
+ {
+ /* Mark chunk as mmapped, to trigger the fallback path. */
+ size_t size = chunksize (chunk);
+ set_head (chunk, size | IS_MMAPPED);
+ }
+ chunk = next_chunk (chunk);
}
- check_malloc_state (&main_arena);
- (void) mutex_unlock (&main_arena.mutex);
+ /* The dumped fake mmapped chunks all lie in this address range. */
+ dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
+ dumped_main_arena_end = top;
+
return 0;
}
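The rewritten __malloc_set_state above no longer rebuilds the bins from the dumped image; it only marks the old chunks IS_MMAPPED so that free and realloc take the fallback paths and never hand dumped memory back to the new heap. A rough sketch of the dump/undump cycle this interface serves, in the style of Emacs unexec (assumed usage; how the heap image itself is persisted and remapped is outside this commit):

    #include <malloc.h>
    #include <stdlib.h>

    /* Dumping process: capture the allocator state alongside the
       heap image.  */
    static void *
    capture_heap_state (void)
    {
      return malloc_get_state ();  /* heap-allocated snapshot */
    }

    /* Restarted process: the dumped heap image has been mapped back
       at its old address; re-register it with the new allocator.
       After this commit the old chunks are freed as no-ops and
       reallocated by copying.  */
    static void
    restore_heap_state (void *state)
    {
      if (malloc_set_state (state) != 0)
        abort ();
    }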
diff --git a/malloc/malloc-hooks.h b/malloc/malloc-hooks.h
new file mode 100644
index 0000000000..c7aa8b2d53
--- /dev/null
+++ b/malloc/malloc-hooks.h
@@ -0,0 +1,24 @@
+/* Internal declarations of malloc hooks no longer in the public API.
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef _MALLOC_HOOKS_H
+#define _MALLOC_HOOKS_H
+
+void (*__malloc_initialize_hook) (void);
+
+#endif /* _MALLOC_HOOKS_H */
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 57aca089fa..891c644df2 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -368,20 +368,8 @@ _m_printf(const char *fmt, ...)
#ifndef MALLOC_ALIGNMENT
-# if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
-/* This is the correct definition when there is no past ABI to constrain it.
-
- Among configurations with a past ABI constraint, it differs from
- 2*SIZE_SZ only on powerpc32. For the time being, changing this is
- causing more compatibility problems due to malloc_get_state and
- malloc_set_state than will returning blocks not adequately aligned for
- long double objects under -mlong-double-128. */
-
-# define MALLOC_ALIGNMENT (2 *SIZE_SZ < __alignof__ (long double) \
- ? __alignof__ (long double) : 2 *SIZE_SZ)
-# else
-# define MALLOC_ALIGNMENT (2 *SIZE_SZ)
-# endif
+# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
+ ? __alignof__ (long double) : 2 * SIZE_SZ)
#endif
/* The corresponding bit mask value */
@@ -519,6 +507,15 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
#define HAVE_MREMAP 0
#endif
+/* We may need to support __malloc_initialize_hook for backwards
+ compatibility. */
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
+# define HAVE_MALLOC_INIT_HOOK 1
+#else
+# define HAVE_MALLOC_INIT_HOOK 0
+#endif
+
/*
This version of malloc supports the standard SVID/XPG mallinfo
@@ -1852,6 +1849,19 @@ static struct malloc_state main_arena =
.attached_threads = 1
};
+/* These variables are used for undumping support. Chunks are marked
+ as using mmap, but we leave them alone if they fall into this
+ range. NB: The chunk size for these chunks only includes the
+ initial size field (of SIZE_SZ bytes), there is no trailing size
+ field (unlike with regular mmapped chunks). */
+static mchunkptr dumped_main_arena_start; /* Inclusive. */
+static mchunkptr dumped_main_arena_end; /* Exclusive. */
+
+/* True if the pointer falls into the dumped arena. Use this after
+ chunk_is_mmapped indicates a chunk is mmapped. */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+ ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
/* There is only one instance of the malloc parameters. */
static struct malloc_par mp_ =
@@ -1934,7 +1944,12 @@ static void *realloc_hook_ini (void *ptr, size_t sz,
static void *memalign_hook_ini (size_t alignment, size_t sz,
const void *caller) __THROW;
+#if HAVE_MALLOC_INIT_HOOK
void weak_variable (*__malloc_initialize_hook) (void) = NULL;
+compat_symbol (libc, __malloc_initialize_hook,
+ __malloc_initialize_hook, GLIBC_2_0);
+#endif
+
void weak_variable (*__free_hook) (void *__ptr,
const void *) = NULL;
void *weak_variable (*__malloc_hook)
@@ -2043,7 +2058,7 @@ do_check_chunk (mstate av, mchunkptr p)
assert (prev_inuse (p));
}
}
- else
+ else if (!DUMPED_MAIN_ARENA_CHUNK (p))
{
/* address is outside main heap */
if (contiguous (av) && av->top != initial_top (av))
@@ -2927,6 +2942,11 @@ munmap_chunk (mchunkptr p)
assert (chunk_is_mmapped (p));
+ /* Do nothing if the chunk is a faked mmapped chunk in the dumped
+ main arena. We never free this memory. */
+ if (DUMPED_MAIN_ARENA_CHUNK (p))
+ return;
+
uintptr_t block = (uintptr_t) p - p->prev_size;
size_t total_size = p->prev_size + size;
#if 0
@@ -3234,10 +3254,12 @@ __libc_free (void *mem)
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
- /* see if the dynamic brk/mmap threshold needs adjusting */
+ /* See if the dynamic brk/mmap threshold needs adjusting.
+ Dumped fake mmapped chunks do not affect the threshold. */
if (!mp_.no_dyn_threshold
&& p->size > mp_.mmap_threshold
- && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
+ && p->size <= DEFAULT_MMAP_THRESHOLD_MAX
+ && !DUMPED_MAIN_ARENA_CHUNK (p))
{
mp_.mmap_threshold = chunksize (p);
mp_.trim_threshold = 2 * mp_.mmap_threshold;
@@ -3290,12 +3312,15 @@ __libc_realloc (void *oldmem, size_t bytes)
else
ar_ptr = arena_for_chunk (oldp);
- /* Little security check which won't hurt performance: the
- allocator never wrapps around at the end of the address space.
- Therefore we can exclude some size values which might appear
- here by accident or by "design" from some intruder. */
- if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
- || __builtin_expect (misaligned_chunk (oldp), 0))
+ /* Little security check which won't hurt performance: the allocator
+ never wraps around at the end of the address space. Therefore
+ we can exclude some size values which might appear here by
+ accident or by "design" from some intruder. We need to bypass
+ this check for dumped fake mmap chunks from the old main arena
+ because the new malloc may provide additional alignment. */
+ if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
+ || __builtin_expect (misaligned_chunk (oldp), 0))
+ && !DUMPED_MAIN_ARENA_CHUNK (oldp))
{
malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
ar_ptr);
@@ -3306,6 +3331,24 @@ __libc_realloc (void *oldmem, size_t bytes)
if (chunk_is_mmapped (oldp))
{
+ /* If this is a faked mmapped chunk from the dumped main arena,
+ always make a copy (and do not free the old chunk). */
+ if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+ {
+ /* Must alloc, copy, free. */
+ void *newmem = __libc_malloc (bytes);
+ if (newmem == 0)
+ return NULL;
+ /* Copy as many bytes as are available from the old chunk
+ and fit into the new size. NB: The overhead for faked
+ mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
+ regular mmapped chunks. */
+ if (bytes > oldsize - SIZE_SZ)
+ bytes = oldsize - SIZE_SZ;
+ memcpy (newmem, oldmem, bytes);
+ return newmem;
+ }
+
void *newmem;
#if HAVE_MREMAP
@@ -5139,7 +5182,12 @@ musable (void *mem)
return malloc_check_get_size (p);
if (chunk_is_mmapped (p))
- return chunksize (p) - 2 * SIZE_SZ;
+ {
+ if (DUMPED_MAIN_ARENA_CHUNK (p))
+ return chunksize (p) - SIZE_SZ;
+ else
+ return chunksize (p) - 2 * SIZE_SZ;
+ }
else if (inuse (p))
return chunksize (p) - SIZE_SZ;
}
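The musable change is visible to callers through malloc_usable_size: a dumped fake mmapped chunk carries only the leading size field (SIZE_SZ) as overhead, while a regular mmapped chunk carries 2 * SIZE_SZ. A small caller-side sketch of the interface (ordinary allocations, nothing specific to dumped heaps):

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
      void *p = malloc (100);
      if (p == NULL)
        return 1;
      /* At least 100; the exact figure depends on the chunk kind
         and its per-chunk overhead (SIZE_SZ vs. 2 * SIZE_SZ).  */
      printf ("usable size: %zu\n", malloc_usable_size (p));
      free (p);
      return 0;
    }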
diff --git a/malloc/malloc.h b/malloc/malloc.h
index d95a3157a3..54b1862035 100644
--- a/malloc/malloc.h
+++ b/malloc/malloc.h
@@ -141,11 +141,6 @@ extern void *malloc_get_state (void) __THROW;
malloc_get_state(). */
extern int malloc_set_state (void *__ptr) __THROW;
-/* Called once when malloc is initialized; redefining this variable in
- the application provides the preferred way to set up the hook
- pointers. */
-extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void)
-__MALLOC_DEPRECATED;
/* Hooks for debugging and user-defined versions. */
extern void (*__MALLOC_HOOK_VOLATILE __free_hook) (void *__ptr,
const void *)
diff --git a/malloc/tst-mallocfork2.c b/malloc/tst-mallocfork2.c
index a9e3e94aad..109c1b922a 100644
--- a/malloc/tst-mallocfork2.c
+++ b/malloc/tst-mallocfork2.c
@@ -25,6 +25,7 @@
still make fork unsafe, even in single-threaded processes. */
#include <errno.h>
+#include <sched.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
@@ -43,6 +44,9 @@ enum { malloc_maximum_size = 70000 };
/* How many signals need to be delivered before the test exits. */
enum { signal_count = 1000 };
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
/* Process ID of the subprocess which sends SIGUSR1 signals. */
static pid_t sigusr1_sender_pid;
@@ -55,14 +59,6 @@ static volatile sig_atomic_t sigusr1_received;
progress. Checked by liveness_signal_handler. */
static volatile sig_atomic_t progress_indicator = 1;
-/* Write the message to standard output. Usable from signal
- handlers. */
-static void
-write_message (const char *str)
-{
- write (STDOUT_FILENO, str, strlen (str));
-}
-
static void
sigusr1_handler (int signo)
{
@@ -70,7 +66,9 @@ sigusr1_handler (int signo)
signals from the subprocess. */
if (sigusr1_received)
return;
- if (kill (sigusr1_sender_pid, SIGSTOP) != 0)
+ /* sigusr1_sender_pid might not be initialized in the parent when
+ the first SIGUSR1 signal arrives. */
+ if (sigusr1_sender_pid > 0 && kill (sigusr1_sender_pid, SIGSTOP) != 0)
{
write_message ("error: kill (SIGSTOP)\n");
abort ();
@@ -123,6 +121,9 @@ signal_sender (int signo, bool sleep)
}
if (sleep)
usleep (1 * 1000 * 1000);
+ else
+ /* Reduce the rate at which we send signals. */
+ sched_yield ();
}
}
@@ -207,6 +208,3 @@ do_test (void)
return 0;
}
-
-#define TEST_FUNCTION do_test ()
-#include "../test-skeleton.c"
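The write_message helper deleted above now comes from test-skeleton.c, which this change includes near the top of the file instead of at the end. Its body, reproduced from the deleted lines, is the usual async-signal-safe logging idiom: write(2) may be called from a signal handler, printf may not.

    #include <string.h>
    #include <unistd.h>

    /* Async-signal-safe: safe to call from sigusr1_handler.  */
    static void
    write_message (const char *str)
    {
      write (STDOUT_FILENO, str, strlen (str));
    }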