commit 8a727af925be63aa6ea0f5f90e16751fd541626b
Author: Florian Weimer <fweimer@redhat.com>
Date:   Thu Apr 14 09:18:30 2016 +0200

    malloc: Remove malloc hooks from fork handler

    The fork handler now runs so late that there is no risk anymore that
    other fork handlers in the same thread use malloc, so it is no
    longer necessary to install malloc hooks which made a subset
    of malloc functionality available to the thread that called fork.
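To make the ordering guarantee concrete, here is a minimal sketch of hypothetical application code (not part of this patch): a pthread_atfork prepare handler that calls malloc. Assuming the ordering the commit message describes, glibc takes the arena locks only after all user prepare handlers have run, so the allocation below is safe without any hook-based fallback.

/* Hypothetical example, not from the glibc tree: a prepare handler
   that allocates.  It runs before malloc's own fork handler takes
   the arena locks, so it cannot deadlock and needs no malloc hooks.  */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

static void *scratch;

static void
prepare (void)
{
  scratch = malloc (64);        /* arena locks are still free here */
}

static void
release (void)
{
  free (scratch);               /* runs after malloc's locks are released */
  scratch = NULL;
}

int
main (void)
{
  /* release runs in the parent, and again on the child's own copy
     of the heap in the child.  */
  pthread_atfork (prepare, release, release);
  if (fork () == 0)
    _exit (0);
  return 0;
}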
Index: b/malloc/arena.c
===================================================================
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -137,79 +137,6 @@ int __malloc_initialized = -1;
 
 /* atfork support.  */
 
-static void *(*save_malloc_hook)(size_t __size, const void *);
-static void (*save_free_hook) (void *__ptr, const void *);
-static void *save_arena;
-
-/* Magic value for the thread-specific arena pointer when
-   malloc_atfork() is in use.  */
-
-# define ATFORK_ARENA_PTR ((void *) -1)
-
-/* The following hooks are used while the `atfork' handling mechanism
-   is active.  */
-
-static void *
-malloc_atfork (size_t sz, const void *caller)
-{
-  void *vptr = NULL;
-  void *victim;
-
-  tsd_getspecific (arena_key, vptr);
-  if (vptr == ATFORK_ARENA_PTR)
-    {
-      /* We are the only thread that may allocate at all.  */
-      if (save_malloc_hook != malloc_check)
-        {
-          return _int_malloc (&main_arena, sz);
-        }
-      else
-        {
-          if (top_check () < 0)
-            return 0;
-
-          victim = _int_malloc (&main_arena, sz + 1);
-          return mem2mem_check (victim, sz);
-        }
-    }
-  else
-    {
-      /* Suspend the thread until the `atfork' handlers have completed.
-         By that time, the hooks will have been reset as well, so that
-         mALLOc() can be used again.  */
-      (void) mutex_lock (&list_lock);
-      (void) mutex_unlock (&list_lock);
-      return __libc_malloc (sz);
-    }
-}
-
-static void
-free_atfork (void *mem, const void *caller)
-{
-  void *vptr = NULL;
-  mstate ar_ptr;
-  mchunkptr p;                          /* chunk corresponding to mem */
-
-  if (mem == 0)                         /* free(0) has no effect */
-    return;
-
-  p = mem2chunk (mem);          /* do not bother to replicate free_check here */
-
-  if (chunk_is_mmapped (p))             /* release mmapped memory. */
-    {
-      munmap_chunk (p);
-      return;
-    }
-
-  ar_ptr = arena_for_chunk (p);
-  tsd_getspecific (arena_key, vptr);
-  _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
-}
-
-
-/* Counter for number of times the list is locked by the same thread.  */
-static unsigned int atfork_recursive_cntr;
-
 /* The following three functions are called around fork from a
    multi-threaded process.  We do not use the general fork handler
    mechanism to make sure that our handlers are the last ones being
@@ -219,65 +146,29 @@ static unsigned int atfork_recursive_cnt
 void
 __malloc_fork_lock_parent (void)
 {
-  mstate ar_ptr;
-
   if (__malloc_initialized < 1)
     return;
 
   /* We do not acquire free_list_lock here because we completely
      reconstruct free_list in __malloc_fork_unlock_child.  */
 
-  if (mutex_trylock (&list_lock))
-    {
-      void *my_arena;
-      tsd_getspecific (arena_key, my_arena);
-      if (my_arena == ATFORK_ARENA_PTR)
-        /* This is the same thread which already locks the global list.
-           Just bump the counter.  */
-        goto out;
-
-      /* This thread has to wait its turn.  */
-      (void) mutex_lock (&list_lock);
-    }
-  for (ar_ptr = &main_arena;; )
+  (void) mutex_lock (&list_lock);
+  for (mstate ar_ptr = &main_arena;; )
     {
       (void) mutex_lock (&ar_ptr->mutex);
       ar_ptr = ar_ptr->next;
       if (ar_ptr == &main_arena)
        break;
     }
-  save_malloc_hook = __malloc_hook;
-  save_free_hook = __free_hook;
-  __malloc_hook = malloc_atfork;
-  __free_hook = free_atfork;
-  /* Only the current thread may perform malloc/free calls now.
-     save_arena will be reattached to the current thread, in
-     __malloc_fork_lock_parent, so save_arena->attached_threads is not
-     updated.  */
-  tsd_getspecific (arena_key, save_arena);
-  tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
-out:
-  ++atfork_recursive_cntr;
 }
 
 void
 __malloc_fork_unlock_parent (void)
 {
-  mstate ar_ptr;
-
   if (__malloc_initialized < 1)
     return;
 
-  if (--atfork_recursive_cntr != 0)
-    return;
-
-  /* Replace ATFORK_ARENA_PTR with save_arena.
-     save_arena->attached_threads was not changed in
-     __malloc_fork_lock_parent and is still correct.  */
-  tsd_setspecific (arena_key, save_arena);
-  __malloc_hook = save_malloc_hook;
-  __free_hook = save_free_hook;
-  for (ar_ptr = &main_arena;; )
+  for (mstate ar_ptr = &main_arena;; )
     {
       (void) mutex_unlock (&ar_ptr->mutex);
       ar_ptr = ar_ptr->next;
@@ -290,25 +181,22 @@ __malloc_fork_unlock_parent (void)
 void
 __malloc_fork_unlock_child (void)
 {
-  mstate ar_ptr;
-
   if (__malloc_initialized < 1)
     return;
 
-  tsd_setspecific (arena_key, save_arena);
-  __malloc_hook = save_malloc_hook;
-  __free_hook = save_free_hook;
+  void *vptr = NULL;
+  mstate current_arena = tsd_getspecific (arena_key, vptr);
 
-  /* Push all arenas to the free list, except save_arena, which is
+  /* Push all arenas to the free list, except current_arena, which is
      attached to the current thread.  */
   mutex_init (&free_list_lock);
-  if (save_arena != NULL)
-    ((mstate) save_arena)->attached_threads = 1;
+  if (current_arena != NULL)
+    current_arena->attached_threads = 1;
   free_list = NULL;
-  for (ar_ptr = &main_arena;; )
+  for (mstate ar_ptr = &main_arena;; )
     {
       mutex_init (&ar_ptr->mutex);
-      if (ar_ptr != save_arena)
+      if (ar_ptr != current_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
@@ -321,7 +209,6 @@ __malloc_fork_unlock_child (void)
     }
 
   mutex_init (&list_lock);
-  atfork_recursive_cntr = 0;
 }
 
 /* Initialization routine.  */

Index: b/malloc/malloc.c
===================================================================
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1075,8 +1075,6 @@ static void* realloc_check(void* oldme
                               const void *caller);
 static void* memalign_check(size_t alignment, size_t bytes,
                             const void *caller);
-static void* malloc_atfork(size_t sz, const void *caller);
-static void free_atfork(void* mem, const void *caller);
 
 /* ------------------ MMAP support ------------------ */
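For context, here is a hedged test sketch (hypothetical, not part of this patch or the glibc test suite) of the property the three handlers in the diff preserve: even if another thread is inside malloc at the moment of fork, the child inherits a consistent heap, because __malloc_fork_lock_parent takes every arena mutex before the fork and __malloc_fork_unlock_child reinitializes them afterwards.

/* Hypothetical stress sketch: one thread churns the allocator while
   the main thread forks repeatedly.  The single-threaded child must
   still be able to allocate without deadlocking on an arena mutex
   that a now-gone thread held at fork time.  */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static void *
churn (void *arg)
{
  for (;;)
    free (malloc (32));         /* keep the arena locks busy */
}

int
main (void)
{
  pthread_t thr;
  pthread_create (&thr, NULL, churn, NULL);

  for (int i = 0; i < 100; ++i)
    {
      pid_t pid = fork ();
      if (pid == 0)
        {
          /* Single-threaded child: this malloc must not deadlock.  */
          void *p = malloc (128);
          _exit (p != NULL ? 0 : 1);
        }
      int status;
      waitpid (pid, &status, 0);
      if (status != 0)
        {
          puts ("child failed");
          return 1;
        }
    }
  return 0;
}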