Fix deadlock between fflush, getdelim, and fork
Florian Weimer 2016-05-06 20:39:34 +02:00
parent 53d6baa069
commit c3f1918b6c
6 changed files with 928 additions and 0 deletions

glibc-rh1332917-1.patch (new file, 82 lines)
@@ -0,0 +1,82 @@
commit 59eda029a8a35e5f4e5cd7be0f84c6629e48ec6e
Author: Florian Weimer <fweimer@redhat.com>
Date: Fri Feb 19 17:07:45 2016 +0100
malloc: Remove NO_THREADS
No functional change. It was not possible to build without
threading support before.
Index: b/malloc/arena.c
===================================================================
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -135,8 +135,6 @@ int __malloc_initialized = -1;
/**************************************************************************/
-#ifndef NO_THREADS
-
/* atfork support. */
static void *(*save_malloc_hook)(size_t __size, const void *);
@@ -341,7 +339,6 @@ ptmalloc_unlock_all2 (void)
# define ptmalloc_unlock_all2 ptmalloc_unlock_all
# endif
-#endif /* !NO_THREADS */
/* Initialization routine. */
#include <string.h>
Index: b/malloc/malloc.c
===================================================================
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1074,10 +1074,8 @@ static void* realloc_check(void* oldme
const void *caller);
static void* memalign_check(size_t alignment, size_t bytes,
const void *caller);
-#ifndef NO_THREADS
static void* malloc_atfork(size_t sz, const void *caller);
static void free_atfork(void* mem, const void *caller);
-#endif
/* ------------------ MMAP support ------------------ */
Index: b/sysdeps/generic/malloc-machine.h
===================================================================
--- a/sysdeps/generic/malloc-machine.h
+++ b/sysdeps/generic/malloc-machine.h
@@ -22,30 +22,9 @@
#include <atomic.h>
-#ifndef mutex_init /* No threads, provide dummy macros */
+#ifndef mutex_init
-# define NO_THREADS
-
-/* The mutex functions used to do absolutely nothing, i.e. lock,
- trylock and unlock would always just return 0. However, even
- without any concurrently active threads, a mutex can be used
- legitimately as an `in use' flag. To make the code that is
- protected by a mutex async-signal safe, these macros would have to
- be based on atomic test-and-set operations, for example. */
-typedef int mutex_t;
-
-# define mutex_init(m) (*(m) = 0)
-# define mutex_lock(m) ({ *(m) = 1; 0; })
-# define mutex_trylock(m) (*(m) ? 1 : ((*(m) = 1), 0))
-# define mutex_unlock(m) (*(m) = 0)
-# define MUTEX_INITIALIZER (0)
-
-typedef void *tsd_key_t;
-# define tsd_key_create(key, destr) do {} while(0)
-# define tsd_setspecific(key, data) ((key) = (data))
-# define tsd_getspecific(key, vptr) (vptr = (key))
-
-# define thread_atfork(prepare, parent, child) do {} while(0)
+#error NO_THREADS no longer supported
#endif /* !defined mutex_init */

glibc-rh1332917-2.patch (new file, 554 lines)
@@ -0,0 +1,554 @@
commit 29d794863cd6e03115d3670707cc873a9965ba92
Author: Florian Weimer <fweimer@redhat.com>
Date: Thu Apr 14 09:17:02 2016 +0200
malloc: Run fork handler as late as possible [BZ #19431]
Previously, a thread M invoking fork would acquire locks in this order:
(M1) malloc arena locks (in the registered fork handler)
(M2) libio list lock
A thread F invoking flush (NULL) would acquire locks in this order:
(F1) libio list lock
(F2) individual _IO_FILE locks
A thread G running getdelim would use this order:
(G1) _IO_FILE lock
(G2) malloc arena lock
After executing (M1), (F1), (G1), none of the threads can make progress.
This commit changes the fork lock order to:
(M'1) libio list lock
(M'2) malloc arena locks
It explicitly encodes the lock order in the implementations of fork,
and does not rely on the registration order, thus avoiding the deadlock.
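To make the cycle concrete, here is a minimal, illustrative reproducer sketch. It is not part of the patch; the thread names and the file path are only examples. Each thread runs one of the three call paths once, with comments naming the glibc-internal locks each path acquires; the regression test added below loops the same operations concurrently for several seconds.

/* Sketch of the three call paths (M), (F), (G) described above.
   Not part of the patch; compile with -pthread against glibc.  */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static FILE *fp;   /* stream shared by the fflush and getline paths */

/* (M) fork: with the old ordering, the malloc atfork handler took the
   arena locks (M1) before fork acquired the libio list lock (M2).  */
static void *
fork_path (void *arg)
{
  pid_t pid = fork ();
  if (pid == 0)
    _exit (0);
  if (pid > 0)
    waitpid (pid, NULL, 0);
  return NULL;
}

/* (F) fflush (NULL): libio list lock (F1), then each _IO_FILE lock (F2).  */
static void *
flush_path (void *arg)
{
  fflush (NULL);
  return NULL;
}

/* (G) getline/getdelim: the _IO_FILE lock (G1), then a malloc arena
   lock (G2) when the line buffer is allocated.  */
static void *
getline_path (void *arg)
{
  char *line = NULL;
  size_t len = 0;
  if (getline (&line, &len, fp) < 0)
    perror ("getline");
  free (line);
  return NULL;
}

int
main (void)
{
  fp = fopen ("/etc/passwd", "r");   /* any readable text file works */
  if (fp == NULL)
    {
      perror ("fopen");
      return 1;
    }
  pthread_t m, f, g;
  pthread_create (&m, NULL, fork_path, NULL);
  pthread_create (&f, NULL, flush_path, NULL);
  pthread_create (&g, NULL, getline_path, NULL);
  pthread_join (m, NULL);
  pthread_join (f, NULL);
  pthread_join (g, NULL);
  fclose (fp);
  return 0;
}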
Index: b/malloc/arena.c
===================================================================
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -141,10 +141,6 @@ static void *(*save_malloc_hook)(size_t
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;
-# ifdef ATFORK_MEM
-ATFORK_MEM;
-# endif
-
/* Magic value for the thread-specific arena pointer when
malloc_atfork() is in use. */
@@ -214,14 +210,14 @@ free_atfork (void *mem, const void *call
/* Counter for number of times the list is locked by the same thread. */
static unsigned int atfork_recursive_cntr;
-/* The following two functions are registered via thread_atfork() to
- make sure that the mutexes remain in a consistent state in the
- fork()ed version of a thread. Also adapt the malloc and free hooks
- temporarily, because the `atfork' handler mechanism may use
- malloc/free internally (e.g. in LinuxThreads). */
+/* The following three functions are called around fork from a
+ multi-threaded process. We do not use the general fork handler
+ mechanism to make sure that our handlers are the last ones being
+ called, so that other fork handlers can use the malloc
+ subsystem. */
-static void
-ptmalloc_lock_all (void)
+void
+__malloc_fork_lock_parent (void)
{
mstate ar_ptr;
@@ -229,7 +225,7 @@ ptmalloc_lock_all (void)
return;
/* We do not acquire free_list_lock here because we completely
- reconstruct free_list in ptmalloc_unlock_all2. */
+ reconstruct free_list in __malloc_fork_unlock_child. */
if (mutex_trylock (&list_lock))
{
@@ -256,7 +252,7 @@ ptmalloc_lock_all (void)
__free_hook = free_atfork;
/* Only the current thread may perform malloc/free calls now.
save_arena will be reattached to the current thread, in
- ptmalloc_lock_all, so save_arena->attached_threads is not
+ __malloc_fork_lock_parent, so save_arena->attached_threads is not
updated. */
tsd_getspecific (arena_key, save_arena);
tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
@@ -264,8 +260,8 @@ out:
++atfork_recursive_cntr;
}
-static void
-ptmalloc_unlock_all (void)
+void
+__malloc_fork_unlock_parent (void)
{
mstate ar_ptr;
@@ -276,8 +272,8 @@ ptmalloc_unlock_all (void)
return;
/* Replace ATFORK_ARENA_PTR with save_arena.
- save_arena->attached_threads was not changed in ptmalloc_lock_all
- and is still correct. */
+ save_arena->attached_threads was not changed in
+ __malloc_fork_lock_parent and is still correct. */
tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
@@ -291,15 +287,8 @@ ptmalloc_unlock_all (void)
(void) mutex_unlock (&list_lock);
}
-# ifdef __linux__
-
-/* In NPTL, unlocking a mutex in the child process after a
- fork() is currently unsafe, whereas re-initializing it is safe and
- does not leak resources. Therefore, a special atfork handler is
- installed for the child. */
-
-static void
-ptmalloc_unlock_all2 (void)
+void
+__malloc_fork_unlock_child (void)
{
mstate ar_ptr;
@@ -335,11 +324,6 @@ ptmalloc_unlock_all2 (void)
atfork_recursive_cntr = 0;
}
-# else
-
-# define ptmalloc_unlock_all2 ptmalloc_unlock_all
-# endif
-
/* Initialization routine. */
#include <string.h>
extern char **_environ;
@@ -409,7 +393,6 @@ ptmalloc_init (void)
tsd_key_create (&arena_key, NULL);
tsd_setspecific (arena_key, (void *) &main_arena);
- thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
const char *s = NULL;
if (__glibc_likely (_environ != NULL))
{
@@ -484,14 +467,6 @@ ptmalloc_init (void)
__malloc_initialized = 1;
}
-/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
-#ifdef thread_atfork_static
-thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all, \
- ptmalloc_unlock_all2)
-#endif
-
-
-
/* Managing heaps and arenas (for concurrent threads) */
#if MALLOC_DEBUG > 1
@@ -838,7 +813,8 @@ _int_new_arena (size_t size)
limit is reached). At this point, some arena has to be attached
to two threads. We could acquire the arena lock before list_lock
to make it less likely that reused_arena picks this new arena,
- but this could result in a deadlock with ptmalloc_lock_all. */
+ but this could result in a deadlock with
+ __malloc_fork_lock_parent. */
(void) mutex_lock (&a->mutex);
Index: b/malloc/malloc-internal.h
===================================================================
--- /dev/null
+++ b/malloc/malloc-internal.h
@@ -0,0 +1,32 @@
+/* Internal declarations for malloc, for use within libc.
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef _MALLOC_PRIVATE_H
+#define _MALLOC_PRIVATE_H
+
+/* Called in the parent process before a fork. */
+void __malloc_fork_lock_parent (void) internal_function attribute_hidden;
+
+/* Called in the parent process after a fork. */
+void __malloc_fork_unlock_parent (void) internal_function attribute_hidden;
+
+/* Called in the child process after a fork. */
+void __malloc_fork_unlock_child (void) internal_function attribute_hidden;
+
+
+#endif /* _MALLOC_PRIVATE_H */
Index: b/malloc/malloc.c
===================================================================
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -244,6 +244,7 @@
/* For ALIGN_UP. */
#include <libc-internal.h>
+#include <malloc/malloc-internal.h>
/*
Debugging:
Index: b/malloc/tst-malloc-fork-deadlock.c
===================================================================
--- /dev/null
+++ b/malloc/tst-malloc-fork-deadlock.c
@@ -0,0 +1,220 @@
+/* Test concurrent fork, getline, and fflush (NULL).
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#include <sys/wait.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <time.h>
+#include <string.h>
+#include <signal.h>
+
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
+
+enum {
+ /* Number of threads which call fork. */
+ fork_thread_count = 4,
+ /* Number of threads which call getline (and, indirectly,
+ malloc). */
+ read_thread_count = 8,
+};
+
+static bool termination_requested;
+
+static void *
+fork_thread_function (void *closure)
+{
+ while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+ {
+ pid_t pid = fork ();
+ if (pid < 0)
+ {
+ printf ("error: fork: %m\n");
+ abort ();
+ }
+ else if (pid == 0)
+ _exit (17);
+
+ int status;
+ if (waitpid (pid, &status, 0) < 0)
+ {
+ printf ("error: waitpid: %m\n");
+ abort ();
+ }
+ if (!WIFEXITED (status) || WEXITSTATUS (status) != 17)
+ {
+ printf ("error: waitpid returned invalid status: %d\n", status);
+ abort ();
+ }
+ }
+ return NULL;
+}
+
+static char *file_to_read;
+
+static void *
+read_thread_function (void *closure)
+{
+ FILE *f = fopen (file_to_read, "r");
+ if (f == NULL)
+ {
+ printf ("error: fopen (%s): %m\n", file_to_read);
+ abort ();
+ }
+
+ while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+ {
+ rewind (f);
+ char *line = NULL;
+ size_t line_allocated = 0;
+ ssize_t ret = getline (&line, &line_allocated, f);
+ if (ret < 0)
+ {
+ printf ("error: getline: %m\n");
+ abort ();
+ }
+ free (line);
+ }
+ fclose (f);
+
+ return NULL;
+}
+
+static void *
+flushall_thread_function (void *closure)
+{
+ while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+ if (fflush (NULL) != 0)
+ {
+ printf ("error: fflush (NULL): %m\n");
+ abort ();
+ }
+ return NULL;
+}
+
+static void
+create_threads (pthread_t *threads, size_t count, void *(*func) (void *))
+{
+ for (size_t i = 0; i < count; ++i)
+ {
+ int ret = pthread_create (threads + i, NULL, func, NULL);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_create: %m\n");
+ abort ();
+ }
+ }
+}
+
+static void
+join_threads (pthread_t *threads, size_t count)
+{
+ for (size_t i = 0; i < count; ++i)
+ {
+ int ret = pthread_join (threads[i], NULL);
+ if (ret != 0)
+ {
+ errno = ret;
+ printf ("error: pthread_join: %m\n");
+ abort ();
+ }
+ }
+}
+
+/* Create a file which consists of a single long line, and assigns
+ file_to_read. The hope is that this triggers an allocation in
+ getline which needs a lock. */
+static void
+create_file_with_large_line (void)
+{
+ int fd = create_temp_file ("bug19431-large-line", &file_to_read);
+ if (fd < 0)
+ {
+ printf ("error: create_temp_file: %m\n");
+ abort ();
+ }
+ FILE *f = fdopen (fd, "w+");
+ if (f == NULL)
+ {
+ printf ("error: fdopen: %m\n");
+ abort ();
+ }
+ for (int i = 0; i < 50000; ++i)
+ fputc ('x', f);
+ fputc ('\n', f);
+ if (ferror (f))
+ {
+ printf ("error: fputc: %m\n");
+ abort ();
+ }
+ if (fclose (f) != 0)
+ {
+ printf ("error: fclose: %m\n");
+ abort ();
+ }
+}
+
+static int
+do_test (void)
+{
+ /* Make sure that we do not exceed the arena limit with the number
+ of threads we configured. */
+ if (mallopt (M_ARENA_MAX, 400) == 0)
+ {
+ printf ("error: mallopt (M_ARENA_MAX) failed\n");
+ return 1;
+ }
+
+ /* Leave some room for shutting down all threads gracefully. */
+ int timeout = 3;
+ if (timeout > TIMEOUT)
+ timeout = TIMEOUT - 1;
+
+ create_file_with_large_line ();
+
+ pthread_t fork_threads[fork_thread_count];
+ create_threads (fork_threads, fork_thread_count, fork_thread_function);
+ pthread_t read_threads[read_thread_count];
+ create_threads (read_threads, read_thread_count, read_thread_function);
+ pthread_t flushall_threads[1];
+ create_threads (flushall_threads, 1, flushall_thread_function);
+
+ struct timespec ts = {timeout, 0};
+ if (nanosleep (&ts, NULL))
+ {
+ printf ("error: error: nanosleep: %m\n");
+ abort ();
+ }
+
+ __atomic_store_n (&termination_requested, true, __ATOMIC_RELAXED);
+
+ join_threads (flushall_threads, 1);
+ join_threads (read_threads, read_thread_count);
+ join_threads (fork_threads, fork_thread_count);
+
+ free (file_to_read);
+
+ return 0;
+}
Index: b/manual/memory.texi
===================================================================
--- a/manual/memory.texi
+++ b/manual/memory.texi
@@ -1055,14 +1055,6 @@ systems that do not support @w{ISO C11}.
@c _dl_addr_inside_object ok
@c determine_info ok
@c __rtld_lock_unlock_recursive (dl_load_lock) @aculock
-@c thread_atfork @asulock @aculock @acsfd @acsmem
-@c __register_atfork @asulock @aculock @acsfd @acsmem
-@c lll_lock (__fork_lock) @asulock @aculock
-@c fork_handler_alloc @asulock @aculock @acsfd @acsmem
-@c calloc dup @asulock @aculock @acsfd @acsmem
-@c __linkin_atfork ok
-@c catomic_compare_and_exchange_bool_acq ok
-@c lll_unlock (__fork_lock) @aculock
@c *_environ @mtsenv
@c next_env_entry ok
@c strcspn dup ok
Index: b/sysdeps/mach/hurd/fork.c
===================================================================
--- a/sysdeps/mach/hurd/fork.c
+++ b/sysdeps/mach/hurd/fork.c
@@ -26,6 +26,7 @@
#include <assert.h>
#include "hurdmalloc.h" /* XXX */
#include <tls.h>
+#include <malloc/malloc-internal.h>
#undef __fork
@@ -107,6 +108,12 @@ __fork (void)
/* Run things that prepare for forking before we create the task. */
RUN_HOOK (_hurd_fork_prepare_hook, ());
+ /* Acquire malloc locks. This needs to come last because fork
+ handlers may use malloc, and the libio list lock has an
+ indirect malloc dependency as well (via the getdelim
+ function). */
+ __malloc_fork_lock_parent ();
+
/* Lock things that want to be locked before we fork. */
{
void *const *p;
@@ -608,6 +615,9 @@ __fork (void)
nthreads * sizeof (*threads));
}
+ /* Release malloc locks. */
+ __malloc_fork_unlock_parent ();
+
/* Run things that want to run in the parent to restore it to
normality. Usually prepare hooks and parent hooks are
symmetrical: the prepare hook arrests state in some way for the
@@ -659,6 +669,9 @@ __fork (void)
/* Forking clears the trace flag. */
__sigemptyset (&_hurdsig_traced);
+ /* Release malloc locks. */
+ __malloc_fork_unlock_child ();
+
/* Run things that want to run in the child task to set up. */
RUN_HOOK (_hurd_fork_child_hook, ());
Index: b/sysdeps/nptl/fork.c
===================================================================
--- a/sysdeps/nptl/fork.c
+++ b/sysdeps/nptl/fork.c
@@ -31,7 +31,7 @@
#include <fork.h>
#include <arch-fork.h>
#include <futex-internal.h>
-
+#include <malloc/malloc-internal.h>
static void
fresetlockfiles (void)
@@ -111,6 +111,11 @@ __libc_fork (void)
_IO_list_lock ();
+ /* Acquire malloc locks. This needs to come last because fork
+ handlers may use malloc, and the libio list lock has an indirect
+ malloc dependency as well (via the getdelim function). */
+ __malloc_fork_lock_parent ();
+
#ifndef NDEBUG
pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif
@@ -168,6 +173,9 @@ __libc_fork (void)
# endif
#endif
+ /* Release malloc locks. */
+ __malloc_fork_unlock_child ();
+
/* Reset the file list. These are recursive mutexes. */
fresetlockfiles ();
@@ -209,6 +217,9 @@ __libc_fork (void)
/* Restore the PID value. */
THREAD_SETMEM (THREAD_SELF, pid, parentpid);
+ /* Release malloc locks, parent process variant. */
+ __malloc_fork_unlock_parent ();
+
/* We execute this even if the 'fork' call failed. */
_IO_list_unlock ();

glibc-rh1332917-3.patch (new file, 218 lines)
@@ -0,0 +1,218 @@
commit 8a727af925be63aa6ea0f5f90e16751fd541626b
Author: Florian Weimer <fweimer@redhat.com>
Date: Thu Apr 14 09:18:30 2016 +0200
malloc: Remove malloc hooks from fork handler
The fork handler now runs so late that there is no risk anymore that
other fork handlers in the same thread use malloc, so it is no
longer necessary to install malloc hooks which made a subset
of malloc functionality available to the thread that called fork.
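The practical consequence, sketched below under the assumption of a build that includes the previous patch, is that an application's own pthread_atfork prepare handler may call malloc freely: the arena locks are now taken inside fork itself, after all registered prepare handlers have run, so the malloc_atfork/free_atfork hook machinery removed here has nothing left to do.

/* Minimal sketch, not part of the patch: a prepare handler that
   allocates.  With the reordered fork handling, the malloc arena
   locks are not yet held when this runs, so no special hooks are
   needed.  Compile with -pthread.  */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static void
prepare (void)
{
  void *p = malloc (64);   /* safe: arena locks are taken after this */
  free (p);
}

int
main (void)
{
  pthread_atfork (prepare, NULL, NULL);
  pid_t pid = fork ();
  if (pid < 0)
    {
      perror ("fork");
      return 1;
    }
  if (pid == 0)
    _exit (0);
  waitpid (pid, NULL, 0);
  puts ("fork with an allocating prepare handler completed");
  return 0;
}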
Index: b/malloc/arena.c
===================================================================
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -137,79 +137,6 @@ int __malloc_initialized = -1;
/* atfork support. */
-static void *(*save_malloc_hook)(size_t __size, const void *);
-static void (*save_free_hook) (void *__ptr, const void *);
-static void *save_arena;
-
-/* Magic value for the thread-specific arena pointer when
- malloc_atfork() is in use. */
-
-# define ATFORK_ARENA_PTR ((void *) -1)
-
-/* The following hooks are used while the `atfork' handling mechanism
- is active. */
-
-static void *
-malloc_atfork (size_t sz, const void *caller)
-{
- void *vptr = NULL;
- void *victim;
-
- tsd_getspecific (arena_key, vptr);
- if (vptr == ATFORK_ARENA_PTR)
- {
- /* We are the only thread that may allocate at all. */
- if (save_malloc_hook != malloc_check)
- {
- return _int_malloc (&main_arena, sz);
- }
- else
- {
- if (top_check () < 0)
- return 0;
-
- victim = _int_malloc (&main_arena, sz + 1);
- return mem2mem_check (victim, sz);
- }
- }
- else
- {
- /* Suspend the thread until the `atfork' handlers have completed.
- By that time, the hooks will have been reset as well, so that
- mALLOc() can be used again. */
- (void) mutex_lock (&list_lock);
- (void) mutex_unlock (&list_lock);
- return __libc_malloc (sz);
- }
-}
-
-static void
-free_atfork (void *mem, const void *caller)
-{
- void *vptr = NULL;
- mstate ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
-
- if (mem == 0) /* free(0) has no effect */
- return;
-
- p = mem2chunk (mem); /* do not bother to replicate free_check here */
-
- if (chunk_is_mmapped (p)) /* release mmapped memory. */
- {
- munmap_chunk (p);
- return;
- }
-
- ar_ptr = arena_for_chunk (p);
- tsd_getspecific (arena_key, vptr);
- _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
-}
-
-
-/* Counter for number of times the list is locked by the same thread. */
-static unsigned int atfork_recursive_cntr;
-
/* The following three functions are called around fork from a
multi-threaded process. We do not use the general fork handler
mechanism to make sure that our handlers are the last ones being
@@ -219,65 +146,29 @@ static unsigned int atfork_recursive_cnt
void
__malloc_fork_lock_parent (void)
{
- mstate ar_ptr;
-
if (__malloc_initialized < 1)
return;
/* We do not acquire free_list_lock here because we completely
reconstruct free_list in __malloc_fork_unlock_child. */
- if (mutex_trylock (&list_lock))
- {
- void *my_arena;
- tsd_getspecific (arena_key, my_arena);
- if (my_arena == ATFORK_ARENA_PTR)
- /* This is the same thread which already locks the global list.
- Just bump the counter. */
- goto out;
-
- /* This thread has to wait its turn. */
- (void) mutex_lock (&list_lock);
- }
- for (ar_ptr = &main_arena;; )
+ (void) mutex_lock (&list_lock);
+ for (mstate ar_ptr = &main_arena;; )
{
(void) mutex_lock (&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
}
- save_malloc_hook = __malloc_hook;
- save_free_hook = __free_hook;
- __malloc_hook = malloc_atfork;
- __free_hook = free_atfork;
- /* Only the current thread may perform malloc/free calls now.
- save_arena will be reattached to the current thread, in
- __malloc_fork_lock_parent, so save_arena->attached_threads is not
- updated. */
- tsd_getspecific (arena_key, save_arena);
- tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
-out:
- ++atfork_recursive_cntr;
}
void
__malloc_fork_unlock_parent (void)
{
- mstate ar_ptr;
-
if (__malloc_initialized < 1)
return;
- if (--atfork_recursive_cntr != 0)
- return;
-
- /* Replace ATFORK_ARENA_PTR with save_arena.
- save_arena->attached_threads was not changed in
- __malloc_fork_lock_parent and is still correct. */
- tsd_setspecific (arena_key, save_arena);
- __malloc_hook = save_malloc_hook;
- __free_hook = save_free_hook;
- for (ar_ptr = &main_arena;; )
+ for (mstate ar_ptr = &main_arena;; )
{
(void) mutex_unlock (&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
@@ -290,25 +181,22 @@ __malloc_fork_unlock_parent (void)
void
__malloc_fork_unlock_child (void)
{
- mstate ar_ptr;
-
if (__malloc_initialized < 1)
return;
- tsd_setspecific (arena_key, save_arena);
- __malloc_hook = save_malloc_hook;
- __free_hook = save_free_hook;
+ void *vptr = NULL;
+ mstate current_arena = tsd_getspecific (arena_key, vptr);
- /* Push all arenas to the free list, except save_arena, which is
+ /* Push all arenas to the free list, except current_arena, which is
attached to the current thread. */
mutex_init (&free_list_lock);
- if (save_arena != NULL)
- ((mstate) save_arena)->attached_threads = 1;
+ if (current_arena != NULL)
+ current_arena->attached_threads = 1;
free_list = NULL;
- for (ar_ptr = &main_arena;; )
+ for (mstate ar_ptr = &main_arena;; )
{
mutex_init (&ar_ptr->mutex);
- if (ar_ptr != save_arena)
+ if (ar_ptr != current_arena)
{
/* This arena is no longer attached to any thread. */
ar_ptr->attached_threads = 0;
@@ -321,7 +209,6 @@ __malloc_fork_unlock_child (void)
}
mutex_init (&list_lock);
- atfork_recursive_cntr = 0;
}
/* Initialization routine. */
Index: b/malloc/malloc.c
===================================================================
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1075,8 +1075,6 @@ static void* realloc_check(void* oldme
const void *caller);
static void* memalign_check(size_t alignment, size_t bytes,
const void *caller);
-static void* malloc_atfork(size_t sz, const void *caller);
-static void free_atfork(void* mem, const void *caller);
/* ------------------ MMAP support ------------------ */

glibc-rh1332917-4.patch (new file, 36 lines)
@@ -0,0 +1,36 @@
commit 186fe877f3df0b84d57dfbf0386f6332c6aa69bc
Author: Florian Weimer <fweimer@redhat.com>
Date: Thu Apr 14 12:53:03 2016 +0200
malloc: Add missing internal_function attributes on function definitions
Fixes build on i386 after commit 29d794863cd6e03115d3670707cc873a9965ba92.
Index: b/malloc/arena.c
===================================================================
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -144,6 +144,7 @@ int __malloc_initialized = -1;
subsystem. */
void
+internal_function
__malloc_fork_lock_parent (void)
{
if (__malloc_initialized < 1)
@@ -163,6 +164,7 @@ __malloc_fork_lock_parent (void)
}
void
+internal_function
__malloc_fork_unlock_parent (void)
{
if (__malloc_initialized < 1)
@@ -179,6 +181,7 @@ __malloc_fork_unlock_parent (void)
}
void
+internal_function
__malloc_fork_unlock_child (void)
{
if (__malloc_initialized < 1)

glibc-rh1332917-5.patch (new file, 27 lines)
@@ -0,0 +1,27 @@
commit 66355680f840f336eb83e7f46a873556a1bdedcc
Author: Florian Weimer <fweimer@redhat.com>
Date: Wed May 4 15:25:39 2016 +0200
malloc: Adjust header file guard in malloc-internal.h
diff --git a/malloc/malloc-internal.h b/malloc/malloc-internal.h
index b830d3f..98afd14 100644
--- a/malloc/malloc-internal.h
+++ b/malloc/malloc-internal.h
@@ -16,8 +16,8 @@
License along with the GNU C Library; see the file COPYING.LIB. If
not, see <http://www.gnu.org/licenses/>. */
-#ifndef _MALLOC_PRIVATE_H
-#define _MALLOC_PRIVATE_H
+#ifndef _MALLOC_INTERNAL_H
+#define _MALLOC_INTERNAL_H
/* Called in the parent process before a fork. */
void __malloc_fork_lock_parent (void) internal_function attribute_hidden;
@@ -29,4 +29,4 @@ void __malloc_fork_unlock_parent (void) internal_function attribute_hidden;
void __malloc_fork_unlock_child (void) internal_function attribute_hidden;
-#endif /* _MALLOC_PRIVATE_H */
+#endif /* _MALLOC_INTERNAL_H */


@@ -247,6 +247,11 @@ Patch1024: glibc-rh1315648-1.patch
Patch1025: glibc-rh1315648-2.patch
Patch1026: glibc-rh1333945.patch
Patch1027: glibc-rh1333945-2.patch
Patch1028: glibc-rh1332917-1.patch
Patch1029: glibc-rh1332917-2.patch
Patch1030: glibc-rh1332917-3.patch
Patch1031: glibc-rh1332917-4.patch
Patch1032: glibc-rh1332917-5.patch
##############################################################################
#
@@ -693,6 +698,11 @@ microbenchmark tests on the system.
%patch1025 -p1
%patch1026 -p1
%patch1027 -p1
%patch1028 -p1
%patch1029 -p1
%patch1030 -p1
%patch1031 -p1
%patch1032 -p1
%patch0059 -p1
##############################################################################
@@ -1917,6 +1927,7 @@ rm -f *.filelist*
- Fix getnameinfo memory leak and incorrect truncation (#1333901)
- CVE-2016-1234: buffer overflow in glob with GLOB_ALTDIRFUNC (#1315648)
- Set dlerror after dlsym (RTLD_NEXT) failure (#1333945)
- Fix deadlock between fflush, getdelim, and fork (#1332917)
* Fri May 6 2016 Florian Weimer <fweimer@redhat.com> - 2.22-13
- Avoid build failure in TZ tests (#1333940)