commit 77cdc054e02069d72dcf54a9ad7d7df3a24bcb01
Author: Andreas Schwab
Date:   Wed Nov 9 17:14:39 2011 +0100

    Check malloc arena limit atomically

diff --git a/ChangeLog b/ChangeLog
index bf09161..edd7dd8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2011-11-14  Andreas Schwab
+
+	* malloc/arena.c (arena_get2): Don't call reused_arena when
+	_int_new_arena failed.
+
+2011-11-10  Andreas Schwab
+
+	* malloc/arena.c (_int_new_arena): Don't increment narenas.
+	(reused_arena): Don't check arena limit.
+	(arena_get2): Atomically check arena limit.
+
 2011-10-19  Andreas Schwab
 
 	* sysdeps/x86_64/fpu/math_private.h (libc_feupdateenv): Use
diff --git a/malloc/arena.c b/malloc/arena.c
index 9114fd2..042cac8 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -747,8 +747,6 @@ _int_new_arena(size_t size)
   main_arena.next = a;
 
 #ifdef PER_THREAD
-  ++narenas;
-
   (void)mutex_unlock(&list_lock);
 #endif
 
@@ -786,30 +784,6 @@ get_free_list (void)
 static mstate
 reused_arena (void)
 {
-  if (narenas <= mp_.arena_test)
-    return NULL;
-
-  static int narenas_limit;
-  if (narenas_limit == 0)
-    {
-      if (mp_.arena_max != 0)
-        narenas_limit = mp_.arena_max;
-      else
-        {
-          int n = __get_nprocs ();
-
-          if (n >= 1)
-            narenas_limit = NARENAS_FROM_NCORES (n);
-          else
-            /* We have no information about the system.  Assume two
-               cores.  */
-            narenas_limit = NARENAS_FROM_NCORES (2);
-        }
-    }
-
-  if (narenas < narenas_limit)
-    return NULL;
-
   mstate result;
   static mstate next_to_use;
   if (next_to_use == NULL)
@@ -844,10 +818,41 @@ arena_get2(mstate a_tsd, size_t size)
   mstate a;
 
 #ifdef PER_THREAD
-  if ((a = get_free_list ()) == NULL
-      && (a = reused_arena ()) == NULL)
-    /* Nothing immediately available, so generate a new arena.  */
-    a = _int_new_arena(size);
+  static size_t narenas_limit;
+
+  a = get_free_list ();
+  if (a == NULL)
+    {
+      /* Nothing immediately available, so generate a new arena.  */
+      if (narenas_limit == 0)
+        {
+          if (mp_.arena_max != 0)
+            narenas_limit = mp_.arena_max;
+          else
+            {
+              int n = __get_nprocs ();
+
+              if (n >= 1)
+                narenas_limit = NARENAS_FROM_NCORES (n);
+              else
+                /* We have no information about the system.  Assume two
+                   cores.  */
+                narenas_limit = NARENAS_FROM_NCORES (2);
+            }
+        }
+    repeat:;
+      size_t n = narenas;
+      if (__builtin_expect (n <= mp_.arena_test || n < narenas_limit, 0))
+        {
+          if (catomic_compare_and_exchange_bool_acq(&narenas, n + 1, n))
+            goto repeat;
+          a = _int_new_arena (size);
+          if (__builtin_expect (a != NULL, 1))
+            return a;
+          catomic_decrement(&narenas);
+        }
+      a = reused_arena ();
+    }
 #else
   if(!a_tsd)
     a = a_tsd = &main_arena;

commit a5fb313cb7b7e692fd4684916aaa98e03ec7e8b6
Author: Andreas Schwab
Date:   Mon Nov 14 11:41:52 2011 +0100

    Don't call reused_arena when _int_new_arena failed

diff --git a/malloc/arena.c b/malloc/arena.c
index 042cac8..cb8548b 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -844,14 +844,14 @@ arena_get2(mstate a_tsd, size_t size)
       size_t n = narenas;
       if (__builtin_expect (n <= mp_.arena_test || n < narenas_limit, 0))
        {
-          if (catomic_compare_and_exchange_bool_acq(&narenas, n + 1, n))
+          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
-          if (__builtin_expect (a != NULL, 1))
-            return a;
-          catomic_decrement(&narenas);
+          if (__builtin_expect (a == NULL, 0))
+            catomic_decrement (&narenas);
        }
-      a = reused_arena ();
+      else
+        a = reused_arena ();
     }
 #else
   if(!a_tsd)
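
The net effect of the two patches is that arena_get2 reserves a slot in the
narenas counter with a compare-and-exchange loop before creating an arena,
gives the slot back if _int_new_arena fails, and falls back to reused_arena
only when the limit has already been reached.  The following is a minimal
standalone sketch of that pattern, not glibc code: it uses C11 <stdatomic.h>
in place of the internal catomic_* helpers, collapses mp_.arena_test and
narenas_limit into a single arena_limit, and arena_t, create_arena and
reuse_arena are hypothetical stand-ins for _int_new_arena and reused_arena.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for glibc's mstate, _int_new_arena and
   reused_arena.  */
typedef struct arena { struct arena *next; } arena_t;

static _Atomic size_t narenas;        /* arenas created so far */
static const size_t arena_limit = 8;  /* stand-in for narenas_limit */

static arena_t *create_arena (void) { return malloc (sizeof (arena_t)); }
static arena_t *reuse_arena (void)  { static arena_t shared; return &shared; }

static arena_t *
get_arena (void)
{
  size_t n = atomic_load (&narenas);

  /* Below the limit: try to reserve a slot.  A failed CAS reloads `n'
     with the current counter value and retests the condition, which
     corresponds to the `goto repeat' in the patch.  */
  while (n < arena_limit)
    {
      if (!atomic_compare_exchange_weak (&narenas, &n, n + 1))
        continue;

      arena_t *a = create_arena ();
      if (a == NULL)
        /* Creation failed: release the reserved slot (the second
           patch's catomic_decrement) and do not fall back to reuse.  */
        atomic_fetch_sub (&narenas, 1);
      return a;
    }

  /* At or over the limit: reuse an existing arena instead of
     creating another one.  */
  return reuse_arena ();
}

int
main (void)
{
  /* The first arena_limit calls create fresh arenas; later calls all
     return the shared, reused one.  */
  for (int i = 0; i < 12; i++)
    printf ("call %d -> %p\n", i, (void *) get_arena ());
  return 0;
}

Rolling the counter back on a failed creation keeps a transient allocation
failure from permanently consuming one of the limited slots, and confining
the reuse path to the over-the-limit branch mirrors the restructuring done
in the second patch.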