Short description: Implement allocate_once for internal use.
Author(s): Istvan Kurucsai
Origin: PATCH
Bug-Fedora: #1452750
Upstream status: https://patchwork.sourceware.org/patch/25361/

Internal allocate_once() function should get pushed upstream.

commit a829b747cf2ce18ba869ea60f67975c484bf2769
Author: Istvan Kurucsai
Date:   Fri Jan 12 15:34:13 2018 +0100

    Implement allocate_once for atomic initialization with allocation

diff --git a/include/allocate_once.h b/include/allocate_once.h
new file mode 100644
index 0000000000000000..26902dde7c1a3255
--- /dev/null
+++ b/include/allocate_once.h
@@ -0,0 +1,95 @@
+/* Allocate and initialize an object once, in a thread-safe fashion.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _ALLOCATE_ONCE_H
+#define _ALLOCATE_ONCE_H
+
+#include <atomic.h>
+
+/* Slow path for allocate_once; see below.  */
+void *__libc_allocate_once_slow (void **__place,
+                                 void *(*__allocate) (void *__closure),
+                                 void (*__deallocate) (void *__closure,
+                                                       void *__ptr),
+                                 void *__closure);
+
+/* Return a pointer to an allocated and initialized data structure.
+   If this function returns a non-NULL value, the caller can assume
+   that the pointed-to data has been initialized according to the
+   ALLOCATE function.
+
+   It is expected that callers define an inline helper function which
+   adds type safety, like this.
+
+   struct foo { ... };
+   struct foo *global_foo;
+   static void *allocate_foo (void *closure);
+   static void deallocate_foo (void *closure, void *ptr);
+
+   static inline struct foo *
+   get_foo (void)
+   {
+     return allocate_once (&global_foo, allocate_foo, deallocate_foo, NULL);
+   }
+
+   (Note that the global_foo variable is initialized to zero.)
+   Usage of this helper function looks like this:
+
+   struct foo *local_foo = get_foo ();
+   if (local_foo == NULL)
+     report_allocation_failure ();
+
+   allocate_once first performs an acquire MO load on *PLACE.  If the
+   result is not null, it is returned.  Otherwise, ALLOCATE (CLOSURE)
+   is called, yielding a value RESULT.  If RESULT equals NULL,
+   allocate_once returns NULL, and does not modify *PLACE (but another
+   thread may concurrently perform an allocation which succeeds,
+   updating *PLACE).  If RESULT does not equal NULL, the function uses
+   a CAS with acquire-release MO to update the NULL value in *PLACE
+   with the RESULT value.  If it turns out that *PLACE was updated
+   concurrently, allocate_once calls DEALLOCATE (CLOSURE, RESULT) to
+   undo the effect of ALLOCATE, and returns the new value of *PLACE
+   (after an acquire MO load).  If DEALLOCATE is NULL, free (RESULT)
+   is called instead.
+
+   Compared to __libc_once, allocate_once has the advantage that it
+   does not need separate space for a control variable, and that it is
+   safe with regard to cancellation and other forms of exception
+   handling if the supplied callback functions are safe in that
+   regard.  allocate_once passes a closure parameter to the allocation
+   function, too.  */
+static inline void *
+allocate_once (void **__place, void *(*__allocate) (void *__closure),
+               void (*__deallocate) (void *__closure, void *__ptr),
+               void *__closure)
+{
+  /* Synchronizes with the release MO CAS in
+     __libc_allocate_once_slow.  */
+  void *__result = atomic_load_acquire (__place);
+  if (__result != NULL)
+    return __result;
+  else
+    return __libc_allocate_once_slow (__place, __allocate, __deallocate,
+                                      __closure);
+}
+
+#ifndef _ISOMAC
+libc_hidden_proto (__libc_allocate_once_slow)
+#endif
+
+#endif /* _ALLOCATE_ONCE_H */
diff --git a/misc/Makefile b/misc/Makefile
index a5076b36728749d6..96afd6d890bd06f3
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -70,9 +70,11 @@ routines := brk sbrk sstk ioctl \
 	    getloadavg getclktck \
 	    fgetxattr flistxattr fremovexattr fsetxattr getxattr \
 	    listxattr lgetxattr llistxattr lremovexattr lsetxattr \
-	    removexattr setxattr getauxval ifunc-impl-list makedev
+	    removexattr setxattr getauxval ifunc-impl-list makedev \
+	    allocate_once
 
-generated += tst-error1.mtrace tst-error1-mem.out
+generated += tst-error1.mtrace tst-error1-mem.out \
+  tst-allocate_once.mtrace tst-allocate_once-mem.out
 
 aux := init-misc
 install-lib := libg.a
@@ -84,11 +86,12 @@ tests := tst-dirname tst-tsearch tst-fdset tst-efgcvt tst-mntent tst-hsearch \
 	 tst-preadvwritev tst-preadvwritev64 tst-makedev tst-empty \
 	 tst-preadvwritev2 tst-preadvwritev64v2
 
-tests-internal := tst-atomic tst-atomic-long
+tests-internal := tst-atomic tst-atomic-long tst-allocate_once
 tests-static := tst-empty
 
 ifeq ($(run-built-tests),yes)
-tests-special += $(objpfx)tst-error1-mem.out
+tests-special += $(objpfx)tst-error1-mem.out \
+  $(objpfx)tst-allocate_once-mem.out
 endif
 
 CFLAGS-select.c += -fexceptions -fasynchronous-unwind-tables
@@ -137,3 +140,8 @@ tst-error1-ARGS = $(objpfx)tst-error1.out
 $(objpfx)tst-error1-mem.out: $(objpfx)tst-error1.out
 	$(common-objpfx)malloc/mtrace $(objpfx)tst-error1.mtrace > $@; \
 	$(evaluate-test)
+
+tst-allocate_once-ENV = MALLOC_TRACE=$(objpfx)tst-allocate_once.mtrace
+$(objpfx)tst-allocate_once-mem.out: $(objpfx)tst-allocate_once.out
+	$(common-objpfx)malloc/mtrace $(objpfx)tst-allocate_once.mtrace > $@; \
+	$(evaluate-test)
diff --git a/misc/Versions b/misc/Versions
index bfbda505e4dd8743..900e4ffb798a9e13
--- a/misc/Versions
+++ b/misc/Versions
@@ -165,5 +165,6 @@ libc {
     __tdelete; __tfind; __tsearch; __twalk;
     __mmap; __munmap; __mprotect;
     __sched_get_priority_min; __sched_get_priority_max;
+    __libc_allocate_once_slow;
   }
 }
diff --git a/misc/allocate_once.c b/misc/allocate_once.c
new file mode 100644
index 0000000000000000..2108014604cb026e
--- /dev/null
+++ b/misc/allocate_once.c
@@ -0,0 +1,59 @@
+/* Concurrent allocation and initialization of a pointer.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <allocate_once.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+void *
+__libc_allocate_once_slow (void **place, void *(*allocate) (void *closure),
+                           void (*deallocate) (void *closure, void *ptr),
+                           void *closure)
+{
+  void *result = allocate (closure);
+  if (result == NULL)
+    return NULL;
+
+  /* This loop implements a strong CAS on *place, with acquire-release
+     MO semantics, from a weak CAS with relaxed-release MO.  */
+  while (true)
+    {
+      /* Synchronizes with the acquire MO load in allocate_once.  */
+      void *expected = NULL;
+      if (atomic_compare_exchange_weak_release (place, &expected, result))
+        return result;
+
+      /* The failed CAS has relaxed MO semantics, so perform another
+         acquire MO load.  */
+      void *other_result = atomic_load_acquire (place);
+      if (other_result == NULL)
+        /* Spurious failure.  Try again.  */
+        continue;
+
+      /* We lost the race.  Free what we allocated and return the
+         other result.  */
+      if (deallocate == NULL)
+        free (result);
+      else
+        deallocate (closure, result);
+      return other_result;
+    }
+
+  return result;
+}
+libc_hidden_def (__libc_allocate_once_slow)
diff --git a/misc/tst-allocate_once.c b/misc/tst-allocate_once.c
new file mode 100644
index 0000000000000000..89277b33b769732b
--- /dev/null
+++ b/misc/tst-allocate_once.c
@@ -0,0 +1,181 @@
+/* Test the allocate_once function.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <allocate_once.h>
+#include <mcheck.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/support.h>
+
+/* Allocate a new string.  */
+static void *
+allocate_string (void *closure)
+{
+  return xstrdup (closure);
+}
+
+/* Allocation and deallocation functions which are not expected to be
+   called.  */
+
+static void *
+allocate_not_called (void *closure)
+{
+  FAIL_EXIT1 ("allocation function called unexpectedly (%p)", closure);
+}
+
+static void
+deallocate_not_called (void *closure, void *ptr)
+{
+  FAIL_EXIT1 ("deallocate function called unexpectedly (%p, %p)",
+              closure, ptr);
+}
+
+/* Counter for various function calls.  */
+static int function_called;
+
+/* An allocation function which returns NULL and records that it has
+   been called.  */
+static void *
+allocate_return_null (void *closure)
+{
+  /* The function should only be called once.  */
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  return NULL;
+}
+
+
+/* The following is used to check the retry logic, by causing a fake
+   race condition.  */
+static void *fake_race_place;
+static char fake_race_region[3];	/* To obtain unique addresses.  */
+
+static void *
+fake_race_allocate (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread.  */
+  fake_race_place = &fake_race_region[1];
+  return &fake_race_region[2];
+}
+
+static void
+fake_race_deallocate (void *closure, void *ptr)
+{
+  /* Check that the pointer returned from fake_race_allocate is
+     deallocated (and not the one stored in fake_race_place).  */
+  TEST_VERIFY (ptr == &fake_race_region[2]);
+
+  TEST_VERIFY (fake_race_place == &fake_race_region[1]);
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 1);
+  ++function_called;
+}
+
+/* Similar to fake_race_allocate, but expects to be paired with free
+   as the deallocation function.  */
+static void *
+fake_race_allocate_for_free (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread.  */
+  fake_race_place = &fake_race_region[1];
+  return xstrdup ("to be freed");
+}
+
+static int
+do_test (void)
+{
+  mtrace ();
+
+  /* Simple allocation.  */
+  void *place1 = NULL;
+  char *string1 = allocate_once (&place1, allocate_string,
+                                 deallocate_not_called,
+                                 (char *) "test string 1");
+  TEST_VERIFY_EXIT (string1 != NULL);
+  TEST_VERIFY (strcmp ("test string 1", string1) == 0);
+  /* Second call returns the first pointer, without calling any
+     callbacks.  */
+  TEST_VERIFY (string1
+               == allocate_once (&place1, allocate_not_called,
+                                 deallocate_not_called,
+                                 (char *) "test string 1a"));
+
+  /* A different place should result in another call.  */
+  void *place2 = NULL;
+  char *string2 = allocate_once (&place2, allocate_string,
+                                 deallocate_not_called,
+                                 (char *) "test string 2");
+  TEST_VERIFY_EXIT (string2 != NULL);
+  TEST_VERIFY (strcmp ("test string 2", string2) == 0);
+  TEST_VERIFY (string1 != string2);
+
+  /* Check error reporting (NULL return value from the allocation
+     function).  */
+  void *place3 = NULL;
+  char *string3 = allocate_once (&place3, allocate_return_null,
+                                 deallocate_not_called, NULL);
+  TEST_VERIFY (string3 == NULL);
+  TEST_COMPARE (function_called, 1);
+
+  /* Check that the deallocation function is called if the race is
+     lost.  */
+  function_called = 0;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate,
+                              fake_race_deallocate,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 2);
+  function_called = 3;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate,
+                              fake_race_deallocate,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  /* Similar, but this time rely on free being called.  */
+  function_called = 0;
+  fake_race_place = NULL;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate_for_free,
+                              NULL,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 1);
+  function_called = 3;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate_for_free,
+                              NULL,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  free (place2);
+  free (place1);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
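
Editorial note (not part of the patch): for readers who want to see the
publish-once idiom described in the allocate_once.h header comment outside
the glibc tree, the following is a minimal standalone sketch.  It mirrors the
allocate_once/__libc_allocate_once_slow logic but uses standard C11
<stdatomic.h> instead of glibc's internal <atomic.h>, and all identifiers
(struct config, allocate_config, get_config) are hypothetical names invented
for the illustration.

/* Minimal sketch of the allocate_once pattern with C11 atomics.  */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct config { char name[32]; };

/* Zero-initialized; NULL means "not yet allocated".  */
static _Atomic (struct config *) global_config;

/* The ALLOCATE callback: build the object, return NULL on failure.  */
static struct config *
allocate_config (void)
{
  struct config *c = malloc (sizeof (*c));
  if (c != NULL)
    strcpy (c->name, "defaults");
  return c;
}

/* Same shape as allocate_once: the fast path is an acquire load; the
   slow path allocates and publishes the result with a CAS, freeing the
   new object if another thread won the race.  */
static struct config *
get_config (void)
{
  struct config *result = atomic_load_explicit (&global_config,
                                                memory_order_acquire);
  if (result != NULL)
    return result;

  result = allocate_config ();
  if (result == NULL)
    return NULL;                /* Caller reports the allocation failure.  */

  struct config *expected = NULL;
  if (atomic_compare_exchange_strong_explicit (&global_config, &expected,
                                               result,
                                               memory_order_acq_rel,
                                               memory_order_acquire))
    return result;              /* We published our object.  */

  /* Lost the race: undo our allocation and use the other thread's.  */
  free (result);
  return expected;
}

int
main (void)
{
  struct config *c = get_config ();
  if (c == NULL)
    return EXIT_FAILURE;
  puts (c->name);
  return 0;
}

The sketch uses a strong CAS, so a single attempt suffices; the patch instead
loops around atomic_compare_exchange_weak_release and re-reads *place with an
acquire load so that it can distinguish a spurious CAS failure from a
genuinely lost race.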