diff --git a/glibc-fedora-memalign.patch b/glibc-fedora-memalign.patch
deleted file mode 100644
index 2b921ac..0000000
--- a/glibc-fedora-memalign.patch
+++ /dev/null
@@ -1,192 +0,0 @@
-This patch adds a chunk scanning algorithm to the
-_int_memalign code path that reduces external fragmentation
-by reusing already aligned chunks instead of looking for
-chunks of larger sizes and splitting them.
-
-The goal is to fix the pathological use cases where heaps
-grow continuously in Ruby or other workloads that are
-heavy users of memalign.
-
-diff --git a/malloc/malloc.c b/malloc/malloc.c
-index 00ce48cf5879c87f..cc6d8299e272441d 100644
---- a/malloc/malloc.c
-+++ b/malloc/malloc.c
-@@ -4665,8 +4665,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
-   mchunkptr remainder;            /* spare room at end to split off */
-   unsigned long remainder_size;   /* its size */
-   INTERNAL_SIZE_T size;
--
--
-+  mchunkptr victim;
- 
-   if (!checked_request2size (bytes, &nb))
-     {
-@@ -4674,29 +4673,135 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
-       return NULL;
-     }
- 
--  /*
--     Strategy: find a spot within that chunk that meets the alignment
-+  /* Strategy: search the bins looking for an existing block that meets
-+     our needs.  */
-+
-+  /* This will be set if we found a candidate chunk. */
-+  victim = NULL;
-+
-+  /* Fast bins are singly-linked, hard to remove a chunk from the middle
-+     and unlikely to meet our alignment requirements.  We have not done
-+     any experimentation with searching for aligned fastbins. */
-+
-+  if (in_smallbin_range (nb))
-+    {
-+      /* Check small bins.  Small bin chunks are doubly-linked despite
-+         being the same size. */
-+      int victim_index;             /* its bin index */
-+
-+      victim_index = smallbin_index (nb);
-+      mchunkptr fwd;                /* misc temp for linking */
-+      mchunkptr bck;                /* misc temp for linking */
-+
-+      bck = bin_at (av, victim_index);
-+      fwd = bck->fd;
-+      while (fwd != bck)
-+        {
-+          if (((intptr_t)chunk2mem (fwd) & (alignment - 1)) == 0)
-+            {
-+              victim = fwd;
-+
-+              /* Unlink it */
-+              victim->fd->bk = victim->bk;
-+              victim->bk->fd = victim->fd;
-+              break;
-+            }
-+
-+          fwd = fwd->fd;
-+        }
-+    }
-+  else
-+    {
-+      /* Check large bins. */
-+      int victim_index;             /* its bin index */
-+      mchunkptr fwd;                /* misc temp for linking */
-+      mchunkptr bck;                /* misc temp for linking */
-+      mchunkptr best = NULL;
-+      size_t best_size = 0;
-+
-+      victim_index = largebin_index (nb);
-+      bck = bin_at (av, victim_index);
-+      fwd = bck->fd;
-+
-+      while (fwd != bck)
-+        {
-+          if (chunksize (fwd) >= nb
-+              && (((intptr_t)chunk2mem (fwd) & (alignment - 1)) == 0)
-+              && (chunksize (fwd) <= best_size || best == NULL))
-+            {
-+              best = fwd;
-+              best_size = chunksize(fwd);
-+            }
-+
-+          fwd = fwd->fd;
-+          if (chunksize (fwd) < nb)
-+            break;
-+        }
-+      victim = best;
-+
-+      if (victim)
-+        {
-+          if (victim->fd_nextsize)
-+            {
-+              if (victim->fd_nextsize != victim->fd
-+                  && victim->fd != bck)
-+                {
-+                  /* There's more with the same size, but we've chosen the
-+                     "leader".  We need to make the next one the leader. */
-+                  victim->fd->fd_nextsize = victim->fd_nextsize;
-+                  victim->fd->bk_nextsize = victim->bk_nextsize;
-+                  if (victim->fd_nextsize)
-+                    victim->fd_nextsize->bk_nextsize = victim->fd;
-+                  if (victim->bk_nextsize)
-+                    victim->bk_nextsize->fd_nextsize = victim->fd;
-+                }
-+              else
-+                {
-+                  /* There's only this one with this size.  */
-+                  if (victim->fd_nextsize)
-+                    victim->fd_nextsize->bk_nextsize = victim->bk_nextsize;
-+                  if (victim->bk_nextsize)
-+                    victim->bk_nextsize->fd_nextsize = victim->fd_nextsize;
-+                }
-+            }
-+
-+          if (victim->fd)
-+            victim->fd->bk = victim->bk;
-+          if (victim->bk)
-+            victim->bk->fd = victim->fd;
-+        }
-+    }
-+
-+  /* Strategy: find a spot within that chunk that meets the alignment
-      request, and then possibly free the leading and trailing space.
--   */
-+     This strategy is incredibly costly and can lead to external
-+     fragmentation if header and footer chunks are unused. */
- 
--  /* Call malloc with worst case padding to hit alignment. */
-+  if (victim != NULL)
-+    {
-+      p = victim;
-+      m = chunk2mem (p);
-+      set_inuse (p);
-+    }
-+  else
-+    {
-+      /* Call malloc with worst case padding to hit alignment. */
- 
--  m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
-+      m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
- 
--  if (m == 0)
--    return 0;           /* propagate failure */
-+      if (m == 0)
-+        return 0;           /* propagate failure */
- 
--  p = mem2chunk (m);
-+      p = mem2chunk (m);
-+    }
- 
-   if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
--
--    { /*
--             Find an aligned spot inside chunk.  Since we need to give back
--             leading space in a chunk of at least MINSIZE, if the first
--             calculation places us at a spot with less than MINSIZE leader,
--             we can move to the next aligned spot -- we've allocated enough
--             total room so that this is always possible.
--              */
-+    {
-+      /* Find an aligned spot inside chunk.  Since we need to give back
-+         leading space in a chunk of at least MINSIZE, if the first
-+         calculation places us at a spot with less than MINSIZE leader,
-+         we can move to the next aligned spot -- we've allocated enough
-+         total room so that this is always possible. */
-       brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
-                                 - ((signed long) alignment));
-       if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
-@@ -5385,6 +5490,16 @@ __malloc_info (int options, FILE *fp)
- 
-   fputs ("<malloc version=\"1\">\n", fp);
- 
-+  fprintf (fp, "\n", (long) MALLOC_ALIGNMENT);
-+  fprintf (fp, "\n", (long) MIN_CHUNK_SIZE);
-+  fprintf (fp, "\n", (long) MAX_FAST_SIZE);
-+  fprintf (fp, "\n", (long) MAX_TCACHE_SIZE);
-+  fprintf (fp, "\n", (long) MIN_LARGE_SIZE);
-+  fprintf (fp, "\n", (long) DEFAULT_MMAP_THRESHOLD);
-+  fprintf (fp, "\n", (long) DEFAULT_MMAP_THRESHOLD_MAX);
-+  fprintf (fp, "\n", (long) HEAP_MIN_SIZE);
-+  fprintf (fp, "\n", (long) HEAP_MAX_SIZE);
-+
-   /* Iterate over all arenas currently in use.  */
-   mstate ar_ptr = &main_arena;
-   do
diff --git a/glibc.spec b/glibc.spec
index 16a73d9..a9a20f8 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -87,7 +87,7 @@
 Summary: The GNU libc libraries
 Name: glibc
 Version: %{glibcversion}
-Release: 30%{?dist}
+Release: 29%{?dist}
 
 # In general, GPLv2+ is used by programs, LGPLv2+ is used for
 # libraries.
@@ -159,9 +159,6 @@ Patch28: glibc-rh1615608.patch
 # In progress upstream submission for nscd.conf changes:
 # https://www.sourceware.org/ml/libc-alpha/2019-03/msg00436.html
 Patch31: glibc-fedora-nscd-warnings.patch
-Patch32: glibc-fedora-memalign.patch
-
-
 
 ##############################################################################
 # Continued list of core "glibc" package information:
@@ -2018,9 +2015,6 @@ fi
 %files -f compat-libpthread-nonshared.filelist -n compat-libpthread-nonshared
 
 %changelog
-* Fri Jun 21 2019 Carlos O'Donell <carlos@redhat.com> - 2.29.9000-30
-- Reduce external fragmentation in memalign (swbz#14581)
-
 * Fri Jun 21 2019 Florian Weimer <fweimer@redhat.com> - 2.29.9000-29
 - Auto-sync with upstream branch master,
   commit 21cc130b78a4db9113fb6695e2b951e697662440:
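
Note (not part of the commit above): for anyone comparing heap behaviour before and after this revert, a minimal memalign-heavy driver along the lines of the sketch below can be used. It assumes a Linux/glibc toolchain; the alignment, block size, slot count, and iteration count are arbitrary illustrative choices, not values taken from the patch or from Ruby. The assert mirrors the power-of-two mask test, ((uintptr_t) p & (alignment - 1)) == 0, that the dropped patch's bin scan used to decide whether an existing free chunk was already aligned, and malloc_info dumps glibc's allocator statistics so total heap growth can be compared across builds.

/* Illustrative only: a small memalign-heavy workload of the kind the
   removed patch targeted.  All constants below are assumptions chosen
   for demonstration.  Build with: gcc -O2 frag-demo.c -o frag-demo  */
#include <assert.h>
#include <malloc.h>   /* malloc_info (glibc extension) */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  enum { ITERATIONS = 100000, ALIGNMENT = 64, SIZE = 4096, SLOTS = 64 };
  void *blocks[SLOTS] = { 0 };

  for (int i = 0; i < ITERATIONS; i++)
    {
      void *p = NULL;
      if (posix_memalign (&p, ALIGNMENT, SIZE) != 0)
        abort ();

      /* The same power-of-two mask test the patch's bin scan relied on to
         decide whether a free chunk was already aligned.  */
      assert (((uintptr_t) p & (ALIGNMENT - 1)) == 0);

      /* Free an older block before storing the new one; interleaving the
         lifetimes of aligned allocations is what drives the external
         fragmentation the patch tried to reduce.  */
      free (blocks[i % SLOTS]);
      blocks[i % SLOTS] = p;
    }

  for (int i = 0; i < SLOTS; i++)
    free (blocks[i]);

  /* Dump glibc's XML malloc statistics; compare the totals across builds
     with and without the patch to observe the difference in heap growth.  */
  malloc_info (0, stdout);
  return 0;
}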