CVE-2014-4171 shmem: denial of service (rhbz 1111180 1118247)
commit af406c2d11 (parent 16aafab68c)

 kernel.spec                                             |  11 +
 shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch | 197 ++++++++
 shmem-fix-faulting-into-a-hole-while-it-s-punched.patch | 138 +++++
 shmem-fix-splicing-from-a-hole-while-it-s-punched.patch | 132 +++++
 4 files changed, 478 insertions(+)
--- a/kernel.spec
+++ b/kernel.spec
@@ -759,6 +759,11 @@ Patch25118: sched-fix-sched_setparam-policy-1-logic.patch
 #CVE-2014-5045 rhbz 1122472 1122482
 Patch25119: fs-umount-on-symlink-leaks-mnt-count.patch
 
+#CVE-2014-4171 rhbz 1111180 1118247
+Patch25120: shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
+Patch25121: shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
+Patch25122: shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
+
 
 # END OF PATCH DEFINITIONS
 
@@ -1478,6 +1483,11 @@ ApplyPatch sched-fix-sched_setparam-policy-1-logic.patch
 #CVE-2014-5045 rhbz 1122472 1122482
 ApplyPatch fs-umount-on-symlink-leaks-mnt-count.patch
 
+#CVE-2014-4171 rhbz 1111180 1118247
+ApplyPatch shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
+ApplyPatch shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
+ApplyPatch shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
+
 # END OF PATCH APPLICATIONS
 
 %endif
@@ -2290,6 +2300,7 @@ fi
 # ||     ||
 %changelog
 * Thu Jul 24 2014 Josh Boyer <jwboyer@fedoraproject.org>
+- CVE-2014-4171 shmem: denial of service (rhbz 1111180 1118247)
 - CVE-2014-5045 vfs: refcount issues during lazy umount on symlink (rhbz 1122471 1122482)
 - Fix regression in sched_setparam (rhbz 1117942)
 - CVE-2014-3534 s390: ptrace: insufficient sanitization with psw mask (rhbz 1114089 1122612)

shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch (new file, 197 lines)

Bugzilla: 1118247
Upstream-status: 3.16 and CC'd for stable

From 8e205f779d1443a94b5ae81aa359cb535dd3021e Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Wed, 23 Jul 2014 14:00:10 -0700
Subject: shmem: fix faulting into a hole, not taking i_mutex

From: Hugh Dickins <hughd@google.com>

commit 8e205f779d1443a94b5ae81aa359cb535dd3021e upstream.

Commit f00cdc6df7d7 ("shmem: fix faulting into a hole while it's
punched") was buggy: Sasha sent a lockdep report to remind us that
grabbing i_mutex in the fault path is a no-no (write syscall may already
hold i_mutex while faulting user buffer).

We tried a completely different approach (see following patch) but that
proved inadequate: good enough for a rational workload, but not good
enough against trinity - which forks off so many mappings of the object
that contention on i_mmap_mutex while hole-puncher holds i_mutex builds
into serious starvation when concurrent faults force the puncher to fall
back to single-page unmap_mapping_range() searches of the i_mmap tree.

So return to the original umbrella approach, but keep away from i_mutex
this time.  We really don't want to bloat every shmem inode with a new
mutex or completion, just to protect this unlikely case from trinity.
So extend the original with wait_queue_head on stack at the hole-punch
end, and wait_queue item on the stack at the fault end.

This involves further use of i_lock to guard against the races: lockdep
has been happy so far, and I see fs/inode.c:unlock_new_inode() holds
i_lock around wake_up_bit(), which is comparable to what we do here.
i_lock is more convenient, but we could switch to shmem's info->lock.

This issue has been tagged with CVE-2014-4171, which will require commit
f00cdc6df7d7 and this and the following patch to be backported: we
suggest to 3.1+, though in fact the trinity forkbomb effect might go
back as far as 2.6.16, when madvise(,,MADV_REMOVE) came in - or might
not, since much has changed, with i_mmap_mutex a spinlock before 3.0.
Anyone running trinity on 3.0 and earlier?  I don't think we need care.

Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lukas Czerner <lczerner@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 mm/shmem.c |   78 ++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 52 insertions(+), 26 deletions(-)

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-	int	mode;		/* FALLOC_FL mode currently operating */
+	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
@@ -760,7 +760,7 @@ static int shmem_writepage(struct page *
 		spin_lock(&inode->i_lock);
 		shmem_falloc = inode->i_private;
 		if (shmem_falloc &&
-		    !shmem_falloc->mode &&
+		    !shmem_falloc->waitq &&
 		    index >= shmem_falloc->start &&
 		    index < shmem_falloc->next)
 			shmem_falloc->nr_unswapped++;
@@ -1239,38 +1239,58 @@ static int shmem_fault(struct vm_area_st
 	 * Trinity finds that probing a hole which tmpfs is punching can
 	 * prevent the hole-punch from ever completing: which in turn
 	 * locks writers out with its hold on i_mutex.  So refrain from
-	 * faulting pages into the hole while it's being punched, and
-	 * wait on i_mutex to be released if vmf->flags permits.
+	 * faulting pages into the hole while it's being punched.  Although
+	 * shmem_undo_range() does remove the additions, it may be unable to
+	 * keep up, as each new page needs its own unmap_mapping_range() call,
+	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
+	 *
+	 * It does not matter if we sometimes reach this check just before the
+	 * hole-punch begins, so that one fault then races with the punch:
+	 * we just need to make racing faults a rare case.
+	 *
+	 * The implementation below would be much simpler if we just used a
+	 * standard mutex or completion: but we cannot take i_mutex in fault,
+	 * and bloating every shmem inode for this unlikely case would be sad.
 	 */
 	if (unlikely(inode->i_private)) {
 		struct shmem_falloc *shmem_falloc;
 
 		spin_lock(&inode->i_lock);
 		shmem_falloc = inode->i_private;
-		if (!shmem_falloc ||
-		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-		    vmf->pgoff < shmem_falloc->start ||
-		    vmf->pgoff >= shmem_falloc->next)
-			shmem_falloc = NULL;
-		spin_unlock(&inode->i_lock);
-		/*
-		 * i_lock has protected us from taking shmem_falloc seriously
-		 * once return from shmem_fallocate() went back up that stack.
-		 * i_lock does not serialize with i_mutex at all, but it does
-		 * not matter if sometimes we wait unnecessarily, or sometimes
-		 * miss out on waiting: we just need to make those cases rare.
-		 */
-		if (shmem_falloc) {
+		if (shmem_falloc &&
+		    shmem_falloc->waitq &&
+		    vmf->pgoff >= shmem_falloc->start &&
+		    vmf->pgoff < shmem_falloc->next) {
+			wait_queue_head_t *shmem_falloc_waitq;
+			DEFINE_WAIT(shmem_fault_wait);
+
+			ret = VM_FAULT_NOPAGE;
 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
 			    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				/* It's polite to up mmap_sem if we can */
 				up_read(&vma->vm_mm->mmap_sem);
-				mutex_lock(&inode->i_mutex);
-				mutex_unlock(&inode->i_mutex);
-				return VM_FAULT_RETRY;
+				ret = VM_FAULT_RETRY;
 			}
-			/* cond_resched? Leave that to GUP or return to user */
-			return VM_FAULT_NOPAGE;
+
+			shmem_falloc_waitq = shmem_falloc->waitq;
+			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&inode->i_lock);
+			schedule();
+
+			/*
+			 * shmem_falloc_waitq points into the shmem_fallocate()
+			 * stack of the hole-punching task: shmem_falloc_waitq
+			 * is usually invalid by the time we reach here, but
+			 * finish_wait() does not dereference it in that case;
+			 * though i_lock needed lest racing with wake_up_all().
+			 */
+			spin_lock(&inode->i_lock);
+			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+			spin_unlock(&inode->i_lock);
+			return ret;
 		}
+		spin_unlock(&inode->i_lock);
 	}
 
 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1773,13 +1793,13 @@ static long shmem_fallocate(struct file
 
 	mutex_lock(&inode->i_mutex);
 
-	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		struct address_space *mapping = file->f_mapping;
 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+		shmem_falloc.waitq = &shmem_falloc_waitq;
 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
 		spin_lock(&inode->i_lock);
@@ -1791,8 +1811,13 @@ static long shmem_fallocate(struct file
 					    1 + unmap_end - unmap_start, 0);
 		shmem_truncate_range(inode, offset, offset + len - 1);
 		/* No need to unmap again: hole-punching leaves COWed pages */
+
+		spin_lock(&inode->i_lock);
+		inode->i_private = NULL;
+		wake_up_all(&shmem_falloc_waitq);
+		spin_unlock(&inode->i_lock);
 		error = 0;
-		goto undone;
+		goto out;
 	}
 
 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1808,6 +1833,7 @@ static long shmem_fallocate(struct file
 		goto out;
 	}
 
+	shmem_falloc.waitq = NULL;
 	shmem_falloc.start = start;
 	shmem_falloc.next = start;
 	shmem_falloc.nr_falloced = 0;
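(Aside: the on-stack umbrella above is compact but subtle, so here is a
minimal userspace sketch of the same pattern.  It is an illustration,
not kernel code: a pthread mutex stands in for i_lock, a condition
variable on the puncher's stack for the on-stack wait_queue_head_t, and
a shared pointer for inode->i_private.  All names are invented, and the
nwaiters handshake is a crude substitute for what finish_wait() run
under i_lock gives the kernel for free.)

/* gcc -pthread umbrella.c */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

struct falloc_ctx {			/* analogue of struct shmem_falloc */
	pthread_cond_t waitq;		/* analogue of the on-stack wait queue */
	int nwaiters;			/* waiters still touching this frame */
	long start, next;		/* page range being punched */
};

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct falloc_ctx *i_private;	/* non-NULL while a punch is running */

static void *fault_path(void *arg)	/* analogue of shmem_fault() */
{
	long pgoff = (long)arg;

	pthread_mutex_lock(&i_lock);
	while (i_private &&
	       pgoff >= i_private->start && pgoff < i_private->next) {
		struct falloc_ctx *ctx = i_private;

		ctx->nwaiters++;
		pthread_cond_wait(&ctx->waitq, &i_lock);
		ctx->nwaiters--;	/* ctx still alive: the puncher spins
					 * until nwaiters drops to zero */
	}
	pthread_mutex_unlock(&i_lock);
	printf("fault at pgoff %ld proceeds\n", pgoff);
	return NULL;
}

static void punch_hole(long start, long next)	/* analogue of shmem_fallocate() */
{
	struct falloc_ctx ctx = { .nwaiters = 0, .start = start, .next = next };

	pthread_cond_init(&ctx.waitq, NULL);

	pthread_mutex_lock(&i_lock);
	i_private = &ctx;		/* publish: faults in range now wait */
	pthread_mutex_unlock(&i_lock);

	usleep(100000);			/* ... the actual hole-punch work ... */

	pthread_mutex_lock(&i_lock);
	i_private = NULL;		/* unpublish */
	pthread_cond_broadcast(&ctx.waitq);
	while (ctx.nwaiters) {		/* don't pop this frame under a waiter */
		pthread_mutex_unlock(&i_lock);
		sched_yield();
		pthread_mutex_lock(&i_lock);
	}
	pthread_mutex_unlock(&i_lock);
	pthread_cond_destroy(&ctx.waitq);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fault_path, (void *)5L);
	punch_hole(0, 10);
	pthread_join(t, NULL);
	return 0;
}

(The point is the lifetime rule: any waiter's last touch of the stack
object must happen before the puncher's frame is popped, which the
patch arranges by running both wake_up_all() and finish_wait() under
i_lock.)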

shmem-fix-faulting-into-a-hole-while-it-s-punched.patch (new file, 138 lines)

Bugzilla: 1118247
Upstream-status: 3.16 and CC'd for stable

From f00cdc6df7d7cfcabb5b740911e6788cb0802bdb Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Mon, 23 Jun 2014 13:22:06 -0700
Subject: shmem: fix faulting into a hole while it's punched

From: Hugh Dickins <hughd@google.com>

commit f00cdc6df7d7cfcabb5b740911e6788cb0802bdb upstream.

Trinity finds that mmap access to a hole while it's punched from shmem
can prevent the madvise(MADV_REMOVE) or fallocate(FALLOC_FL_PUNCH_HOLE)
from completing, until the reader chooses to stop; with the puncher's
hold on i_mutex locking out all other writers until it can complete.

It appears that the tmpfs fault path is too light in comparison with its
hole-punching path, lacking an i_data_sem to obstruct it; but we don't
want to slow down the common case.

Extend shmem_fallocate()'s existing range notification mechanism, so
shmem_fault() can refrain from faulting pages into the hole while it's
punched, waiting instead on i_mutex (when safe to sleep; or repeatedly
faulting when not).

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 mm/shmem.c |   56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 52 insertions(+), 4 deletions(-)

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+	int	mode;		/* FALLOC_FL mode currently operating */
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
@@ -759,6 +760,7 @@ static int shmem_writepage(struct page *
 		spin_lock(&inode->i_lock);
 		shmem_falloc = inode->i_private;
 		if (shmem_falloc &&
+		    !shmem_falloc->mode &&
 		    index >= shmem_falloc->start &&
 		    index < shmem_falloc->next)
 			shmem_falloc->nr_unswapped++;
@@ -1233,6 +1235,44 @@ static int shmem_fault(struct vm_area_st
 	int error;
 	int ret = VM_FAULT_LOCKED;
 
+	/*
+	 * Trinity finds that probing a hole which tmpfs is punching can
+	 * prevent the hole-punch from ever completing: which in turn
+	 * locks writers out with its hold on i_mutex.  So refrain from
+	 * faulting pages into the hole while it's being punched, and
+	 * wait on i_mutex to be released if vmf->flags permits.
+	 */
+	if (unlikely(inode->i_private)) {
+		struct shmem_falloc *shmem_falloc;
+
+		spin_lock(&inode->i_lock);
+		shmem_falloc = inode->i_private;
+		if (!shmem_falloc ||
+		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+		    vmf->pgoff < shmem_falloc->start ||
+		    vmf->pgoff >= shmem_falloc->next)
+			shmem_falloc = NULL;
+		spin_unlock(&inode->i_lock);
+		/*
+		 * i_lock has protected us from taking shmem_falloc seriously
+		 * once return from shmem_fallocate() went back up that stack.
+		 * i_lock does not serialize with i_mutex at all, but it does
+		 * not matter if sometimes we wait unnecessarily, or sometimes
+		 * miss out on waiting: we just need to make those cases rare.
+		 */
+		if (shmem_falloc) {
+			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+			    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				up_read(&vma->vm_mm->mmap_sem);
+				mutex_lock(&inode->i_mutex);
+				mutex_unlock(&inode->i_mutex);
+				return VM_FAULT_RETRY;
+			}
+			/* cond_resched? Leave that to GUP or return to user */
+			return VM_FAULT_NOPAGE;
+		}
+	}
+
 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
 	if (error)
 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1733,18 +1773,26 @@ static long shmem_fallocate(struct file
 
 	mutex_lock(&inode->i_mutex);
 
+	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		struct address_space *mapping = file->f_mapping;
 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
 
+		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+		spin_lock(&inode->i_lock);
+		inode->i_private = &shmem_falloc;
+		spin_unlock(&inode->i_lock);
+
 		if ((u64)unmap_end > (u64)unmap_start)
 			unmap_mapping_range(mapping, unmap_start,
 					    1 + unmap_end - unmap_start, 0);
 		shmem_truncate_range(inode, offset, offset + len - 1);
 		/* No need to unmap again: hole-punching leaves COWed pages */
 		error = 0;
-		goto out;
+		goto undone;
 	}
 
 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
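(Aside: a minimal userspace sketch of the wait-then-retry idiom this
patch uses in shmem_fault(), with invented names.  It also makes the
later lockdep complaint visible: the fault path blocks on the very
lock, i_mutex here, that a writer may hold while faulting a user
buffer.)

#include <pthread.h>
#include <stdbool.h>

enum fault_ret { FAULT_DONE, FAULT_RETRY };

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool punch_in_progress;		/* set and cleared by the puncher */

/* Analogue of shmem_fault(), entered with mmap_sem read-held: if a
 * punch is running, drop mmap_sem so writers are not blocked behind us,
 * take and immediately release i_mutex purely to sleep until the
 * puncher (who holds it) finishes, then ask the caller to retry. */
static enum fault_ret fault(void)
{
	if (__atomic_load_n(&punch_in_progress, __ATOMIC_ACQUIRE)) {
		pthread_rwlock_unlock(&mmap_sem);
		pthread_mutex_lock(&i_mutex);	/* blocks while punch runs */
		pthread_mutex_unlock(&i_mutex);	/* we only wanted the wait */
		return FAULT_RETRY;
	}
	/* ... instantiate the page as usual ... */
	return FAULT_DONE;
}

/* Caller side: the retry loop, analogous to how the mm core restarts
 * a fault after VM_FAULT_RETRY. */
static void touch_page(void)
{
	enum fault_ret ret;

	do {
		pthread_rwlock_rdlock(&mmap_sem);
		ret = fault();
		if (ret == FAULT_DONE)
			pthread_rwlock_unlock(&mmap_sem);
	} while (ret == FAULT_RETRY);
}

int main(void)
{
	touch_page();		/* no punch running: completes first try */
	return 0;
}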

shmem-fix-splicing-from-a-hole-while-it-s-punched.patch (new file, 132 lines)

Bugzilla: 1118247
Upstream-status: 3.16 and CC'd for stable

From b1a366500bd537b50c3aad26dc7df083ec03a448 Mon Sep 17 00:00:00 2001
From: Hugh Dickins <hughd@google.com>
Date: Wed, 23 Jul 2014 14:00:13 -0700
Subject: shmem: fix splicing from a hole while it's punched

From: Hugh Dickins <hughd@google.com>

commit b1a366500bd537b50c3aad26dc7df083ec03a448 upstream.

shmem_fault() is the actual culprit in trinity's hole-punch starvation,
and the most significant cause of such problems: since a page faulted is
one that then appears page_mapped(), needing unmap_mapping_range() and
i_mmap_mutex to be unmapped again.

But it is not the only way in which a page can be brought into a hole in
the radix_tree while that hole is being punched; and Vlastimil's testing
implies that if enough other processors are busy filling in the hole,
then shmem_undo_range() can be kept from completing indefinitely.

shmem_file_splice_read() is the main other user of SGP_CACHE, which can
instantiate shmem pagecache pages in the read-only case (without holding
i_mutex, so perhaps concurrently with a hole-punch).  Probably it's
silly not to use SGP_READ already (using the ZERO_PAGE for holes): which
ought to be safe, but might bring surprises - not a change to be rushed.

shmem_read_mapping_page_gfp() is an internal interface used by
drivers/gpu/drm GEM (and next by uprobes): it should be okay.  And
shmem_file_read_iter() uses the SGP_DIRTY variant of SGP_CACHE, when
called internally by the kernel (perhaps for a stacking filesystem,
which might rely on holes to be reserved): it's unclear whether it could
be provoked to keep hole-punch busy or not.

We could apply the same umbrella as now used in shmem_fault() to
shmem_file_splice_read() and the others; but it looks ugly, and use over
a range raises questions - should it actually be per page? can these get
starved themselves?

The origin of this part of the problem is my v3.1 commit d0823576bf4b
("mm: pincer in truncate_inode_pages_range"), once it was duplicated
into shmem.c.  It seemed like a nice idea at the time, to ensure
(barring RCU lookup fuzziness) that there's an instant when the entire
hole is empty; but the indefinitely repeated scans to ensure that make
it vulnerable.

Revert that "enhancement" to hole-punch from shmem_undo_range(), but
retain the unproblematic rescanning when it's truncating; add a couple
of comments there.

Remove the "indices[0] >= end" test: that is now handled satisfactorily
by the inner loop, and mem_cgroup_uncharge_start()/end() are too light
to be worth avoiding here.

But if we do not always loop indefinitely, we do need to handle the case
of swap swizzled back to page before shmem_free_swap() gets it: add a
retry for that case, as suggested by Konstantin Khlebnikov; and for the
case of page swizzled back to swap, as suggested by Johannes Weiner.

Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lukas Czerner <lczerner@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 mm/shmem.c |   24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inod
 		return;
 
 	index = start;
-	for ( ; ; ) {
+	while (index < end) {
 		cond_resched();
 
 		pvec.nr = find_get_entries(mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 				pvec.pages, indices);
 		if (!pvec.nr) {
-			if (index == start || unfalloc)
+			/* If all gone or hole-punch or unfalloc, we're done */
+			if (index == start || end != -1)
 				break;
+			/* But if truncating, restart to make sure all gone */
 			index = start;
 			continue;
 		}
-		if ((index == start || unfalloc) && indices[0] >= end) {
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inod
 			if (radix_tree_exceptional_entry(page)) {
 				if (unfalloc)
 					continue;
-				nr_swaps_freed += !shmem_free_swap(mapping,
-								index, page);
+				if (shmem_free_swap(mapping, index, page)) {
+					/* Swap was replaced by page: retry */
+					index--;
+					break;
+				}
+				nr_swaps_freed++;
 				continue;
 			}
 
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inod
 			if (page->mapping == mapping) {
 				VM_BUG_ON_PAGE(PageWriteback(page), page);
 				truncate_inode_page(mapping, page);
+			} else {
+				/* Page was replaced by swap: retry */
+				unlock_page(page);
+				index--;
+				break;
 			}
 		}
 		unlock_page(page);