Linux v4.9.16

Laura Abbott 2017-03-20 09:01:39 -07:00
parent 2a1df92dfb
commit 3222081031
3 changed files with 5 additions and 94 deletions

kernel.spec

@@ -54,7 +54,7 @@ Summary: The Linux kernel
%if 0%{?released_kernel}
# Do we have a -stable update to apply?
%define stable_update 15
%define stable_update 16
# Set rpm version accordingly
%if 0%{?stable_update}
%define stablerev %{stable_update}
@@ -646,9 +646,6 @@ Patch862: rt2800-warning.patch
#CVE-2017-6353 rhbz 1428907 1428910
Patch864: sctp-deny-peeloff-operation-on-asocs-with-threads-sl.patch
#CVE-2017-6874 rhbz 1432429 1432430
Patch865: ucount-Remove-the-atomicity-from-ucount-count.patch
# END OF PATCH DEFINITIONS
%endif
@@ -2177,6 +2174,9 @@ fi
#
#
%changelog
* Mon Mar 20 2017 Laura Abbott <labbott@fedoraproject.org> - 4.9.16-100
- Linux v4.9.16
* Wed Mar 15 2017 Laura Abbott <labbott@fedoraproject.org> - 4.9.15-100
- Linux v4.9.15

sources

@@ -1,3 +1,3 @@
SHA512 (linux-4.9.tar.xz) = bf67ff812cc3cb7e5059e82cc5db0d9a7c5637f7ed9a42e4730c715bf7047c81ed3a571225f92a33ef0b6d65f35595bc32d773356646df2627da55e9bc7f1f1a
SHA512 (perf-man-4.9.tar.gz) = d23bb3da1eadd6623fddbf4696948de7675f3dcf57c711a7427dd7ae111394f58d8f42752938bbea7cd219f1e7f6f116fc67a1c74f769711063940a065f37b99
SHA512 (patch-4.9.15.xz) = 95fbeff3b6cba1e273ea889b96c7cc8750f85587b7abe8f5b6fd29aca10a0105dd8ec7bae9c6a65b1aa8f37a98a67d0cecebe4bf98430d1f0576c21d687d007f
SHA512 (patch-4.9.16.xz) = ccb50acfc83ece070594773f6f3c0e43c349f2dc65e889d9f03752afb2a3b5c9db7e9d2c61da2c9d774dad7556f163ea9fdf2b3690e4e74ef669259520dbd65f

ucount-Remove-the-atomicity-from-ucount-count.patch

@@ -1,89 +0,0 @@
From 040757f738e13caaa9c5078bca79aa97e11dde88 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman" <ebiederm@xmission.com>
Date: Sun, 5 Mar 2017 15:03:22 -0600
Subject: [PATCH] ucount: Remove the atomicity from ucount->count
Always increment/decrement ucount->count under the ucounts_lock. The
increments are there already and moving the decrements there means the
locking logic of the code is simpler. This simplification in the
locking logic fixes a race between put_ucounts and get_ucounts that
could result in a use-after-free because the count could go to zero, then
be found by get_ucounts, and then be freed by put_ucounts.

A bug, presumably this one, was found by a combination of syzkaller and
KASAN. JongHwan Kim reported the syzkaller failure and Dmitry Vyukov
spotted the race in the code.
Cc: stable@vger.kernel.org
Fixes: f6b2db1a3e8d ("userns: Make the count of user namespaces per user namespace")
Reported-by: JongHwan Kim <zzoru007@gmail.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Andrei Vagin <avagin@gmail.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
---
include/linux/user_namespace.h | 2 +-
kernel/ucount.c | 18 +++++++++++-------
2 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index be76523..32354b4 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -72,7 +72,7 @@ struct ucounts {
struct hlist_node node;
struct user_namespace *ns;
kuid_t uid;
- atomic_t count;
+ int count;
atomic_t ucount[UCOUNT_COUNTS];
};
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 62630a4..b4eeee0 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
new->ns = ns;
new->uid = uid;
- atomic_set(&new->count, 0);
+ new->count = 0;
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
ucounts = new;
}
}
- if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+ if (ucounts->count == INT_MAX)
ucounts = NULL;
+ else
+ ucounts->count += 1;
spin_unlock_irq(&ucounts_lock);
return ucounts;
}
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
{
unsigned long flags;
- if (atomic_dec_and_test(&ucounts->count)) {
- spin_lock_irqsave(&ucounts_lock, flags);
+ spin_lock_irqsave(&ucounts_lock, flags);
+ ucounts->count -= 1;
+ if (!ucounts->count)
hlist_del_init(&ucounts->node);
- spin_unlock_irqrestore(&ucounts_lock, flags);
+ else
+ ucounts = NULL;
+ spin_unlock_irqrestore(&ucounts_lock, flags);
- kfree(ucounts);
- }
+ kfree(ucounts);
}
static inline bool atomic_inc_below(atomic_t *v, int u)
--
2.9.3
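
For context on the locking pattern the removed patch describes, the following is a minimal userspace C sketch of the same idea. It is not kernel code: pthread_mutex_t stands in for ucounts_lock, and obj/get_obj/put_obj are made-up names used only for illustration. The point it shows is the one from the commit message above: the reference count is a plain int that is only read or written while holding the same lock that protects the lookup structure, so a "get" can never observe an object whose count has already dropped to zero and is about to be freed.

/*
 * Minimal userspace sketch (not kernel code) of the locking pattern the
 * patch moves to. pthread_mutex_t stands in for ucounts_lock; the names
 * obj, get_obj and put_obj are hypothetical.
 */
#include <limits.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
        int count;               /* protected by table_lock, not atomic */
        /* lookup linkage and payload would live here */
};

static struct obj *get_obj(struct obj *o)
{
        pthread_mutex_lock(&table_lock);
        if (o->count == INT_MAX)  /* refuse to overflow, like get_ucounts */
                o = NULL;
        else
                o->count += 1;
        pthread_mutex_unlock(&table_lock);
        return o;
}

static void put_obj(struct obj *o)
{
        pthread_mutex_lock(&table_lock);
        o->count -= 1;
        if (o->count)             /* still referenced: do not free */
                o = NULL;
        /* else: unlink from the lookup structure here, still under the lock */
        pthread_mutex_unlock(&table_lock);
        free(o);                  /* free(NULL) is a no-op */
}

By contrast, the old code's atomic_dec_and_test() ran before ucounts_lock was taken, so another CPU could find the ucounts entry in the hash table and re-increment a count that had already reached zero, after which put_ucounts would still unlink and kfree it; moving the decrement under the lock closes that window.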