Linux v3.18.3

Justin M. Forbes 2015-01-16 16:05:18 -06:00
parent c392ecaab5
commit 0455c71065
6 changed files with 6 additions and 280 deletions

blk-mq-Fix-uninitialized-kobject-at-CPU-hotplugging.patch

@@ -1,79 +0,0 @@
From: Takashi Iwai <tiwai@suse.de>
Date: Wed, 10 Dec 2014 16:38:30 +0100
Subject: [PATCH] blk-mq: Fix uninitialized kobject at CPU hotplugging
When a CPU is hotplugged, the current blk-mq spews a warning like:
kobject '(null)' (ffffe8ffffc8b5d8): tried to add an uninitialized object, something is seriously wrong.
CPU: 1 PID: 1386 Comm: systemd-udevd Not tainted 3.18.0-rc7-2.g088d59b-default #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140531_171129-lamiak 04/01/2014
0000000000000000 0000000000000002 ffffffff81605f07 ffffe8ffffc8b5d8
ffffffff8132c7a0 ffff88023341d370 0000000000000020 ffff8800bb05bd58
ffff8800bb05bd08 000000000000a0a0 000000003f441940 0000000000000007
Call Trace:
[<ffffffff81005306>] dump_trace+0x86/0x330
[<ffffffff81005644>] show_stack_log_lvl+0x94/0x170
[<ffffffff81006d21>] show_stack+0x21/0x50
[<ffffffff81605f07>] dump_stack+0x41/0x51
[<ffffffff8132c7a0>] kobject_add+0xa0/0xb0
[<ffffffff8130aee1>] blk_mq_register_hctx+0x91/0xb0
[<ffffffff8130b82e>] blk_mq_sysfs_register+0x3e/0x60
[<ffffffff81309298>] blk_mq_queue_reinit_notify+0xf8/0x190
[<ffffffff8107cfdc>] notifier_call_chain+0x4c/0x70
[<ffffffff8105fd23>] cpu_notify+0x23/0x50
[<ffffffff81060037>] _cpu_up+0x157/0x170
[<ffffffff810600d9>] cpu_up+0x89/0xb0
[<ffffffff815fa5b5>] cpu_subsys_online+0x35/0x80
[<ffffffff814323cd>] device_online+0x5d/0xa0
[<ffffffff81432485>] online_store+0x75/0x80
[<ffffffff81236a5a>] kernfs_fop_write+0xda/0x150
[<ffffffff811c5532>] vfs_write+0xb2/0x1f0
[<ffffffff811c5f42>] SyS_write+0x42/0xb0
[<ffffffff8160c4ed>] system_call_fastpath+0x16/0x1b
[<00007f0132fb24e0>] 0x7f0132fb24e0
This is indeed because of an uninitialized kobject for blk_mq_ctx.
The blk_mq_ctx kobjects are initialized in blk_mq_sysfs_init(), but
the loop there runs over hctx_for_each_ctx(), i.e. it initializes the
kobjects only for online CPUs. Thus, when a CPU is hotplugged, the
ctx for the newly onlined CPU is registered without initialization.
This patch fixes the issue by initializing all ctx kobjects belonging
to each queue, whether or not their CPUs are online.
Bugzilla: https://bugzilla.novell.com/show_bug.cgi?id=908794
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
---
block/blk-mq-sysfs.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ed5217867555..e0fb3f4a628f 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -390,16 +390,15 @@ static void blk_mq_sysfs_init(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
- int i, j;
+ int i;
kobject_init(&q->mq_kobj, &blk_mq_ktype);
- queue_for_each_hw_ctx(q, hctx, i) {
+ queue_for_each_hw_ctx(q, hctx, i)
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
- hctx_for_each_ctx(hctx, ctx, j)
- kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
- }
+ queue_for_each_ctx(q, ctx, i)
+ kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
int blk_mq_register_disk(struct gendisk *disk)
--
2.1.0
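
The init-before-add contract that this patch restores is easy to model in
plain C. Below is a minimal userspace analogue, a sketch only: none of the
fake_* names exist in the kernel, whose real API is kobject_init() followed
by kobject_add(). It reproduces the failure mode above, where the
initializer ran only for online CPUs but registration later touched the
ctx of a newly onlined one.

#include <stdio.h>
#include <stdbool.h>

struct fake_kobj {
	const char *name;
	bool initialized;	/* the real kobject tracks state_initialized */
};

static void fake_kobj_init(struct fake_kobj *k, const char *name)
{
	k->name = name;
	k->initialized = true;
}

static int fake_kobj_add(struct fake_kobj *k)
{
	if (!k->initialized) {
		/* mirrors the "tried to add an uninitialized object" warning */
		fprintf(stderr, "kobject '%s': tried to add an uninitialized object\n",
			k->name ? k->name : "(null)");
		return -1;
	}
	printf("registered '%s'\n", k->name);
	return 0;
}

int main(void)
{
	struct fake_kobj ctx[4] = { { 0 } };	/* one ctx per possible CPU */
	int nr_online = 2, i;

	/* buggy pattern: initialize only the contexts of online CPUs ... */
	for (i = 0; i < nr_online; i++)
		fake_kobj_init(&ctx[i], "blk_mq_ctx");

	fake_kobj_add(&ctx[1]);			/* fine: was initialized */
	return fake_kobj_add(&ctx[2]) ? 1 : 0;	/* CPU 2 just came online: warns */
}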

kernel-arm64.patch

@@ -4539,6 +4539,7 @@ index 2437196..914287d 100644
request_standard_resources();
efi_idmap_init();
early_ioremap_reset();
- unflatten_device_tree();
-

kernel.spec

@@ -54,7 +54,7 @@ Summary: The Linux kernel
%if 0%{?released_kernel}
# Do we have a -stable update to apply?
-%define stable_update 2
+%define stable_update 3
# Set rpm version accordingly
%if 0%{?stable_update}
%define stablerev %{stable_update}
@@ -620,9 +620,6 @@ Patch26096: cfg80211-don-t-WARN-about-two-consecutive-Country-IE.patch
#rhbz 1173806
Patch26101: powerpc-powernv-force-all-CPUs-to-be-bootable.patch
-#rhbz 1175261
-Patch26103: blk-mq-Fix-uninitialized-kobject-at-CPU-hotplugging.patch
-
Patch26107: uapi-linux-target_core_user.h-fix-headers_install.sh.patch
#rhbz 1163927
@@ -634,9 +631,6 @@ Patch26122: batman-adv-Calculate-extra-tail-size-based-on-queued.patch
#CVE-2014-9529 rhbz 1179813 1179853
Patch26124: KEYS-close-race-between-key-lookup-and-freeing.patch
-#rhbz 1178975
-Patch26125: x86-vdso-Use-asm-volatile-in-__getcpu.patch
-
#rhbz 1124119
Patch26126: uas-Do-not-blacklist-ASM1153-disk-enclosures.patch
Patch26127: uas-Add-US_FL_NO_ATA_1X-for-2-more-Seagate-disk-encl.patch
@@ -649,9 +643,6 @@ Patch26130: acpi-video-Add-disable_native_backlight-quirk-for-De.patch
#rhbz 1094948
Patch26131: acpi-video-Add-disable_native_backlight-quirk-for-Sa.patch
-#CVE-2014-9585 rhbz 1181054 1181056
-Patch26132: x86_64-vdso-Fix-the-vdso-address-randomization-algor.patch
-
# git clone ssh://git.fedorahosted.org/git/kernel-arm64.git, git diff master...devel
Patch30000: kernel-arm64.patch
@@ -1386,9 +1377,6 @@ ApplyPatch cfg80211-don-t-WARN-about-two-consecutive-Country-IE.patch
#rhbz 1173806
ApplyPatch powerpc-powernv-force-all-CPUs-to-be-bootable.patch
-#rhbz 1175261
-ApplyPatch blk-mq-Fix-uninitialized-kobject-at-CPU-hotplugging.patch
-
ApplyPatch uapi-linux-target_core_user.h-fix-headers_install.sh.patch
#rhbz 1163927
@@ -1400,9 +1388,6 @@ ApplyPatch batman-adv-Calculate-extra-tail-size-based-on-queued.patch
#CVE-2014-9529 rhbz 1179813 1179853
ApplyPatch KEYS-close-race-between-key-lookup-and-freeing.patch
-#rhbz 1178975
-ApplyPatch x86-vdso-Use-asm-volatile-in-__getcpu.patch
-
#rhbz 1124119
ApplyPatch uas-Do-not-blacklist-ASM1153-disk-enclosures.patch
ApplyPatch uas-Add-US_FL_NO_ATA_1X-for-2-more-Seagate-disk-encl.patch
@@ -1415,9 +1400,6 @@ ApplyPatch acpi-video-Add-disable_native_backlight-quirk-for-De.patch
#rhbz 1094948
ApplyPatch acpi-video-Add-disable_native_backlight-quirk-for-Sa.patch
-#CVE-2014-9585 rhbz 1181054 1181056
-ApplyPatch x86_64-vdso-Fix-the-vdso-address-randomization-algor.patch
-
# Fix for big-endian arches, already upstream
ApplyPatch mpssd-x86-only.patch
@@ -2290,6 +2272,9 @@ fi
# |_| |_____|_| (__)\ )\/\
# ||----w |
# || ||
%changelog
+* Fri Jan 16 2015 Justin M. Forbes <jforbes@fedoraproject.org> - 3.18.3-200
+- Linux v3.18.3
+
* Thu Jan 15 2015 Justin M. Forbes <jforbes@fedoraproject.org>
- Build fixes for big-endian arches

sources

@@ -1,3 +1,3 @@
9e854df51ca3fef8bfe566dbd7b89241 linux-3.18.tar.xz
813ccb96f0b379d656e57442c2587ca3 perf-man-3.18.tar.gz
-f99d6d8f844a3fe1ab7f59dd85aaa050 patch-3.18.2.xz
+0d4f5406f6fbe34a53e1c5e1d2037e8b patch-3.18.3.xz

x86-vdso-Use-asm-volatile-in-__getcpu.patch

@@ -1,53 +0,0 @@
From: Andy Lutomirski <luto@amacapital.net>
Date: Sun, 21 Dec 2014 08:57:46 -0800
Subject: [PATCH] x86, vdso: Use asm volatile in __getcpu
In Linux 3.18 and below, GCC hoists the lsl instructions in the
pvclock code all the way to the beginning of __vdso_clock_gettime,
slowing the non-paravirt case significantly. For unknown reasons,
presumably related to the removal of a branch, the performance issue
is gone as of commit
e76b027e6408 ("x86,vdso: Use LSL unconditionally for vgetcpu"),
but I don't trust GCC enough to expect the problem to stay fixed.
There should be no correctness issue, because the __getcpu calls in
__vdso_clock_gettime were never necessary in the first place.
Note to stable maintainers: In 3.18 and below, depending on
configuration, gcc 4.9.2 generates code like this:
9c3: 44 0f 03 e8 lsl %ax,%r13d
9c7: 45 89 eb mov %r13d,%r11d
9ca: 0f 03 d8 lsl %ax,%ebx
This patch won't apply as is to any released kernel, but I'll send a
trivial backported version if needed.
Fixes: 51c19b4f5927 ("x86: vdso: pvclock gettime support")
Cc: stable@vger.kernel.org # 3.8+
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
(Backported to 3.17.8 by Josh Boyer <jwboyer@fedoraproject.org>)
---
arch/x86/include/asm/vsyscall.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 2a46ca720afc..3db1fa5fdceb 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
native_read_tscp(&p);
} else {
/* Load per CPU data from GDT */
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ asm volatile("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
}
return p;
--
2.1.0
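
The effect of the one-word change is observable outside the kernel with any
asm whose result can differ between executions. The sketch below is
illustrative only: rdtsc stands in for the lsl read, x86-64 and gcc -O2 are
assumed, and whether the plain version is actually merged is up to the
compiler, which is exactly the point; without volatile, GCC may treat an
asm with outputs as a pure function of its inputs and hoist or CSE it.

#include <stdio.h>

static unsigned long tsc_plain(void)
{
	unsigned long lo, hi;
	/* no volatile: GCC may compute this once and reuse the value */
	asm("rdtsc" : "=a"(lo), "=d"(hi));
	return (hi << 32) | lo;
}

static unsigned long tsc_volatile(void)
{
	unsigned long lo, hi;
	/* volatile: re-executed at every call site */
	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return (hi << 32) | lo;
}

int main(void)
{
	/* at -O2 the two plain reads may be folded together, so the first
	   delta can print as 0 while the volatile delta stays nonzero */
	unsigned long a = tsc_plain(), b = tsc_plain();
	unsigned long c = tsc_volatile(), d = tsc_volatile();

	printf("plain delta:    %lu\n", b - a);
	printf("volatile delta: %lu\n", d - c);
	return 0;
}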

x86_64-vdso-Fix-the-vdso-address-randomization-algor.patch

@@ -1,128 +0,0 @@
From: Andy Lutomirski <luto@amacapital.net>
Date: Fri, 19 Dec 2014 16:04:11 -0800
Subject: [PATCH] x86_64, vdso: Fix the vdso address randomization algorithm
The theory behind vdso randomization is that it's mapped at a random
offset above the top of the stack. To avoid wasting a page of
memory for an extra page table, the vdso isn't supposed to extend
past the lowest PMD into which it can fit. Other than that, the
address should be a uniformly distributed address that meets all of
the alignment requirements.
The current algorithm is buggy: the vdso has about a 50% probability
of being at the very end of a PMD. The current algorithm also has a
decent chance of failing outright due to incorrect handling of the
case where the top of the stack is near the top of its PMD.
This fixes the implementation. The paxtest estimate of vdso
"randomisation" improves from 11 bits to 18 bits. (Disclaimer: I
don't know what the paxtest code is actually calculating.)
It's worth noting that this algorithm is inherently biased: the vdso
is more likely to end up near the end of its PMD than near the
beginning. Ideally we would either nix the PMD sharing requirement
or jointly randomize the vdso and the stack to reduce the bias.
In the meantime, this is a considerable improvement with basically
no risk of compatibility issues, since the allowed outputs of the
algorithm are unchanged.
As an easy test, doing this:
for i in `seq 10000`
do grep -P vdso /proc/self/maps |cut -d- -f1
done |sort |uniq -d
used to produce lots of output (1445 lines on my most recent run).
A tiny subset looks like this:
7fffdfffe000
7fffe01fe000
7fffe05fe000
7fffe07fe000
7fffe09fe000
7fffe0bfe000
7fffe0dfe000
Note the suspicious fe000 endings. With the fix, I get a much more
palatable 76 repeated addresses.
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: stable@vger.kernel.org
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
---
arch/x86/vdso/vma.c | 45 +++++++++++++++++++++++++++++----------------
1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 970463b566cf..208c2206df46 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -54,12 +54,17 @@ subsys_initcall(init_vdso);
struct linux_binprm;
-/* Put the vdso above the (randomized) stack with another randomized offset.
- This way there is no hole in the middle of address space.
- To save memory make sure it is still in the same PTE as the stack top.
- This doesn't give that many random bits.
-
- Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset. This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top. This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
@@ -67,22 +72,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
#else
unsigned long addr, end;
unsigned offset;
- end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+ /*
+ * Round up the start address. It can start out unaligned as a result
+ * of stack start randomization.
+ */
+ start = PAGE_ALIGN(start);
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;
- /* This loses some more bits than a modulo, but is cheaper */
- offset = get_random_int() & (PTRS_PER_PTE - 1);
- addr = start + (offset << PAGE_SHIFT);
- if (addr >= end)
- addr = end;
+
+ if (end > start) {
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ addr = start + (offset << PAGE_SHIFT);
+ } else {
+ addr = start;
+ }
/*
- * page-align it here so that get_unmapped_area doesn't
- * align it wrongfully again to the next page. addr can come in 4K
- * unaligned here as a result of stack start randomization.
+ * Forcibly align the final address in case we have a hardware
+ * issue that requires alignment for performance reasons.
*/
- addr = PAGE_ALIGN(addr);
addr = align_vdso_addr(addr);
return addr;
--
2.1.0
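
To see the bias the commit message quantifies, both offset computations can
be lifted into a self-contained userspace simulation. This is a sketch under
stated assumptions: the constants mirror x86-64 (4 KiB pages, 2 MiB PMDs),
rand() merely stands in for get_random_int(), and the TASK_SIZE_MAX clamp is
omitted. Built with gcc -O2, it shows the old computation collapsing onto
the end-of-PMD address on roughly half of the runs, while the new one picks
uniformly among the valid page slots.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PTE	512UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* pre-patch: random offset from the stack top, clamped to the end */
static unsigned long old_vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long end = (start + PMD_SIZE - 1) & PMD_MASK;
	unsigned long offset, addr;

	end -= len;
	offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;	/* every oversized draw collapses here */
	return PAGE_ALIGN(addr);
}

/* post-patch: draw uniformly from the pages that actually fit */
static unsigned long new_vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long end, addr;

	start = PAGE_ALIGN(start);
	end = ((start + len + PMD_SIZE - 1) & PMD_MASK) - len;
	if (end > start) {
		unsigned long slots = ((end - start) >> PAGE_SHIFT) + 1;
		addr = start + (((unsigned long)rand() % slots) << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

int main(void)
{
	const unsigned long pmd_top = 0x7fffe0000000UL, len = 2 * PAGE_SIZE;
	int old_hits = 0, new_hits = 0, i;

	srand(1);
	for (i = 0; i < 100000; i++) {
		/* model a stack top landing on a random page within the PMD */
		unsigned long start = pmd_top - PMD_SIZE +
		    (((unsigned long)rand() % (PMD_SIZE >> PAGE_SHIFT)) << PAGE_SHIFT);

		if (old_vdso_addr(start, len) == pmd_top - len)
			old_hits++;
		if (new_vdso_addr(start, len) == pmd_top - len)
			new_hits++;
	}
	printf("end-of-PMD address: old %d/100000, new %d/100000\n",
	       old_hits, new_hits);
	return 0;
}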