From 2278446e2b7cd33ad894b32e7eb63afc7db6c86e Mon Sep 17 00:00:00 2001
From: Mathias Nyman <mathias.nyman@linux.intel.com>
Date: Mon, 14 May 2018 11:57:23 +0300
Subject: xhci: Fix USB3 NULL pointer dereference at logical disconnect.

From: Mathias Nyman <mathias.nyman@linux.intel.com>

commit 2278446e2b7cd33ad894b32e7eb63afc7db6c86e upstream.

The hub driver will try to disable a USB3 device twice at logical
disconnect, racing with the xhci_free_dev() callback from the first port
disable.

This can be triggered with "udisksctl power-off --block-device <disk>"
or by writing "1" to the "remove" sysfs file for a USB3 device
in 4.17-rc4.

USB3 devices don't have a disabled link state similar to USB2 devices,
and use a U3 suspended link state instead. In this state the port
is still enabled and connected.

hub_port_connect() first disconnects the device; later, when it notices
that the device is still enabled (due to the U3 state), it tries to
disable the port again (set it to U3).

The xhci_free_dev() called during device disable is asynchronous, so the
check for an existing xhci->devs[i] when setting the link state to U3 the
second time succeeded even if the device was being freed.

The regression was caused by, and the whole issue revealed by,
commit 44a182b9d177 ("xhci: Fix use-after-free in xhci_free_virt_device"),
which sets xhci->devs[i]->udev to NULL before xhci_virt_dev() returned,
and causes a NULL pointer dereference the second time we try to set U3.

Fix this by checking that xhci->devs[i]->udev exists before setting the
link state.

The original patch went to stable so this fix needs to be applied there as
well.

Fixes: 44a182b9d177 ("xhci: Fix use-after-free in xhci_free_virt_device")
Cc: <stable@vger.kernel.org>
Reported-by: Jordan Glover <Golden_Miller83@protonmail.ch>
Tested-by: Jordan Glover <Golden_Miller83@protonmail.ch>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
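Reviewer note (not part of the patch): below is a minimal, self-contained C
sketch of the hazard the one-line fix closes. The names (slot, device,
lookup_speed) are illustrative stand-ins, not xhci API. The point is that
once an asynchronous teardown clears a member pointer before the containing
slot is freed, validating the slot pointer alone is not enough.

#include <stdio.h>

struct device { int speed; };
struct slot   { struct device *udev; };

#define MAX_SLOTS 4
static struct slot *slots[MAX_SLOTS];

/* Asynchronous teardown clears the member first; the slot itself is
 * freed later, so a concurrent lookup can observe this in-between state. */
static void begin_free(struct slot *s)
{
        s->udev = NULL;
}

static int lookup_speed(int i)
{
        /* Mirrors the fix: check the slot AND the member it points to. */
        if (!slots[i] || !slots[i]->udev)
                return -1;
        return slots[i]->udev->speed;
}

int main(void)
{
        struct device dev = { .speed = 5 };
        struct slot s = { .udev = &dev };

        slots[0] = &s;
        printf("before teardown: %d\n", lookup_speed(0));
        begin_free(&s);         /* udev is now NULL, slot still populated */
        printf("mid teardown:    %d\n", lookup_speed(0));
        return 0;
}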
 drivers/usb/host/xhci-hub.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb

        slot_id = 0;
        for (i = 0; i < MAX_HC_SLOTS; i++) {
-               if (!xhci->devs[i])
+               if (!xhci->devs[i] || !xhci->devs[i]->udev)
                        continue;
                speed = xhci->devs[i]->udev->speed;
                if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))

From 28b68acc4a88dcf91fd1dcf2577371dc9bf574cc Mon Sep 17 00:00:00 2001
From: Shuah Khan <shuahkh@osg.samsung.com>
Date: Wed, 11 Apr 2018 18:13:30 -0600
Subject: usbip: usbip_host: refine probe and disconnect debug msgs to be useful

From: Shuah Khan <shuahkh@osg.samsung.com>

commit 28b68acc4a88dcf91fd1dcf2577371dc9bf574cc upstream.

Refine the probe and disconnect debug messages to be useful and say
what is in progress.

Signed-off-by: Shuah Khan <shuahkh@osg.samsung.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/usb/usbip/stub_dev.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -302,7 +302,7 @@ static int stub_probe(struct usb_device
        struct bus_id_priv *busid_priv;
        int rc;

-       dev_dbg(&udev->dev, "Enter\n");
+       dev_dbg(&udev->dev, "Enter probe\n");

        /* check we should claim or not by busid_table */
        busid_priv = get_busid_priv(udev_busid);
@@ -404,7 +404,7 @@ static void stub_disconnect(struct usb_d
        struct bus_id_priv *busid_priv;
        int rc;

-       dev_dbg(&udev->dev, "Enter\n");
+       dev_dbg(&udev->dev, "Enter disconnect\n");

        busid_priv = get_busid_priv(udev_busid);
        if (!busid_priv) {

From 1e180f167d4e413afccbbb4a421b48b2de832549 Mon Sep 17 00:00:00 2001
From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
Date: Mon, 30 Apr 2018 16:17:19 -0600
Subject: usbip: usbip_host: delete device from busid_table after rebind

From: Shuah Khan (Samsung OSG) <shuah@kernel.org>

commit 1e180f167d4e413afccbbb4a421b48b2de832549 upstream.

The device is left in the busid_table after unbind and rebind. Rebind
initiates a usb bus scan and the original driver claims the device.
After the rescan the device should be deleted from the busid_table as
it no longer belongs to usbip_host.

Fix it to delete the device after device_attach() succeeds.

Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/usb/usbip/stub_main.c |    6 ++++++
 1 file changed, 6 insertions(+)

--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -186,6 +186,9 @@ static ssize_t rebind_store(struct devic
        if (!bid)
                return -ENODEV;

+       /* mark the device for deletion so probe ignores it during rescan */
+       bid->status = STUB_BUSID_OTHER;
+
        /* device_attach() callers should hold parent lock for USB */
        if (bid->udev->dev.parent)
                device_lock(bid->udev->dev.parent);
@@ -197,6 +200,9 @@
                return ret;
        }

+       /* delete device from busid_table */
+       del_match_busid((char *) buf);
+
        return count;
 }

From 7510df3f29d44685bab7b1918b61a8ccd57126a9 Mon Sep 17 00:00:00 2001
From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
Date: Mon, 30 Apr 2018 16:17:20 -0600
Subject: usbip: usbip_host: run rebind from exit when module is removed

From: Shuah Khan (Samsung OSG) <shuah@kernel.org>

commit 7510df3f29d44685bab7b1918b61a8ccd57126a9 upstream.

After removing the usbip_host module, devices it releases are left without
a driver. For example, when a keyboard or a mass storage device is bound
to usbip_host when it is removed, these devices are no longer bound to
any driver.

Fix it to run device_attach() from the module exit routine to restore
the devices to their original drivers. This includes cleanup changes
and moving the device_attach() code to a common routine to be called from
rebind_store() and usbip_host_exit().

Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/usb/usbip/stub_dev.c  |    6 ----
 drivers/usb/usbip/stub_main.c |   60 +++++++++++++++++++++++++++++++++++-------
 2 files changed, 52 insertions(+), 14 deletions(-)

--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -448,12 +448,8 @@ static void stub_disconnect(struct usb_d
        busid_priv->sdev = NULL;
        stub_device_free(sdev);

-       if (busid_priv->status == STUB_BUSID_ALLOC) {
+       if (busid_priv->status == STUB_BUSID_ALLOC)
                busid_priv->status = STUB_BUSID_ADDED;
-       } else {
-               busid_priv->status = STUB_BUSID_OTHER;
-               del_match_busid((char *)udev_busid);
-       }
 }

 #ifdef CONFIG_PM
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -14,6 +14,7 @@
 #define DRIVER_DESC "USB/IP Host Driver"

 struct kmem_cache *stub_priv_cache;
+
 /*
  * busid_tables defines matching busids that usbip can grab. A user can change
  * dynamically what device is locally used and what device is exported to a
@@ -169,6 +170,51 @@ static ssize_t match_busid_store(struct
 }
 static DRIVER_ATTR_RW(match_busid);

+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+       int ret;
+
+       /* device_attach() callers should hold parent lock for USB */
+       if (busid_priv->udev->dev.parent)
+               device_lock(busid_priv->udev->dev.parent);
+       ret = device_attach(&busid_priv->udev->dev);
+       if (busid_priv->udev->dev.parent)
+               device_unlock(busid_priv->udev->dev.parent);
+       if (ret < 0) {
+               dev_err(&busid_priv->udev->dev, "rebind failed\n");
+               return ret;
+       }
+       return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+       struct bus_id_priv *busid_priv;
+       int i;
+
+       /* update status to STUB_BUSID_OTHER so probe ignores the device */
+       spin_lock(&busid_table_lock);
+       for (i = 0; i < MAX_BUSID; i++) {
+               if (busid_table[i].name[0] &&
+                   busid_table[i].shutdown_busid) {
+                       busid_priv = &(busid_table[i]);
+                       busid_priv->status = STUB_BUSID_OTHER;
+               }
+       }
+       spin_unlock(&busid_table_lock);
+
+       /* now run rebind */
+       for (i = 0; i < MAX_BUSID; i++) {
+               if (busid_table[i].name[0] &&
+                   busid_table[i].shutdown_busid) {
+                       busid_priv = &(busid_table[i]);
+                       do_rebind(busid_table[i].name, busid_priv);
+               }
+       }
+#endif
+}
+
 static ssize_t rebind_store(struct device_driver *dev, const char *buf,
                            size_t count)
 {
@@ -189,16 +235,9 @@ static ssize_t rebind_store(struct devic
        /* mark the device for deletion so probe ignores it during rescan */
        bid->status = STUB_BUSID_OTHER;

-       /* device_attach() callers should hold parent lock for USB */
-       if (bid->udev->dev.parent)
-               device_lock(bid->udev->dev.parent);
-       ret = device_attach(&bid->udev->dev);
-       if (bid->udev->dev.parent)
-               device_unlock(bid->udev->dev.parent);
-       if (ret < 0) {
-               dev_err(&bid->udev->dev, "rebind failed\n");
+       ret = do_rebind((char *) buf, bid);
+       if (ret < 0)
                return ret;
-       }

        /* delete device from busid_table */
        del_match_busid((char *) buf);
@@ -323,6 +362,9 @@ static void __exit usbip_host_exit(void)
         */
        usb_deregister_device_driver(&stub_driver);

+       /* initiate scan to attach devices */
+       stub_device_rebind();
+
        kmem_cache_destroy(stub_priv_cache);
 }

From 22076557b07c12086eeb16b8ce2b0b735f7a27e7 Mon Sep 17 00:00:00 2001
From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
Date: Mon, 14 May 2018 20:49:58 -0600
Subject: usbip: usbip_host: fix NULL-ptr deref and use-after-free errors

From: Shuah Khan (Samsung OSG) <shuah@kernel.org>

commit 22076557b07c12086eeb16b8ce2b0b735f7a27e7 upstream.

usbip_host updates the device status without holding a lock from the stub
probe, disconnect and rebind code paths. When multiple requests to import
a device are received, these unprotected code paths step all over each
other and the driver fails with NULL-ptr deref and use-after-free errors.

The driver uses a table lock to protect the busid array for adding and
deleting busids to the table. However, the probe, disconnect and rebind
paths get the busid table entry and update the status without holding
the busid table lock. Add a new finer-grained lock to protect the busid
entry. This new lock will be held to search and update the busid entry
fields from get_busid_idx(), add_match_busid() and del_match_busid().

match_busid_show() does the same to access the busid entry fields.

get_busid_priv() is changed to return the pointer to the busid entry while
holding the busid lock. stub_probe(), stub_disconnect() and
stub_device_rebind() call put_busid_priv() to release the busid lock
before returning. This change fixes the unprotected code paths,
eliminating the race conditions in updating the busid entries.

Reported-by: Jakub Jirasek
Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
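Reviewer note (not part of the patch): a compile-standalone sketch of the
get/put discipline described above, transplanted to userspace pthreads so it
runs outside the kernel. The table lock guards only the search, and the
per-entry lock is acquired before the table lock is dropped, so the returned
entry cannot be mutated or torn down underneath the caller. All names here
are illustrative.

#include <pthread.h>
#include <string.h>

#define MAX_ENTRIES 8

struct entry {
        char name[32];
        int status;
        pthread_mutex_t lock;   /* finer-grained, per-entry lock */
};

static struct entry table[MAX_ENTRIES];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with entry->lock held; the caller must call put_entry(). */
static struct entry *get_entry(const char *name)
{
        struct entry *e = NULL;
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAX_ENTRIES; i++) {
                if (!strcmp(table[i].name, name)) {
                        e = &table[i];
                        /* take entry lock before the table lock drops */
                        pthread_mutex_lock(&e->lock);
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return e;
}

static void put_entry(struct entry *e)
{
        if (e)  /* NULL-safe, as the follow-up fix in this queue requires */
                pthread_mutex_unlock(&e->lock);
}

int main(void)
{
        int i;

        for (i = 0; i < MAX_ENTRIES; i++)
                pthread_mutex_init(&table[i].lock, NULL);
        strcpy(table[0].name, "1-1");

        struct entry *e = get_entry("1-1");
        if (e)
                e->status = 1;  /* update is safe: entry lock is held */
        put_entry(e);
        put_entry(get_entry("none"));   /* failed lookup handled too */
        return 0;
}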
 drivers/usb/usbip/stub.h      |    2 ++
 drivers/usb/usbip/stub_dev.c  |   33 +++++++++++++++++++++++----------
 drivers/usb/usbip/stub_main.c |   40 +++++++++++++++++++++++++++++++++++-----
 3 files changed, 60 insertions(+), 15 deletions(-)

--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -73,6 +73,7 @@ struct bus_id_priv {
        struct stub_device *sdev;
        struct usb_device *udev;
        char shutdown_busid;
+       spinlock_t busid_lock;
 };

 /* stub_priv is allocated from stub_priv_cache */
@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_dri

 /* stub_main.c */
 struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
 int del_match_busid(char *busid);
 void stub_device_cleanup_urbs(struct stub_device *sdev);

--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -300,7 +300,7 @@ static int stub_probe(struct usb_device
        struct stub_device *sdev = NULL;
        const char *udev_busid = dev_name(&udev->dev);
        struct bus_id_priv *busid_priv;
-       int rc;
+       int rc = 0;

        dev_dbg(&udev->dev, "Enter probe\n");

@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device
                 * other matched drivers by the driver core.
                 * See driver_probe_device() in driver/base/dd.c
                 */
-               return -ENODEV;
+               rc = -ENODEV;
+               goto call_put_busid_priv;
        }

        if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
                dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
                        udev_busid);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto call_put_busid_priv;
        }

        if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device
                        "%s is attached on vhci_hcd... skip!\n",
                        udev_busid);

-               return -ENODEV;
+               rc = -ENODEV;
+               goto call_put_busid_priv;
        }

        /* ok, this is my device */
        sdev = stub_device_alloc(udev);
-       if (!sdev)
-               return -ENOMEM;
+       if (!sdev) {
+               rc = -ENOMEM;
+               goto call_put_busid_priv;
+       }

        dev_info(&udev->dev,
                "usbip-host: register new device (bus %u dev %u)\n",
@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device
        }
        busid_priv->status = STUB_BUSID_ALLOC;

-       return 0;
+       rc = 0;
+       goto call_put_busid_priv;
+
 err_files:
        usb_hub_release_port(udev->parent, udev->portnum,
                             (struct usb_dev_state *) udev);
@@ -379,6 +386,9 @@ err_port:

        busid_priv->sdev = NULL;
        stub_device_free(sdev);
+
+call_put_busid_priv:
+       put_busid_priv(busid_priv);
        return rc;
 }

@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_d
        /* get stub_device */
        if (!sdev) {
                dev_err(&udev->dev, "could not get device");
-               return;
+               goto call_put_busid_priv;
        }

        dev_set_drvdata(&udev->dev, NULL);
@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_d
                          (struct usb_dev_state *) udev);
        if (rc) {
                dev_dbg(&udev->dev, "unable to release port\n");
-               return;
+               goto call_put_busid_priv;
        }

        /* If usb reset is called from event handler */
        if (usbip_in_eh(current))
-               return;
+               goto call_put_busid_priv;

        /* shutdown the current connection */
        shutdown_busid(busid_priv);
@@ -450,6 +460,9 @@ static void stub_disconnect(struct usb_d

        if (busid_priv->status == STUB_BUSID_ALLOC)
                busid_priv->status = STUB_BUSID_ADDED;
+
+call_put_busid_priv:
+       put_busid_priv(busid_priv);
 }

 #ifdef CONFIG_PM
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -26,6 +26,8 @@ static spinlock_t busid_table_lock;

 static void init_busid_table(void)
 {
+       int i;
+
        /*
         * This also sets the bus_table[i].status to
         * STUB_BUSID_OTHER, which is 0.
@@ -33,6 +35,9 @@ static void init_busid_table(void)
        memset(busid_table, 0, sizeof(busid_table));

        spin_lock_init(&busid_table_lock);
+
+       for (i = 0; i < MAX_BUSID; i++)
+               spin_lock_init(&busid_table[i].busid_lock);
 }

 /*
@@ -44,15 +49,20 @@ static int get_busid_idx(const char *bus
        int i;
        int idx = -1;

-       for (i = 0; i < MAX_BUSID; i++)
+       for (i = 0; i < MAX_BUSID; i++) {
+               spin_lock(&busid_table[i].busid_lock);
                if (busid_table[i].name[0])
                        if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
                                idx = i;
+                               spin_unlock(&busid_table[i].busid_lock);
                                break;
                        }
+               spin_unlock(&busid_table[i].busid_lock);
+       }
        return idx;
 }

+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
 struct bus_id_priv *get_busid_priv(const char *busid)
 {
        int idx;
@@ -60,13 +70,21 @@ struct bus_id_priv *get_busid_priv(const

        spin_lock(&busid_table_lock);
        idx = get_busid_idx(busid);
-       if (idx >= 0)
+       if (idx >= 0) {
                bid = &(busid_table[idx]);
+               /* get busid_lock before returning */
+               spin_lock(&bid->busid_lock);
+       }
        spin_unlock(&busid_table_lock);

        return bid;
 }

+void put_busid_priv(struct bus_id_priv *bid)
+{
+       spin_unlock(&bid->busid_lock);
+}
+
 static int add_match_busid(char *busid)
 {
        int i;
@@ -79,15 +97,19 @@ static int add_match_busid(char *busid)
                goto out;
        }

-       for (i = 0; i < MAX_BUSID; i++)
+       for (i = 0; i < MAX_BUSID; i++) {
+               spin_lock(&busid_table[i].busid_lock);
                if (!busid_table[i].name[0]) {
                        strlcpy(busid_table[i].name, busid, BUSID_SIZE);
                        if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
                            (busid_table[i].status != STUB_BUSID_REMOV))
                                busid_table[i].status = STUB_BUSID_ADDED;
                        ret = 0;
+                       spin_unlock(&busid_table[i].busid_lock);
                        break;
                }
+               spin_unlock(&busid_table[i].busid_lock);
+       }

 out:
        spin_unlock(&busid_table_lock);
@@ -108,6 +130,8 @@ int del_match_busid(char *busid)
        /* found */
        ret = 0;

+       spin_lock(&busid_table[idx].busid_lock);
+
        if (busid_table[idx].status == STUB_BUSID_OTHER)
                memset(busid_table[idx].name, 0, BUSID_SIZE);

@@ -115,6 +139,7 @@ int del_match_busid(char *busid)
            (busid_table[idx].status != STUB_BUSID_ADDED))
                busid_table[idx].status = STUB_BUSID_REMOV;

+       spin_unlock(&busid_table[idx].busid_lock);
 out:
        spin_unlock(&busid_table_lock);

@@ -127,9 +152,12 @@ static ssize_t match_busid_show(struct d
        char *out = buf;

        spin_lock(&busid_table_lock);
-       for (i = 0; i < MAX_BUSID; i++)
+       for (i = 0; i < MAX_BUSID; i++) {
+               spin_lock(&busid_table[i].busid_lock);
                if (busid_table[i].name[0])
                        out += sprintf(out, "%s ", busid_table[i].name);
+               spin_unlock(&busid_table[i].busid_lock);
+       }
        spin_unlock(&busid_table_lock);
        out += sprintf(out, "\n");

@@ -204,7 +232,7 @@ static void stub_device_rebind(void)
        }
        spin_unlock(&busid_table_lock);

-       /* now run rebind */
+       /* now run rebind - no need to hold locks. driver files are removed */
        for (i = 0; i < MAX_BUSID; i++) {
                if (busid_table[i].name[0] &&
                    busid_table[i].shutdown_busid) {
@@ -234,6 +262,8 @@ static ssize_t rebind_store(struct devic

        /* mark the device for deletion so probe ignores it during rescan */
        bid->status = STUB_BUSID_OTHER;
+       /* release the busid lock */
+       put_busid_priv(bid);

        ret = do_rebind((char *) buf, bid);
        if (ret < 0)

From c171654caa875919be3c533d3518da8be5be966e Mon Sep 17 00:00:00 2001
From: "Shuah Khan (Samsung OSG)" <shuah@kernel.org>
Date: Tue, 15 May 2018 17:57:23 -0600
Subject: usbip: usbip_host: fix bad unlock balance during stub_probe()

From: Shuah Khan (Samsung OSG) <shuah@kernel.org>

commit c171654caa875919be3c533d3518da8be5be966e upstream.

stub_probe() calls put_busid_priv() in an error path when the device isn't
found in the busid_table. Fix it by making put_busid_priv() safe to be
called with a null struct bus_id_priv pointer.

This problem happens when "usbip bind" is run without the usbip_host
driver loaded and then running modprobe. The first failed bind attempt
unbinds the device from the original driver and when usbip_host is
modprobed, stub_probe() runs, doesn't find the device in its busid table,
and calls put_busid_priv() with a null bus_id_priv pointer.

usbip-host 3-10.2: 3-10.2 is not in match_busid table... skip!

[ 367.359679] =====================================
[ 367.359681] WARNING: bad unlock balance detected!
[ 367.359683] 4.17.0-rc4+ #5 Not tainted
[ 367.359685] -------------------------------------
[ 367.359688] modprobe/2768 is trying to release lock (
[ 367.359689]
==================================================================
[ 367.359696] BUG: KASAN: null-ptr-deref in print_unlock_imbalance_bug+0x99/0x110
[ 367.359699] Read of size 8 at addr 0000000000000058 by task modprobe/2768

[ 367.359705] CPU: 4 PID: 2768 Comm: modprobe Not tainted 4.17.0-rc4+ #5

Fixes: 22076557b07c ("usbip: usbip_host: fix NULL-ptr deref and use-after-free errors") in usb-linus
Signed-off-by: Shuah Khan (Samsung OSG) <shuah@kernel.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/usb/usbip/stub_main.c |    3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -82,7 +82,8 @@ struct bus_id_priv *get_busid_priv(const

 void put_busid_priv(struct bus_id_priv *bid)
 {
-       spin_unlock(&bid->busid_lock);
+       if (bid)
+               spin_unlock(&bid->busid_lock);
 }

 static int add_match_busid(char *busid)

From 21493316a3c4598f308d5a9fa31cc74639c4caff Mon Sep 17 00:00:00 2001
From: Federico Cuello <fedux@fedux.com.ar>
Date: Wed, 9 May 2018 00:13:38 +0200
Subject: ALSA: usb: mixer: volume quirk for CM102-A+/102S+

From: Federico Cuello <fedux@fedux.com.ar>

commit 21493316a3c4598f308d5a9fa31cc74639c4caff upstream.

Currently it's not possible to set the volume lower than 26% (it just
mutes).

Also fixes this warning:

  Warning! Unlikely big volume range (=9472), cval->res is probably wrong.
  [13] FU [PCM Playback Volume] ch = 2, val = -9473/-1/1

and the volume works fine over the full range.

Signed-off-by: Federico Cuello <fedux@fedux.com.ar>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 sound/usb/mixer.c |    8 ++++++++
 1 file changed, 8 insertions(+)

--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -915,6 +915,14 @@ static void volume_control_quirks(struct
                }
                break;

+       case USB_ID(0x0d8c, 0x0103):
+               if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
+                       usb_audio_info(chip,
+                               "set volume quirk for CM102-A+/102S+\n");
+                       cval->min = -256;
+               }
+               break;
+
        case USB_ID(0x0471, 0x0101):
        case USB_ID(0x0471, 0x0104):
        case USB_ID(0x0471, 0x0105):

From 2f0d520a1a73555ac51c19cd494493f60b4c1cea Mon Sep 17 00:00:00 2001
From: Jeremy Soller <jeremy@system76.com>
Date: Mon, 7 May 2018 09:28:45 -0600
Subject: ALSA: hda/realtek - Clevo P950ER ALC1220 Fixup

From: Jeremy Soller <jeremy@system76.com>

commit 2f0d520a1a73555ac51c19cd494493f60b4c1cea upstream.

This adds support for the P950ER, which has the same required fixup as
the P950HR, but has a different PCI ID.

Signed-off-by: Jeremy Soller <jeremy@system76.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 sound/pci/hda/patch_realtek.c |    1 +
 1 file changed, 1 insertion(+)

--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),

From c8beccc19b92f5172994c0732db689c08f4f98e5 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Tue, 8 May 2018 09:27:46 +0200
Subject: ALSA: hda: Add Lenovo C50 All in one to the power_save blacklist

From: Hans de Goede <hdegoede@redhat.com>

commit c8beccc19b92f5172994c0732db689c08f4f98e5 upstream.

Power-saving is causing loud plops on the Lenovo C50 All in one, add it
to the blacklist.

BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1572975
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 sound/pci/hda/hda_intel.c |    2 ++
 1 file changed, 2 insertions(+)

--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2208,6 +2208,8 @@ static struct snd_pci_quirk power_save_b
        SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+       SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
        SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
        {}

From 3f12888dfae2a48741c4caa9214885b3aaf350f9 Mon Sep 17 00:00:00 2001
From: Wenwen Wang <wang6495@umn.edu>
Date: Sat, 5 May 2018 13:38:03 -0500
Subject: ALSA: control: fix a redundant-copy issue

From: Wenwen Wang <wang6495@umn.edu>

commit 3f12888dfae2a48741c4caa9214885b3aaf350f9 upstream.

In snd_ctl_elem_add_compat(), the fields of the struct 'data' need to be
copied from the corresponding fields of the struct 'data32' in userspace.
This is achieved by invoking the copy_from_user() and get_user()
functions. The problem here is that the 'type' field is copied twice:
once by copy_from_user() and once by get_user(). Given that the 'type'
field is not used between the two copies, the second copy is *completely*
redundant and should be removed for better performance and cleanup. The
two copies can also yield inconsistent data: the struct 'data32' resides
in userspace, so a malicious userspace process can race to change the
'type' field between the two copies. Depending on how the data is used in
the future, such an inconsistency may cause potential security risks.

For the above reasons, we should take out the second copy.

Signed-off-by: Wenwen Wang <wang6495@umn.edu>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
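Reviewer note (not part of the patch): a standalone illustration of the
double-fetch hazard described above. The 'shared' struct stands in for the
userspace struct a kernel handler reads; any field fetched twice can change
between fetches, so the rule is to copy once into private memory and
validate/use only that snapshot. All names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg { uint32_t type; uint32_t payload; };

/* Pretend this memory is writable at any moment by an untrusted party,
 * like a userspace page observed by the kernel. */
static struct msg shared;

static int handle(struct msg *out)
{
        struct msg local;

        /* Single fetch: one copy into private memory... */
        memcpy(&local, &shared, sizeof(local));

        /* ...then every check and every use reads the same snapshot.
         * A second read of shared.type here would reintroduce the race
         * that the patch removes. */
        if (local.type > 3)
                return -1;
        *out = local;
        return 0;
}

int main(void)
{
        struct msg m;

        shared.type = 2;
        shared.payload = 42;
        if (handle(&m) == 0)
                printf("type=%u payload=%u\n", m.type, m.payload);
        return 0;
}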
 sound/core/control_compat.c |    3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struc
        if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
            copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
                goto error;
-       if (get_user(data->owner, &data32->owner) ||
-           get_user(data->type, &data32->type))
+       if (get_user(data->owner, &data32->owner))
                goto error;
        switch (data->type) {
        case SNDRV_CTL_ELEM_TYPE_BOOLEAN:

From efc4a13724b852ddaa3358402a8dec024ffbcb17 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Thu, 19 Apr 2018 19:53:32 +0300
Subject: spi: pxa2xx: Allow 64-bit DMA

From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>

commit efc4a13724b852ddaa3358402a8dec024ffbcb17 upstream.

Currently only the 32-bit device address is supported for DMA. However,
starting from the Intel Sunrisepoint PCH, the DMA address of the device
FIFO can be 64-bit.

Change the respective variable to be compatible with DMA engine
expectations, i.e. to phys_addr_t.

Fixes: 34cadd9c1bcb ("spi: pxa2xx: Add support for Intel Sunrisepoint")
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/spi/spi-pxa2xx.h |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {

        /* SSP register addresses */
        void __iomem *ioaddr;
-       u32 ssdr_physical;
+       phys_addr_t ssdr_physical;

        /* SSP masks*/
        u32 dma_cr1;

From 5eb9a07a4ae1008b67d8bcd47bddb3dae97456b7 Mon Sep 17 00:00:00 2001
From: Kamal Dasu <kdasu.kdev@gmail.com>
Date: Thu, 26 Apr 2018 14:48:00 -0400
Subject: spi: bcm-qspi: Avoid setting MSPI_CDRAM_PCS for spi-nor master

From: Kamal Dasu <kdasu.kdev@gmail.com>

commit 5eb9a07a4ae1008b67d8bcd47bddb3dae97456b7 upstream.

Add a fix for probing spi-nor devices on non-zero chip selects. Set
MSPI_CDRAM_PCS (peripheral chip select) with the spi master for the MSPI
controller, and not for the MSPI/BSPI spi-nor master controller. Ensure
the cs bit in the chip select register is set on a chip select change.

Fixes: fa236a7ef24048 ("spi: bcm-qspi: Add Broadcom MSPI driver")
Signed-off-by: Kamal Dasu <kdasu.kdev@gmail.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/spi/spi-bcm-qspi.c |   24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct

 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 {
-       u32 data = 0;
+       u32 rd = 0;
+       u32 wr = 0;

-       if (qspi->curr_cs == cs)
-               return;
        if (qspi->base[CHIP_SELECT]) {
-               data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
-               data = (data & ~0xff) | (1 << cs);
-               bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+               rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+               wr = (rd & ~0xff) | (1 << cs);
+               if (rd == wr)
+                       return;
+               bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
                usleep_range(10, 20);
        }
+
+       dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
        qspi->curr_cs = cs;
 }

@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *
                        dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
                }
                mspi_cdram = MSPI_CDRAM_CONT_BIT;
-               mspi_cdram |= (~(1 << spi->chip_select) &
-                              MSPI_CDRAM_PCS);
+
+               if (has_bspi(qspi))
+                       mspi_cdram &= ~1;
+               else
+                       mspi_cdram |= (~(1 << spi->chip_select) &
+                                      MSPI_CDRAM_PCS);
+
                mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
                               MSPI_CDRAM_BITSE_BIT);

From 602805fb618b018b7a41fbb3f93c1992b078b1ae Mon Sep 17 00:00:00 2001
From: Kamal Dasu <kdasu.kdev@gmail.com>
Date: Thu, 26 Apr 2018 14:48:01 -0400
Subject: spi: bcm-qspi: Always read and set BSPI_MAST_N_BOOT_CTRL

From: Kamal Dasu <kdasu.kdev@gmail.com>

commit 602805fb618b018b7a41fbb3f93c1992b078b1ae upstream.

Always confirm the BSPI_MAST_N_BOOT_CTRL bit when enabling
or disabling BSPI transfers.

Fixes: 4e3b2d236fe00 ("spi: bcm-qspi: Add BSPI spi-nor flash controller driver")
Signed-off-by: Kamal Dasu <kdasu.kdev@gmail.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/spi/spi-bcm-qspi.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct

 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 {
-       if (!has_bspi(qspi) || (qspi->bspi_enabled))
+       if (!has_bspi(qspi))
                return;

        qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct

 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 {
-       if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+       if (!has_bspi(qspi))
                return;

        qspi->bspi_enabled = 0;

From 64f7a11586ab9262f00b8b6eceef6d8154921bd8 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <sean.j.christopherson@intel.com>
Date: Mon, 30 Apr 2018 10:01:06 -0700
Subject: KVM: vmx: update sec exec controls for UMIP iff emulating UMIP
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Sean Christopherson <sean.j.christopherson@intel.com>

commit 64f7a11586ab9262f00b8b6eceef6d8154921bd8 upstream.

Update SECONDARY_EXEC_DESC for UMIP emulation if and only if UMIP
is actually being emulated. Skipping the VMCS update eliminates
unnecessary VMREAD/VMWRITE when UMIP is supported in hardware,
and on platforms that don't have SECONDARY_VM_EXEC_CONTROL. The
latter case resolves a bug where KVM would fill the kernel log
with warnings due to failed VMWRITEs on older platforms.

Fixes: 0367f205a3b7 ("KVM: vmx: add support for emulating UMIP")
Cc: stable@vger.kernel.org #4.16
Reported-by: Paolo Zeppegno <pzeppegno@gmail.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Suggested-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kvm/vmx.c |   28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1314,6 +1314,12 @@ static inline bool cpu_has_vmx_vmfunc(vo
                SECONDARY_EXEC_ENABLE_VMFUNC;
 }

+static bool vmx_umip_emulated(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_DESC;
+}
+
 static inline bool report_flexpriority(void)
 {
        return flexpriority_enabled;
@@ -4494,14 +4500,16 @@ static int vmx_set_cr4(struct kvm_vcpu *
                (to_vmx(vcpu)->rmode.vm86_active ?
                 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

-       if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
-               vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
-                             SECONDARY_EXEC_DESC);
-               hw_cr4 &= ~X86_CR4_UMIP;
-       } else if (!is_guest_mode(vcpu) ||
-                  !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
-               vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+       if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+               if (cr4 & X86_CR4_UMIP) {
+                       vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
                              SECONDARY_EXEC_DESC);
+                       hw_cr4 &= ~X86_CR4_UMIP;
+               } else if (!is_guest_mode(vcpu) ||
+                          !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+                       vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+                                       SECONDARY_EXEC_DESC);
+       }

        if (cr4 & X86_CR4_VMXE) {
                /*
@@ -9243,12 +9251,6 @@ static bool vmx_xsaves_supported(void)
                SECONDARY_EXEC_XSAVES;
 }

-static bool vmx_umip_emulated(void)
-{
-       return vmcs_config.cpu_based_2nd_exec_ctrl &
-               SECONDARY_EXEC_DESC;
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
        u32 exit_intr_info;

From 388d4359680b56dba82fe2ffca05871e9fd2b73e Mon Sep 17 00:00:00 2001
From: Andre Przywara <andre.przywara@arm.com>
Date: Fri, 11 May 2018 15:20:12 +0100
Subject: KVM: arm/arm64: Properly protect VGIC locks from IRQs

From: Andre Przywara <andre.przywara@arm.com>

commit 388d4359680b56dba82fe2ffca05871e9fd2b73e upstream.

As Jan reported [1], lockdep complains about the VGIC not being bullet
proof. This seems to be due to two issues:
- When commit 006df0f34930 ("KVM: arm/arm64: Support calling
  vgic_update_irq_pending from irq context") promoted irq_lock and
  ap_list_lock to _irqsave, we forgot two instances of irq_lock.
  lockdep seems to pick those up.
- If a lock is _irqsave, any other locks we take inside them should be
  _irqsafe as well. So the lpi_list_lock needs to be promoted also.

This fixes both issues by simply making the remaining instances of those
locks _irqsave.
One irq_lock is addressed in a separate patch, to simplify backporting.

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2018-May/575718.html

Cc: stable@vger.kernel.org
Fixes: 006df0f34930 ("KVM: arm/arm64: Support calling vgic_update_irq_pending from irq context")
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
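Reviewer note (not part of the patch): the nesting rule above, that an
interrupt-safe outer lock forces every lock taken inside it to be
interrupt-safe too, has a rough userspace analogue with signals, sketched
below so it compiles standalone. Blocking SIGALRM plays the role of
local_irq_save(); all names are made up for illustration.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* "_irqsave" lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* nested inside */

/* Analogue of spin_lock_irqsave(): mask the async context first. */
static void lock_sigsafe(pthread_mutex_t *m, sigset_t *saved)
{
        sigset_t block;

        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        pthread_sigmask(SIG_BLOCK, &block, saved);
        pthread_mutex_lock(m);
}

static void unlock_sigrestore(pthread_mutex_t *m, const sigset_t *saved)
{
        pthread_mutex_unlock(m);
        pthread_sigmask(SIG_SETMASK, saved, NULL);
}

int main(void)
{
        sigset_t saved;

        /* Taking 'inner' somewhere else with a plain lock, while the
         * async context can still fire and contend for it, is exactly
         * the inconsistency lockdep flags: every lock nested inside an
         * interrupt-safe region must use the same discipline. */
        lock_sigsafe(&outer, &saved);
        pthread_mutex_lock(&inner);   /* safe: signals already blocked */
        pthread_mutex_unlock(&inner);
        unlock_sigrestore(&outer, &saved);
        puts("consistent lock/mask ordering held");
        return 0;
}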
 virt/kvm/arm/vgic/vgic-debug.c |    5 +++--
 virt/kvm/arm/vgic/vgic-its.c   |   10 ++++++----
 virt/kvm/arm/vgic/vgic.c       |   22 ++++++++++++++--------
 3 files changed, 23 insertions(+), 14 deletions(-)

--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_fi
        struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
        struct vgic_irq *irq;
        struct kvm_vcpu *vcpu = NULL;
+       unsigned long flags;

        if (iter->dist_id == 0) {
                print_dist_state(s, &kvm->arch.vgic);
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_fi
                irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
        }

-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
        print_irq_state(s, irq, vcpu);
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);

        return 0;
 }
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(str
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+       unsigned long flags;
        int ret;

        /* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(str
        irq->intid = intid;
        irq->target_vcpu = vcpu;

-       spin_lock(&dist->lpi_list_lock);
+       spin_lock_irqsave(&dist->lpi_list_lock, flags);

        /*
         * There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(str
        dist->lpi_list_count++;

 out_unlock:
-       spin_unlock(&dist->lpi_list_lock);
+       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        /*
         * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm
 {
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_irq *irq;
+       unsigned long flags;
        u32 *intids;
        int irq_count, i = 0;

@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm
        if (!intids)
                return -ENOMEM;

-       spin_lock(&dist->lpi_list_lock);
+       spin_lock_irqsave(&dist->lpi_list_lock, flags);
        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (i == irq_count)
                        break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm
                        continue;
                intids[i++] = irq->intid;
        }
-       spin_unlock(&dist->lpi_list_lock);
+       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        *intid_ptr = intids;
        return i;
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -40,9 +40,13 @@ struct vgic_global kvm_vgic_global_state
  * kvm->lock (mutex)
  *   its->cmd_lock (mutex)
  *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock
- *         kvm->lpi_list_lock
- *           vgic_irq->irq_lock
+ *       vgic_cpu->ap_list_lock         must be taken with IRQs disabled
+ *         kvm->lpi_list_lock           must be taken with IRQs disabled
+ *           vgic_irq->irq_lock         must be taken with IRQs disabled
+ *
+ * As the ap_list_lock might be taken from the timer interrupt handler,
+ * we have to disable IRQs before taking this lock and everything lower
+ * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
@@ -69,8 +73,9 @@ static struct vgic_irq *vgic_get_lpi(str
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;
+       unsigned long flags;

-       spin_lock(&dist->lpi_list_lock);
+       spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
@@ -86,7 +91,7 @@ static struct vgic_irq *vgic_get_lpi(str
        irq = NULL;

 out_unlock:
-       spin_unlock(&dist->lpi_list_lock);
+       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
 }
@@ -127,19 +132,20 @@ static void vgic_irq_release(struct kref
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
+       unsigned long flags;

        if (irq->intid < VGIC_MIN_LPI)
                return;

-       spin_lock(&dist->lpi_list_lock);
+       spin_lock_irqsave(&dist->lpi_list_lock, flags);
        if (!kref_put(&irq->refcount, vgic_irq_release)) {
-               spin_unlock(&dist->lpi_list_lock);
+               spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
                return;
        };

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
-       spin_unlock(&dist->lpi_list_lock);
+       spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        kfree(irq);
 }

From 9c4188762f7fee032abf8451fd9865a9abfc5516 Mon Sep 17 00:00:00 2001
From: Andre Przywara <andre.przywara@arm.com>
Date: Fri, 11 May 2018 15:20:13 +0100
Subject: KVM: arm/arm64: VGIC/ITS: Promote irq_lock() in update_affinity

From: Andre Przywara <andre.przywara@arm.com>

commit 9c4188762f7fee032abf8451fd9865a9abfc5516 upstream.

Apparently the development of update_affinity() overlapped with the
promotion of irq_lock to be _irqsave, so the patch didn't convert this
lock over. This will make lockdep complain.

Fix this by disabling IRQs around the lock.

Cc: stable@vger.kernel.org
Fixes: 08c9fd042117 ("KVM: arm/arm64: vITS: Add a helper to update the affinity of an LPI")
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 virt/kvm/arm/vgic/vgic-its.c |    5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -350,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
        int ret = 0;
+       unsigned long flags;

-       spin_lock(&irq->irq_lock);
+       spin_lock_irqsave(&irq->irq_lock, flags);
        irq->target_vcpu = vcpu;
-       spin_unlock(&irq->irq_lock);
+       spin_unlock_irqrestore(&irq->irq_lock, flags);

        if (irq->hw) {
                struct its_vlpi_map map;

From 711702b57cc3c50b84bd648de0f1ca0a378805be Mon Sep 17 00:00:00 2001
From: Andre Przywara <andre.przywara@arm.com>
Date: Fri, 11 May 2018 15:20:15 +0100
Subject: KVM: arm/arm64: VGIC/ITS save/restore: protect kvm_read_guest() calls

From: Andre Przywara <andre.przywara@arm.com>

commit 711702b57cc3c50b84bd648de0f1ca0a378805be upstream.

kvm_read_guest() will eventually look up in kvm_memslots(), which requires
either to hold the kvm->slots_lock or to be inside a kvm->srcu critical
section.
In contrast to x86 and s390 we don't take the SRCU lock on every guest
exit, so we have to do it individually for each kvm_read_guest() call.
Use the newly introduced wrapper for that.

Cc: Stable <stable@vger.kernel.org> # 4.12+
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 virt/kvm/arm/vgic/vgic-its.c |    4 ++--
 virt/kvm/arm/vgic/vgic-v3.c  |    4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -1896,7 +1896,7 @@ static int scan_its_table(struct vgic_it
        int next_offset;
        size_t byte_offset;

-       ret = kvm_read_guest(kvm, gpa, entry, esz);
+       ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
        if (ret)
                return ret;

@@ -2266,7 +2266,7 @@ static int vgic_its_restore_cte(struct v
        int ret;

        BUG_ON(esz > sizeof(val));
-       ret = kvm_read_guest(kvm, gpa, &val, esz);
+       ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
        if (ret)
                return ret;
        val = le64_to_cpu(val);
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -329,7 +329,7 @@ retry:
        bit_nr = irq->intid % BITS_PER_BYTE;
        ptr = pendbase + byte_offset;

-       ret = kvm_read_guest(kvm, ptr, &val, 1);
+       ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
        if (ret)
                return ret;

@@ -382,7 +382,7 @@ int vgic_v3_save_pending_tables(struct k
        ptr = pendbase + byte_offset;

        if (byte_offset != last_byte_offset) {
-               ret = kvm_read_guest(kvm, ptr, &val, 1);
+               ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
                last_byte_offset = byte_offset;

From bf308242ab98b5d1648c3663e753556bef9bec01 Mon Sep 17 00:00:00 2001
From: Andre Przywara <andre.przywara@arm.com>
Date: Fri, 11 May 2018 15:20:14 +0100
Subject: KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock

From: Andre Przywara <andre.przywara@arm.com>

commit bf308242ab98b5d1648c3663e753556bef9bec01 upstream.

kvm_read_guest() will eventually look up in kvm_memslots(), which requires
either to hold the kvm->slots_lock or to be inside a kvm->srcu critical
section.
In contrast to x86 and s390 we don't take the SRCU lock on every guest
exit, so we have to do it individually for each kvm_read_guest() call.

Provide a wrapper which does that and use that everywhere.

Note that ending the SRCU critical section before returning from the
kvm_read_guest() wrapper is safe, because the data has been *copied*, so
we don't need to rely on valid references to the memslot anymore.

Cc: Stable <stable@vger.kernel.org> # 4.8+
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
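Reviewer note (not part of the patch): a compile-standalone aside on the
copy-then-unlock argument above, using a pthread rwlock in place of SRCU.
Holding the read side only across the copy is sound precisely because the
caller leaves with a private snapshot; returning a pointer into the
protected structure would not be. All names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
static char table[64] = "memslot contents";     /* protected structure */

/* Safe: the read-side critical section may end before return because
 * the data has been *copied*; no reference into 'table' escapes. */
static void read_copied(char *dst, size_t len)
{
        pthread_rwlock_rdlock(&tbl_lock);
        memcpy(dst, table, len);
        pthread_rwlock_unlock(&tbl_lock);
}

/* Unsafe pattern, for contrast: a pointer into the protected data
 * outlives the critical section, so a writer may replace or free it
 * under the caller's feet. */
static const char *read_borrowed(void)
{
        const char *p;

        pthread_rwlock_rdlock(&tbl_lock);
        p = table;
        pthread_rwlock_unlock(&tbl_lock);
        return p;       /* dangling with respect to the locking protocol */
}

int main(void)
{
        char buf[64];

        read_copied(buf, sizeof(buf));
        printf("%s\n", buf);
        (void)read_borrowed;    /* shown only as the anti-pattern */
        return 0;
}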
 arch/arm/include/asm/kvm_mmu.h   |   16 ++++++++++++++++
 arch/arm64/include/asm/kvm_mmu.h |   16 ++++++++++++++++
 virt/kvm/arm/vgic/vgic-its.c     |   15 ++++++++-------
 3 files changed, 40 insertions(+), 7 deletions(-)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -295,6 +295,22 @@ static inline unsigned int kvm_get_vmid_
        return 8;
 }

+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
        return kvm_ksym_ref(__kvm_hyp_vector);
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -348,6 +348,22 @@ static inline unsigned int kvm_get_vmid_
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }

+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu.h>

--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -281,8 +281,8 @@ static int update_lpi_config(struct kvm
        int ret;
        unsigned long flags;

-       ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-                            &prop, 1);
+       ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+                                 &prop, 1);

        if (ret)
                return ret;
@@ -444,8 +444,9 @@ static int its_sync_lpi_pending_table(st
         * this very same byte in the last iteration. Reuse that.
         */
        if (byte_offset != last_byte_offset) {
-               ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-                                    &pendmask, 1);
+               ret = kvm_read_guest_lock(vcpu->kvm,
+                                         pendbase + byte_offset,
+                                         &pendmask, 1);
                if (ret) {
                        kfree(intids);
                        return ret;
@@ -789,7 +790,7 @@ static bool vgic_its_check_id(struct vgi
                return false;

        /* Each 1st level entry is represented by a 64-bit value. */
-       if (kvm_read_guest(its->dev->kvm,
+       if (kvm_read_guest_lock(its->dev->kvm,
                           BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
                           &indirect_ptr, sizeof(indirect_ptr)))
                return false;
@@ -1370,8 +1371,8 @@ static void vgic_its_process_commands(st
        cbaser = CBASER_ADDRESS(its->cbaser);

        while (its->cwriter != its->creadr) {
-               int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-                                        cmd_buf, ITS_CMD_SIZE);
+               int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+                                             cmd_buf, ITS_CMD_SIZE);
                /*
                 * If kvm_read_guest() fails, this could be due to the guest
                 * programming a bogus value in CBASER or something else going

From 40626a1bf657eef557fcee9e1b8ef5b4f5b56dcd Mon Sep 17 00:00:00 2001
From: Guenter Roeck <linux@roeck-us.net>
Date: Sun, 29 Apr 2018 08:08:24 -0700
Subject: hwmon: (k10temp) Fix reading critical temperature register

From: Guenter Roeck <linux@roeck-us.net>

commit 40626a1bf657eef557fcee9e1b8ef5b4f5b56dcd upstream.

The HTC (Hardware Temperature Control) register has moved
for recent chips.

Cc: stable@vger.kernel.org # v4.16+
Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/hwmon/k10temp.c |   40 ++++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)

--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -63,10 +63,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 #define NB_CAP_HTC                     0x00000400

 /*
- * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
- * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
- * Control]
+ * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
+ * and REG_REPORTED_TEMPERATURE have been moved to
+ * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
+ * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 */
+#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET    0xd8200c64
 #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET    0xd8200ca4

 /* F17h M01h Access througn SMN */
@@ -74,6 +76,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);

 struct k10temp_data {
        struct pci_dev *pdev;
+       void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
        void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
        int temp_offset;
        u32 temp_adjust_mask;
@@ -98,6 +101,11 @@ static const struct tctl_offset tctl_off
        { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
 };

+static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+       pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
+}
+
 static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
 {
        pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
@@ -114,6 +122,12 @@ static void amd_nb_index_read(struct pci
        mutex_unlock(&nb_smu_ind_mutex);
 }

+static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+       amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+                         F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
+}
+
 static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
 {
        amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
@@ -160,8 +174,7 @@ static ssize_t show_temp_crit(struct dev
        u32 regval;
        int value;

-       pci_read_config_dword(data->pdev,
-                             REG_HARDWARE_THERMAL_CONTROL, &regval);
+       data->read_htcreg(data->pdev, &regval);
        value = ((regval >> 16) & 0x7f) * 500 + 52000;
        if (show_hyst)
                value -= ((regval >> 24) & 0xf) * 500;
@@ -181,13 +194,18 @@ static umode_t k10temp_is_visible(struct
        struct pci_dev *pdev = data->pdev;

        if (index >= 2) {
-               u32 reg_caps, reg_htc;
+               u32 reg;
+
+               if (!data->read_htcreg)
+                       return 0;

                pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
-                                     &reg_caps);
-               pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
-                                     &reg_htc);
-               if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
+                                     &reg);
+               if (!(reg & NB_CAP_HTC))
+                       return 0;
+
+               data->read_htcreg(data->pdev, &reg);
+               if (!(reg & HTC_ENABLE))
                        return 0;
        }
        return attr->mode;
@@ -268,11 +286,13 @@ static int k10temp_probe(struct pci_dev

        if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
            boot_cpu_data.x86_model == 0x70)) {
+               data->read_htcreg = read_htcreg_nb_f15;
                data->read_tempreg = read_tempreg_nb_f15;
        } else if (boot_cpu_data.x86 == 0x17) {
                data->temp_adjust_mask = 0x80000;
                data->read_tempreg = read_tempreg_nb_f17;
        } else {
+               data->read_htcreg = read_htcreg_pci;
                data->read_tempreg = read_tempreg_pci;
        }

From 3b031622f598481970400519bd5abc2a16708282 Mon Sep 17 00:00:00 2001
|
|
From: Guenter Roeck <linux@roeck-us.net>
|
|
Date: Fri, 4 May 2018 13:01:33 -0700
|
|
Subject: hwmon: (k10temp) Use API function to access System Management Network
|
|
|
|
From: Guenter Roeck <linux@roeck-us.net>
|
|
|
|
commit 3b031622f598481970400519bd5abc2a16708282 upstream.
|
|
|
|
The SMN (System Management Network) on Family 17h AMD CPUs is also accessed
|
|
from other drivers, specifically EDAC. Accessing it directly is racy.
|
|
On top of that, accessing the SMN through root bridge 00:00 is wrong on
|
|
multi-die CPUs and may result in reading the temperature from the wrong
|
|
die. Use available API functions to fix the problem.
|
|
|
|
For this to work, add dependency on AMD_NB. Also change the Raven Ridge
|
|
PCI device ID to point to Data Fabric Function 3, since this ID is used
|
|
by the API functions to find the CPU node.
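
As an illustration, the expected usage of these helpers looks roughly like
this (a sketch; amd_smn_read() and amd_pci_dev_to_node_id() are the real
amd_nb interfaces, the offset macro is the one defined in k10temp.c):

  #include <asm/amd_nb.h>

  static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
  {
          /* Resolve the CPU node from the DF function 3 PCI device,
           * then read the SMN register through the serialized helper. */
          amd_smn_read(amd_pci_dev_to_node_id(pdev),
                       F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
  }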
|
|
|
|
Cc: stable@vger.kernel.org # v4.16+
|
|
Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
|
|
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/hwmon/Kconfig | 2 +-
|
|
drivers/hwmon/k10temp.c | 11 ++++++-----
|
|
2 files changed, 7 insertions(+), 6 deletions(-)
|
|
|
|
--- a/drivers/hwmon/Kconfig
|
|
+++ b/drivers/hwmon/Kconfig
|
|
@@ -272,7 +272,7 @@ config SENSORS_K8TEMP
|
|
|
|
config SENSORS_K10TEMP
|
|
tristate "AMD Family 10h+ temperature sensor"
|
|
- depends on X86 && PCI
|
|
+ depends on X86 && PCI && AMD_NB
|
|
help
|
|
If you say yes here you get support for the temperature
|
|
sensor(s) inside your CPU. Supported are later revisions of
|
|
--- a/drivers/hwmon/k10temp.c
|
|
+++ b/drivers/hwmon/k10temp.c
|
|
@@ -23,6 +23,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/module.h>
|
|
#include <linux/pci.h>
|
|
+#include <asm/amd_nb.h>
|
|
#include <asm/processor.h>
|
|
|
|
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
|
|
@@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
|
|
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
|
|
#endif
|
|
|
|
-#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
|
|
-#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
|
|
+#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
|
|
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
|
|
#endif
|
|
|
|
/* CPUID function 0x80000001, ebx */
|
|
@@ -136,8 +137,8 @@ static void read_tempreg_nb_f15(struct p
|
|
|
|
static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
|
|
{
|
|
- amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
|
|
- F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
|
|
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
|
|
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
|
|
}
|
|
|
|
static ssize_t temp1_input_show(struct device *dev,
|
|
@@ -322,7 +323,7 @@ static const struct pci_device_id k10tem
|
|
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
|
|
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
|
|
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
|
|
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
|
|
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
|
|
{}
|
|
};
|
|
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
|
|
From d66a7355717ec903d455277a550d930ba13df4a8 Mon Sep 17 00:00:00 2001
|
|
From: Halil Pasic <pasic@linux.vnet.ibm.com>
|
|
Date: Tue, 24 Apr 2018 13:26:56 +0200
|
|
Subject: vfio: ccw: fix cleanup if cp_prefetch fails
|
|
|
|
From: Halil Pasic <pasic@linux.vnet.ibm.com>
|
|
|
|
commit d66a7355717ec903d455277a550d930ba13df4a8 upstream.
|
|
|
|
If the translation of a channel program fails, we may end up attempting
|
|
to clean up (free, unpin) stuff that never got translated (and allocated,
|
|
pinned) in the first place.
|
|
|
|
By adjusting the lengths of the chains accordingly (so the element that
|
|
failed, and all subsequent elements are excluded) cleanup activities
|
|
based on false assumptions can be avoided.
|
|
|
|
Let's make sure cp_free works properly after cp_prefetch returns with an
|
|
error by setting ch_len of a ccw chain to the number of the translated
|
|
CCWs on that chain.
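
In the abstract this is the usual "record how far we got" cleanup idiom; a
minimal sketch with hypothetical names (translate_one() and
zero_remaining_chains() are illustrative, not the driver's API):

  for (idx = 0; idx < chain->ch_len; idx++) {
          ret = translate_one(chain, idx);
          if (ret) {
                  /* Keep only the CCWs that were actually translated,
                   * and mark every later chain as empty, so the free
                   * path cannot touch untranslated elements. */
                  chain->ch_len = idx;
                  zero_remaining_chains(chain);
                  return ret;
          }
  }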
|
|
|
|
Cc: stable@vger.kernel.org #v4.12+
|
|
Acked-by: Pierre Morel <pmorel@linux.vnet.ibm.com>
|
|
Reviewed-by: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
|
|
Signed-off-by: Halil Pasic <pasic@linux.vnet.ibm.com>
|
|
Signed-off-by: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
|
|
Message-Id: <20180423110113.59385-2-bjsdjshi@linux.vnet.ibm.com>
|
|
[CH: fixed typos]
|
|
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
|
|
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/s390/cio/vfio_ccw_cp.c | 13 ++++++++++++-
|
|
1 file changed, 12 insertions(+), 1 deletion(-)
|
|
|
|
--- a/drivers/s390/cio/vfio_ccw_cp.c
|
|
+++ b/drivers/s390/cio/vfio_ccw_cp.c
|
|
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
|
|
* and stores the result to ccwchain list. @cp must have been
|
|
* initialized by a previous call with cp_init(). Otherwise, undefined
|
|
* behavior occurs.
|
|
+ * For each chain composing the channel program:
|
|
+ * - On entry ch_len holds the count of CCWs to be translated.
|
|
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
|
|
+ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
|
|
*
|
|
* The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
|
|
* as helpers to do ccw chain translation inside the kernel. Basically
|
|
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *
|
|
for (idx = 0; idx < len; idx++) {
|
|
ret = ccwchain_fetch_one(chain, idx, cp);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto out_err;
|
|
}
|
|
}
|
|
|
|
return 0;
|
|
+out_err:
|
|
+ /* Only cleanup the chain elements that were actually translated. */
|
|
+ chain->ch_len = idx;
|
|
+ list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
|
|
+ chain->ch_len = 0;
|
|
+ }
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
From 45dd9b0666a162f8e4be76096716670cf1741f0e Mon Sep 17 00:00:00 2001
|
|
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
|
|
Date: Wed, 9 May 2018 14:36:09 -0400
|
|
Subject: tracing/x86/xen: Remove zero data size trace events trace_xen_mmu_flush_tlb{_all}
|
|
|
|
From: Steven Rostedt (VMware) <rostedt@goodmis.org>
|
|
|
|
commit 45dd9b0666a162f8e4be76096716670cf1741f0e upstream.
|
|
|
|
Doing an audit of trace events, I discovered two trace events in the xen
|
|
subsystem that use a hack to create zero data size trace events. This is not
|
|
what trace events are for. Trace events add memory footprint overhead, and
|
|
if all you need to do is see if a function is hit or not, simply make that
|
|
function noinline and use function tracer filtering.
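
For instance (a sketch of the recommended alternative; the function name is
illustrative):

  /* noinline keeps the symbol visible to the function tracer, so hits
   * can be observed via set_ftrace_filter in tracefs without paying
   * for a trace event. */
  static noinline void my_flush_helper(void)
  {
          /* actual work here */
  }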
|
|
|
|
Worse yet, the hack used was:
|
|
|
|
__array(char, x, 0)
|
|
|
|
Which creates a static string of zero length. There are assumptions about
|
|
such constructs in ftrace that this is a dynamic string that is nul
|
|
terminated. This is not the case with these tracepoints and can cause
|
|
problems in various parts of ftrace.
|
|
|
|
Nuke the trace events!
|
|
|
|
Link: http://lkml.kernel.org/r/20180509144605.5a220327@gandalf.local.home
|
|
|
|
Cc: stable@vger.kernel.org
|
|
Fixes: 95a7d76897c1e ("xen/mmu: Use Xen specific TLB flush instead of the generic one.")
|
|
Reviewed-by: Juergen Gross <jgross@suse.com>
|
|
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
arch/x86/xen/mmu.c | 4 +---
|
|
arch/x86/xen/mmu_pv.c | 4 +---
|
|
include/trace/events/xen.h | 16 ----------------
|
|
3 files changed, 2 insertions(+), 22 deletions(-)
|
|
|
|
--- a/arch/x86/xen/mmu.c
|
|
+++ b/arch/x86/xen/mmu.c
|
|
@@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void
|
|
}
|
|
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
|
|
|
|
-static void xen_flush_tlb_all(void)
|
|
+static noinline void xen_flush_tlb_all(void)
|
|
{
|
|
struct mmuext_op *op;
|
|
struct multicall_space mcs;
|
|
|
|
- trace_xen_mmu_flush_tlb_all(0);
|
|
-
|
|
preempt_disable();
|
|
|
|
mcs = xen_mc_entry(sizeof(*op));
|
|
--- a/arch/x86/xen/mmu_pv.c
|
|
+++ b/arch/x86/xen/mmu_pv.c
|
|
@@ -1280,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void)
|
|
return this_cpu_read(xen_vcpu_info.arch.cr2);
|
|
}
|
|
|
|
-static void xen_flush_tlb(void)
|
|
+static noinline void xen_flush_tlb(void)
|
|
{
|
|
struct mmuext_op *op;
|
|
struct multicall_space mcs;
|
|
|
|
- trace_xen_mmu_flush_tlb(0);
|
|
-
|
|
preempt_disable();
|
|
|
|
mcs = xen_mc_entry(sizeof(*op));
|
|
--- a/include/trace/events/xen.h
|
|
+++ b/include/trace/events/xen.h
|
|
@@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
|
|
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
|
|
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
|
|
|
|
-TRACE_EVENT(xen_mmu_flush_tlb_all,
|
|
- TP_PROTO(int x),
|
|
- TP_ARGS(x),
|
|
- TP_STRUCT__entry(__array(char, x, 0)),
|
|
- TP_fast_assign((void)x),
|
|
- TP_printk("%s", "")
|
|
- );
|
|
-
|
|
-TRACE_EVENT(xen_mmu_flush_tlb,
|
|
- TP_PROTO(int x),
|
|
- TP_ARGS(x),
|
|
- TP_STRUCT__entry(__array(char, x, 0)),
|
|
- TP_fast_assign((void)x),
|
|
- TP_printk("%s", "")
|
|
- );
|
|
-
|
|
TRACE_EVENT(xen_mmu_flush_tlb_one_user,
|
|
TP_PROTO(unsigned long addr),
|
|
TP_ARGS(addr),
|
|
From 85f4f12d51397f1648e1f4350f77e24039b82d61 Mon Sep 17 00:00:00 2001
|
|
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
|
|
Date: Tue, 15 May 2018 22:24:52 -0400
|
|
Subject: vsprintf: Replace memory barrier with static_key for random_ptr_key update
|
|
|
|
From: Steven Rostedt (VMware) <rostedt@goodmis.org>
|
|
|
|
commit 85f4f12d51397f1648e1f4350f77e24039b82d61 upstream.
|
|
|
|
Reviewing Tobin's patches for getting pointers out early before
|
|
entropy has been established, I noticed that there's a lone smp_mb() in
|
|
the code. As with most lone memory barriers, this one appears to be
|
|
incorrectly used.
|
|
|
|
We currently basically have this:
|
|
|
|
get_random_bytes(&ptr_key, sizeof(ptr_key));
|
|
/*
|
|
* have_filled_random_ptr_key==true is dependent on get_random_bytes().
|
|
* ptr_to_id() needs to see have_filled_random_ptr_key==true
|
|
* after get_random_bytes() returns.
|
|
*/
|
|
smp_mb();
|
|
WRITE_ONCE(have_filled_random_ptr_key, true);
|
|
|
|
And later we have:
|
|
|
|
if (unlikely(!have_filled_random_ptr_key))
|
|
return string(buf, end, "(ptrval)", spec);
|
|
|
|
/* Missing memory barrier here. */
|
|
|
|
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
|
|
|
|
As the CPU can perform speculative loads, we could have a situation
|
|
with the following:
|
|
|
|
CPU0 CPU1
|
|
---- ----
|
|
load ptr_key = 0
|
|
store ptr_key = random
|
|
smp_mb()
|
|
store have_filled_random_ptr_key
|
|
|
|
load have_filled_random_ptr_key = true
|
|
|
|
BAD BAD BAD! (you're so bad!)
|
|
|
|
Because nothing prevents CPU1 from loading ptr_key before loading
|
|
have_filled_random_ptr_key.
|
|
|
|
This race is very unlikely, but we can't keep an incorrect smp_mb() in
|
|
place. Instead, replace the have_filled_random_ptr_key with a static_branch
|
|
not_filled_random_ptr_key, that is initialized to true and changed to false
|
|
when we get enough entropy. If the update happens in early boot, the
|
|
static_key is updated immediately, otherwise it will have to wait till
|
|
entropy is filled and this happens in an interrupt handler which can't
|
|
enable a static_key, as that requires a preemptible context. In that case, a
|
|
work_queue is used to enable it, as entropy already took too long to
|
|
establish in the first place, waiting a little more shouldn't hurt anything.
|
|
|
|
The benefit of using the static key is that the unlikely branch in
|
|
vsprintf() now becomes a nop.
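
Reduced to a sketch, the mechanism looks like this (the static key and
workqueue APIs are the real kernel interfaces; the surrounding names are
illustrative):

  static DEFINE_STATIC_KEY_TRUE(not_ready);

  static void enable_workfn(struct work_struct *work)
  {
          /* static_branch_disable() can block, so it must run from
           * preemptible context; an interrupt handler queues this
           * work item instead of flipping the key directly. */
          static_branch_disable(&not_ready);
  }
  static DECLARE_WORK(enable_work, enable_workfn);

  static const char *lookup(void)
  {
          /* Fast path: compiled down to a nop once the key flips. */
          if (static_branch_unlikely(&not_ready))
                  return "(not ready)";
          return "ready";
  }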
|
|
|
|
Link: http://lkml.kernel.org/r/20180515100558.21df515e@gandalf.local.home
|
|
|
|
Cc: stable@vger.kernel.org
|
|
Fixes: ad67b74d2469d ("printk: hash addresses printed with %p")
|
|
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
lib/vsprintf.c | 26 +++++++++++++++-----------
|
|
1 file changed, 15 insertions(+), 11 deletions(-)
|
|
|
|
--- a/lib/vsprintf.c
|
|
+++ b/lib/vsprintf.c
|
|
@@ -1659,19 +1659,22 @@ char *pointer_string(char *buf, char *en
|
|
return number(buf, end, (unsigned long int)ptr, spec);
|
|
}
|
|
|
|
-static bool have_filled_random_ptr_key __read_mostly;
|
|
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
|
|
static siphash_key_t ptr_key __read_mostly;
|
|
|
|
-static void fill_random_ptr_key(struct random_ready_callback *unused)
|
|
+static void enable_ptr_key_workfn(struct work_struct *work)
|
|
{
|
|
get_random_bytes(&ptr_key, sizeof(ptr_key));
|
|
- /*
|
|
- * have_filled_random_ptr_key==true is dependent on get_random_bytes().
|
|
- * ptr_to_id() needs to see have_filled_random_ptr_key==true
|
|
- * after get_random_bytes() returns.
|
|
- */
|
|
- smp_mb();
|
|
- WRITE_ONCE(have_filled_random_ptr_key, true);
|
|
+ /* Needs to run from preemptible context */
|
|
+ static_branch_disable(¬_filled_random_ptr_key);
|
|
+}
|
|
+
|
|
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
|
|
+
|
|
+static void fill_random_ptr_key(struct random_ready_callback *unused)
|
|
+{
|
|
+ /* This may be in an interrupt handler. */
|
|
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
|
|
}
|
|
|
|
static struct random_ready_callback random_ready = {
|
|
@@ -1685,7 +1688,8 @@ static int __init initialize_ptr_random(
|
|
if (!ret) {
|
|
return 0;
|
|
} else if (ret == -EALREADY) {
|
|
- fill_random_ptr_key(&random_ready);
|
|
+ /* This is in preemptible context */
|
|
+ enable_ptr_key_workfn(&enable_ptr_key_work);
|
|
return 0;
|
|
}
|
|
|
|
@@ -1699,7 +1703,7 @@ static char *ptr_to_id(char *buf, char *
|
|
unsigned long hashval;
|
|
const int default_width = 2 * sizeof(ptr);
|
|
|
|
- if (unlikely(!have_filled_random_ptr_key)) {
|
|
+ if (static_branch_unlikely(¬_filled_random_ptr_key)) {
|
|
spec.field_width = default_width;
|
|
/* string length must be less than default_width */
|
|
return string(buf, end, "(ptrval)", spec);
|
|
From f9bc6b2dd9cf025f827f471769e1d88b527bfb91 Mon Sep 17 00:00:00 2001
|
|
From: Guenter Roeck <linux@roeck-us.net>
|
|
Date: Fri, 4 May 2018 13:01:32 -0700
|
|
Subject: x86/amd_nb: Add support for Raven Ridge CPUs
|
|
|
|
From: Guenter Roeck <linux@roeck-us.net>
|
|
|
|
commit f9bc6b2dd9cf025f827f471769e1d88b527bfb91 upstream.
|
|
|
|
Add Raven Ridge root bridge and data fabric PCI IDs.
|
|
This is required for amd_pci_dev_to_node_id() and amd_smn_read().
|
|
|
|
Cc: stable@vger.kernel.org # v4.16+
|
|
Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
|
|
Acked-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
arch/x86/kernel/amd_nb.c | 6 ++++++
|
|
1 file changed, 6 insertions(+)
|
|
|
|
--- a/arch/x86/kernel/amd_nb.c
|
|
+++ b/arch/x86/kernel/amd_nb.c
|
|
@@ -14,8 +14,11 @@
|
|
#include <asm/amd_nb.h>
|
|
|
|
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
|
|
+#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
|
|
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
|
|
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
|
|
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
|
|
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
|
|
|
|
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
|
|
static DEFINE_MUTEX(smn_mutex);
|
|
@@ -24,6 +27,7 @@ static u32 *flush_words;
|
|
|
|
static const struct pci_device_id amd_root_ids[] = {
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
|
|
{}
|
|
};
|
|
|
|
@@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_i
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
|
|
{}
|
|
};
|
|
@@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
|
|
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
|
|
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
|
|
{}
|
|
};
|
|
From bb765d1c331f62b59049d35607ed2e365802bef9 Mon Sep 17 00:00:00 2001
|
|
From: Jann Horn <jannh@google.com>
|
|
Date: Wed, 4 Apr 2018 21:03:21 +0200
|
|
Subject: tee: shm: fix use-after-free via temporarily dropped reference
|
|
|
|
From: Jann Horn <jannh@google.com>
|
|
|
|
commit bb765d1c331f62b59049d35607ed2e365802bef9 upstream.
|
|
|
|
Bump the file's refcount before moving the reference into the fd table,
|
|
not afterwards. The old code could drop the file's refcount to zero for a
|
|
short moment before calling get_file() via get_dma_buf().
|
|
|
|
This code can only be triggered on ARM systems that use Linaro's OP-TEE.
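
Annotated, the corrected ordering in the diff below follows the general
rule: take the reference before publishing the object, and drop it only if
publishing fails:

  /* dma_buf_fd() installs the buffer in the fd table, where another
   * thread could immediately close() it, so grab the reference first. */
  get_dma_buf(shm->dmabuf);
  fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
  if (fd < 0)
          dma_buf_put(shm->dmabuf);   /* undo: no fd was created */
  return fd;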
|
|
|
|
Fixes: 967c9cca2cc5 ("tee: generic TEE subsystem")
|
|
Signed-off-by: Jann Horn <jannh@google.com>
|
|
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/tee/tee_shm.c | 5 +++--
|
|
1 file changed, 3 insertions(+), 2 deletions(-)
|
|
|
|
--- a/drivers/tee/tee_shm.c
|
|
+++ b/drivers/tee/tee_shm.c
|
|
@@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
|
|
if (!(shm->flags & TEE_SHM_DMA_BUF))
|
|
return -EINVAL;
|
|
|
|
+ get_dma_buf(shm->dmabuf);
|
|
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
|
|
- if (fd >= 0)
|
|
- get_dma_buf(shm->dmabuf);
|
|
+ if (fd < 0)
|
|
+ dma_buf_put(shm->dmabuf);
|
|
return fd;
|
|
}
|
|
|
|
From 2f6adf481527c8ab8033c601f55bfb5b3712b2ac Mon Sep 17 00:00:00 2001
|
|
From: Florian Westphal <fw@strlen.de>
|
|
Date: Tue, 10 Apr 2018 09:00:24 +0200
|
|
Subject: netfilter: nf_tables: free set name in error path
|
|
|
|
From: Florian Westphal <fw@strlen.de>
|
|
|
|
commit 2f6adf481527c8ab8033c601f55bfb5b3712b2ac upstream.
|
|
|
|
set->name must be free'd here in case ops->init fails.
|
|
|
|
Fixes: 387454901bd6 ("netfilter: nf_tables: Allow set names of up to 255 chars")
|
|
Signed-off-by: Florian Westphal <fw@strlen.de>
|
|
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
net/netfilter/nf_tables_api.c | 8 +++++---
|
|
1 file changed, 5 insertions(+), 3 deletions(-)
|
|
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -3203,18 +3203,20 @@ static int nf_tables_newset(struct net *
|
|
|
|
err = ops->init(set, &desc, nla);
|
|
if (err < 0)
|
|
- goto err2;
|
|
+ goto err3;
|
|
|
|
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
|
|
if (err < 0)
|
|
- goto err3;
|
|
+ goto err4;
|
|
|
|
list_add_tail_rcu(&set->list, &table->sets);
|
|
table->use++;
|
|
return 0;
|
|
|
|
-err3:
|
|
+err4:
|
|
ops->destroy(set);
|
|
+err3:
|
|
+ kfree(set->name);
|
|
err2:
|
|
kvfree(set);
|
|
err1:
|
|
From 569ccae68b38654f04b6842b034aa33857f605fe Mon Sep 17 00:00:00 2001
|
|
From: Florian Westphal <fw@strlen.de>
|
|
Date: Tue, 10 Apr 2018 09:30:27 +0200
|
|
Subject: netfilter: nf_tables: can't fail after linking rule into active rule list
|
|
|
|
From: Florian Westphal <fw@strlen.de>
|
|
|
|
commit 569ccae68b38654f04b6842b034aa33857f605fe upstream.
|
|
|
|
Rules in nftables are freed using kfree, but are protected by RCU, i.e. we
|
|
must wait for a grace period to elapse.
|
|
|
|
Normal removal patch does this, but nf_tables_newrule() doesn't obey
|
|
this rule during error handling.
|
|
|
|
It calls nft_trans_rule_add() *after* linking the rule, and, if that
|
|
fails to allocate memory, it unlinks the rule and then kfree() it --
|
|
this is unsafe.
|
|
|
|
Switch order -- first add rule to transaction list, THEN link it
|
|
to public list.
|
|
|
|
Note: nft_trans_rule_add() uses GFP_KERNEL; it will not fail, so this
|
|
is not a problem in practice (spotted only during code review).
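
The safe shape, in the abstract (names taken from the diff below): perform
every fallible step before the object becomes visible to RCU readers, so
the error path may still free it directly:

  /* Allocate the transaction record first ... */
  if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
          /* Never published, so no grace period is needed. */
          err = -ENOMEM;
          goto err2;
  }
  /* ... and only then link the rule where readers can see it. */
  list_add_tail_rcu(&rule->list, &chain->rules);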
|
|
|
|
Fixes: 0628b123c96d12 ("netfilter: nfnetlink: add batch support and use it from nf_tables")
|
|
Signed-off-by: Florian Westphal <fw@strlen.de>
|
|
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
net/netfilter/nf_tables_api.c | 59 ++++++++++++++++++++++--------------------
|
|
1 file changed, 32 insertions(+), 27 deletions(-)
|
|
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -2357,41 +2357,46 @@ static int nf_tables_newrule(struct net
|
|
}
|
|
|
|
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
|
|
- if (nft_is_active_next(net, old_rule)) {
|
|
- trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
|
|
- old_rule);
|
|
- if (trans == NULL) {
|
|
- err = -ENOMEM;
|
|
- goto err2;
|
|
- }
|
|
- nft_deactivate_next(net, old_rule);
|
|
- chain->use--;
|
|
- list_add_tail_rcu(&rule->list, &old_rule->list);
|
|
- } else {
|
|
+ if (!nft_is_active_next(net, old_rule)) {
|
|
err = -ENOENT;
|
|
goto err2;
|
|
}
|
|
- } else if (nlh->nlmsg_flags & NLM_F_APPEND)
|
|
- if (old_rule)
|
|
- list_add_rcu(&rule->list, &old_rule->list);
|
|
- else
|
|
- list_add_tail_rcu(&rule->list, &chain->rules);
|
|
- else {
|
|
- if (old_rule)
|
|
- list_add_tail_rcu(&rule->list, &old_rule->list);
|
|
- else
|
|
- list_add_rcu(&rule->list, &chain->rules);
|
|
- }
|
|
+ trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
|
|
+ old_rule);
|
|
+ if (trans == NULL) {
|
|
+ err = -ENOMEM;
|
|
+ goto err2;
|
|
+ }
|
|
+ nft_deactivate_next(net, old_rule);
|
|
+ chain->use--;
|
|
+
|
|
+ if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
|
|
+ err = -ENOMEM;
|
|
+ goto err2;
|
|
+ }
|
|
|
|
- if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
|
|
- err = -ENOMEM;
|
|
- goto err3;
|
|
+ list_add_tail_rcu(&rule->list, &old_rule->list);
|
|
+ } else {
|
|
+ if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
|
|
+ err = -ENOMEM;
|
|
+ goto err2;
|
|
+ }
|
|
+
|
|
+ if (nlh->nlmsg_flags & NLM_F_APPEND) {
|
|
+ if (old_rule)
|
|
+ list_add_rcu(&rule->list, &old_rule->list);
|
|
+ else
|
|
+ list_add_tail_rcu(&rule->list, &chain->rules);
|
|
+ } else {
|
|
+ if (old_rule)
|
|
+ list_add_tail_rcu(&rule->list, &old_rule->list);
|
|
+ else
|
|
+ list_add_rcu(&rule->list, &chain->rules);
|
|
+ }
|
|
}
|
|
chain->use++;
|
|
return 0;
|
|
|
|
-err3:
|
|
- list_del_rcu(&rule->list);
|
|
err2:
|
|
nf_tables_rule_destroy(&ctx, rule);
|
|
err1:
|
|
From ae0662f84b105776734cb089703a7bf834bac195 Mon Sep 17 00:00:00 2001
|
|
From: kbuild test robot <fengguang.wu@intel.com>
|
|
Date: Sat, 20 Jan 2018 04:27:58 +0800
|
|
Subject: netfilter: nf_tables: nf_tables_obj_lookup_byhandle() can be static
|
|
|
|
From: kbuild test robot <fengguang.wu@intel.com>
|
|
|
|
commit ae0662f84b105776734cb089703a7bf834bac195 upstream.
|
|
|
|
Fixes: 3ecbfd65f50e ("netfilter: nf_tables: allocate handle and delete objects via handle")
|
|
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
|
|
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
net/netfilter/nf_tables_api.c | 8 ++++----
|
|
1 file changed, 4 insertions(+), 4 deletions(-)
|
|
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -4399,9 +4399,9 @@ struct nft_object *nf_tables_obj_lookup(
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
|
|
|
|
-struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
|
|
- const struct nlattr *nla,
|
|
- u32 objtype, u8 genmask)
|
|
+static struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
|
|
+ const struct nlattr *nla,
|
|
+ u32 objtype, u8 genmask)
|
|
{
|
|
struct nft_object *obj;
|
|
|
|
@@ -4921,7 +4921,7 @@ struct nft_flowtable *nf_tables_flowtabl
|
|
}
|
|
EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
|
|
|
|
-struct nft_flowtable *
|
|
+static struct nft_flowtable *
|
|
nf_tables_flowtable_lookup_byhandle(const struct nft_table *table,
|
|
const struct nlattr *nla, u8 genmask)
|
|
{
|
|
From a057344806d035cb9ac991619fa07854e807562d Mon Sep 17 00:00:00 2001
|
|
From: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
|
Date: Wed, 25 Apr 2018 13:07:31 +0200
|
|
Subject: ARM64: dts: marvell: armada-cp110: Add clocks for the xmdio node
|
|
|
|
From: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
|
|
|
commit a057344806d035cb9ac991619fa07854e807562d upstream.
|
|
|
|
The Marvell XSMI controller needs 3 clocks to operate correctly :
|
|
- The MG clock (clk 5)
|
|
- The MG Core clock (clk 6)
|
|
- The GOP clock (clk 18)
|
|
|
|
This commit adds them, to avoid system hangs when using these
|
|
interfaces.
|
|
|
|
[gregory.clement: use the real first commit to fix and add the cc:stable
|
|
flag]
|
|
Fixes: f66b2aff46ea ("arm64: dts: marvell: add xmdio nodes for 7k/8k")
|
|
Cc: <stable@vger.kernel.org>
|
|
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
|
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
arch/arm64/boot/dts/marvell/armada-cp110.dtsi | 2 ++
|
|
1 file changed, 2 insertions(+)
|
|
|
|
--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
|
|
@@ -143,6 +143,8 @@
|
|
#size-cells = <0>;
|
|
compatible = "marvell,xmdio";
|
|
reg = <0x12a600 0x10>;
|
|
+ clocks = <&CP110_LABEL(clk) 1 5>,
|
|
+ <&CP110_LABEL(clk) 1 6>, <&CP110_LABEL(clk) 1 18>;
|
|
status = "disabled";
|
|
};
|
|
|
|
From f43194c1447c9536efb0859c2f3f46f6bf2b9154 Mon Sep 17 00:00:00 2001
|
|
From: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
|
Date: Wed, 25 Apr 2018 20:19:47 +0200
|
|
Subject: ARM64: dts: marvell: armada-cp110: Add mg_core_clk for ethernet node
|
|
|
|
From: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
|
|
|
commit f43194c1447c9536efb0859c2f3f46f6bf2b9154 upstream.
|
|
|
|
The Marvell PPv2.2 controller present on CP-110 needs the extra "mg_core_clk"
|
|
clock to avoid system hangs when powering some network interfaces up.
|
|
|
|
This issue appeared after a recent clock rework on Armada 7K/8K platforms.
|
|
|
|
This commit adds the new clock and updates the documentation accordingly.
|
|
|
|
[gregory.clement: use the real first commit to fix and add the cc:stable
|
|
flag]
|
|
Fixes: e3af9f7c6ece ("RM64: dts: marvell: armada-cp110: Fix clock resources for various node")
|
|
Cc: <stable@vger.kernel.org>
|
|
Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
|
|
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
Documentation/devicetree/bindings/net/marvell-pp2.txt | 9 +++++----
|
|
arch/arm64/boot/dts/marvell/armada-cp110.dtsi | 5 +++--
|
|
2 files changed, 8 insertions(+), 6 deletions(-)
|
|
|
|
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
|
|
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
|
|
@@ -21,9 +21,10 @@ Required properties:
|
|
- main controller clock (for both armada-375-pp2 and armada-7k-pp2)
|
|
- GOP clock (for both armada-375-pp2 and armada-7k-pp2)
|
|
- MG clock (only for armada-7k-pp2)
|
|
+ - MG Core clock (only for armada-7k-pp2)
|
|
- AXI clock (only for armada-7k-pp2)
|
|
-- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
|
|
- and "axi_clk" (the 2 latter only for armada-7k-pp2).
|
|
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk",
|
|
+ "mg_core_clk" and "axi_clk" (the 3 latter only for armada-7k-pp2).
|
|
|
|
The ethernet ports are represented by subnodes. At least one port is
|
|
required.
|
|
@@ -80,8 +81,8 @@ cpm_ethernet: ethernet@0 {
|
|
compatible = "marvell,armada-7k-pp22";
|
|
reg = <0x0 0x100000>, <0x129000 0xb000>;
|
|
clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
|
|
- <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
|
|
- clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";
|
|
+ <&cpm_syscon0 1 5>, <&cpm_syscon0 1 6>, <&cpm_syscon0 1 18>;
|
|
+ clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk";
|
|
|
|
eth0: eth0 {
|
|
interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
|
|
--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
|
|
+++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
|
|
@@ -40,9 +40,10 @@
|
|
compatible = "marvell,armada-7k-pp22";
|
|
reg = <0x0 0x100000>, <0x129000 0xb000>;
|
|
clocks = <&CP110_LABEL(clk) 1 3>, <&CP110_LABEL(clk) 1 9>,
|
|
- <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 18>;
|
|
+ <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>,
|
|
+ <&CP110_LABEL(clk) 1 18>;
|
|
clock-names = "pp_clk", "gop_clk",
|
|
- "mg_clk", "axi_clk";
|
|
+ "mg_clk", "mg_core_clk", "axi_clk";
|
|
marvell,system-controller = <&CP110_LABEL(syscon0)>;
|
|
status = "disabled";
|
|
dma-coherent;
|
|
From 06cb616b1bca7080824acfedb3d4c898e7a64836 Mon Sep 17 00:00:00 2001
|
|
From: Alexander Monakov <amonakov@ispras.ru>
|
|
Date: Sat, 28 Apr 2018 16:56:06 +0300
|
|
Subject: i2c: designware: fix poll-after-enable regression
|
|
|
|
From: Alexander Monakov <amonakov@ispras.ru>
|
|
|
|
commit 06cb616b1bca7080824acfedb3d4c898e7a64836 upstream.
|
|
|
|
Not all revisions of the DW I2C controller implement the enable status register.
|
|
On platforms where that's the case (e.g. BG2CD and SPEAr ARM SoCs), waiting
|
|
for enable will time out as reading the unimplemented register yields zero.
|
|
|
|
It was observed that reading the IC_ENABLE_STATUS register once suffices to
|
|
avoid getting it stuck on Bay Trail hardware, so replace polling with one
|
|
dummy read of the register.
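
That is, the fix below replaces the wait loop with (sketch, using the
driver's own accessors as they appear in the diff):

  /* Enable the adapter ... */
  __i2c_dw_enable(dev, true);
  /* ... and issue one read of the (possibly unimplemented) status
   * register; the value is intentionally ignored. */
  dw_readl(dev, DW_IC_ENABLE_STATUS);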
|
|
|
|
Fixes: fba4adbbf670 ("i2c: designware: must wait for enable")
|
|
Signed-off-by: Alexander Monakov <amonakov@ispras.ru>
|
|
Tested-by: Ben Gardner <gardner.ben@gmail.com>
|
|
Acked-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
|
|
Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
|
|
Cc: stable@kernel.org
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/i2c/busses/i2c-designware-master.c | 5 ++++-
|
|
1 file changed, 4 insertions(+), 1 deletion(-)
|
|
|
|
--- a/drivers/i2c/busses/i2c-designware-master.c
|
|
+++ b/drivers/i2c/busses/i2c-designware-master.c
|
|
@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i
|
|
i2c_dw_disable_int(dev);
|
|
|
|
/* Enable the adapter */
|
|
- __i2c_dw_enable_and_wait(dev, true);
|
|
+ __i2c_dw_enable(dev, true);
|
|
+
|
|
+ /* Dummy read to avoid the register getting stuck on Bay Trail */
|
|
+ dw_readl(dev, DW_IC_ENABLE_STATUS);
|
|
|
|
/* Clear and enable interrupts */
|
|
dw_readl(dev, DW_IC_CLR_INTR);
|
|
From 90d617633368ab97a2c7571c6e66dad54f39228d Mon Sep 17 00:00:00 2001
|
|
From: Boris Brezillon <boris.brezillon@bootlin.com>
|
|
Date: Wed, 9 May 2018 09:13:58 +0200
|
|
Subject: mtd: rawnand: marvell: Fix read logic for layouts with ->nchunks > 2
|
|
|
|
From: Boris Brezillon <boris.brezillon@bootlin.com>
|
|
|
|
commit 90d617633368ab97a2c7571c6e66dad54f39228d upstream.
|
|
|
|
The code is doing monolithic reads for all chunks except the last one
|
|
which is wrong since a monolithic read will issue the
|
|
READ0+ADDRS+READ_START sequence. It not only takes longer because it
|
|
forces the NAND chip to reload the page content into its internal
|
|
cache, but by doing that we also reset the column pointer to 0, which
|
|
means we'll always read the first chunk instead of moving to the next
|
|
one.
|
|
|
|
Rework the code to do a monolithic read only for the first chunk,
|
|
then switch to naked reads for all intermediate chunks and finally
|
|
issue a last naked read for the last chunk.
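
Schematically, the chunk loop selects the command extension type like this
(a sketch of the selection logic only, not the full command setup):

  /* Pick the xtype per chunk so the NAND column pointer keeps
   * advancing instead of being reset to 0 by a fresh READ0. */
  if (chunk == 0)
          xtype = XTYPE_MONOLITHIC_RW;    /* READ0 + addrs + READ_START */
  else if (chunk < lt->nchunks - 1)
          xtype = XTYPE_NAKED_RW;         /* continue at current column */
  else
          xtype = XTYPE_LAST_NAKED_RW;    /* final transfer */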
|
|
|
|
Fixes: 02f26ecf8c77 ("mtd: nand: add reworked Marvell NAND controller driver")
|
|
Cc: stable@vger.kernel.org
|
|
Reported-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
|
|
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
|
|
Tested-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
|
|
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/mtd/nand/marvell_nand.c | 8 +++++---
|
|
1 file changed, 5 insertions(+), 3 deletions(-)
|
|
|
|
--- a/drivers/mtd/nand/marvell_nand.c
|
|
+++ b/drivers/mtd/nand/marvell_nand.c
|
|
@@ -1190,11 +1190,13 @@ static void marvell_nfc_hw_ecc_bch_read_
|
|
NDCB0_CMD2(NAND_CMD_READSTART);
|
|
|
|
/*
|
|
- * Trigger the naked read operation only on the last chunk.
|
|
- * Otherwise, use monolithic read.
|
|
+ * Trigger the monolithic read on the first chunk, then naked read on
|
|
+ * intermediate chunks and finally a last naked read on the last chunk.
|
|
*/
|
|
- if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
|
|
+ if (chunk == 0)
|
|
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
|
|
+ else if (chunk < lt->nchunks - 1)
|
|
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
|
|
else
|
|
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
|
|
|
|
From c1d2a31397ec51f0370f6bd17b19b39152c263cb Mon Sep 17 00:00:00 2001
|
|
From: Nicholas Piggin <npiggin@gmail.com>
|
|
Date: Tue, 15 May 2018 01:59:47 +1000
|
|
Subject: powerpc/powernv: Fix NVRAM sleep in invalid context when crashing
|
|
|
|
From: Nicholas Piggin <npiggin@gmail.com>
|
|
|
|
commit c1d2a31397ec51f0370f6bd17b19b39152c263cb upstream.
|
|
|
|
Similarly to opal_event_shutdown, opal_nvram_write can be called in
|
|
the crash path with irqs disabled. Special case the delay to avoid
|
|
sleeping in invalid context.
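
The idiom being applied, as a standalone sketch (the helper name is
hypothetical; the diff below open-codes this at both call sites):

  static void opal_busy_delay(unsigned int ms)
  {
          /* msleep() schedules, which is invalid with interrupts
           * disabled; fall back to a busy-wait there. */
          if (in_interrupt() || irqs_disabled())
                  mdelay(ms);
          else
                  msleep(ms);
  }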
|
|
|
|
Fixes: 3b8070335f75 ("powerpc/powernv: Fix OPAL NVRAM driver OPAL_BUSY loops")
|
|
Cc: stable@vger.kernel.org # v3.2
|
|
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
|
|
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
arch/powerpc/platforms/powernv/opal-nvram.c | 14 ++++++++++++--
|
|
1 file changed, 12 insertions(+), 2 deletions(-)
|
|
|
|
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
|
|
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
|
|
@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf
|
|
return count;
|
|
}
|
|
|
|
+/*
|
|
+ * This can be called in the panic path with interrupts off, so use
|
|
+ * mdelay in that case.
|
|
+ */
|
|
static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
|
|
{
|
|
s64 rc = OPAL_BUSY;
|
|
@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *bu
|
|
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
|
|
rc = opal_write_nvram(__pa(buf), count, off);
|
|
if (rc == OPAL_BUSY_EVENT) {
|
|
- msleep(OPAL_BUSY_DELAY_MS);
|
|
+ if (in_interrupt() || irqs_disabled())
|
|
+ mdelay(OPAL_BUSY_DELAY_MS);
|
|
+ else
|
|
+ msleep(OPAL_BUSY_DELAY_MS);
|
|
opal_poll_events(NULL);
|
|
} else if (rc == OPAL_BUSY) {
|
|
- msleep(OPAL_BUSY_DELAY_MS);
|
|
+ if (in_interrupt() || irqs_disabled())
|
|
+ mdelay(OPAL_BUSY_DELAY_MS);
|
|
+ else
|
|
+ msleep(OPAL_BUSY_DELAY_MS);
|
|
}
|
|
}
|
|
|
|
From 7f6df440b8623c441c42d070bf592e2d2c1fa9bb Mon Sep 17 00:00:00 2001
|
|
From: Haneen Mohammed <hamohammed.sa@gmail.com>
|
|
Date: Fri, 11 May 2018 07:15:42 +0300
|
|
Subject: drm: Match sysfs name in link removal to link creation
|
|
|
|
From: Haneen Mohammed <hamohammed.sa@gmail.com>
|
|
|
|
commit 7f6df440b8623c441c42d070bf592e2d2c1fa9bb upstream.
|
|
|
|
This patch matches the sysfs name used in the unlinking with the
|
|
linking function. Otherwise, remove_compat_control_link() fails to remove
|
|
sysfs created by create_compat_control_link() in drm_dev_register().
|
|
|
|
Fixes: 6449b088dd51 ("drm: Add fake controlD* symlinks for backwards
|
|
compat")
|
|
Cc: Dave Airlie <airlied@gmail.com>
|
|
Cc: Alex Deucher <alexander.deucher@amd.com>
|
|
Cc: Emil Velikov <emil.l.velikov@gmail.com>
|
|
Cc: David Herrmann <dh.herrmann@gmail.com>
|
|
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
Cc: Daniel Vetter <daniel.vetter@intel.com>
|
|
Cc: Gustavo Padovan <gustavo@padovan.org>
|
|
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
|
|
Cc: Sean Paul <seanpaul@chromium.org>
|
|
Cc: David Airlie <airlied@linux.ie>
|
|
Cc: dri-devel@lists.freedesktop.org
|
|
Cc: <stable@vger.kernel.org> # v4.10+
|
|
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
|
|
[seanpaul added Fixes and Cc tags]
|
|
Signed-off-by: Sean Paul <seanpaul@chromium.org>
|
|
Link: https://patchwork.freedesktop.org/patch/msgid/20180511041542.GA4253@haneen-vb
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/gpu/drm/drm_drv.c | 2 +-
|
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
|
|
|
--- a/drivers/gpu/drm/drm_drv.c
|
|
+++ b/drivers/gpu/drm/drm_drv.c
|
|
@@ -716,7 +716,7 @@ static void remove_compat_control_link(s
|
|
if (!minor)
|
|
return;
|
|
|
|
- name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
|
|
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
|
|
if (!name)
|
|
return;
|
|
|
|
From 0cf442c6bcf572e04f5690340d5b8e62afcee2ca Mon Sep 17 00:00:00 2001
|
|
From: Miquel Raynal <miquel.raynal@bootlin.com>
|
|
Date: Tue, 24 Apr 2018 17:45:06 +0200
|
|
Subject: cpufreq: armada-37xx: driver relies on cpufreq-dt
|
|
|
|
From: Miquel Raynal <miquel.raynal@bootlin.com>
|
|
|
|
commit 0cf442c6bcf572e04f5690340d5b8e62afcee2ca upstream.
|
|
|
|
Armada-37xx driver registers a cpufreq-dt driver. Not having
|
|
CONFIG_CPUFREQ_DT selected leads to a silent abort during the probe.
|
|
Prevent that situation by having the former depending on the latter.
|
|
|
|
Fixes: 92ce45fb875d7 (cpufreq: Add DVFS support for Armada 37xx)
|
|
Cc: 4.16+ <stable@vger.kernel.org> # 4.16+
|
|
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
|
|
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
|
|
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/cpufreq/Kconfig.arm | 2 +-
|
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
|
|
|
--- a/drivers/cpufreq/Kconfig.arm
|
|
+++ b/drivers/cpufreq/Kconfig.arm
|
|
@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
|
|
|
|
config ARM_ARMADA_37XX_CPUFREQ
|
|
tristate "Armada 37xx CPUFreq support"
|
|
- depends on ARCH_MVEBU
|
|
+ depends on ARCH_MVEBU && CPUFREQ_DT
|
|
help
|
|
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
|
|
The Armada 37xx PMU supports 4 frequency and VDD levels.
|
|
From 1e3054b98c5415d5cb5f8824fc33b548ae5644c3 Mon Sep 17 00:00:00 2001
|
|
From: Matthew Wilcox <mawilcox@microsoft.com>
|
|
Date: Fri, 18 May 2018 16:08:44 -0700
|
|
Subject: lib/test_bitmap.c: fix bitmap optimisation tests to report errors correctly
|
|
|
|
From: Matthew Wilcox <mawilcox@microsoft.com>
|
|
|
|
commit 1e3054b98c5415d5cb5f8824fc33b548ae5644c3 upstream.
|
|
|
|
I had neglected to increment the error counter when the tests failed,
|
|
which made the tests noisy when they failed, but did not actually return an
|
|
error code.
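
In other words, every mismatch now both logs and counts (sketch of one of
the fixed checks, matching the diff below):

  if (!bitmap_equal(bmap1, bmap2, 1024)) {
          printk("set not equal %d %d\n", start, nbits);
          failed_tests++;   /* reflected in the module's exit status */
  }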
|
|
|
|
Link: http://lkml.kernel.org/r/20180509114328.9887-1-mpe@ellerman.id.au
|
|
Fixes: 3cc78125a081 ("lib/test_bitmap.c: add optimisation tests")
|
|
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
|
|
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
|
|
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
|
|
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
|
|
Reviewed-by: Kees Cook <keescook@chromium.org>
|
|
Cc: Yury Norov <ynorov@caviumnetworks.com>
|
|
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
|
|
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
|
|
Cc: <stable@vger.kernel.org> [4.13+]
|
|
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
|
|
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
lib/test_bitmap.c | 21 +++++++++++++++------
|
|
1 file changed, 15 insertions(+), 6 deletions(-)
|
|
|
|
--- a/lib/test_bitmap.c
|
|
+++ b/lib/test_bitmap.c
|
|
@@ -329,23 +329,32 @@ static void noinline __init test_mem_opt
|
|
unsigned int start, nbits;
|
|
|
|
for (start = 0; start < 1024; start += 8) {
|
|
- memset(bmap1, 0x5a, sizeof(bmap1));
|
|
- memset(bmap2, 0x5a, sizeof(bmap2));
|
|
for (nbits = 0; nbits < 1024 - start; nbits += 8) {
|
|
+ memset(bmap1, 0x5a, sizeof(bmap1));
|
|
+ memset(bmap2, 0x5a, sizeof(bmap2));
|
|
+
|
|
bitmap_set(bmap1, start, nbits);
|
|
__bitmap_set(bmap2, start, nbits);
|
|
- if (!bitmap_equal(bmap1, bmap2, 1024))
|
|
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
|
|
printk("set not equal %d %d\n", start, nbits);
|
|
- if (!__bitmap_equal(bmap1, bmap2, 1024))
|
|
+ failed_tests++;
|
|
+ }
|
|
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
|
|
printk("set not __equal %d %d\n", start, nbits);
|
|
+ failed_tests++;
|
|
+ }
|
|
|
|
bitmap_clear(bmap1, start, nbits);
|
|
__bitmap_clear(bmap2, start, nbits);
|
|
- if (!bitmap_equal(bmap1, bmap2, 1024))
|
|
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
|
|
printk("clear not equal %d %d\n", start, nbits);
|
|
- if (!__bitmap_equal(bmap1, bmap2, 1024))
|
|
+ failed_tests++;
|
|
+ }
|
|
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
|
|
printk("clear not __equal %d %d\n", start,
|
|
nbits);
|
|
+ failed_tests++;
|
|
+ }
|
|
}
|
|
}
|
|
}
|
|
From 9f418224e8114156d995b98fa4e0f4fd21f685fe Mon Sep 17 00:00:00 2001
|
|
From: Ross Zwisler <ross.zwisler@linux.intel.com>
|
|
Date: Fri, 18 May 2018 16:09:06 -0700
|
|
Subject: radix tree: fix multi-order iteration race
|
|
|
|
From: Ross Zwisler <ross.zwisler@linux.intel.com>
|
|
|
|
commit 9f418224e8114156d995b98fa4e0f4fd21f685fe upstream.
|
|
|
|
Fix a race in the multi-order iteration code which causes the kernel to
|
|
hit a GP fault. This was first seen with a production v4.15 based
|
|
kernel (4.15.6-300.fc27.x86_64) utilizing a DAX workload which used
|
|
order 9 PMD DAX entries.
|
|
|
|
The race has to do with how we tear down multi-order sibling entries
|
|
when we are removing an item from the tree. Remember for example that
|
|
an order 2 entry looks like this:
|
|
|
|
struct radix_tree_node.slots[] = [entry][sibling][sibling][sibling]
|
|
|
|
where 'entry' is in some slot in the struct radix_tree_node, and the
|
|
three slots following 'entry' contain sibling pointers which point back
|
|
to 'entry.'
|
|
|
|
When we delete 'entry' from the tree, we call :
|
|
|
|
radix_tree_delete()
|
|
radix_tree_delete_item()
|
|
__radix_tree_delete()
|
|
replace_slot()
|
|
|
|
replace_slot() first removes the siblings in order from the first to the
|
|
last, and then replaces 'entry' with NULL. This means that for a
|
|
brief period of time we end up with one or more of the siblings removed,
|
|
so:
|
|
|
|
struct radix_tree_node.slots[] = [entry][NULL][sibling][sibling]
|
|
|
|
This causes an issue if you have a reader iterating over the slots in
|
|
the tree via radix_tree_for_each_slot() while only under
|
|
rcu_read_lock()/rcu_read_unlock() protection. This is a common case in
|
|
mm/filemap.c.
|
|
|
|
The issue is that when __radix_tree_next_slot() => skip_siblings() tries
|
|
to skip over the sibling entries in the slots, it currently does so with
|
|
an exact match on the slot directly preceding our current slot.
|
|
Normally this works:
|
|
|
|
V preceding slot
|
|
struct radix_tree_node.slots[] = [entry][sibling][sibling][sibling]
|
|
^ current slot
|
|
|
|
This lets you find the first sibling, and you skip them all in order.
|
|
|
|
But in the case where one of the siblings is NULL, that slot is skipped
|
|
and then our sibling detection is interrupted:
|
|
|
|
V preceding slot
|
|
struct radix_tree_node.slots[] = [entry][NULL][sibling][sibling]
|
|
^ current slot
|
|
|
|
This means that the sibling pointers aren't recognized since they point
|
|
all the way back to 'entry', so we think that they are normal internal
|
|
radix tree pointers. This causes us to think we need to walk down to a
|
|
struct radix_tree_node starting at the address of 'entry'.
|
|
|
|
In a real running kernel this will crash the thread with a GP fault when
|
|
you try and dereference the slots in your broken node starting at
|
|
'entry'.
|
|
|
|
We fix this race by fixing the way that skip_siblings() detects sibling
|
|
nodes. Instead of testing against the preceding slot we instead look
|
|
for siblings via is_sibling_entry() which compares against the position
|
|
of the struct radix_tree_node.slots[] array. This ensures that sibling
|
|
entries are properly identified, even if they are no longer contiguous
|
|
with the 'entry' they point to.
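
Conceptually, the positional test asks "does this pointer land inside the
parent's own slots[] array?" rather than "does it equal the previous
slot?". A simplified sketch (the real helper additionally decodes the
internal-node tag before comparing):

  static inline bool is_sibling(const struct radix_tree_node *parent,
                                void *entry)
  {
          return entry >= (void *)parent->slots &&
                 entry <  (void *)(parent->slots + RADIX_TREE_MAP_SIZE);
  }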
|
|
|
|
Link: http://lkml.kernel.org/r/20180503192430.7582-6-ross.zwisler@linux.intel.com
|
|
Fixes: 148deab223b2 ("radix-tree: improve multiorder iterators")
|
|
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
|
|
Reported-by: CR, Sapthagirish <sapthagirish.cr@intel.com>
|
|
Reviewed-by: Jan Kara <jack@suse.cz>
|
|
Cc: Matthew Wilcox <willy@infradead.org>
|
|
Cc: Christoph Hellwig <hch@lst.de>
|
|
Cc: Dan Williams <dan.j.williams@intel.com>
|
|
Cc: Dave Chinner <david@fromorbit.com>
|
|
Cc: <stable@vger.kernel.org>
|
|
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
|
|
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
lib/radix-tree.c | 6 ++----
|
|
1 file changed, 2 insertions(+), 4 deletions(-)
|
|
|
|
--- a/lib/radix-tree.c
|
|
+++ b/lib/radix-tree.c
|
|
@@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_t
|
|
static void __rcu **skip_siblings(struct radix_tree_node **nodep,
|
|
void __rcu **slot, struct radix_tree_iter *iter)
|
|
{
|
|
- void *sib = node_to_entry(slot - 1);
|
|
-
|
|
while (iter->index < iter->next_index) {
|
|
*nodep = rcu_dereference_raw(*slot);
|
|
- if (*nodep && *nodep != sib)
|
|
+ if (*nodep && !is_sibling_entry(iter->node, *nodep))
|
|
return slot;
|
|
slot++;
|
|
iter->index = __radix_tree_iter_add(iter, 1);
|
|
@@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void
|
|
struct radix_tree_iter *iter, unsigned flags)
|
|
{
|
|
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
|
|
- struct radix_tree_node *node = rcu_dereference_raw(*slot);
|
|
+ struct radix_tree_node *node;
|
|
|
|
slot = skip_siblings(&node, slot, iter);
|
|
|
|
From ab1e8d8960b68f54af42b6484b5950bd13a4054b Mon Sep 17 00:00:00 2001
|
|
From: Pavel Tatashin <pasha.tatashin@oracle.com>
|
|
Date: Fri, 18 May 2018 16:09:13 -0700
|
|
Subject: mm: don't allow deferred pages with NEED_PER_CPU_KM
|
|
|
|
From: Pavel Tatashin <pasha.tatashin@oracle.com>
|
|
|
|
commit ab1e8d8960b68f54af42b6484b5950bd13a4054b upstream.
|
|
|
|
It is unsafe to do virtual to physical translations before mm_init() is
|
|
called if struct page is needed in order to determine the memory section
|
|
number (see SECTION_IN_PAGE_FLAGS). This is because only in mm_init()
|
|
we initialize struct pages for all the allocated memory when deferred
|
|
struct pages are used.
|
|
|
|
My recent fix in commit c9e97a1997 ("mm: initialize pages on demand
|
|
during boot") exposed this problem, because it greatly reduced number of
|
|
pages that are initialized before mm_init(), but the problem existed
|
|
even before my fix, as Fengguang Wu found.
|
|
|
|
Below is a more detailed explanation of the problem.
|
|
|
|
We initialize struct pages in four places:
|
|
|
|
1. Early in boot a small set of struct pages is initialized to fill the
|
|
first section, and lower zones.
|
|
|
|
2. During mm_init() we initialize "struct pages" for all the memory that
|
|
is allocated, i.e. reserved in memblock.
|
|
|
|
3. Using on-demand logic when pages are allocated after mm_init call
|
|
(when memblock is finished)
|
|
|
|
4. After smp_init() when the rest free deferred pages are initialized.
|
|
|
|
The problem occurs if we try to do va to phys translation of a memory
|
|
between steps 1 and 2. Because we have not yet initialized struct pages
|
|
for all the reserved pages, it is inherently unsafe to do va to phys if
|
|
the translation itself requires access of "struct page" as in case of
|
|
this combination: CONFIG_SPARSEMEM && !CONFIG_SPARSEMEM_VMEMMAP
|
|
|
|
The following path exposes the problem:
|
|
|
|
start_kernel()
|
|
trap_init()
|
|
setup_cpu_entry_areas()
|
|
setup_cpu_entry_area(cpu)
|
|
get_cpu_gdt_paddr(cpu)
|
|
per_cpu_ptr_to_phys(addr)
|
|
pcpu_addr_to_page(addr)
|
|
virt_to_page(addr)
|
|
pfn_to_page(__pa(addr) >> PAGE_SHIFT)
|
|
|
|
We disable this path by not allowing NEED_PER_CPU_KM with deferred
|
|
struct pages feature.
|
|
|
|
The problems are discussed in these threads:
|
|
http://lkml.kernel.org/r/20180418135300.inazvpxjxowogyge@wfg-t540p.sh.intel.com
|
|
http://lkml.kernel.org/r/20180419013128.iurzouiqxvcnpbvz@wfg-t540p.sh.intel.com
|
|
http://lkml.kernel.org/r/20180426202619.2768-1-pasha.tatashin@oracle.com
|
|
|
|
Link: http://lkml.kernel.org/r/20180515175124.1770-1-pasha.tatashin@oracle.com
|
|
Fixes: 3a80a7fa7989 ("mm: meminit: initialise a subset of struct pages if CONFIG_DEFERRED_STRUCT_PAGE_INIT is set")
|
|
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
|
|
Acked-by: Michal Hocko <mhocko@suse.com>
|
|
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
|
|
Cc: Steven Sistare <steven.sistare@oracle.com>
|
|
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
|
|
Cc: Mel Gorman <mgorman@techsingularity.net>
|
|
Cc: Fengguang Wu <fengguang.wu@intel.com>
|
|
Cc: Dennis Zhou <dennisszhou@gmail.com>
|
|
Cc: <stable@vger.kernel.org>
|
|
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
|
|
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
mm/Kconfig | 1 +
|
|
1 file changed, 1 insertion(+)
|
|
|
|
--- a/mm/Kconfig
|
|
+++ b/mm/Kconfig
|
|
@@ -644,6 +644,7 @@ config DEFERRED_STRUCT_PAGE_INIT
|
|
default n
|
|
depends on NO_BOOTMEM
|
|
depends on !FLATMEM
|
|
+ depends on !NEED_PER_CPU_KM
|
|
help
|
|
Ordinarily all struct pages are initialised during early boot in a
|
|
single thread. On very large machines this can take a considerable
|
|
From b579f924a90f42fa561afd8201514fc216b71949 Mon Sep 17 00:00:00 2001
|
|
From: Michel Thierry <michel.thierry@intel.com>
|
|
Date: Mon, 14 May 2018 09:54:45 -0700
|
|
Subject: drm/i915/gen9: Add WaClearHIZ_WM_CHICKEN3 for bxt and glk
|
|
|
|
From: Michel Thierry <michel.thierry@intel.com>
|
|
|
|
commit b579f924a90f42fa561afd8201514fc216b71949 upstream.
|
|
|
|
Factor in clear values wherever required while updating destination
|
|
min/max.
|
|
|
|
References: HSDES#1604444184
|
|
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
|
|
Cc: mesa-dev@lists.freedesktop.org
|
|
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
|
|
Cc: Oscar Mateo <oscar.mateo@intel.com>
|
|
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
|
|
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
|
|
Link: https://patchwork.freedesktop.org/patch/msgid/20180510200708.18097-1-michel.thierry@intel.com
|
|
Cc: stable@vger.kernel.org
|
|
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
|
|
Link: https://patchwork.freedesktop.org/patch/msgid/20180514165445.9198-1-michel.thierry@intel.com
|
|
(backported from commit 0c79f9cb77eae28d48a4f9fc1b3341aacbbd260c)
|
|
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/gpu/drm/i915/i915_reg.h | 3 +++
|
|
drivers/gpu/drm/i915/intel_engine_cs.c | 4 ++++
|
|
2 files changed, 7 insertions(+)
|
|
|
|
--- a/drivers/gpu/drm/i915/i915_reg.h
|
|
+++ b/drivers/gpu/drm/i915/i915_reg.h
|
|
@@ -7139,6 +7139,9 @@ enum {
|
|
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
|
|
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
|
|
|
|
+#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
|
|
+#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
|
|
+
|
|
/* WaCatErrorRejectionIssue */
|
|
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
|
|
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
|
|
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
|
|
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
|
|
@@ -1098,6 +1098,10 @@ static int gen9_init_workarounds(struct
|
|
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
|
|
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
|
|
|
|
+ /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
|
|
+ if (IS_GEN9_LP(dev_priv))
|
|
+ WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
|
|
+
|
|
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
|
|
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
|
|
if (ret)
|
|
From e521813468f786271a87e78e8644243bead48fad Mon Sep 17 00:00:00 2001
|
|
From: Julian Wiedmann <jwi@linux.ibm.com>
|
|
Date: Wed, 2 May 2018 08:48:43 +0200
|
|
Subject: s390/qdio: fix access to uninitialized qdio_q fields
|
|
|
|
From: Julian Wiedmann <jwi@linux.ibm.com>
|
|
|
|
commit e521813468f786271a87e78e8644243bead48fad upstream.
|
|
|
|
Ever since CQ/QAOB support was added, calling qdio_free() straight after
|
|
qdio_alloc() results in qdio_release_memory() accessing uninitialized
|
|
memory (i.e. q->u.out.use_cq and q->u.out.aobs), followed by a
|
|
kmem_cache_free() on the random AOB addresses.
|
|
|
|
For older kernels that don't have 6e30c549f6ca, the same applies if
|
|
qdio_establish() fails in the DEV_STATE_ONLINE check.
|
|
|
|
While initializing q->u.out.use_cq would be enough to fix this
|
|
particular bug, the more future-proof change is to just zero-alloc the
|
|
whole struct.
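
For reference, the two allocation flavors differ only in zeroing (sketch):

  /* Before: fields not set by init code keep whatever was in the
   * slab object, which a premature free path then interprets. */
  q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);

  /* After: equivalent to passing GFP_KERNEL | __GFP_ZERO; every
   * field starts out 0/NULL, so cleanup sees sane values. */
  q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);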
|
|
|
|
Fixes: 104ea556ee7f ("qdio: support asynchronous delivery of storage blocks")
|
|
Cc: <stable@vger.kernel.org> #v3.2+
|
|
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
|
|
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/s390/cio/qdio_setup.c | 2 +-
|
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
|
|
|
--- a/drivers/s390/cio/qdio_setup.c
|
|
+++ b/drivers/s390/cio/qdio_setup.c
|
|
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdi
|
|
int i;
|
|
|
|
for (i = 0; i < nr_queues; i++) {
|
|
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
|
|
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
|
|
if (!q)
|
|
return -ENOMEM;
|
|
|
|
From 4bbaf2584b86b0772413edeac22ff448f36351b1 Mon Sep 17 00:00:00 2001
|
|
From: Hendrik Brueckner <brueckner@linux.ibm.com>
|
|
Date: Thu, 3 May 2018 15:56:15 +0200
|
|
Subject: s390/cpum_sf: ensure sample frequency of perf event attributes is non-zero
|
|
|
|
From: Hendrik Brueckner <brueckner@linux.ibm.com>
|
|
|
|
commit 4bbaf2584b86b0772413edeac22ff448f36351b1 upstream.
|
|
|
|
Correct a trinity finding for the perf_event_open() system call with
|
|
a perf event attribute structure that uses a frequency but has the
|
|
sampling frequency set to zero. This causes a FP divide exception during
|
|
the sample rate initialization for the hardware sampling facility.
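
The defensive pattern, reduced to a sketch: reject the zero divisor coming
from userspace before it reaches any rate conversion (names as in the diff
below):

  if (attr->freq) {
          /* attr is copied in from userspace; a zero sample_freq is
           * invalid input, not an impossible case. */
          if (!attr->sample_freq)
                  return -EINVAL;
          rate = freq_to_sample_rate(&si, attr->sample_freq);
  }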
|
|
|
|
Fixes: 8c069ff4bd606 ("s390/perf: add support for the CPU-Measurement Sampling Facility")
|
|
Cc: stable@vger.kernel.org # 3.14+
|
|
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
|
|
Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
|
|
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
arch/s390/kernel/perf_cpum_sf.c | 4 ++++
|
|
1 file changed, 4 insertions(+)
|
|
|
|
--- a/arch/s390/kernel/perf_cpum_sf.c
|
|
+++ b/arch/s390/kernel/perf_cpum_sf.c
|
|
@@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct p
|
|
*/
|
|
rate = 0;
|
|
if (attr->freq) {
|
|
+ if (!attr->sample_freq) {
|
|
+ err = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
rate = freq_to_sample_rate(&si, attr->sample_freq);
|
|
rate = hw_limit_rate(&si, rate);
|
|
attr->freq = 0;
|
|
From 2e68adcd2fb21b7188ba449f0fab3bee2910e500 Mon Sep 17 00:00:00 2001
|
|
From: Julian Wiedmann <jwi@linux.ibm.com>
|
|
Date: Wed, 2 May 2018 08:28:34 +0200
|
|
Subject: s390/qdio: don't release memory in qdio_setup_irq()
|
|
|
|
From: Julian Wiedmann <jwi@linux.ibm.com>
|
|
|
|
commit 2e68adcd2fb21b7188ba449f0fab3bee2910e500 upstream.
|
|
|
|
Calling qdio_release_memory() on error is just plain wrong. It frees
|
|
the main qdio_irq struct, while the following code still uses it.
|
|
|
|
Also, no other error path in qdio_establish() does this. So trust
|
|
callers to clean up via qdio_free() if some step of the QDIO
|
|
initialization fails.
|
|
|
|
Fixes: 779e6e1c724d ("[S390] qdio: new qdio driver.")
|
|
Cc: <stable@vger.kernel.org> #v2.6.27+
|
|
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
|
|
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
|
|
---
|
|
drivers/s390/cio/qdio_setup.c | 10 ++--------
|
|
1 file changed, 2 insertions(+), 8 deletions(-)
|
|
|
|
--- a/drivers/s390/cio/qdio_setup.c
|
|
+++ b/drivers/s390/cio/qdio_setup.c
|
|
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initializ
|
|
{
|
|
struct ciw *ciw;
|
|
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
|
|
- int rc;
|
|
|
|
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
|
|
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
|
|
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initializ
|
|
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
|
|
if (!ciw) {
|
|
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
|
|
- rc = -EINVAL;
|
|
- goto out_err;
|
|
+ return -EINVAL;
|
|
}
|
|
irq_ptr->equeue = *ciw;
|
|
|
|
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
|
|
if (!ciw) {
|
|
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
|
|
- rc = -EINVAL;
|
|
- goto out_err;
|
|
+ return -EINVAL;
|
|
}
|
|
irq_ptr->aqueue = *ciw;
|
|
|
|
@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initializ
|
|
irq_ptr->orig_handler = init_data->cdev->handler;
|
|
init_data->cdev->handler = qdio_int_handler;
|
|
return 0;
|
|
-out_err:
|
|
- qdio_release_memory(irq_ptr);
|
|
- return rc;
|
|
}
|
|
|
|
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
|
|
From 9f18fff63cfd6f559daa1eaae60640372c65f84b Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date: Tue, 24 Apr 2018 11:18:49 +0200
Subject: s390: remove indirect branch from do_softirq_own_stack

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

commit 9f18fff63cfd6f559daa1eaae60640372c65f84b upstream.

The inline assembly to call __do_softirq on the irq stack uses
an indirect branch. This can be replaced with a normal relative
branch.

Cc: stable@vger.kernel.org # 4.16
Fixes: f19fbd5ed6 ("s390: introduce execute-trampolines for branches")
Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/s390/kernel/irq.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
 		asm volatile("	la 15,0(%0)\n"
-			     "	basr 14,%2\n"
+			     "	brasl 14,__do_softirq\n"
			     "	la 15,0(%1)\n"
-			     : : "a" (new), "a" (old),
-			         "a" (__do_softirq)
+			     : : "a" (new), "a" (old)
			     : "0", "1", "2", "3", "4", "5", "14",
			       "cc", "memory" );
	} else {
From 1c1a2ee1b53b006754073eefc65d2b2cedb5264b Mon Sep 17 00:00:00 2001
From: Coly Li <colyli@suse.de>
Date: Thu, 17 May 2018 23:33:26 +0800
Subject: bcache: return 0 from bch_debug_init() if CONFIG_DEBUG_FS=n

From: Coly Li <colyli@suse.de>

commit 1c1a2ee1b53b006754073eefc65d2b2cedb5264b upstream.

Commit 539d39eb2708 ("bcache: fix wrong return value in bch_debug_init()")
returns the return value of debugfs_create_dir() to bcache_init(). When
CONFIG_DEBUG_FS=n, bch_debug_init() always returns 1 and makes
bcache_init() fail.

This patch makes bch_debug_init() always return 0 if CONFIG_DEBUG_FS=n,
so bcache can continue to work on kernels which don't have debugfs
enabled.

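The IS_ENABLED() form mentioned in the v3 changelog entry below keeps both
branches visible to the compiler; for comparison, a reconstructed sketch of
the #ifdef variant it replaced (not the original submission):

	int __init bch_debug_init(struct kobject *kobj)
	{
	#ifndef CONFIG_DEBUG_FS
		/* debugfs compiled out: report success, bcache works without it */
		return 0;
	#else
		debug = debugfs_create_dir("bcache", NULL);
		return IS_ERR_OR_NULL(debug);
	#endif
	}

With IS_ENABLED() the early return is constant-folded away when debugfs is
built in, and the debugfs branch still gets type-checked when it is not.
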
Changelog:
v4: Add Acked-by from Kent Overstreet.
v3: Use IS_ENABLED(CONFIG_DEBUG_FS) to replace #ifdef DEBUG_FS.
v2: Remove a warning message.
v1: Initial version.

Fixes: Commit 539d39eb2708 ("bcache: fix wrong return value in bch_debug_init()")
Cc: stable@vger.kernel.org
Signed-off-by: Coly Li <colyli@suse.de>
Reported-by: Massimo B. <massimo.b@gmx.net>
Reported-by: Kai Krakow <kai@kaishome.de>
Tested-by: Kai Krakow <kai@kaishome.de>
Acked-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Kai Krakow <kai@kaishome.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/md/bcache/debug.c | 3 +++
 1 file changed, 3 insertions(+)

--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -251,6 +251,9 @@ void bch_debug_exit(void)

 int __init bch_debug_init(struct kobject *kobj)
 {
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
 	debug = debugfs_create_dir("bcache", NULL);

 	return IS_ERR_OR_NULL(debug);
From 0a0b152083cfc44ec1bb599b57b7aab41327f998 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen@linux.intel.com>
Date: Wed, 9 May 2018 10:13:51 -0700
Subject: x86/pkeys: Override pkey when moving away from PROT_EXEC

From: Dave Hansen <dave.hansen@linux.intel.com>

commit 0a0b152083cfc44ec1bb599b57b7aab41327f998 upstream.

I got a bug report that the following code (roughly) was
causing a SIGSEGV:

	mprotect(ptr, size, PROT_EXEC);
	mprotect(ptr, size, PROT_NONE);
	mprotect(ptr, size, PROT_READ);
	*ptr = 100;

The problem is hit when the mprotect(PROT_EXEC) implicitly assigns a
protection key to the VMA and makes that key ACCESS_DENY|WRITE_DENY.
The PROT_NONE mprotect() failed to remove the protection key, and the
PROT_NONE->PROT_READ left the PTE usable, but the pkey was still in
place and left the memory inaccessible.

To fix this, we ensure that we always "override" the pkey at mprotect()
if the VMA does not have execute-only permissions, but the VMA has the
execute-only pkey.

We had a check for PROT_READ/WRITE, but it did not work for PROT_NONE.
This entirely removes the PROT_* checks, which ensures that PROT_NONE
now works.

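Filled out into a compilable reproducer (a sketch based on the rough
snippet above; the mapping size and the final read are assumptions, and a
CPU with protection keys is needed to see the pre-fix SIGSEGV):

	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		size_t size = 4096;
		char *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (ptr == MAP_FAILED)
			return 1;

		mprotect(ptr, size, PROT_EXEC);	/* VMA silently gets the exec-only pkey */
		mprotect(ptr, size, PROT_NONE);
		mprotect(ptr, size, PROT_READ);	/* pre-fix: the pkey stayed behind */

		(void)*(volatile char *)ptr;	/* faulted with SIGSEGV before the fix */
		puts("read succeeded");
		return 0;
	}
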
Reported-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellermen <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Cc: stable@vger.kernel.org
Fixes: 62b5f7d013f ("mm/core, x86/mm/pkeys: Add execute-only protection keys support")
Link: http://lkml.kernel.org/r/20180509171351.084C5A71@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/pkeys.h | 12 +++++++++++-
 arch/x86/mm/pkeys.c          | 21 +++++++++++----------
 2 files changed, 22 insertions(+), 11 deletions(-)

--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H

+#define ARCH_DEFAULT_PKEY	0
+
 #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)

 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
@@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm
 static inline int execute_only_pkey(struct mm_struct *mm)
 {
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
-		return 0;
+		return ARCH_DEFAULT_PKEY;

 	return __execute_only_pkey(mm);
 }
@@ -56,6 +58,14 @@ bool mm_pkey_is_allocated(struct mm_stru
 		return false;
 	if (pkey >= arch_max_pkey())
 		return false;
+	/*
+	 * The exec-only pkey is set in the allocation map, but
+	 * is not available to any of the user interfaces like
+	 * mprotect_pkey().
+	 */
+	if (pkey == mm->context.execute_only_pkey)
+		return false;
+
 	return mm_pkey_allocation_map(mm) & (1U << pkey);
 }

--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct
 	 */
 	if (pkey != -1)
 		return pkey;
-	/*
-	 * Look for a protection-key-drive execute-only mapping
-	 * which is now being given permissions that are not
-	 * execute-only.  Move it back to the default pkey.
-	 */
-	if (vma_is_pkey_exec_only(vma) &&
-	    (prot & (PROT_READ|PROT_WRITE))) {
-		return 0;
-	}
+
 	/*
 	 * The mapping is execute-only.  Go try to get the
 	 * execute-only protection key.  If we fail to do that,
 	 * fall through as if we do not have execute-only
-	 * support.
+	 * support in this mm.
 	 */
 	if (prot == PROT_EXEC) {
 		pkey = execute_only_pkey(vma->vm_mm);
 		if (pkey > 0)
 			return pkey;
+	} else if (vma_is_pkey_exec_only(vma)) {
+		/*
+		 * Protections are *not* PROT_EXEC, but the mapping
+		 * is using the exec-only pkey.  This mapping was
+		 * PROT_EXEC and will no longer be.  Move back to
+		 * the default pkey.
+		 */
+		return ARCH_DEFAULT_PKEY;
 	}
+
 	/*
 	 * This is a vanilla, non-pkey mprotect (or we failed to
 	 * setup execute-only), inherit the pkey from the VMA we
From 2fa9d1cfaf0e02f8abef0757002bff12dfcfa4e6 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen@linux.intel.com>
Date: Wed, 9 May 2018 10:13:58 -0700
Subject: x86/pkeys: Do not special case protection key 0

From: Dave Hansen <dave.hansen@linux.intel.com>

commit 2fa9d1cfaf0e02f8abef0757002bff12dfcfa4e6 upstream.

mm_pkey_is_allocated() treats pkey 0 as unallocated. That is
inconsistent with the manpages, and also inconsistent with
mm->context.pkey_allocation_map. Stop special casing it and only
disallow values that are actually bad (< 0).

The end-user visible effect of this is that you can now use
mprotect_pkey() to set pkey=0.

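In user space that looks as follows (a sketch; pkey_mprotect() needs a
glibc with the wrapper (2.27+) and pkeys-capable hardware, and the mapping
itself is a placeholder):

	#define _GNU_SOURCE
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		/* explicitly request the default pkey 0: rejected before this patch */
		return pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, 0);
	}
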
This is a bit nicer than what Ram proposed[1] because it is simpler
and removes special-casing for pkey 0. On the other hand, it does
allow applications to pkey_free() pkey-0, but that's just a silly
thing to do, so we are not going to protect against it.

The scenario that could happen is similar to what happens if you free
any other pkey that is in use: it might get reallocated later and used
to protect some other data. The most likely scenario is that pkey-0
comes back from pkey_alloc(), an access-disable or write-disable bit
is set in PKRU for it, and the next stack access will SIGSEGV. It's
not horribly different from if you mprotect()'d your stack or heap to
be unreadable or unwritable, which is generally very foolish, but also
not explicitly prevented by the kernel.

1. http://lkml.kernel.org/r/1522112702-27853-1-git-send-email-linuxram@us.ibm.com

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellermen <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Cc: stable@vger.kernel.org
Fixes: 58ab9a088dda ("x86/pkeys: Check against max pkey to avoid overflows")
Link: http://lkml.kernel.org/r/20180509171358.47FD785E@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/mmu_context.h | 2 +-
 arch/x86/include/asm/pkeys.h       | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -192,7 +192,7 @@ static inline int init_new_context(struc

 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
-		/* pkey 0 is the default and always allocated */
+		/* pkey 0 is the default and allocated implicitly */
 		mm->context.pkey_allocation_map = 0x1;
 		/* -1 means unallocated or invalid */
 		mm->context.execute_only_pkey = -1;
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -51,10 +51,10 @@ bool mm_pkey_is_allocated(struct mm_stru
 {
 	/*
 	 * "Allocated" pkeys are those that have been returned
-	 * from pkey_alloc(). pkey 0 is special, and never
-	 * returned from pkey_alloc().
+	 * from pkey_alloc() or pkey 0 which is allocated
+	 * implicitly when the mm is created.
 	 */
-	if (pkey <= 0)
+	if (pkey < 0)
 		return false;
 	if (pkey >= arch_max_pkey())
 		return false;
From 0b3225ab9407f557a8e20f23f37aa7236c10a9b1 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Date: Fri, 4 May 2018 07:59:58 +0200
Subject: efi: Avoid potential crashes, fix the 'struct efi_pci_io_protocol_32' definition for mixed mode

From: Ard Biesheuvel <ard.biesheuvel@linaro.org>

commit 0b3225ab9407f557a8e20f23f37aa7236c10a9b1 upstream.

Mixed mode allows a kernel built for x86_64 to interact with 32-bit
EFI firmware, but requires us to define all struct definitions carefully
when it comes to pointer sizes.

'struct efi_pci_io_protocol_32' currently uses a 'void *' for the
'romimage' field, which will be interpreted as a 64-bit field
on such kernels, potentially resulting in bogus memory references
and subsequent crashes.

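The mismatch is easy to see in isolation (a standalone sketch, not kernel
code; compile for x86-64, with only the two relevant fields kept):

	#include <stdint.h>

	/* as declared before the fix: 'romimage' is loaded as a 64-bit pointer */
	typedef struct { uint64_t romsize; void *romimage; } proto32_wrong;

	/* what 32-bit firmware actually stores there: a 32-bit value */
	typedef struct { uint64_t romsize; uint32_t romimage; } proto32_fixed;

	_Static_assert(sizeof(((proto32_wrong *)0)->romimage) == 8,
		       "the bogus load pulls in 4 bytes of unrelated data");
	_Static_assert(sizeof(((proto32_fixed *)0)->romimage) == 4,
		       "matches the firmware layout");
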
Tested-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: <stable@vger.kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-efi@vger.kernel.org
Link: http://lkml.kernel.org/r/20180504060003.19618-13-ard.biesheuvel@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/boot/compressed/eboot.c | 6 ++++--
 include/linux/efi.h              | 8 ++++----
 2 files changed, 8 insertions(+), 6 deletions(-)

--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32
 	if (status != EFI_SUCCESS)
 		goto free_struct;

-	memcpy(rom->romdata, pci->romimage, pci->romsize);
+	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+	       pci->romsize);
 	return status;

 free_struct:
@@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64
 	if (status != EFI_SUCCESS)
 		goto free_struct;

-	memcpy(rom->romdata, pci->romimage, pci->romsize);
+	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+	       pci->romsize);
 	return status;

 free_struct:
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -395,8 +395,8 @@ typedef struct {
 	u32 attributes;
 	u32 get_bar_attributes;
 	u32 set_bar_attributes;
-	uint64_t romsize;
-	void *romimage;
+	u64 romsize;
+	u32 romimage;
 } efi_pci_io_protocol_32;

 typedef struct {
@@ -415,8 +415,8 @@ typedef struct {
 	u64 attributes;
 	u64 get_bar_attributes;
 	u64 set_bar_attributes;
-	uint64_t romsize;
-	void *romimage;
+	u64 romsize;
+	u64 romimage;
 } efi_pci_io_protocol_64;

 typedef struct {
From eb0146daefdde65665b7f076fbff7b49dade95b9 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu <mhiramat@kernel.org>
Date: Sun, 13 May 2018 05:04:16 +0100
Subject: ARM: 8771/1: kprobes: Prohibit kprobes on do_undefinstr

From: Masami Hiramatsu <mhiramat@kernel.org>

commit eb0146daefdde65665b7f076fbff7b49dade95b9 upstream.

Prohibit kprobes on do_undefinstr because kprobes on
arm is implemented by undefined instruction. This means
if we probe do_undefinstr(), it can cause an infinite
recursive exception.

Fixes: 24ba613c9d6c ("ARM kprobes: core code")
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm/kernel/traps.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_
 	raw_spin_unlock_irqrestore(&undef_lock, flags);
 }

-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
 	struct undef_hook *hook;
 	unsigned long flags;
@@ -490,6 +492,7 @@ die_sig:

 	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)

 /*
  * Handle FIQ similarly to NMI on x86 systems.
From fed71f7d98795ed0fa1d431910787f0f4a68324f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 17 May 2018 14:36:39 +0200
Subject: x86/apic/x2apic: Initialize cluster ID properly

From: Thomas Gleixner <tglx@linutronix.de>

commit fed71f7d98795ed0fa1d431910787f0f4a68324f upstream.

Rick bisected a regression on large systems which use the x2apic cluster
mode for interrupt delivery to the commit which reworked the cluster
management.

The problem is caused by a missing initialization of the clusterid field
in the shared cluster data structures. So all structures end up with
cluster ID 0 which only allows sharing between all CPUs which belong to
cluster 0. All other CPUs with a cluster ID > 0 cannot share the data
structure because they cannot find existing data with their cluster
ID. This causes malfunction with IPIs because IPIs are sent to the wrong
cluster and the caller waits forever for the target CPU to handle the IPI.

Add the missing initialization when an upcoming CPU is the first in a
cluster so that the later booting CPUs can find the data and share it for
proper operation.

Fixes: 023a611748fd ("x86/apic/x2apic: Simplify cluster management")
Reported-by: Rick Warner <rick@microway.com>
Bisected-by: Rick Warner <rick@microway.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Rick Warner <rick@microway.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1805171418210.1947@nanos.tec.linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/apic/x2apic_cluster.c | 1 +
 1 file changed, 1 insertion(+)

--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -116,6 +116,7 @@ static void init_x2apic_ldr(void)
 		goto update;
 	}
 	cmsk = cluster_hotplug_mask;
+	cmsk->clusterid = cluster;
 	cluster_hotplug_mask = NULL;
 update:
 	this_cpu_write(cluster_masks, cmsk);
From acf46020012ccbca1172e9c7aeab399c950d9212 Mon Sep 17 00:00:00 2001
From: Dmitry Safonov <dima@arista.com>
Date: Fri, 18 May 2018 00:35:10 +0100
Subject: x86/mm: Drop TS_COMPAT on 64-bit exec() syscall

From: Dmitry Safonov <dima@arista.com>

commit acf46020012ccbca1172e9c7aeab399c950d9212 upstream.

The x86 mmap() code selects the mmap base for an allocation depending on
the bitness of the syscall. For 64bit syscalls it selects mm->mmap_base
and for 32bit mm->mmap_compat_base.

exec() calls mmap() which in turn uses in_compat_syscall() to check whether
the mapping is for a 32bit or a 64bit task. The decision is made on the
following criteria:

 ia32	child->thread.status & TS_COMPAT
 x32	child->pt_regs.orig_ax & __X32_SYSCALL_BIT
 ia64	!ia32 && !x32

__set_personality_x32() was dropping the TS_COMPAT flag, but
set_personality_64bit() kept the compat syscall flag, making
in_compat_syscall() return true during the first exec() syscall.

This has user-visible effects, mentioned by Alexey:
1) It breaks ASAN
$ gcc -fsanitize=address wrap.c -o wrap-asan
$ ./wrap32 ./wrap-asan true
==1217==Shadow memory range interleaves with an existing memory mapping. ASan cannot proceed correctly. ABORTING.
==1217==ASan shadow was supposed to be located in the [0x00007fff7000-0x10007fff7fff] range.
==1217==Process memory map follows:
0x000000400000-0x000000401000 /home/izbyshev/test/gcc/asan-exec-from-32bit/wrap-asan
0x000000600000-0x000000601000 /home/izbyshev/test/gcc/asan-exec-from-32bit/wrap-asan
0x000000601000-0x000000602000 /home/izbyshev/test/gcc/asan-exec-from-32bit/wrap-asan
0x0000f7dbd000-0x0000f7de2000 /lib64/ld-2.27.so
0x0000f7fe2000-0x0000f7fe3000 /lib64/ld-2.27.so
0x0000f7fe3000-0x0000f7fe4000 /lib64/ld-2.27.so
0x0000f7fe4000-0x0000f7fe5000
0x7fed9abff000-0x7fed9af54000
0x7fed9af54000-0x7fed9af6b000 /lib64/libgcc_s.so.1
[snip]

2) It doesn't seem to be great for security if an attacker always knows
that ld.so is going to be mapped into the first 4GB in this case
(the same thing happens for PIEs as well).

The testcase:
$ cat wrap.c

#include <unistd.h>	/* for execvp(); needed to compile the testcase as-is */

int main(int argc, char *argv[]) {
	execvp(argv[1], &argv[1]);
	return 127;
}

$ gcc wrap.c -o wrap
$ LD_SHOW_AUXV=1 ./wrap ./wrap true |& grep AT_BASE
AT_BASE: 0x7f63b8309000
AT_BASE: 0x7faec143c000
AT_BASE: 0x7fbdb25fa000

$ gcc -m32 wrap.c -o wrap32
$ LD_SHOW_AUXV=1 ./wrap32 ./wrap true |& grep AT_BASE
AT_BASE: 0xf7eff000
AT_BASE: 0xf7cee000
AT_BASE: 0x7f8b9774e000

Fixes: 1b028f784e8c ("x86/mm: Introduce mmap_compat_base() for 32-bit mmap()")
Fixes: ada26481dfe6 ("x86/mm: Make in_compat_syscall() work during exec")
Reported-by: Alexey Izbyshev <izbyshev@ispras.ru>
Bisected-by: Alexander Monakov <amonakov@ispras.ru>
Investigated-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Dmitry Safonov <dima@arista.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Alexander Monakov <amonakov@ispras.ru>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: stable@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20180517233510.24996-1-dima@arista.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/process_64.c | 1 +
 1 file changed, 1 insertion(+)

--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -528,6 +528,7 @@ void set_personality_64bit(void)
 	clear_thread_flag(TIF_X32);
 	/* Pretend that this comes from a 64bit execve */
 	task_pt_regs(current)->orig_ax = __NR_execve;
+	current_thread_info()->status &= ~TS_COMPAT;

 	/* Ensure the corresponding mm is not marked. */
 	if (current->mm)
From 5596fe34495cf0f645f417eb928ef224df3e3cb4 Mon Sep 17 00:00:00 2001
From: Dexuan Cui <decui@microsoft.com>
Date: Tue, 15 May 2018 19:52:50 +0000
Subject: tick/broadcast: Use for_each_cpu() specially on UP kernels

From: Dexuan Cui <decui@microsoft.com>

commit 5596fe34495cf0f645f417eb928ef224df3e3cb4 upstream.

for_each_cpu() unintuitively reports CPU0 as set independent of the actual
cpumask content on UP kernels. This causes an unexpected PIT interrupt
storm on a UP kernel running in an SMP virtual machine on Hyper-V, and as
a result, the virtual machine can suffer from a strange random delay of 1~20
minutes during boot-up, and sometimes it can hang forever.

Protect it by checking whether the cpumask is empty before entering the
for_each_cpu() loop.

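The surprising UP behaviour comes from the cpumask helpers: with
CONFIG_SMP=n, <linux/cpumask.h> of this era defines for_each_cpu() roughly
as below (quoted from memory, so treat it as a sketch), visiting CPU0 once
and never consulting the mask:

	/* UP variant: the mask argument is ignored entirely */
	#define for_each_cpu(cpu, mask)			\
		for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
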
[ tglx: Use !IS_ENABLED(CONFIG_SMP) instead of #ifdeffery ]

Signed-off-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Josh Poulson <jopoulso@microsoft.com>
Cc: "Michael Kelley (EOSG)" <Michael.H.Kelley@microsoft.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: stable@vger.kernel.org
Cc: Rakib Mullick <rakib.mullick@gmail.com>
Cc: Jork Loeser <Jork.Loeser@microsoft.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: KY Srinivasan <kys@microsoft.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Link: https://lkml.kernel.org/r/KL1P15301MB000678289FE55BA365B3279ABF990@KL1P15301MB0006.APCP153.PROD.OUTLOOK.COM
Link: https://lkml.kernel.org/r/KL1P15301MB0006FA63BC22BEB64902EAA0BF930@KL1P15301MB0006.APCP153.PROD.OUTLOOK.COM
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 kernel/time/tick-broadcast.c | 8 ++++++++
 1 file changed, 8 insertions(+)

--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcas
 	now = ktime_get();
 	/* Find all expired events */
 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
+		/*
+		 * Required for !SMP because for_each_cpu() reports
+		 * unconditionally CPU0 as set on UP kernels.
+		 */
+		if (!IS_ENABLED(CONFIG_SMP) &&
+		    cpumask_empty(tick_broadcast_oneshot_mask))
+			break;
+
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event <= now) {
 			cpumask_set_cpu(cpu, tmpmask);
From 69af7e23a6870df2ea6fa79ca16493d59b3eebeb Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu <mhiramat@kernel.org>
Date: Sun, 13 May 2018 05:03:54 +0100
Subject: ARM: 8769/1: kprobes: Fix to use get_kprobe_ctlblk after irq-disabed

From: Masami Hiramatsu <mhiramat@kernel.org>

commit 69af7e23a6870df2ea6fa79ca16493d59b3eebeb upstream.

Since get_kprobe_ctlblk() uses smp_processor_id() to access a
per-cpu variable, it hits the smp_processor_id sanity check as below.

[ 7.006928] BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1
[ 7.007859] caller is debug_smp_processor_id+0x20/0x24
[ 7.008438] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.16.0-rc1-00192-g4eb17253e4b5 #1
[ 7.008890] Hardware name: Generic DT based system
[ 7.009917] [<c0313f0c>] (unwind_backtrace) from [<c030e6d8>] (show_stack+0x20/0x24)
[ 7.010473] [<c030e6d8>] (show_stack) from [<c0c64694>] (dump_stack+0x84/0x98)
[ 7.010990] [<c0c64694>] (dump_stack) from [<c071ca5c>] (check_preemption_disabled+0x138/0x13c)
[ 7.011592] [<c071ca5c>] (check_preemption_disabled) from [<c071ca80>] (debug_smp_processor_id+0x20/0x24)
[ 7.012214] [<c071ca80>] (debug_smp_processor_id) from [<c03335e0>] (optimized_callback+0x2c/0xe4)
[ 7.013077] [<c03335e0>] (optimized_callback) from [<bf0021b0>] (0xbf0021b0)

To fix this issue, call get_kprobe_ctlblk() right after disabling
IRQs, since that also disables preemption.

Fixes: 0dc016dbd820 ("ARM: kprobes: enable OPTPROBES for ARM 32")
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm/probes/kprobes/opt-arm.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kpro
 {
 	unsigned long flags;
 	struct kprobe *p = &op->kp;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;

 	/* Save skipped registers */
 	regs->ARM_pc = (unsigned long)op->kp.addr;
 	regs->ARM_ORIG_r0 = ~0UL;

 	local_irq_save(flags);
+	kcb = get_kprobe_ctlblk();

 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
From 70948c05fdde0aac32f9667856a88725c192fa40 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu <mhiramat@kernel.org>
Date: Sun, 13 May 2018 05:04:10 +0100
Subject: ARM: 8770/1: kprobes: Prohibit probing on optimized_callback

From: Masami Hiramatsu <mhiramat@kernel.org>

commit 70948c05fdde0aac32f9667856a88725c192fa40 upstream.

Prohibit probing on optimized_callback() because
it is called from kprobes itself. If we put a kprobe
on it, that will cause a recursive call loop.
Mark it NOKPROBE_SYMBOL.

Fixes: 0dc016dbd820 ("ARM: kprobes: enable OPTPROBES for ARM 32")
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm/probes/kprobes/opt-arm.c | 1 +
 1 file changed, 1 insertion(+)

--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -192,6 +192,7 @@ optimized_callback(struct optimized_kpro

 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)

 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
From 0d73c3f8e7f6ee2aab1bb350f60c180f5ae21a2c Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu <mhiramat@kernel.org>
Date: Sun, 13 May 2018 05:04:29 +0100
Subject: ARM: 8772/1: kprobes: Prohibit kprobes on get_user functions

From: Masami Hiramatsu <mhiramat@kernel.org>

commit 0d73c3f8e7f6ee2aab1bb350f60c180f5ae21a2c upstream.

Since do_undefinstr() uses get_user to get the undefined
instruction, it can be called before kprobes processes the
recursive check. This can cause an infinite recursive
exception.
Prohibit probing on the get_user functions.

Fixes: 24ba613c9d6c ("ARM kprobes: core code")
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm/include/asm/assembler.h | 10 ++++++++++
 arch/arm/lib/getuser.S           | 10 ++++++++++
 2 files changed, 20 insertions(+)

--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -536,4 +536,14 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm

+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)				\
+	.pushsection "_kprobe_blacklist", "aw" ;	\
+	.balign 4 ;					\
+	.long entry;					\
+	.popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)

 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb	.req	r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)

 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)

 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)

 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)

 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)

 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb	.req	r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)

 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif

 __get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
 	ret	lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)

 .pushsection __ex_table, "a"
 	.long	1b, __get_user_bad
From 9a8fca62aacc1599fea8e813d01e1955513e4fad Mon Sep 17 00:00:00 2001
From: Filipe Manana <fdmanana@suse.com>
Date: Fri, 11 May 2018 16:42:42 +0100
Subject: Btrfs: fix xattr loss after power failure

From: Filipe Manana <fdmanana@suse.com>

commit 9a8fca62aacc1599fea8e813d01e1955513e4fad upstream.

If a file has xattrs and we fsync it (which clears the flags
BTRFS_INODE_NEEDS_FULL_SYNC and BTRFS_INODE_COPY_EVERYTHING from its
inode), then the current transaction commits, and then we fsync it again
(without either of those bits being set in its inode), we end up not
logging all its xattrs. This results in deleting all xattrs when replaying
the log after a power failure.

Trivial reproducer

$ mkfs.btrfs -f /dev/sdb
$ mount /dev/sdb /mnt

$ touch /mnt/foobar
$ setfattr -n user.xa -v qwerty /mnt/foobar
$ xfs_io -c "fsync" /mnt/foobar

$ sync

$ xfs_io -c "pwrite -S 0xab 0 64K" /mnt/foobar
$ xfs_io -c "fsync" /mnt/foobar
<power failure>

$ mount /dev/sdb /mnt
$ getfattr --absolute-names --dump /mnt/foobar
<empty output>
$

So fix this by making sure all xattrs are logged if we log a file's inode
item and neither the flags BTRFS_INODE_NEEDS_FULL_SYNC nor
BTRFS_INODE_COPY_EVERYTHING were set in the inode.

Fixes: 36283bf777d9 ("Btrfs: fix fsync xattr loss in the fast fsync path")
Cc: <stable@vger.kernel.org> # 4.2+
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/tree-log.c | 7 +++++++
 1 file changed, 7 insertions(+)

--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4749,6 +4749,7 @@ static int btrfs_log_inode(struct btrfs_
 	struct extent_map_tree *em_tree = &inode->extent_tree;
 	u64 logged_isize = 0;
 	bool need_log_inode_item = true;
+	bool xattrs_logged = false;

 	path = btrfs_alloc_path();
 	if (!path)
@@ -5050,6 +5051,7 @@ next_key:
 	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
 	if (err)
 		goto out_unlock;
+	xattrs_logged = true;
 	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
 		btrfs_release_path(path);
 		btrfs_release_path(dst_path);
@@ -5062,6 +5064,11 @@ log_extents:
 	btrfs_release_path(dst_path);
 	if (need_log_inode_item) {
 		err = log_inode_item(trans, log, dst_path, inode);
+		if (!err && !xattrs_logged) {
+			err = btrfs_log_all_xattrs(trans, root, inode, path,
+						   dst_path);
+			btrfs_release_path(path);
+		}
 		if (err)
 			goto out_unlock;
 	}
From 6f2f0b394b54e2b159ef969a0b5274e9bbf82ff2 Mon Sep 17 00:00:00 2001
From: Robbie Ko <robbieko@synology.com>
Date: Mon, 14 May 2018 10:51:34 +0800
Subject: Btrfs: send, fix invalid access to commit roots due to concurrent snapshotting

From: Robbie Ko <robbieko@synology.com>

commit 6f2f0b394b54e2b159ef969a0b5274e9bbf82ff2 upstream.

[BUG]
btrfs incremental send BUG happens when creating a snapshot of a snapshot
that is being used by send.

[REASON]
The problem can happen if while we are doing a send one of the snapshots
used (parent or send) is snapshotted, because snapshotting implies COWing
the root of the source subvolume/snapshot.

1. When doing an incremental send, the send process will get the commit
roots from the parent and send snapshots, and add references to them
through extent_buffer_get().

2. When a snapshot/subvolume is snapshotted, its root node is COWed
(transaction.c:create_pending_snapshot()).

3. COWing releases the space used by the node immediately, through:

__btrfs_cow_block()
--btrfs_free_tree_block()
----btrfs_add_free_space(bytenr of node)

4. Because send doesn't hold a transaction open, it's possible that
the transaction used to create the snapshot commits, switches the
commit root and the old space used by the previous root node gets
assigned to some other node allocation. Allocation of a new node will
use the existing extent buffer found in memory, to which we previously
got a reference through extent_buffer_get(), and allow the extent
buffer's content (pages) to be modified:

btrfs_alloc_tree_block
--btrfs_reserve_extent
----find_free_extent (get bytenr of old node)
--btrfs_init_new_buffer (use bytenr of old node)
----btrfs_find_create_tree_block
------alloc_extent_buffer
--------find_extent_buffer (get old node)

5. So send can access invalid memory content and have unpredictable
behaviour.

[FIX]
So we fix the problem by copying the commit roots of the send and
parent snapshots and use those copies.

CallTrace looks like this:
------------[ cut here ]------------
kernel BUG at fs/btrfs/ctree.c:1861!
invalid opcode: 0000 [#1] SMP
CPU: 6 PID: 24235 Comm: btrfs Tainted: P O 3.10.105 #23721
task: ffff88046652d680 ti: ffff88041b720000 task.ti: ffff88041b720000
RIP: 0010:[<ffffffffa08dd0e8>] read_node_slot+0x108/0x110 [btrfs]
RSP: 0018:ffff88041b723b68 EFLAGS: 00010246
RAX: ffff88043ca6b000 RBX: ffff88041b723c50 RCX: ffff880000000000
RDX: 000000000000004c RSI: ffff880314b133f8 RDI: ffff880458b24000
RBP: 0000000000000000 R08: 0000000000000001 R09: ffff88041b723c66
R10: 0000000000000001 R11: 0000000000001000 R12: ffff8803f3e48890
R13: ffff8803f3e48880 R14: ffff880466351800 R15: 0000000000000001
FS: 00007f8c321dc8c0(0000) GS:ffff88047fcc0000(0000)
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007efd1006d000 CR3: 0000000213a24000 CR4: 00000000003407e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Stack:
 ffff88041b723c50 ffff8803f3e48880 ffff8803f3e48890 ffff8803f3e48880
 ffff880466351800 0000000000000001 ffffffffa08dd9d7 ffff88041b723c50
 ffff8803f3e48880 ffff88041b723c66 ffffffffa08dde85 a9ff88042d2c4400
Call Trace:
 [<ffffffffa08dd9d7>] ? tree_move_down.isra.33+0x27/0x50 [btrfs]
 [<ffffffffa08dde85>] ? tree_advance+0xb5/0xc0 [btrfs]
 [<ffffffffa08e83d4>] ? btrfs_compare_trees+0x2d4/0x760 [btrfs]
 [<ffffffffa0982050>] ? finish_inode_if_needed+0x870/0x870 [btrfs]
 [<ffffffffa09841ea>] ? btrfs_ioctl_send+0xeda/0x1050 [btrfs]
 [<ffffffffa094bd3d>] ? btrfs_ioctl+0x1e3d/0x33f0 [btrfs]
 [<ffffffff81111133>] ? handle_pte_fault+0x373/0x990
 [<ffffffff8153a096>] ? atomic_notifier_call_chain+0x16/0x20
 [<ffffffff81063256>] ? set_task_cpu+0xb6/0x1d0
 [<ffffffff811122c3>] ? handle_mm_fault+0x143/0x2a0
 [<ffffffff81539cc0>] ? __do_page_fault+0x1d0/0x500
 [<ffffffff81062f07>] ? check_preempt_curr+0x57/0x90
 [<ffffffff8115075a>] ? do_vfs_ioctl+0x4aa/0x990
 [<ffffffff81034f83>] ? do_fork+0x113/0x3b0
 [<ffffffff812dd7d7>] ? trace_hardirqs_off_thunk+0x3a/0x6c
 [<ffffffff81150cc8>] ? SyS_ioctl+0x88/0xa0
 [<ffffffff8153e422>] ? system_call_fastpath+0x16/0x1b
---[ end trace 29576629ee80b2e1 ]---

Fixes: 7069830a9e38 ("Btrfs: add btrfs_compare_trees function")
CC: stable@vger.kernel.org # 3.6+
Signed-off-by: Robbie Ko <robbieko@synology.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/ctree.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -5460,12 +5460,24 @@ int btrfs_compare_trees(struct btrfs_roo
 	down_read(&fs_info->commit_root_sem);
 	left_level = btrfs_header_level(left_root->commit_root);
 	left_root_level = left_level;
-	left_path->nodes[left_level] = left_root->commit_root;
+	left_path->nodes[left_level] =
+			btrfs_clone_extent_buffer(left_root->commit_root);
+	if (!left_path->nodes[left_level]) {
+		up_read(&fs_info->commit_root_sem);
+		ret = -ENOMEM;
+		goto out;
+	}
 	extent_buffer_get(left_path->nodes[left_level]);

 	right_level = btrfs_header_level(right_root->commit_root);
 	right_root_level = right_level;
-	right_path->nodes[right_level] = right_root->commit_root;
+	right_path->nodes[right_level] =
+			btrfs_clone_extent_buffer(right_root->commit_root);
+	if (!right_path->nodes[right_level]) {
+		up_read(&fs_info->commit_root_sem);
+		ret = -ENOMEM;
+		goto out;
+	}
 	extent_buffer_get(right_path->nodes[right_level]);
 	up_read(&fs_info->commit_root_sem);

From 1a63c198ddb810c790101d693c7071cca703b3c7 Mon Sep 17 00:00:00 2001
From: Misono Tomohiro <misono.tomohiro@jp.fujitsu.com>
Date: Tue, 15 May 2018 16:51:26 +0900
Subject: btrfs: property: Set incompat flag if lzo/zstd compression is set

From: Misono Tomohiro <misono.tomohiro@jp.fujitsu.com>

commit 1a63c198ddb810c790101d693c7071cca703b3c7 upstream.

The incompat flag of LZO/ZSTD compression should be set at:

1. mount time (-o compress/compress-force)
2. when defrag is done
3. when property is set

Currently 3. is missing and this commit adds it.

This could lead to a filesystem that uses ZSTD but is not marked as
such. If a kernel without ZSTD support encounters a ZSTD compressed
extent, it will handle that, but this could be confusing to the user.

Typically the filesystem is mounted with the ZSTD option, but the
discrepancy can arise when a filesystem is never mounted with ZSTD and
then the property on some file is set (and some new extents are
written). A simple mount with -o compress=zstd will fix that up on an
unpatched kernel.

Same goes for LZO, but this has been around for a very long time
(2.6.37) so it's unlikely that a pre-LZO kernel would be used.

Fixes: 5c1aab1dd544 ("btrfs: Add zstd support")
CC: stable@vger.kernel.org # 4.14+
Signed-off-by: Tomohiro Misono <misono.tomohiro@jp.fujitsu.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ add user visible impact ]
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/props.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -393,6 +393,7 @@ static int prop_compression_apply(struct
 				  const char *value,
 				  size_t len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int type;

 	if (len == 0) {
@@ -403,14 +404,17 @@ static int prop_compression_apply(struct
 		return 0;
 	}

-	if (!strncmp("lzo", value, 3))
+	if (!strncmp("lzo", value, 3)) {
 		type = BTRFS_COMPRESS_LZO;
-	else if (!strncmp("zlib", value, 4))
+		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+	} else if (!strncmp("zlib", value, 4)) {
 		type = BTRFS_COMPRESS_ZLIB;
-	else if (!strncmp("zstd", value, len))
+	} else if (!strncmp("zstd", value, len)) {
 		type = BTRFS_COMPRESS_ZSTD;
-	else
+		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
+	} else {
 		return -EINVAL;
+	}

 	BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
 	BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
From 02ee654d3a04563c67bfe658a05384548b9bb105 Mon Sep 17 00:00:00 2001
From: Anand Jain <anand.jain@oracle.com>
Date: Thu, 17 May 2018 15:16:51 +0800
Subject: btrfs: fix crash when trying to resume balance without the resume flag

From: Anand Jain <anand.jain@oracle.com>

commit 02ee654d3a04563c67bfe658a05384548b9bb105 upstream.

We set the BTRFS_BALANCE_RESUME flag only in btrfs_recover_balance(),
which isn't called during remount. So when resuming from
the paused balance we hit the bug:

kernel: kernel BUG at fs/btrfs/volumes.c:3890!
::
kernel: balance_kthread+0x51/0x60 [btrfs]
kernel: kthread+0x111/0x130
::
kernel: RIP: btrfs_balance+0x12e1/0x1570 [btrfs] RSP: ffffba7d0090bde8

Reproducer:
On a mounted filesystem:

btrfs balance start --full-balance /btrfs
btrfs balance pause /btrfs
mount -o remount,ro /dev/sdb /btrfs
mount -o remount,rw /dev/sdb /btrfs

To fix this set the BTRFS_BALANCE_RESUME flag in
btrfs_resume_balance_async().

CC: stable@vger.kernel.org # 4.4+
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/volumes.c | 9 +++++++++
 1 file changed, 9 insertions(+)

--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4046,6 +4046,15 @@ int btrfs_resume_balance_async(struct bt
 		return 0;
 	}

+	/*
+	 * A ro->rw remount sequence should continue with the paused balance
+	 * regardless of who pauses it, system or the user as of now, so set
+	 * the resume flag.
+	 */
+	spin_lock(&fs_info->balance_lock);
+	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
+	spin_unlock(&fs_info->balance_lock);
+
 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
 	return PTR_ERR_OR_ZERO(tsk);
 }
From 2b8773313494ede83a26fb372466e634564002ed Mon Sep 17 00:00:00 2001
From: Nikolay Borisov <nborisov@suse.com>
Date: Fri, 27 Apr 2018 12:21:51 +0300
Subject: btrfs: Split btrfs_del_delalloc_inode into 2 functions

From: Nikolay Borisov <nborisov@suse.com>

commit 2b8773313494ede83a26fb372466e634564002ed upstream.

This is in preparation for fixing delalloc inode leakage on transaction
abort. Also export the new function.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/ctree.h |  2 ++
 fs/btrfs/inode.c | 13 ++++++++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3153,6 +3153,8 @@ noinline int can_nocow_extent(struct ino
 			      u64 *orig_start, u64 *orig_block_len,
 			      u64 *ram_bytes);

+void __btrfs_del_delalloc_inode(struct btrfs_root *root,
+				struct btrfs_inode *inode);
 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1762,12 +1762,12 @@ static void btrfs_add_delalloc_inodes(st
 	spin_unlock(&root->delalloc_lock);
 }

-static void btrfs_del_delalloc_inode(struct btrfs_root *root,
-				     struct btrfs_inode *inode)
+
+void __btrfs_del_delalloc_inode(struct btrfs_root *root,
+				struct btrfs_inode *inode)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

-	spin_lock(&root->delalloc_lock);
 	if (!list_empty(&inode->delalloc_inodes)) {
 		list_del_init(&inode->delalloc_inodes);
 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
@@ -1780,6 +1780,13 @@ static void btrfs_del_delalloc_inode(str
 			spin_unlock(&fs_info->delalloc_root_lock);
 		}
 	}
+}
+
+static void btrfs_del_delalloc_inode(struct btrfs_root *root,
+				     struct btrfs_inode *inode)
+{
+	spin_lock(&root->delalloc_lock);
+	__btrfs_del_delalloc_inode(root, inode);
 	spin_unlock(&root->delalloc_lock);
 }

From fe816d0f1d4c31c4c31d42ca78a87660565fc800 Mon Sep 17 00:00:00 2001
From: Nikolay Borisov <nborisov@suse.com>
Date: Fri, 27 Apr 2018 12:21:53 +0300
Subject: btrfs: Fix delalloc inodes invalidation during transaction abort

From: Nikolay Borisov <nborisov@suse.com>

commit fe816d0f1d4c31c4c31d42ca78a87660565fc800 upstream.

When a transaction is aborted btrfs_cleanup_transaction is called to
clean up all the various in-flight bits and pieces which might be
active. One of those is delalloc inodes - inodes which have dirty
pages which haven't been persisted yet. Currently the process of
freeing such delalloc inodes in exceptional circumstances such as
transaction abort boiled down to calling btrfs_invalidate_inodes whose
sole job is to invalidate the dentries for all inodes related to a
root. This is in fact wrong and insufficient since such delalloc inodes
will likely have pending pages or ordered-extents and will be linked to
the sb->s_inode_list. This means that unmounting a btrfs instance with
an aborted transaction could potentially leave inodes/their pages
visible to the system long after their superblock has been freed. This
in turn leads to a "use-after-free" situation once page shrink is
triggered. This situation could be simulated by running generic/019
which would cause such inodes to be left hanging, followed by
generic/176 which causes memory pressure and page eviction, which leads
to touching the freed super block instance. This situation is
additionally detected by the unmount code of VFS with the following
message:

"VFS: Busy inodes after unmount of Self-destruct in 5 seconds. Have a nice day..."

Additionally btrfs hits WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
in free_fs_root for the same reason.

This patch aims to rectify the situation by doing the following:

1. Change btrfs_destroy_delalloc_inodes so that it calls
invalidate_inode_pages2 for every inode on the delalloc list, this
ensures that all the pages of the inode are released. This function
boils down to calling btrfs_releasepage. During testing I observed cases
where inodes on the delalloc list had an i_count of 0, so this
necessitates using igrab to be sure we are working on a non-freed inode.

2. Since calling btrfs_releasepage might queue delayed iputs, move the
call out to btrfs_cleanup_transaction in btrfs_error_commit_super before
calling run_delayed_iputs for the last time. This is necessary to ensure
that delayed iputs are run.

Note: this patch is tagged for 4.14 stable but the fix applies to older
versions too but needs to be backported manually due to conflicts.

CC: stable@vger.kernel.org # 4.14.x: 2b8773313494: btrfs: Split btrfs_del_delalloc_inode into 2 functions
CC: stable@vger.kernel.org # 4.14.x
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ add comment to igrab ]
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/disk-io.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3744,6 +3744,7 @@ void close_ctree(struct btrfs_fs_info *f
 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);

 	btrfs_free_qgroup_config(fs_info);
+	ASSERT(list_empty(&fs_info->delalloc_roots));

 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
 		btrfs_info(fs_info, "at unmount delalloc count %lld",
@@ -4049,15 +4050,15 @@ static int btrfs_check_super_valid(struc

 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
 {
+	/* cleanup FS via transaction */
+	btrfs_cleanup_transaction(fs_info);
+
 	mutex_lock(&fs_info->cleaner_mutex);
 	btrfs_run_delayed_iputs(fs_info);
 	mutex_unlock(&fs_info->cleaner_mutex);

 	down_write(&fs_info->cleanup_work_sem);
 	up_write(&fs_info->cleanup_work_sem);
-
-	/* cleanup FS via transaction */
-	btrfs_cleanup_transaction(fs_info);
 }

 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
@@ -4182,19 +4183,23 @@ static void btrfs_destroy_delalloc_inode
 	list_splice_init(&root->delalloc_inodes, &splice);

 	while (!list_empty(&splice)) {
+		struct inode *inode = NULL;
 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
 					       delalloc_inodes);
-
-		list_del_init(&btrfs_inode->delalloc_inodes);
-		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
-			  &btrfs_inode->runtime_flags);
+		__btrfs_del_delalloc_inode(root, btrfs_inode);
 		spin_unlock(&root->delalloc_lock);

-		btrfs_invalidate_inodes(btrfs_inode->root);
-
+		/*
+		 * Make sure we get a live inode and that it'll not disappear
+		 * meanwhile.
+		 */
+		inode = igrab(&btrfs_inode->vfs_inode);
+		if (inode) {
+			invalidate_inode_pages2(inode->i_mapping);
+			iput(inode);
+		}
 		spin_lock(&root->delalloc_lock);
 	}
-
 	spin_unlock(&root->delalloc_lock);
 }

@@ -4210,7 +4215,6 @@ static void btrfs_destroy_all_delalloc_i
 	while (!list_empty(&splice)) {
 		root = list_first_entry(&splice, struct btrfs_root,
 					delalloc_root);
-		list_del_init(&root->delalloc_root);
 		root = btrfs_grab_fs_root(root);
 		BUG_ON(!root);
 		spin_unlock(&fs_info->delalloc_root_lock);
From 02a3307aa9c20b4f6626255b028f07f6cfa16feb Mon Sep 17 00:00:00 2001
From: Liu Bo <bo.liu@linux.alibaba.com>
Date: Wed, 16 May 2018 01:37:36 +0800
Subject: btrfs: fix reading stale metadata blocks after degraded raid1 mounts

From: Liu Bo <bo.liu@linux.alibaba.com>

commit 02a3307aa9c20b4f6626255b028f07f6cfa16feb upstream.

If a btree block, aka. extent buffer, is not available in the extent
buffer cache, it'll be read out from the disk instead, i.e.

btrfs_search_slot()
  read_block_for_search()  # hold parent and its lock, go to read child
    btrfs_release_path()
    read_tree_block()  # read child

Unfortunately, the parent lock got released before reading the child, so
commit 5bdd3536cbbe ("Btrfs: Fix block generation verification race") had
used 0 as the parent transid to read the child block. It forces
read_tree_block() not to check if the parent transid is different from the
generation id of the child that it reads out from disk.

A simple PoC is included in btrfs/124,

0. A two-disk raid1 btrfs,

1. Right after mkfs.btrfs, block A is allocated to be device tree's root.

2. Mount this filesystem and put it in use, after a while, device tree's
root got COWed but block A hasn't been allocated/overwritten yet.

3. Umount it and reload the btrfs module to remove both disks from the
global @fs_devices list.

4. mount -odegraded dev1 and write some data, so now block A is allocated
to be a leaf in checksum tree. Note that only dev1 has the latest
metadata of this filesystem.

5. Umount it and mount it again normally (with both disks), since raid1
can pick up one disk by the writer task's pid, if btrfs_search_slot()
needs to read block A, dev2 which does NOT have the latest metadata
might be read for block A, then we got a stale block A.

6. As parent transid is not checked, block A is marked as uptodate and
put into the extent buffer cache, so the future search won't bother
to read disk again, which means it'll make changes on this stale
one and make it dirty and flush it onto disk.

To avoid the problem, parent transid needs to be passed to
read_tree_block().

In order to get a valid parent transid, we need to hold the parent's
lock until finishing reading the child.

This patch needs to be slightly adapted for stable kernels, the
&first_key parameter added to read_tree_block() is from 4.16+
(581c1760415c4). The fix is to replace 0 by 'gen'.

Fixes: 5bdd3536cbbe ("Btrfs: Fix block generation verification race")
CC: stable@vger.kernel.org # 4.4+
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
[ update changelog ]
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 fs/btrfs/ctree.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2491,10 +2491,8 @@ read_block_for_search(struct btrfs_root
 	if (p->reada != READA_NONE)
 		reada_for_search(fs_info, p, level, slot, key->objectid);

-	btrfs_release_path(p);
-
 	ret = -EAGAIN;
-	tmp = read_tree_block(fs_info, blocknr, 0);
+	tmp = read_tree_block(fs_info, blocknr, gen);
 	if (!IS_ERR(tmp)) {
 		/*
 		 * If the read above didn't mark this buffer up to date,
@@ -2508,6 +2506,8 @@ read_block_for_search(struct btrfs_root
 	} else {
 		ret = PTR_ERR(tmp);
 	}
+
+	btrfs_release_path(p);
 	return ret;
 }

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Tue, 1 May 2018 15:55:51 +0200
Subject: x86/nospec: Simplify alternative_msr_write()

From: Linus Torvalds <torvalds@linux-foundation.org>

commit 1aa7a5735a41418d8e01fa7c9565eb2657e2ea3f upstream

The macro is not type safe and I did look for why that "g" constraint for
the asm doesn't work: it's because the asm is more fundamentally wrong.

It does

movl %[val], %%eax

but "val" isn't a 32-bit value, so then gcc will pass it in a register,
and generate code like

movl %rsi, %eax

and gas will complain about a nonsensical 'mov' instruction (it's moving a
64-bit register to a 32-bit one).

Passing it through memory will just hide the real bug - gcc still thinks
the memory location is 64-bit, but the "movl" will only load the first 32
bits and it all happens to work because x86 is little-endian.

Convert it to a type safe inline function with a little trick which hands
the feature into the ALTERNATIVE macro.
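
As a stand-alone illustration of the fixed shape (a user-space sketch, not
part of the patch; a nop stands in for the privileged wrmsr), the "c"/"a"/"d"
constraints force the compiler to materialize the MSR index in ECX and the
64-bit value in the EDX:EAX pair, so no ad-hoc "movl" of a 64-bit operand can
ever be emitted:

    #include <stdint.h>
    #include <stdio.h>

    static inline void typed_msr_shape(unsigned int msr, uint64_t val)
    {
        unsigned int lo = (unsigned int)val;         /* bits 31:0  -> EAX */
        unsigned int hi = (unsigned int)(val >> 32); /* bits 63:32 -> EDX */

        /* wrmsr itself is privileged; a nop keeps the sketch runnable */
        asm volatile("nop"
                     : : "c" (msr), "a" (lo), "d" (hi) : "memory");
    }

    int main(void)
    {
        typed_msr_shape(0x48, 0x100000002ULL);
        printf("constraints satisfied\n");
        return 0;
    }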

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/include/asm/nospec-branch.h | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -241,15 +241,16 @@ static inline void vmexit_fill_RSB(void)
#endif
}

-#define alternative_msr_write(_msr, _val, _feature) \
- asm volatile(ALTERNATIVE("", \
- "movl %[msr], %%ecx\n\t" \
- "movl %[val], %%eax\n\t" \
- "movl $0, %%edx\n\t" \
- "wrmsr", \
- _feature) \
- : : [msr] "i" (_msr), [val] "i" (_val) \
- : "eax", "ecx", "edx", "memory")
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+ : : "c" (msr),
+ "a" (val),
+ "d" (val >> 32),
+ [feature] "i" (feature)
+ : "memory");
+}

static inline void indirect_branch_prediction_barrier(void)
{

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:16 -0400
Subject: x86/bugs: Concentrate bug detection into a separate function

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 4a28bfe3267b68e22c663ac26185aa16c9b879ef upstream

Combine the various pieces of logic which go through all those
x86_cpu_id matching structures into one function.

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/kernel/cpu/common.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)

--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -918,21 +918,27 @@ static const __initconst struct x86_cpu_
{}
};

-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;

+ if (x86_match_cpu(cpu_no_speculation))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
if (x86_match_cpu(cpu_no_meltdown))
- return false;
+ return;

if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

/* Rogue Data Cache Load? No! */
if (ia32_cap & ARCH_CAP_RDCL_NO)
- return false;
+ return;

- return true;
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
}

/*
@@ -982,12 +988,7 @@ static void __init early_identify_cpu(st

setup_force_cpu_cap(X86_FEATURE_ALWAYS);

- if (!x86_match_cpu(cpu_no_speculation)) {
- if (cpu_vulnerable_to_meltdown(c))
- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
- }
+ cpu_set_bug_bits(c);

fpu__init_system(c);

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:17 -0400
Subject: x86/bugs: Concentrate bug reporting into a separate function

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit d1059518b4789cabe34bb4b714d07e6089c82ca1 upstream

Those SysFS functions have a similar preamble; as such, add common
code to handle them.

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/kernel/cpu/bugs.c | 46 +++++++++++++++++++++++++++++++--------------
1 file changed, 32 insertions(+), 14 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -314,30 +314,48 @@ retpoline_auto:
#undef pr_fmt

#ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+
+ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
{
- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");
- if (boot_cpu_has(X86_FEATURE_PTI))
- return sprintf(buf, "Mitigation: PTI\n");
+
+ switch (bug) {
+ case X86_BUG_CPU_MELTDOWN:
+ if (boot_cpu_has(X86_FEATURE_PTI))
+ return sprintf(buf, "Mitigation: PTI\n");
+
+ break;
+
+ case X86_BUG_SPECTRE_V1:
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+ case X86_BUG_SPECTRE_V2:
+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ spectre_v2_module_string());
+
+ default:
+ break;
+ }
+
return sprintf(buf, "Vulnerable\n");
}

+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
+}
+
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
- return sprintf(buf, "Not affected\n");
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- return sprintf(buf, "Not affected\n");
-
- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
- spectre_v2_module_string());
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}
#endif

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:18 -0400
Subject: x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 1b86883ccb8d5d9506529d42dbe1a5257cb30b18 upstream

The 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to all
the other bits as reserved. The Intel SDM glossary defines reserved as
implementation specific - aka unknown.

As such, at bootup this must be taken into account and proper masking
applied for the bits in use.

A copy of this document is available at
https://bugzilla.kernel.org/show_bug.cgi?id=199511

[ tglx: Made x86_spec_ctrl_base __ro_after_init ]
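
A minimal sketch of the discipline this introduces (illustrative user-space
code, not kernel code): the boot value is captured once, and every subsequent
write ORs the wanted mitigation bits on top of it, so reserved bits set by
firmware survive untouched:

    #include <stdint.h>

    #define SPEC_CTRL_IBRS (1ULL << 0)

    static uint64_t spec_ctrl_base;   /* would be filled from rdmsr at boot */

    static uint64_t spec_ctrl_value(uint64_t wanted)
    {
        /* reserved bits present at boot pass through unchanged */
        return spec_ctrl_base | wanted;
    }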

Suggested-by: Jon Masters <jcm@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/include/asm/nospec-branch.h | 24 ++++++++++++++++++++----
arch/x86/kernel/cpu/bugs.c | 28 ++++++++++++++++++++++++++++
2 files changed, 48 insertions(+), 4 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,6 +217,17 @@ enum spectre_v2_mitigation {
SPECTRE_V2_IBRS,
};

+/*
+ * The Intel specification for the SPEC_CTRL MSR requires that we
+ * preserve any already set reserved bits at boot time (e.g. for
+ * future additions that this kernel is not currently aware of).
+ * We then set any additional mitigation bits that we want
+ * ourselves and always use this as the base for SPEC_CTRL.
+ * We also use this when handling guest entry/exit as below.
+ */
+extern void x86_spec_ctrl_set(u64);
+extern u64 x86_spec_ctrl_get_default(void);
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

@@ -254,8 +265,9 @@ void alternative_msr_write(unsigned int

static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
- X86_FEATURE_USE_IBPB);
+ u64 val = PRED_CMD_IBPB;
+
+ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/*
@@ -266,14 +278,18 @@ static inline void indirect_branch_predi
*/
#define firmware_restrict_branch_speculation_start() \
do { \
+ u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
+ \
preempt_disable(); \
- alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
X86_FEATURE_USE_IBRS_FW); \
} while (0)

#define firmware_restrict_branch_speculation_end() \
do { \
- alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
+ u64 val = x86_spec_ctrl_get_default(); \
+ \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
X86_FEATURE_USE_IBRS_FW); \
preempt_enable(); \
} while (0)
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -28,6 +28,12 @@

static void __init spectre_v2_select_mitigation(void);

+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+static u64 __ro_after_init x86_spec_ctrl_base;
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -37,6 +43,13 @@ void __init check_bugs(void)
print_cpu_info(&boot_cpu_data);
}

+ /*
+ * Read the SPEC_CTRL MSR to account for reserved bits which may
+ * have unknown values.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
/* Select the proper spectre mitigation before patching alternatives */
spectre_v2_select_mitigation();

@@ -95,6 +108,21 @@ static const char *spectre_v2_strings[]

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

+void x86_spec_ctrl_set(u64 val)
+{
+ if (val & ~SPEC_CTRL_IBRS)
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+u64 x86_spec_ctrl_get_default(void)
+{
+ return x86_spec_ctrl_base;
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:19 -0400
Subject: x86/bugs, KVM: Support the combination of guest and host IBRS

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 5cf687548705412da47c9cec342fd952d71ed3d5 upstream

A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, this means a value of zero is
what is needed in the host.

But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
the other bits as reserved, so the kernel should respect the boot time
SPEC_CTRL value and use that.

This allows dealing with future extensions to the SPEC_CTRL interface,
if any at all.

Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/include/asm/nospec-branch.h | 10 ++++++++++
arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++++++++
arch/x86/kvm/svm.c | 6 ++----
arch/x86/kvm/vmx.c | 6 ++----
4 files changed, 32 insertions(+), 8 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
extern void x86_spec_ctrl_set(u64);
extern u64 x86_spec_ctrl_get_default(void);

+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -123,6 +123,24 @@ u64 x86_spec_ctrl_get_default(void)
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+ if (x86_spec_ctrl_base != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+ if (x86_spec_ctrl_base != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5401,8 +5401,7 @@ static void svm_vcpu_run(struct kvm_vcpu
* is no need to worry about the conditional branch over the wrmsr
* being speculatively taken.
*/
- if (svm->spec_ctrl)
- native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ x86_spec_ctrl_set_guest(svm->spec_ctrl);

asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5514,8 +5513,7 @@ static void svm_vcpu_run(struct kvm_vcpu
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

- if (svm->spec_ctrl)
- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ x86_spec_ctrl_restore_host(svm->spec_ctrl);

/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9468,8 +9468,7 @@ static void __noclone vmx_vcpu_run(struc
* is no need to worry about the conditional branch over the wrmsr
* being speculatively taken.
*/
- if (vmx->spec_ctrl)
- native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl);

vmx->__launched = vmx->loaded_vmcs->launched;
asm(
@@ -9607,8 +9606,7 @@ static void __noclone vmx_vcpu_run(struc
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

- if (vmx->spec_ctrl)
- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ x86_spec_ctrl_restore_host(vmx->spec_ctrl);

/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:20 -0400
Subject: x86/bugs: Expose /sys/../spec_store_bypass

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit c456442cd3a59eeb1d60293c26cbe2ff2c4e42cf upstream

Add the sysfs file for the new vulnerability. It does not do much except
show the words 'Vulnerable' for recent x86 cores.

Intel cores prior to family 6 are known not to be vulnerable, and so are
some Atoms and some Xeon Phi.

It assumes that older Cyrix, Centaur, etc. cores are immune.
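
Once the series is applied, the new file can be read like any other entry in
the vulnerabilities directory; a small sketch of a reader (ordinary
user-space C, nothing patch-specific beyond the path added below):

    #include <stdio.h>

    int main(void)
    {
        char line[128];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);    /* e.g. "Vulnerable" */
        fclose(f);
        return 0;
    }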

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
Documentation/ABI/testing/sysfs-devices-system-cpu | 1
arch/x86/include/asm/cpufeatures.h | 1
arch/x86/kernel/cpu/bugs.c | 5 ++++
arch/x86/kernel/cpu/common.c | 23 +++++++++++++++++++++
drivers/base/cpu.c | 8 +++++++
include/linux/cpu.h | 2 +
6 files changed, 40 insertions(+)

--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -453,6 +453,7 @@ What: /sys/devices/system/cpu/vulnerabi
/sys/devices/system/cpu/vulnerabilities/meltdown
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -362,5 +362,6 @@
#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */

#endif /* _ASM_X86_CPUFEATURES_H */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -404,4 +404,9 @@ ssize_t cpu_show_spectre_v2(struct devic
{
return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
+}
#endif
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -918,10 +918,33 @@ static const __initconst struct x86_cpu_
{}
};

+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ { X86_VENDOR_CENTAUR, 5, },
+ { X86_VENDOR_INTEL, 5, },
+ { X86_VENDOR_NSC, 5, },
+ { X86_VENDOR_ANY, 4, },
+ {}
+};
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;

+ if (!x86_match_cpu(cpu_no_spec_store_bypass))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
if (x86_match_cpu(cpu_no_speculation))
return;

--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -532,14 +532,22 @@ ssize_t __weak cpu_show_spectre_v2(struc
return sprintf(buf, "Not affected\n");
}

+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
NULL
};

--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struc
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Sat, 28 Apr 2018 22:34:17 +0200
Subject: x86/cpufeatures: Add X86_FEATURE_RDS

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 0cc5fa00b0a88dad140b4e5c2cead9951ad36822 upstream

Add the CPU feature bit CPUID.7.0.EDX[31] which indicates whether the CPU
supports Reduced Data Speculation.

[ tglx: Split it out from a later patch ]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/include/asm/cpufeatures.h | 1 +
1 file changed, 1 insertion(+)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -333,6 +333,7 @@
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */

/*
* BUG word(s)

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:21 -0400
Subject: x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 24f7fc83b9204d20f878c57cb77d261ae825e033 upstream

Contemporary high performance processors use a common industry-wide
optimization known as "Speculative Store Bypass" in which loads from
addresses to which a recent store has occurred may (speculatively) see an
older value. Intel refers to this feature as "Memory Disambiguation" which
is part of their "Smart Memory Access" capability.

Memory Disambiguation can expose a cache side-channel attack against such
speculatively read values. An attacker can create exploit code that allows
them to read memory outside of a sandbox environment (for example,
malicious JavaScript in a web page), or to perform more complex attacks
against code running within the same privilege level, e.g. via the stack.

As a first step to mitigate such attacks, provide two boot command
line control knobs:

nospec_store_bypass_disable
spec_store_bypass_disable=[off,auto,on]

By default, affected x86 processors power on with Speculative
Store Bypass enabled. Hence the provided kernel parameters are written
from the point of view of whether to enable a mitigation or not.
The parameters are as follows:

- auto - Kernel detects whether your CPU model contains an implementation
of Speculative Store Bypass and picks the most appropriate
mitigation.

- on - disable Speculative Store Bypass
- off - enable Speculative Store Bypass

[ tglx: Reordered the checks so that the whole evaluation is not done
when the CPU does not support RDS ]
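
The option handling in the diff below follows the kernel's usual
table-driven pattern; a stand-alone sketch of the same idea (names
shortened, plain strcmp in place of the kernel's match_option() helper):

    #include <stdio.h>
    #include <string.h>

    enum cmd { CMD_NONE, CMD_AUTO, CMD_ON };

    static const struct { const char *option; enum cmd cmd; } options[] = {
        { "auto", CMD_AUTO },   /* platform decides */
        { "on",   CMD_ON },     /* disable Speculative Store Bypass */
        { "off",  CMD_NONE },   /* don't touch Speculative Store Bypass */
    };

    static enum cmd parse(const char *arg)
    {
        size_t i;

        for (i = 0; i < sizeof(options) / sizeof(options[0]); i++)
            if (!strcmp(arg, options[i].option))
                return options[i].cmd;
        fprintf(stderr, "unknown option (%s), using auto\n", arg);
        return CMD_AUTO;
    }

    int main(void)
    {
        printf("%d\n", parse("on"));   /* prints 2 (CMD_ON) */
        return 0;
    }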

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
Documentation/admin-guide/kernel-parameters.txt | 33 +++++++
arch/x86/include/asm/cpufeatures.h | 1
arch/x86/include/asm/nospec-branch.h | 6 +
arch/x86/kernel/cpu/bugs.c | 103 ++++++++++++++++++++++++
4 files changed, 143 insertions(+)

--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2647,6 +2647,9 @@
allow data leaks with this option, which is equivalent
to spectre_v2=off.

+ nospec_store_bypass_disable
+ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
@@ -3997,6 +4000,36 @@
Not specifying this option is equivalent to
spectre_v2=auto.

+ spec_store_bypass_disable=
+ [HW] Control Speculative Store Bypass (SSB) Disable mitigation
+ (Speculative Store Bypass vulnerability)
+
+ Certain CPUs are vulnerable to an exploit against a
+ a common industry wide performance optimization known
+ as "Speculative Store Bypass" in which recent stores
+ to the same memory location may not be observed by
+ later loads during speculative execution. The idea
+ is that such stores are unlikely and that they can
+ be detected prior to instruction retirement at the
+ end of a particular speculation execution window.
+
+ In vulnerable processors, the speculatively forwarded
+ store can be used in a cache side channel attack, for
+ example to read memory to which the attacker does not
+ directly have access (e.g. inside sandboxed code).
+
+ This parameter controls whether the Speculative Store
+ Bypass optimization is used.
+
+ on - Unconditionally disable Speculative Store Bypass
+ off - Unconditionally enable Speculative Store Bypass
+ auto - Kernel detects whether the CPU model contains an
+ implementation of Speculative Store Bypass and
+ picks the most appropriate mitigation
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -214,6 +214,7 @@

#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -238,6 +238,12 @@ extern u64 x86_spec_ctrl_get_default(voi
extern void x86_spec_ctrl_set_guest(u64);
extern void x86_spec_ctrl_restore_host(u64);

+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+};
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -27,6 +27,7 @@
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);

/*
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -53,6 +54,12 @@ void __init check_bugs(void)
/* Select the proper spectre mitigation before patching alternatives */
spectre_v2_select_mitigation();

+ /*
+ * Select proper mitigation for any exposure to the Speculative Store
+ * Bypass vulnerability.
+ */
+ ssb_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -358,6 +365,99 @@ retpoline_auto:
}

#undef pr_fmt
+#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
+
+static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+
+/* The kernel command line selection */
+enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_NONE,
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
+};
+
+static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
+};
+
+static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+} ssb_mitigation_options[] = {
+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+};
+
+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+{
+ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+ char arg[20];
+ int ret, i;
+
+ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+ return SPEC_STORE_BYPASS_CMD_NONE;
+ } else {
+ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
+ arg, sizeof(arg));
+ if (ret < 0)
+ return SPEC_STORE_BYPASS_CMD_AUTO;
+
+ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
+ if (!match_option(arg, ret, ssb_mitigation_options[i].option))
+ continue;
+
+ cmd = ssb_mitigation_options[i].cmd;
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
+ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+ return SPEC_STORE_BYPASS_CMD_AUTO;
+ }
+ }
+
+ return cmd;
+}
+
+static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+{
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
+
+ if (!boot_cpu_has(X86_FEATURE_RDS))
+ return mode;
+
+ cmd = ssb_parse_cmdline();
+ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
+ (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
+ cmd == SPEC_STORE_BYPASS_CMD_AUTO))
+ return mode;
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
+ case SPEC_STORE_BYPASS_CMD_NONE:
+ break;
+ }
+
+ if (mode != SPEC_STORE_BYPASS_NONE)
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ return mode;
+}
+
+static void ssb_select_mitigation()
+{
+ ssb_mode = __ssb_select_mitigation();
+
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
+#undef pr_fmt

#ifdef CONFIG_SYSFS

@@ -383,6 +483,9 @@ ssize_t cpu_show_common(struct device *d
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
spectre_v2_module_string());

+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
default:
break;
}

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:22 -0400
Subject: x86/bugs/intel: Set proper CPU features and setup RDS

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 772439717dbf703b39990be58d8d4e3e4ad0598a upstream

Intel CPUs expose methods to:

- Detect whether the RDS capability is available via CPUID.7.0.EDX[31],

- The SPEC_CTRL MSR (0x48), where bit 2 is set to enable RDS.

- MSR_IA32_ARCH_CAPABILITIES, where Bit(4) means there is no need to enable RDS.

With that in mind, if spec_store_bypass_disable=[auto,on] is selected, set
the SPEC_CTRL MSR at boot time to enable RDS if the platform requires it.

Note that this does not fix the KVM case where the SPEC_CTRL is exposed to
guests which can muck with it, see the patch titled:
KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS.

And for the firmware (IBRS to be set), see the patch titled:
x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits

[ tglx: Disentangled it from the intel implementation and kept the call order ]
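
The CPUID enumeration mentioned above can also be checked from user space; a
small sketch using GCC's <cpuid.h> (illustrative only, the kernel uses its
own cpu_has()/x86_match_cpu() machinery):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("CPUID.7.0.EDX[31] (RDS): %s\n",
               (edx & (1u << 31)) ? "supported" : "not supported");
        return 0;
    }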

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/include/asm/msr-index.h | 6 ++++++
arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++--
arch/x86/kernel/cpu/common.c | 10 ++++++----
arch/x86/kernel/cpu/cpu.h | 2 ++
arch/x86/kernel/cpu/intel.c | 1 +
5 files changed, 43 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,6 +42,7 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
@@ -68,6 +69,11 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+#define ARCH_CAP_RDS_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Reduced Data Speculation control
+ * required.
+ */

#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -117,7 +117,7 @@ static enum spectre_v2_mitigation spectr

void x86_spec_ctrl_set(u64 val)
{
- if (val & ~SPEC_CTRL_IBRS)
+ if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
else
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
@@ -444,8 +444,28 @@ static enum ssb_mitigation_cmd __init __
break;
}

- if (mode != SPEC_STORE_BYPASS_NONE)
+ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+ if (mode != SPEC_STORE_BYPASS_NONE) {
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ /*
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+ * a completely different MSR and bit dependent on family.
+ */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
+ break;
+ }
+ }
+
return mode;
}

@@ -459,6 +479,12 @@ static void ssb_select_mitigation()

#undef pr_fmt

+void x86_spec_ctrl_setup_ap(void)
+{
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
+}
+
#ifdef CONFIG_SYSFS

ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -942,7 +942,11 @@ static void __init cpu_set_bug_bits(stru
{
u64 ia32_cap = 0;

- if (!x86_match_cpu(cpu_no_spec_store_bypass))
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+ !(ia32_cap & ARCH_CAP_RDS_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

if (x86_match_cpu(cpu_no_speculation))
@@ -954,9 +958,6 @@ static void __init cpu_set_bug_bits(stru
if (x86_match_cpu(cpu_no_meltdown))
return;

- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
/* Rogue Data Cache Load? No! */
if (ia32_cap & ARCH_CAP_RDCL_NO)
return;
@@ -1371,6 +1372,7 @@ void identify_secondary_cpu(struct cpuin
#endif
mtrr_ap_init();
validate_apic_and_package_id(c);
+ x86_spec_ctrl_setup_ap();
}

static __init int setup_noclflush(char *arg)
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struc

unsigned int aperfmperf_get_khz(int cpu);

+extern void x86_spec_ctrl_setup_ap(void);
+
#endif /* ARCH_X86_CPU_H */
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -189,6 +189,7 @@ static void early_init_intel(struct cpui
setup_clear_cpu_cap(X86_FEATURE_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_RDS);
}

/*

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:23 -0400
Subject: x86/bugs: Whitelist allowed SPEC_CTRL MSR values

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 1115a859f33276fe8afb31c60cf9d8e657872558 upstream

Intel and AMD SPEC_CTRL (0x48) MSR semantics may differ in the
future (or in fact use different MSRs for the same functionality).

As such, a run-time mechanism is required to whitelist the appropriate MSR
values.

[ tglx: Made the variable __ro_after_init ]
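
The whitelist check itself is tiny; a sketch of its semantics (illustrative,
mirroring the x86_spec_ctrl_mask logic in the diff below: the mask holds the
bits a caller is NOT allowed to pass):

    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS (1ULL << 0)

    static uint64_t spec_ctrl_mask = ~SPEC_CTRL_IBRS;

    static int spec_ctrl_valid(uint64_t val)
    {
        /* any bit outside the whitelist makes the value invalid */
        return (val & spec_ctrl_mask) == 0;
    }

    int main(void)
    {
        printf("IBRS alone: %s\n", spec_ctrl_valid(SPEC_CTRL_IBRS) ? "ok" : "rejected");
        printf("bit 5:      %s\n", spec_ctrl_valid(1ULL << 5) ? "ok" : "rejected");
        return 0;
    }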

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/kernel/cpu/bugs.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -35,6 +35,12 @@ static void __init ssb_select_mitigation
*/
static u64 __ro_after_init x86_spec_ctrl_base;

+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -117,7 +123,7 @@ static enum spectre_v2_mitigation spectr

void x86_spec_ctrl_set(u64 val)
{
- if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
+ if (val & x86_spec_ctrl_mask)
WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
else
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
@@ -459,6 +465,7 @@ static enum ssb_mitigation_cmd __init __
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+ x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
x86_spec_ctrl_set(SPEC_CTRL_RDS);
break;
case X86_VENDOR_AMD:
@@ -482,7 +489,7 @@ static void ssb_select_mitigation()
void x86_spec_ctrl_setup_ap(void)
{
if (boot_cpu_has(X86_FEATURE_IBRS))
- x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
}

#ifdef CONFIG_SYSFS

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:24 -0400
Subject: x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 764f3c21588a059cd783c6ba0734d4db2d72822d upstream

AMD does not need the Speculative Store Bypass mitigation to be enabled.

The parameters for this are already available and can be set via MSR
C001_1020. Each family uses a different bit in that MSR for this.

[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
into the bugs code as that's the right thing to do and also required
to prepare for dynamic enable/disable ]
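
A stand-alone sketch of the per-family bit selection hardwired in the diff
below (families 0x15/0x16/0x17 use bits 54/33/10 of MSR C001_1020;
illustrative user-space code, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    static int amd_ssb_bit(unsigned int family)
    {
        switch (family) {
        case 0x15: return 54;
        case 0x16: return 33;
        case 0x17: return 10;
        default:   return -1;   /* no known LS_CFG disable bit */
        }
    }

    int main(void)
    {
        int bit = amd_ssb_bit(0x17);

        if (bit >= 0)
            printf("LS_CFG disable mask: 0x%llx\n",
                   (unsigned long long)1 << bit);
        return 0;
    }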

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/include/asm/nospec-branch.h | 4 ++++
arch/x86/kernel/cpu/amd.c | 26 ++++++++++++++++++++++++++
arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++++++++++++-
arch/x86/kernel/cpu/common.c | 4 ++++
5 files changed, 61 insertions(+), 1 deletion(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -215,6 +215,7 @@
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -244,6 +244,10 @@ enum ssb_mitigation {
SPEC_STORE_BYPASS_DISABLE,
};

+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_rds_mask;
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
+#include <asm/nospec-branch.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
@@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_
rdmsrl(MSR_FAM10H_NODE_ID, value);
nodes_per_socket = ((value >> 3) & 7) + 1;
}
+
+ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+ unsigned int bit;
+
+ switch (c->x86) {
+ case 0x15: bit = 54; break;
+ case 0x16: bit = 33; break;
+ case 0x17: bit = 10; break;
+ default: return;
+ }
+ /*
+ * Try to cache the base value so further operations can
+ * avoid RMW. If that faults, do not enable RDS.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ setup_force_cpu_cap(X86_FEATURE_RDS);
+ setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+ x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+ }
+ }
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
@@ -898,6 +919,11 @@ static void init_amd(struct cpuinfo_x86
/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+ set_cpu_cap(c, X86_FEATURE_RDS);
+ set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+ }
}

#ifdef CONFIG_X86_32
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -41,6 +41,13 @@ static u64 __ro_after_init x86_spec_ctrl
*/
static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ */
+u64 __ro_after_init x86_amd_ls_cfg_base;
+u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -52,7 +59,8 @@ void __init check_bugs(void)

/*
* Read the SPEC_CTRL MSR to account for reserved bits which may
- * have unknown values.
+ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+ * init code as it is not enumerated and depends on the family.
*/
if (boot_cpu_has(X86_FEATURE_IBRS))
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -154,6 +162,14 @@ void x86_spec_ctrl_restore_host(u64 gues
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

+static void x86_amd_rds_enable(void)
+{
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+
+ if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
+
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

@@ -443,6 +459,11 @@ static enum ssb_mitigation_cmd __init __

switch (cmd) {
case SPEC_STORE_BYPASS_CMD_AUTO:
+ /*
+ * AMD platforms by default don't need SSB mitigation.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ break;
case SPEC_STORE_BYPASS_CMD_ON:
mode = SPEC_STORE_BYPASS_DISABLE;
break;
@@ -469,6 +490,7 @@ static enum ssb_mitigation_cmd __init __
x86_spec_ctrl_set(SPEC_CTRL_RDS);
break;
case X86_VENDOR_AMD:
+ x86_amd_rds_enable();
break;
}
}
@@ -490,6 +512,9 @@ static void ssb_select_mitigation()
{
if (boot_cpu_has(X86_FEATURE_IBRS))
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+ x86_amd_rds_enable();
}

#ifdef CONFIG_SYSFS
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -934,6 +934,10 @@ static const __initconst struct x86_cpu_
{ X86_VENDOR_CENTAUR, 5, },
{ X86_VENDOR_INTEL, 5, },
{ X86_VENDOR_NSC, 5, },
+ { X86_VENDOR_AMD, 0x12, },
+ { X86_VENDOR_AMD, 0x11, },
+ { X86_VENDOR_AMD, 0x10, },
+ { X86_VENDOR_AMD, 0xf, },
{ X86_VENDOR_ANY, 4, },
{}
};

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 25 Apr 2018 22:04:25 -0400
Subject: x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit da39556f66f5cfe8f9c989206974f1cb16ca5d7c upstream

Expose the CPUID.7.EDX[31] bit to the guest, and also guard against various
combinations of SPEC_CTRL MSR values.

The handling of the MSR (to take into account the host value of SPEC_CTRL
Bit(2)) is taken care of in patch:

KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
arch/x86/kvm/cpuid.c | 2 +-
arch/x86/kvm/vmx.c | 8 +++++---
2 files changed, 6 insertions(+), 4 deletions(-)

--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -402,7 +402,7 @@ static inline int __do_cpuid_ent(struct

/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
F(ARCH_CAPABILITIES);

/* all calls to cpuid_count() should be made on the same cpu */
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3276,7 +3276,8 @@ static int vmx_get_msr(struct kvm_vcpu *
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
return 1;

msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3397,11 +3398,12 @@ static int vmx_set_msr(struct kvm_vcpu *
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
return 1;

/* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
return 1;

vmx->spec_ctrl = data;

From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Sun, 29 Apr 2018 15:01:37 +0200
|
|
Subject: x86/speculation: Create spec-ctrl.h to avoid include hell
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit 28a2775217b17208811fa43a9e96bd1fdf417b86 upstream
|
|
|
|
Having everything in nospec-branch.h creates a hell of dependencies when
|
|
adding the prctl based switching mechanism. Move everything which is not
|
|
required in nospec-branch.h to spec-ctrl.h and fix up the includes in the
|
|
relevant files.
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
|
Reviewed-by: Ingo Molnar <mingo@kernel.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
arch/x86/include/asm/nospec-branch.h | 14 --------------
|
|
arch/x86/include/asm/spec-ctrl.h | 21 +++++++++++++++++++++
|
|
arch/x86/kernel/cpu/amd.c | 2 +-
|
|
arch/x86/kernel/cpu/bugs.c | 2 +-
|
|
arch/x86/kvm/svm.c | 2 +-
|
|
arch/x86/kvm/vmx.c | 2 +-
|
|
6 files changed, 25 insertions(+), 18 deletions(-)
|
|
create mode 100644 arch/x86/include/asm/spec-ctrl.h
|
|
|
|
--- a/arch/x86/include/asm/nospec-branch.h
|
|
+++ b/arch/x86/include/asm/nospec-branch.h
|
|
@@ -228,26 +228,12 @@ enum spectre_v2_mitigation {
|
|
extern void x86_spec_ctrl_set(u64);
|
|
extern u64 x86_spec_ctrl_get_default(void);
|
|
|
|
-/*
|
|
- * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
|
|
- * the guest has, while on VMEXIT we restore the host view. This
|
|
- * would be easier if SPEC_CTRL were architecturally maskable or
|
|
- * shadowable for guests but this is not (currently) the case.
|
|
- * Takes the guest view of SPEC_CTRL MSR as a parameter.
|
|
- */
|
|
-extern void x86_spec_ctrl_set_guest(u64);
|
|
-extern void x86_spec_ctrl_restore_host(u64);
|
|
-
|
|
/* The Speculative Store Bypass disable variants */
|
|
enum ssb_mitigation {
|
|
SPEC_STORE_BYPASS_NONE,
|
|
SPEC_STORE_BYPASS_DISABLE,
|
|
};
|
|
|
|
-/* AMD specific Speculative Store Bypass MSR data */
|
|
-extern u64 x86_amd_ls_cfg_base;
|
|
-extern u64 x86_amd_ls_cfg_rds_mask;
|
|
-
|
|
extern char __indirect_thunk_start[];
|
|
extern char __indirect_thunk_end[];
|
|
|
|
--- /dev/null
|
|
+++ b/arch/x86/include/asm/spec-ctrl.h
|
|
@@ -0,0 +1,21 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0 */
|
|
+#ifndef _ASM_X86_SPECCTRL_H_
|
|
+#define _ASM_X86_SPECCTRL_H_
|
|
+
|
|
+#include <asm/nospec-branch.h>
|
|
+
|
|
+/*
|
|
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
|
|
+ * the guest has, while on VMEXIT we restore the host view. This
|
|
+ * would be easier if SPEC_CTRL were architecturally maskable or
|
|
+ * shadowable for guests but this is not (currently) the case.
|
|
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
|
|
+ */
|
|
+extern void x86_spec_ctrl_set_guest(u64);
|
|
+extern void x86_spec_ctrl_restore_host(u64);
|
|
+
|
|
+/* AMD specific Speculative Store Bypass MSR data */
|
|
+extern u64 x86_amd_ls_cfg_base;
|
|
+extern u64 x86_amd_ls_cfg_rds_mask;
|
|
+
|
|
+#endif
|
|
--- a/arch/x86/kernel/cpu/amd.c
|
|
+++ b/arch/x86/kernel/cpu/amd.c
|
|
@@ -10,7 +10,7 @@
|
|
#include <asm/processor.h>
|
|
#include <asm/apic.h>
|
|
#include <asm/cpu.h>
|
|
-#include <asm/nospec-branch.h>
|
|
+#include <asm/spec-ctrl.h>
|
|
#include <asm/smp.h>
|
|
#include <asm/pci-direct.h>
|
|
#include <asm/delay.h>
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -13,7 +13,7 @@
|
|
#include <linux/cpu.h>
|
|
#include <linux/module.h>
|
|
|
|
-#include <asm/nospec-branch.h>
|
|
+#include <asm/spec-ctrl.h>
|
|
#include <asm/cmdline.h>
|
|
#include <asm/bugs.h>
|
|
#include <asm/processor.h>
|
|
--- a/arch/x86/kvm/svm.c
|
|
+++ b/arch/x86/kvm/svm.c
|
|
@@ -50,7 +50,7 @@
|
|
#include <asm/kvm_para.h>
|
|
#include <asm/irq_remapping.h>
|
|
#include <asm/microcode.h>
|
|
-#include <asm/nospec-branch.h>
|
|
+#include <asm/spec-ctrl.h>
|
|
|
|
#include <asm/virtext.h>
|
|
#include "trace.h"
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -52,7 +52,7 @@
|
|
#include <asm/irq_remapping.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/microcode.h>
|
|
-#include <asm/nospec-branch.h>
|
|
+#include <asm/spec-ctrl.h>
|
|
|
|
#include "trace.h"
|
|
#include "pmu.h"
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Sun, 29 Apr 2018 15:20:11 +0200
|
|
Subject: prctl: Add speculation control prctls
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit b617cfc858161140d69cc0b5cc211996b557a1c7 upstream
|
|
|
|
Add two new prctls to control aspects of speculation related vulnerabilities
and their mitigations to provide finer grained control over performance
impacting mitigations.
|
|
|
|
PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
the following meaning:
|
|
|
|
Bit  Define           Description
0    PR_SPEC_PRCTL    Mitigation can be controlled per task by
                      PR_SET_SPECULATION_CTRL
1    PR_SPEC_ENABLE   The speculation feature is enabled, mitigation is
                      disabled
2    PR_SPEC_DISABLE  The speculation feature is disabled, mitigation is
                      enabled
|
|
|
|
If all bits are 0 the CPU is not affected by the speculation misfeature.
|
|
|
|
If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
|
|
available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
|
|
misfeature will fail.
|
|
|
|
PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
is selected by arg2 of prctl(2) per task. arg3 is used to hand in the
control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
|
|
|
|
The common return values are:
|
|
|
|
EINVAL prctl is not implemented by the architecture or the unused prctl()
|
|
arguments are not 0
|
|
ENODEV arg2 is selecting an unsupported speculation misfeature
|
|
|
|
PR_SET_SPECULATION_CTRL has these additional return values:
|
|
|
|
ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor PR_SPEC_DISABLE
|
|
ENXIO prctl control of the selected speculation misfeature is disabled
|
|
|
|
The first supported controllable speculation misfeature is
PR_SPEC_STORE_BYPASS. Add the define so this can be shared between
architectures.
|
|
|
|
Based on an initial patch from Tim Chen and mostly rewritten.
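
A minimal userspace sketch of the resulting interface (assuming the uapi
definitions added below are visible to the compiler; prctl(2) reports errors
via the usual -1/errno convention):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		return 1;		/* EINVAL or ENODEV */
	if (state == PR_SPEC_NOT_AFFECTED)
		puts("not affected");
	else if (state & PR_SPEC_PRCTL)	/* per task control is available */
		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		      PR_SPEC_DISABLE, 0, 0);
	return 0;
}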
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Reviewed-by: Ingo Molnar <mingo@kernel.org>
|
|
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
Documentation/userspace-api/index.rst | 1
|
|
Documentation/userspace-api/spec_ctrl.rst | 86 ++++++++++++++++++++++++++++++
|
|
include/linux/nospec.h | 5 +
|
|
include/uapi/linux/prctl.h | 11 +++
|
|
kernel/sys.c | 22 +++++++
|
|
5 files changed, 125 insertions(+)
|
|
create mode 100644 Documentation/userspace-api/spec_ctrl.rst
|
|
|
|
--- a/Documentation/userspace-api/index.rst
|
|
+++ b/Documentation/userspace-api/index.rst
|
|
@@ -19,6 +19,7 @@ place where this information is gathered
|
|
no_new_privs
|
|
seccomp_filter
|
|
unshare
|
|
+ spec_ctrl
|
|
|
|
.. only:: subproject and html
|
|
|
|
--- /dev/null
|
|
+++ b/Documentation/userspace-api/spec_ctrl.rst
|
|
@@ -0,0 +1,86 @@
|
|
+===================
|
|
+Speculation Control
|
|
+===================
|
|
+
|
|
+Quite some CPUs have speculation related misfeatures which are in fact
+vulnerabilities causing data leaks in various forms even across privilege
+domains.
|
|
+
|
|
+The kernel provides mitigation for such vulnerabilities in various
|
|
+forms. Some of these mitigations are compile time configurable and some on
|
|
+the kernel command line.
|
|
+
|
|
+There is also a class of mitigations which are very expensive, but they can
|
|
+be restricted to a certain set of processes or tasks in controlled
|
|
+environments. The mechanism to control these mitigations is via
|
|
+:manpage:`prctl(2)`.
|
|
+
|
|
+There are two prctl options which are related to this:
|
|
+
|
|
+ * PR_GET_SPECULATION_CTRL
|
|
+
|
|
+ * PR_SET_SPECULATION_CTRL
|
|
+
|
|
+PR_GET_SPECULATION_CTRL
|
|
+-----------------------
|
|
+
|
|
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
|
|
+which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
|
|
+the following meaning:
|
|
+
|
|
+==== ================ ===================================================
|
|
+Bit Define Description
|
|
+==== ================ ===================================================
|
|
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
|
|
+ PR_SET_SPECULATION_CTRL
|
|
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
|
|
+ disabled
|
|
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
|
|
+ enabled
|
|
+==== ================ ===================================================
|
|
+
|
|
+If all bits are 0 the CPU is not affected by the speculation misfeature.
|
|
+
|
|
+If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
|
|
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
|
|
+misfeature will fail.
|
|
+
|
|
+PR_SET_SPECULATION_CTRL
|
|
+-----------------------
|
|
+PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
|
|
+is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
|
|
+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
|
|
+
|
|
+Common error codes
|
|
+------------------
|
|
+======= =================================================================
|
|
+Value Meaning
|
|
+======= =================================================================
|
|
+EINVAL The prctl is not implemented by the architecture or unused
|
|
+ prctl(2) arguments are not 0
|
|
+
|
|
+ENODEV arg2 is selecting an unsupported speculation misfeature
|
|
+======= =================================================================
|
|
+
|
|
+PR_SET_SPECULATION_CTRL error codes
|
|
+-----------------------------------
|
|
+======= =================================================================
|
|
+Value Meaning
|
|
+======= =================================================================
|
|
+0 Success
|
|
+
|
|
+ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
|
|
+ PR_SPEC_DISABLE
|
|
+
|
|
+ENXIO Control of the selected speculation misfeature is not possible.
|
|
+ See PR_GET_SPECULATION_CTRL.
|
|
+======= =================================================================
|
|
+
|
|
+Speculation misfeature controls
|
|
+-------------------------------
|
|
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
|
|
+
|
|
+ Invocations:
|
|
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
|
|
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
|
|
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
|
|
--- a/include/linux/nospec.h
|
|
+++ b/include/linux/nospec.h
|
|
@@ -55,4 +55,9 @@ static inline unsigned long array_index_
|
|
\
|
|
(typeof(_i)) (_i & _mask); \
|
|
})
|
|
+
|
|
+/* Speculation control prctl */
|
|
+int arch_prctl_spec_ctrl_get(unsigned long which);
|
|
+int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
|
|
+
|
|
#endif /* _LINUX_NOSPEC_H */
|
|
--- a/include/uapi/linux/prctl.h
|
|
+++ b/include/uapi/linux/prctl.h
|
|
@@ -207,4 +207,15 @@ struct prctl_mm_map {
|
|
# define PR_SVE_VL_LEN_MASK 0xffff
|
|
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
|
|
|
|
+/* Per task speculation control */
|
|
+#define PR_GET_SPECULATION_CTRL 52
|
|
+#define PR_SET_SPECULATION_CTRL 53
|
|
+/* Speculation control variants */
|
|
+# define PR_SPEC_STORE_BYPASS 0
|
|
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
|
|
+# define PR_SPEC_NOT_AFFECTED 0
|
|
+# define PR_SPEC_PRCTL (1UL << 0)
|
|
+# define PR_SPEC_ENABLE (1UL << 1)
|
|
+# define PR_SPEC_DISABLE (1UL << 2)
|
|
+
|
|
#endif /* _LINUX_PRCTL_H */
|
|
--- a/kernel/sys.c
|
|
+++ b/kernel/sys.c
|
|
@@ -61,6 +61,8 @@
|
|
#include <linux/uidgid.h>
|
|
#include <linux/cred.h>
|
|
|
|
+#include <linux/nospec.h>
|
|
+
|
|
#include <linux/kmsg_dump.h>
|
|
/* Move somewhere else to avoid recompiling? */
|
|
#include <generated/utsrelease.h>
|
|
@@ -2190,6 +2192,16 @@ static int propagate_has_child_subreaper
|
|
return 1;
|
|
}
|
|
|
|
+int __weak arch_prctl_spec_ctrl_get(unsigned long which)
|
|
+{
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
|
|
+{
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
|
|
unsigned long, arg4, unsigned long, arg5)
|
|
{
|
|
@@ -2398,6 +2410,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
|
|
case PR_SVE_GET_VL:
|
|
error = SVE_GET_VL();
|
|
break;
|
|
+ case PR_GET_SPECULATION_CTRL:
|
|
+ if (arg3 || arg4 || arg5)
|
|
+ return -EINVAL;
|
|
+ error = arch_prctl_spec_ctrl_get(arg2);
|
|
+ break;
|
|
+ case PR_SET_SPECULATION_CTRL:
|
|
+ if (arg4 || arg5)
|
|
+ return -EINVAL;
|
|
+ error = arch_prctl_spec_ctrl_set(arg2, arg3);
|
|
+ break;
|
|
default:
|
|
error = -EINVAL;
|
|
break;
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Sun, 29 Apr 2018 15:21:42 +0200
|
|
Subject: x86/process: Allow runtime control of Speculative Store Bypass
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit 885f82bfbc6fefb6664ea27965c3ab9ac4194b8c upstream
|
|
|
|
The Speculative Store Bypass vulnerability can be mitigated with the
|
|
Reduced Data Speculation (RDS) feature. To allow finer grained control of
|
|
this eventually expensive mitigation a per task mitigation control is
|
|
required.
|
|
|
|
Add a new TIF_RDS flag and put it into the group of TIF flags which are
|
|
evaluated for mismatch in switch_to(). If these bits differ in the previous
|
|
and the next task, then the slow path function __switch_to_xtra() is
|
|
invoked. Implement the TIF_RDS dependent mitigation control in the slow
|
|
path.
|
|
|
|
If the prctl for controlling Speculative Store Bypass is disabled or no
|
|
task uses the prctl then there is no overhead in the switch_to() fast
|
|
path.
|
|
|
|
Update the KVM related speculation control functions to take TIF_RDS into
account as well.
|
|
|
|
Based on a patch from Tim Chen. Completely rewritten.
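
The TIF bit is mapped onto the SPEC_CTRL MSR bit with a plain shift. A
standalone sketch of that arithmetic, using the constants this patch
introduces (nothing below is kernel API):

#include <stdio.h>

#define SPEC_CTRL_RDS_SHIFT	2		/* bit 2 in MSR_IA32_SPEC_CTRL */
#define TIF_RDS			5		/* bit 5 in the thread flags */
#define _TIF_RDS		(1UL << TIF_RDS)

int main(void)
{
	unsigned long tifn = _TIF_RDS;		/* task requested RDS */

	/* 0x20 >> (5 - 2) == 0x4, i.e. SPEC_CTRL_RDS */
	printf("0x%lx\n", (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT));
	return 0;
}

This is also why the patch adds the BUILD_BUG_ON(): the right shift is only
valid while TIF_RDS is not smaller than SPEC_CTRL_RDS_SHIFT.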
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Reviewed-by: Ingo Molnar <mingo@kernel.org>
|
|
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
arch/x86/include/asm/msr-index.h | 3 ++-
|
|
arch/x86/include/asm/spec-ctrl.h | 17 +++++++++++++++++
|
|
arch/x86/include/asm/thread_info.h | 4 +++-
|
|
arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++-----
|
|
arch/x86/kernel/process.c | 22 ++++++++++++++++++++++
|
|
5 files changed, 65 insertions(+), 7 deletions(-)
|
|
|
|
--- a/arch/x86/include/asm/msr-index.h
|
|
+++ b/arch/x86/include/asm/msr-index.h
|
|
@@ -42,7 +42,8 @@
|
|
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
|
|
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
|
|
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
|
|
-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
|
|
+#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
|
|
+#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
|
|
|
|
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
|
|
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
|
|
--- a/arch/x86/include/asm/spec-ctrl.h
|
|
+++ b/arch/x86/include/asm/spec-ctrl.h
|
|
@@ -2,6 +2,7 @@
|
|
#ifndef _ASM_X86_SPECCTRL_H_
|
|
#define _ASM_X86_SPECCTRL_H_
|
|
|
|
+#include <linux/thread_info.h>
|
|
#include <asm/nospec-branch.h>
|
|
|
|
/*
|
|
@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u
|
|
extern u64 x86_amd_ls_cfg_base;
|
|
extern u64 x86_amd_ls_cfg_rds_mask;
|
|
|
|
+/* The Intel SPEC CTRL MSR base value cache */
|
|
+extern u64 x86_spec_ctrl_base;
|
|
+
|
|
+static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
|
|
+{
|
|
+ BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
|
|
+ return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
|
|
+}
|
|
+
|
|
+static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
|
|
+{
|
|
+ return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
|
|
+}
|
|
+
|
|
+extern void speculative_store_bypass_update(void);
|
|
+
|
|
#endif
|
|
--- a/arch/x86/include/asm/thread_info.h
|
|
+++ b/arch/x86/include/asm/thread_info.h
|
|
@@ -79,6 +79,7 @@ struct thread_info {
|
|
#define TIF_SIGPENDING 2 /* signal pending */
|
|
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
|
|
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
|
|
+#define TIF_RDS 5 /* Reduced data speculation */
|
|
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
|
|
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
|
|
#define TIF_SECCOMP 8 /* secure computing */
|
|
@@ -105,6 +106,7 @@ struct thread_info {
|
|
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
|
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
|
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
|
+#define _TIF_RDS (1 << TIF_RDS)
|
|
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
|
|
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
|
|
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
|
@@ -144,7 +146,7 @@ struct thread_info {
|
|
|
|
/* flags to check in __switch_to() */
|
|
#define _TIF_WORK_CTXSW \
|
|
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
|
|
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
|
|
|
|
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
|
|
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -33,7 +33,7 @@ static void __init ssb_select_mitigation
|
|
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
|
|
* writes to SPEC_CTRL contain whatever reserved bits have been set.
|
|
*/
|
|
-static u64 __ro_after_init x86_spec_ctrl_base;
|
|
+u64 __ro_after_init x86_spec_ctrl_base;
|
|
|
|
/*
|
|
* The vendor and possibly platform specific bits which can be modified in
|
|
@@ -140,25 +140,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
|
|
|
|
u64 x86_spec_ctrl_get_default(void)
|
|
{
|
|
- return x86_spec_ctrl_base;
|
|
+ u64 msrval = x86_spec_ctrl_base;
|
|
+
|
|
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
|
|
+ msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
|
|
+ return msrval;
|
|
}
|
|
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
|
|
|
|
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
|
|
{
|
|
+ u64 host = x86_spec_ctrl_base;
|
|
+
|
|
if (!boot_cpu_has(X86_FEATURE_IBRS))
|
|
return;
|
|
- if (x86_spec_ctrl_base != guest_spec_ctrl)
|
|
+
|
|
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
|
|
+ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
|
|
+
|
|
+ if (host != guest_spec_ctrl)
|
|
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
|
|
}
|
|
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
|
|
|
|
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
|
|
{
|
|
+ u64 host = x86_spec_ctrl_base;
|
|
+
|
|
if (!boot_cpu_has(X86_FEATURE_IBRS))
|
|
return;
|
|
- if (x86_spec_ctrl_base != guest_spec_ctrl)
|
|
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
|
|
+
|
|
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
|
|
+ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
|
|
+
|
|
+ if (host != guest_spec_ctrl)
|
|
+ wrmsrl(MSR_IA32_SPEC_CTRL, host);
|
|
}
|
|
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
|
|
|
|
--- a/arch/x86/kernel/process.c
|
|
+++ b/arch/x86/kernel/process.c
|
|
@@ -38,6 +38,7 @@
|
|
#include <asm/switch_to.h>
|
|
#include <asm/desc.h>
|
|
#include <asm/prctl.h>
|
|
+#include <asm/spec-ctrl.h>
|
|
|
|
/*
|
|
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
|
@@ -278,6 +279,24 @@ static inline void switch_to_bitmap(stru
|
|
}
|
|
}
|
|
|
|
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
|
|
+{
|
|
+ u64 msr;
|
|
+
|
|
+ if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
|
|
+ msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
|
|
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
|
|
+ } else {
|
|
+ msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
|
|
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
|
|
+ }
|
|
+}
|
|
+
|
|
+void speculative_store_bypass_update(void)
|
|
+{
|
|
+ __speculative_store_bypass_update(current_thread_info()->flags);
|
|
+}
|
|
+
|
|
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
|
|
struct tss_struct *tss)
|
|
{
|
|
@@ -309,6 +328,9 @@ void __switch_to_xtra(struct task_struct
|
|
|
|
if ((tifp ^ tifn) & _TIF_NOCPUID)
|
|
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
|
|
+
|
|
+ if ((tifp ^ tifn) & _TIF_RDS)
|
|
+ __speculative_store_bypass_update(tifn);
|
|
}
|
|
|
|
/*
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Sun, 29 Apr 2018 15:26:40 +0200
|
|
Subject: x86/speculation: Add prctl for Speculative Store Bypass mitigation
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit a73ec77ee17ec556fe7f165d00314cb7c047b1ac upstream
|
|
|
|
Add prctl based control for Speculative Store Bypass mitigation and make it
|
|
the default mitigation for Intel and AMD.
|
|
|
|
Andi Kleen provided the following rationale (slightly redacted):
|
|
|
|
There are multiple levels of impact of Speculative Store Bypass:
|
|
|
|
1) JITed sandbox.
|
|
It cannot invoke system calls, but can do PRIME+PROBE and may have call
|
|
interfaces to other code
|
|
|
|
2) Native code process.
|
|
No protection inside the process at this level.
|
|
|
|
3) Kernel.
|
|
|
|
4) Between processes.
|
|
|
|
The prctl tries to protect against attacks from case (1).
|
|
|
|
If the untrusted code can do random system calls then control is already
|
|
lost in a much worse way. So there needs to be system call protection in
|
|
some way (using a JIT not allowing them or seccomp). Or rather if the
|
|
process can subvert its environment somehow to do the prctl it can already
|
|
execute arbitrary code, which is much worse than SSB.
|
|
|
|
To put it differently, the point of the prctl is to not allow JITed code
|
|
to read data it shouldn't read from its JITed sandbox. If it already has
|
|
escaped its sandbox then it can already read everything it wants in its
|
|
address space, and do much worse.
|
|
|
|
The ability to control Speculative Store Bypass allows enabling the
protection selectively without affecting overall system performance.
|
|
|
|
Based on an initial patch from Tim Chen. Completely rewritten.
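
As a concrete illustration of case (1), a hypothetical JIT host would opt
its worker task into the mitigation before loading untrusted code, roughly:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Disable Speculative Store Bypass for this task only. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0)) {
		perror("PR_SET_SPECULATION_CTRL");	/* e.g. ENXIO */
		return 1;
	}
	/* run_jitted_sandbox(); -- hypothetical, stands for the JIT work */
	return 0;
}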
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
Documentation/admin-guide/kernel-parameters.txt | 6 +
|
|
arch/x86/include/asm/nospec-branch.h | 1
|
|
arch/x86/kernel/cpu/bugs.c | 83 +++++++++++++++++++++---
|
|
3 files changed, 79 insertions(+), 11 deletions(-)
|
|
|
|
--- a/Documentation/admin-guide/kernel-parameters.txt
|
|
+++ b/Documentation/admin-guide/kernel-parameters.txt
|
|
@@ -4025,7 +4025,11 @@
|
|
off - Unconditionally enable Speculative Store Bypass
|
|
auto - Kernel detects whether the CPU model contains an
|
|
implementation of Speculative Store Bypass and
|
|
- picks the most appropriate mitigation
|
|
+ picks the most appropriate mitigation.
|
|
+ prctl - Control Speculative Store Bypass per thread
|
|
+ via prctl. Speculative Store Bypass is enabled
|
|
+ for a process by default. The state of the control
|
|
+ is inherited on fork.
|
|
|
|
Not specifying this option is equivalent to
|
|
spec_store_bypass_disable=auto.
|
|
--- a/arch/x86/include/asm/nospec-branch.h
|
|
+++ b/arch/x86/include/asm/nospec-branch.h
|
|
@@ -232,6 +232,7 @@ extern u64 x86_spec_ctrl_get_default(voi
|
|
enum ssb_mitigation {
|
|
SPEC_STORE_BYPASS_NONE,
|
|
SPEC_STORE_BYPASS_DISABLE,
|
|
+ SPEC_STORE_BYPASS_PRCTL,
|
|
};
|
|
|
|
extern char __indirect_thunk_start[];
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -12,6 +12,8 @@
|
|
#include <linux/utsname.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/nospec.h>
|
|
+#include <linux/prctl.h>
|
|
|
|
#include <asm/spec-ctrl.h>
|
|
#include <asm/cmdline.h>
|
|
@@ -412,20 +414,23 @@ enum ssb_mitigation_cmd {
|
|
SPEC_STORE_BYPASS_CMD_NONE,
|
|
SPEC_STORE_BYPASS_CMD_AUTO,
|
|
SPEC_STORE_BYPASS_CMD_ON,
|
|
+ SPEC_STORE_BYPASS_CMD_PRCTL,
|
|
};
|
|
|
|
static const char *ssb_strings[] = {
|
|
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
|
|
- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
|
|
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
|
|
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
|
|
};
|
|
|
|
static const struct {
|
|
const char *option;
|
|
enum ssb_mitigation_cmd cmd;
|
|
} ssb_mitigation_options[] = {
|
|
- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
|
+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
|
+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
|
+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
|
|
};
|
|
|
|
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
|
|
@@ -475,14 +480,15 @@ static enum ssb_mitigation_cmd __init __
|
|
|
|
switch (cmd) {
|
|
case SPEC_STORE_BYPASS_CMD_AUTO:
|
|
- /*
|
|
- * AMD platforms by default don't need SSB mitigation.
|
|
- */
|
|
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
|
|
- break;
|
|
+ /* Choose prctl as the default mode */
|
|
+ mode = SPEC_STORE_BYPASS_PRCTL;
|
|
+ break;
|
|
case SPEC_STORE_BYPASS_CMD_ON:
|
|
mode = SPEC_STORE_BYPASS_DISABLE;
|
|
break;
|
|
+ case SPEC_STORE_BYPASS_CMD_PRCTL:
|
|
+ mode = SPEC_STORE_BYPASS_PRCTL;
|
|
+ break;
|
|
case SPEC_STORE_BYPASS_CMD_NONE:
|
|
break;
|
|
}
|
|
@@ -493,7 +499,7 @@ static enum ssb_mitigation_cmd __init __
|
|
* - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
|
|
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
|
|
*/
|
|
- if (mode != SPEC_STORE_BYPASS_NONE) {
|
|
+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
|
|
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
|
|
/*
|
|
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
|
|
@@ -524,6 +530,63 @@ static void ssb_select_mitigation()
|
|
|
|
#undef pr_fmt
|
|
|
|
+static int ssb_prctl_set(unsigned long ctrl)
|
|
+{
|
|
+ bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
|
|
+
|
|
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
|
|
+ return -ENXIO;
|
|
+
|
|
+ if (ctrl == PR_SPEC_ENABLE)
|
|
+ clear_tsk_thread_flag(current, TIF_RDS);
|
|
+ else
|
|
+ set_tsk_thread_flag(current, TIF_RDS);
|
|
+
|
|
+ if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
|
|
+ speculative_store_bypass_update();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssb_prctl_get(void)
|
|
+{
|
|
+ switch (ssb_mode) {
|
|
+ case SPEC_STORE_BYPASS_DISABLE:
|
|
+ return PR_SPEC_DISABLE;
|
|
+ case SPEC_STORE_BYPASS_PRCTL:
|
|
+ if (test_tsk_thread_flag(current, TIF_RDS))
|
|
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
|
|
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
|
|
+ default:
|
|
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
|
|
+ return PR_SPEC_ENABLE;
|
|
+ return PR_SPEC_NOT_AFFECTED;
|
|
+ }
|
|
+}
|
|
+
|
|
+int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
|
|
+{
|
|
+ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
|
|
+ return -ERANGE;
|
|
+
|
|
+ switch (which) {
|
|
+ case PR_SPEC_STORE_BYPASS:
|
|
+ return ssb_prctl_set(ctrl);
|
|
+ default:
|
|
+ return -ENODEV;
|
|
+ }
|
|
+}
|
|
+
|
|
+int arch_prctl_spec_ctrl_get(unsigned long which)
|
|
+{
|
|
+ switch (which) {
|
|
+ case PR_SPEC_STORE_BYPASS:
|
|
+ return ssb_prctl_get();
|
|
+ default:
|
|
+ return -ENODEV;
|
|
+ }
|
|
+}
|
|
+
|
|
void x86_spec_ctrl_setup_ap(void)
|
|
{
|
|
if (boot_cpu_has(X86_FEATURE_IBRS))
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Kees Cook <keescook@chromium.org>
|
|
Date: Tue, 1 May 2018 15:19:04 -0700
|
|
Subject: nospec: Allow getting/setting on non-current task
|
|
|
|
From: Kees Cook <keescook@chromium.org>
|
|
|
|
commit 7bbf1373e228840bb0295a2ca26d548ef37f448e upstream
|
|
|
|
Adjust arch_prctl_get/set_spec_ctrl() to operate on tasks other than
|
|
current.
|
|
|
|
This is needed both for /proc/$pid/status queries and for seccomp (since
|
|
thread-syncing can trigger seccomp in non-current threads).
|
|
|
|
Signed-off-by: Kees Cook <keescook@chromium.org>
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++-----------
|
|
include/linux/nospec.h | 7 +++++--
|
|
kernel/sys.c | 9 +++++----
|
|
3 files changed, 26 insertions(+), 17 deletions(-)
|
|
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -530,31 +530,35 @@ static void ssb_select_mitigation()
|
|
|
|
#undef pr_fmt
|
|
|
|
-static int ssb_prctl_set(unsigned long ctrl)
|
|
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
|
|
{
|
|
- bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
|
|
+ bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
|
|
|
|
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
|
|
return -ENXIO;
|
|
|
|
if (ctrl == PR_SPEC_ENABLE)
|
|
- clear_tsk_thread_flag(current, TIF_RDS);
|
|
+ clear_tsk_thread_flag(task, TIF_RDS);
|
|
else
|
|
- set_tsk_thread_flag(current, TIF_RDS);
|
|
+ set_tsk_thread_flag(task, TIF_RDS);
|
|
|
|
- if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
|
|
+ /*
|
|
+ * If being set on non-current task, delay setting the CPU
|
|
+ * mitigation until it is next scheduled.
|
|
+ */
|
|
+ if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
|
|
speculative_store_bypass_update();
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static int ssb_prctl_get(void)
|
|
+static int ssb_prctl_get(struct task_struct *task)
|
|
{
|
|
switch (ssb_mode) {
|
|
case SPEC_STORE_BYPASS_DISABLE:
|
|
return PR_SPEC_DISABLE;
|
|
case SPEC_STORE_BYPASS_PRCTL:
|
|
- if (test_tsk_thread_flag(current, TIF_RDS))
|
|
+ if (test_tsk_thread_flag(task, TIF_RDS))
|
|
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
|
|
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
|
|
default:
|
|
@@ -564,24 +568,25 @@ static int ssb_prctl_get(void)
|
|
}
|
|
}
|
|
|
|
-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
|
|
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
|
+ unsigned long ctrl)
|
|
{
|
|
if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
|
|
return -ERANGE;
|
|
|
|
switch (which) {
|
|
case PR_SPEC_STORE_BYPASS:
|
|
- return ssb_prctl_set(ctrl);
|
|
+ return ssb_prctl_set(task, ctrl);
|
|
default:
|
|
return -ENODEV;
|
|
}
|
|
}
|
|
|
|
-int arch_prctl_spec_ctrl_get(unsigned long which)
|
|
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
|
|
{
|
|
switch (which) {
|
|
case PR_SPEC_STORE_BYPASS:
|
|
- return ssb_prctl_get();
|
|
+ return ssb_prctl_get(task);
|
|
default:
|
|
return -ENODEV;
|
|
}
|
|
--- a/include/linux/nospec.h
|
|
+++ b/include/linux/nospec.h
|
|
@@ -7,6 +7,8 @@
|
|
#define _LINUX_NOSPEC_H
|
|
#include <asm/barrier.h>
|
|
|
|
+struct task_struct;
|
|
+
|
|
/**
|
|
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
|
|
* @index: array element index
|
|
@@ -57,7 +59,8 @@ static inline unsigned long array_index_
|
|
})
|
|
|
|
/* Speculation control prctl */
|
|
-int arch_prctl_spec_ctrl_get(unsigned long which);
|
|
-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
|
|
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
|
|
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
|
+ unsigned long ctrl);
|
|
|
|
#endif /* _LINUX_NOSPEC_H */
|
|
--- a/kernel/sys.c
|
|
+++ b/kernel/sys.c
|
|
@@ -2192,12 +2192,13 @@ static int propagate_has_child_subreaper
|
|
return 1;
|
|
}
|
|
|
|
-int __weak arch_prctl_spec_ctrl_get(unsigned long which)
|
|
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
|
|
{
|
|
return -EINVAL;
|
|
}
|
|
|
|
-int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
|
|
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
|
|
+ unsigned long ctrl)
|
|
{
|
|
return -EINVAL;
|
|
}
|
|
@@ -2413,12 +2414,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
|
|
case PR_GET_SPECULATION_CTRL:
|
|
if (arg3 || arg4 || arg5)
|
|
return -EINVAL;
|
|
- error = arch_prctl_spec_ctrl_get(arg2);
|
|
+ error = arch_prctl_spec_ctrl_get(me, arg2);
|
|
break;
|
|
case PR_SET_SPECULATION_CTRL:
|
|
if (arg4 || arg5)
|
|
return -EINVAL;
|
|
- error = arch_prctl_spec_ctrl_set(arg2, arg3);
|
|
+ error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
|
|
break;
|
|
default:
|
|
error = -EINVAL;
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Kees Cook <keescook@chromium.org>
|
|
Date: Tue, 1 May 2018 15:31:45 -0700
|
|
Subject: proc: Provide details on speculation flaw mitigations
|
|
|
|
From: Kees Cook <keescook@chromium.org>
|
|
|
|
commit fae1fa0fc6cca8beee3ab8ed71d54f9a78fa3f64 upstream
|
|
|
|
As done with seccomp and no_new_privs, also show speculation flaw
|
|
mitigation state in /proc/$pid/status.
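
A quick, purely illustrative way to consume the new field from C; the label
matches the seq_printf() string in the hunk below:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Speculation Store Bypass:", 25))
			fputs(line, stdout);	/* e.g. "thread vulnerable" */
	fclose(f);
	return 0;
}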
|
|
|
|
Signed-off-by: Kees Cook <keescook@chromium.org>
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
fs/proc/array.c | 22 ++++++++++++++++++++++
|
|
1 file changed, 22 insertions(+)
|
|
|
|
--- a/fs/proc/array.c
|
|
+++ b/fs/proc/array.c
|
|
@@ -85,6 +85,7 @@
|
|
#include <linux/delayacct.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/pid_namespace.h>
|
|
+#include <linux/prctl.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/tracehook.h>
|
|
#include <linux/string_helpers.h>
|
|
@@ -347,6 +348,27 @@ static inline void task_seccomp(struct s
|
|
#ifdef CONFIG_SECCOMP
|
|
seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
|
|
#endif
|
|
+ seq_printf(m, "\nSpeculation Store Bypass:\t");
|
|
+ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
|
|
+ case -EINVAL:
|
|
+ seq_printf(m, "unknown");
|
|
+ break;
|
|
+ case PR_SPEC_NOT_AFFECTED:
|
|
+ seq_printf(m, "not vulnerable");
|
|
+ break;
|
|
+ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
|
|
+ seq_printf(m, "thread mitigated");
|
|
+ break;
|
|
+ case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
|
|
+ seq_printf(m, "thread vulnerable");
|
|
+ break;
|
|
+ case PR_SPEC_DISABLE:
|
|
+ seq_printf(m, "globally mitigated");
|
|
+ break;
|
|
+ default:
|
|
+ seq_printf(m, "vulnerable");
|
|
+ break;
|
|
+ }
|
|
seq_putc(m, '\n');
|
|
}
|
|
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Kees Cook <keescook@chromium.org>
|
|
Date: Tue, 1 May 2018 15:07:31 -0700
|
|
Subject: seccomp: Enable speculation flaw mitigations
|
|
|
|
From: Kees Cook <keescook@chromium.org>
|
|
|
|
commit 5c3070890d06ff82eecb808d02d2ca39169533ef upstream
|
|
|
|
When speculation flaw mitigations are opt-in (via prctl), using seccomp
will automatically opt in to these protections, since using seccomp
indicates at least some level of sandboxing is desired.
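
The new spec_mitigate() helper only flips tasks whose state is still
prctl-controllable. A small userspace model of that decision, with the
PR_SPEC_* values from the uapi header (the function name is a stand-in):

#include <stdio.h>

#define PR_SPEC_PRCTL	(1UL << 0)
#define PR_SPEC_ENABLE	(1UL << 1)
#define PR_SPEC_DISABLE	(1UL << 2)

/* Negative errors and states without PR_SPEC_PRCTL are left untouched. */
static long mitigate(long state)
{
	if (state > 0 && (state & PR_SPEC_PRCTL))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
	return state;
}

int main(void)
{
	printf("controllable: 0x%lx\n", mitigate(PR_SPEC_PRCTL | PR_SPEC_ENABLE));
	printf("global off:   0x%lx\n", mitigate(PR_SPEC_DISABLE));
	return 0;
}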
|
|
|
|
Signed-off-by: Kees Cook <keescook@chromium.org>
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
kernel/seccomp.c | 17 +++++++++++++++++
|
|
1 file changed, 17 insertions(+)
|
|
|
|
--- a/kernel/seccomp.c
|
|
+++ b/kernel/seccomp.c
|
|
@@ -19,6 +19,8 @@
|
|
#include <linux/compat.h>
|
|
#include <linux/coredump.h>
|
|
#include <linux/kmemleak.h>
|
|
+#include <linux/nospec.h>
|
|
+#include <linux/prctl.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/sched/task_stack.h>
|
|
#include <linux/seccomp.h>
|
|
@@ -227,6 +229,19 @@ static inline bool seccomp_may_assign_mo
|
|
return true;
|
|
}
|
|
|
|
+/*
|
|
+ * If a given speculation mitigation is opt-in (prctl()-controlled),
|
|
+ * select it, by disabling speculation (enabling mitigation).
|
|
+ */
|
|
+static inline void spec_mitigate(struct task_struct *task,
|
|
+ unsigned long which)
|
|
+{
|
|
+ int state = arch_prctl_spec_ctrl_get(task, which);
|
|
+
|
|
+ if (state > 0 && (state & PR_SPEC_PRCTL))
|
|
+ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
|
|
+}
|
|
+
|
|
static inline void seccomp_assign_mode(struct task_struct *task,
|
|
unsigned long seccomp_mode)
|
|
{
|
|
@@ -238,6 +253,8 @@ static inline void seccomp_assign_mode(s
|
|
* filter) is set.
|
|
*/
|
|
smp_mb__before_atomic();
|
|
+ /* Assume seccomp processes want speculation flaw mitigation. */
|
|
+ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
|
|
set_tsk_thread_flag(task, TIF_SECCOMP);
|
|
}
|
|
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Kees Cook <keescook@chromium.org>
|
|
Date: Thu, 3 May 2018 15:03:30 -0700
|
|
Subject: x86/bugs: Make boot modes __ro_after_init
|
|
|
|
From: Kees Cook <keescook@chromium.org>
|
|
|
|
commit f9544b2b076ca90d887c5ae5d74fab4c21bb7c13 upstream
|
|
|
|
There's no reason for these to be changed after boot.
|
|
|
|
Signed-off-by: Kees Cook <keescook@chromium.org>
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
arch/x86/kernel/cpu/bugs.c | 5 +++--
|
|
1 file changed, 3 insertions(+), 2 deletions(-)
|
|
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -129,7 +129,8 @@ static const char *spectre_v2_strings[]
|
|
#undef pr_fmt
|
|
#define pr_fmt(fmt) "Spectre V2 : " fmt
|
|
|
|
-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
|
|
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
|
|
+ SPECTRE_V2_NONE;
|
|
|
|
void x86_spec_ctrl_set(u64 val)
|
|
{
|
|
@@ -407,7 +408,7 @@ retpoline_auto:
|
|
#undef pr_fmt
|
|
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
|
|
|
|
-static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
|
|
+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
|
|
|
|
/* The kernel command line selection */
|
|
enum ssb_mitigation_cmd {
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Thu, 3 May 2018 22:09:15 +0200
|
|
Subject: prctl: Add force disable speculation
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit 356e4bfff2c5489e016fdb925adbf12a1e3950ee upstream
|
|
|
|
For certain use cases it is desired to enforce mitigations so they cannot
be undone afterwards. That's important for loader stubs which want to
prevent a child from disabling the mitigation again. It will also be used
for seccomp(). The extra preserving of the prctl state for SSB is a
preparatory step for EBPF dynamic speculation control.
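
A userspace sketch of the new semantics, assuming a kernel running in the
prctl SSB mode and the PR_SPEC_FORCE_DISABLE define from this patch: once
force disabled, re-enabling must fail with EPERM.

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
	      PR_SPEC_FORCE_DISABLE, 0, 0);
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_ENABLE, 0, 0) == -1 && errno == EPERM)
		puts("force disable is sticky");
	return 0;
}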
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
Documentation/userspace-api/spec_ctrl.rst | 34 ++++++++++++++++++-----------
|
|
arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++---------
|
|
fs/proc/array.c | 3 ++
|
|
include/linux/sched.h | 10 +++++++-
|
|
include/uapi/linux/prctl.h | 1
|
|
5 files changed, 59 insertions(+), 24 deletions(-)
|
|
|
|
--- a/Documentation/userspace-api/spec_ctrl.rst
|
|
+++ b/Documentation/userspace-api/spec_ctrl.rst
|
|
@@ -25,19 +25,21 @@ PR_GET_SPECULATION_CTRL
|
|
-----------------------
|
|
|
|
PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
|
|
-which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
|
|
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
|
|
the following meaning:
|
|
|
|
-==== ================ ===================================================
|
|
-Bit Define Description
|
|
-==== ================ ===================================================
|
|
-0 PR_SPEC_PRCTL Mitigation can be controlled per task by
|
|
- PR_SET_SPECULATION_CTRL
|
|
-1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
|
|
- disabled
|
|
-2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
|
|
- enabled
|
|
-==== ================ ===================================================
|
|
+==== ===================== ===================================================
|
|
+Bit Define Description
|
|
+==== ===================== ===================================================
|
|
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
|
|
+ PR_SET_SPECULATION_CTRL
|
|
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
|
|
+ disabled
|
|
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
|
|
+ enabled
|
|
+3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
|
|
+ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
|
|
+==== ===================== ===================================================
|
|
|
|
If all bits are 0 the CPU is not affected by the speculation misfeature.
|
|
|
|
@@ -47,9 +49,11 @@ misfeature will fail.
|
|
|
|
PR_SET_SPECULATION_CTRL
|
|
-----------------------
|
|
+
|
|
PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
|
|
is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
|
|
-in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
|
|
+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
|
|
+PR_SPEC_FORCE_DISABLE.
|
|
|
|
Common error codes
|
|
------------------
|
|
@@ -70,10 +74,13 @@ Value Meaning
|
|
0 Success
|
|
|
|
ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
|
|
- PR_SPEC_DISABLE
|
|
+ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
|
|
|
|
ENXIO Control of the selected speculation misfeature is not possible.
|
|
See PR_GET_SPECULATION_CTRL.
|
|
+
|
|
+EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
|
|
+ tried to enable it again.
|
|
======= =================================================================
|
|
|
|
Speculation misfeature controls
|
|
@@ -84,3 +91,4 @@ Speculation misfeature controls
|
|
* prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
|
|
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
|
|
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
|
|
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -533,21 +533,37 @@ static void ssb_select_mitigation()
|
|
|
|
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
|
|
{
|
|
- bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
|
|
+ bool update;
|
|
|
|
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
|
|
return -ENXIO;
|
|
|
|
- if (ctrl == PR_SPEC_ENABLE)
|
|
- clear_tsk_thread_flag(task, TIF_RDS);
|
|
- else
|
|
- set_tsk_thread_flag(task, TIF_RDS);
|
|
+ switch (ctrl) {
|
|
+ case PR_SPEC_ENABLE:
|
|
+ /* If speculation is force disabled, enable is not allowed */
|
|
+ if (task_spec_ssb_force_disable(task))
|
|
+ return -EPERM;
|
|
+ task_clear_spec_ssb_disable(task);
|
|
+ update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
|
|
+ break;
|
|
+ case PR_SPEC_DISABLE:
|
|
+ task_set_spec_ssb_disable(task);
|
|
+ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
|
|
+ break;
|
|
+ case PR_SPEC_FORCE_DISABLE:
|
|
+ task_set_spec_ssb_disable(task);
|
|
+ task_set_spec_ssb_force_disable(task);
|
|
+ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
|
|
+ break;
|
|
+ default:
|
|
+ return -ERANGE;
|
|
+ }
|
|
|
|
/*
|
|
* If being set on non-current task, delay setting the CPU
|
|
* mitigation until it is next scheduled.
|
|
*/
|
|
- if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
|
|
+ if (task == current && update)
|
|
speculative_store_bypass_update();
|
|
|
|
return 0;
|
|
@@ -559,7 +575,9 @@ static int ssb_prctl_get(struct task_str
|
|
case SPEC_STORE_BYPASS_DISABLE:
|
|
return PR_SPEC_DISABLE;
|
|
case SPEC_STORE_BYPASS_PRCTL:
|
|
- if (test_tsk_thread_flag(task, TIF_RDS))
|
|
+ if (task_spec_ssb_force_disable(task))
|
|
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
|
|
+ if (task_spec_ssb_disable(task))
|
|
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
|
|
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
|
|
default:
|
|
@@ -572,9 +590,6 @@ static int ssb_prctl_get(struct task_str
|
|
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
|
unsigned long ctrl)
|
|
{
|
|
- if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
|
|
- return -ERANGE;
|
|
-
|
|
switch (which) {
|
|
case PR_SPEC_STORE_BYPASS:
|
|
return ssb_prctl_set(task, ctrl);
|
|
--- a/fs/proc/array.c
|
|
+++ b/fs/proc/array.c
|
|
@@ -356,6 +356,9 @@ static inline void task_seccomp(struct s
|
|
case PR_SPEC_NOT_AFFECTED:
|
|
seq_printf(m, "not vulnerable");
|
|
break;
|
|
+ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
|
|
+ seq_printf(m, "thread force mitigated");
|
|
+ break;
|
|
case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
|
|
seq_printf(m, "thread mitigated");
|
|
break;
|
|
--- a/include/linux/sched.h
|
|
+++ b/include/linux/sched.h
|
|
@@ -1365,7 +1365,8 @@ static inline bool is_percpu_thread(void
|
|
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
|
|
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
|
|
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
|
|
-
|
|
+#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
|
|
+#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
|
|
|
|
#define TASK_PFA_TEST(name, func) \
|
|
static inline bool task_##func(struct task_struct *p) \
|
|
@@ -1390,6 +1391,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
|
|
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
|
|
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
|
|
|
|
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
|
|
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
|
|
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
|
|
+
|
|
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
|
|
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
|
|
+
|
|
static inline void
|
|
current_restore_flags(unsigned long orig_flags, unsigned long flags)
|
|
{
|
|
--- a/include/uapi/linux/prctl.h
|
|
+++ b/include/uapi/linux/prctl.h
|
|
@@ -217,5 +217,6 @@ struct prctl_mm_map {
|
|
# define PR_SPEC_PRCTL (1UL << 0)
|
|
# define PR_SPEC_ENABLE (1UL << 1)
|
|
# define PR_SPEC_DISABLE (1UL << 2)
|
|
+# define PR_SPEC_FORCE_DISABLE (1UL << 3)
|
|
|
|
#endif /* _LINUX_PRCTL_H */
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Fri, 4 May 2018 09:40:03 +0200
|
|
Subject: seccomp: Use PR_SPEC_FORCE_DISABLE
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit b849a812f7eb92e96d1c8239b06581b2cfd8b275 upstream
|
|
|
|
Use PR_SPEC_FORCE_DISABLE in seccomp() because seccomp does not allow
widening restrictions.
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
kernel/seccomp.c | 2 +-
|
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
|
|
|
--- a/kernel/seccomp.c
|
|
+++ b/kernel/seccomp.c
|
|
@@ -239,7 +239,7 @@ static inline void spec_mitigate(struct
|
|
int state = arch_prctl_spec_ctrl_get(task, which);
|
|
|
|
if (state > 0 && (state & PR_SPEC_PRCTL))
|
|
- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
|
|
+ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
|
|
}
|
|
|
|
static inline void seccomp_assign_mode(struct task_struct *task,
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Kees Cook <keescook@chromium.org>
|
|
Date: Thu, 3 May 2018 14:56:12 -0700
|
|
Subject: seccomp: Add filter flag to opt-out of SSB mitigation
|
|
|
|
From: Kees Cook <keescook@chromium.org>
|
|
|
|
commit 00a02d0c502a06d15e07b857f8ff921e3e402675 upstream
|
|
|
|
If a seccomp user is not interested in Speculative Store Bypass mitigation
|
|
by default, it can set the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag when
|
|
adding filters.
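
A minimal sketch of opting out while installing an allow-all filter; the
fallback define mirrors the selftest hunk below, and PR_SET_NO_NEW_PRIVS is
needed for unprivileged use:

#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW	(1UL << 2)
#endif

int main(void)
{
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = { .len = 1, .filter = allow };

	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog))
		perror("seccomp");
	return 0;
}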
|
|
|
|
Signed-off-by: Kees Cook <keescook@chromium.org>
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
include/linux/seccomp.h | 5 +++--
|
|
include/uapi/linux/seccomp.h | 5 +++--
|
|
kernel/seccomp.c | 19 +++++++++++--------
|
|
tools/testing/selftests/seccomp/seccomp_bpf.c | 22 +++++++++++++++++++---
|
|
4 files changed, 36 insertions(+), 15 deletions(-)
|
|
|
|
--- a/include/linux/seccomp.h
|
|
+++ b/include/linux/seccomp.h
|
|
@@ -4,8 +4,9 @@
|
|
|
|
#include <uapi/linux/seccomp.h>
|
|
|
|
-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
|
|
- SECCOMP_FILTER_FLAG_LOG)
|
|
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
|
|
+ SECCOMP_FILTER_FLAG_LOG | \
|
|
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW)
|
|
|
|
#ifdef CONFIG_SECCOMP
|
|
|
|
--- a/include/uapi/linux/seccomp.h
|
|
+++ b/include/uapi/linux/seccomp.h
|
|
@@ -17,8 +17,9 @@
|
|
#define SECCOMP_GET_ACTION_AVAIL 2
|
|
|
|
/* Valid flags for SECCOMP_SET_MODE_FILTER */
|
|
-#define SECCOMP_FILTER_FLAG_TSYNC 1
|
|
-#define SECCOMP_FILTER_FLAG_LOG 2
|
|
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
|
|
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
|
|
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
|
|
|
|
/*
|
|
* All BPF programs must return a 32-bit value.
|
|
--- a/kernel/seccomp.c
|
|
+++ b/kernel/seccomp.c
|
|
@@ -243,7 +243,8 @@ static inline void spec_mitigate(struct
|
|
}
|
|
|
|
static inline void seccomp_assign_mode(struct task_struct *task,
|
|
- unsigned long seccomp_mode)
|
|
+ unsigned long seccomp_mode,
|
|
+ unsigned long flags)
|
|
{
|
|
assert_spin_locked(&task->sighand->siglock);
|
|
|
|
@@ -253,8 +254,9 @@ static inline void seccomp_assign_mode(s
|
|
* filter) is set.
|
|
*/
|
|
smp_mb__before_atomic();
|
|
- /* Assume seccomp processes want speculation flaw mitigation. */
|
|
- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
|
|
+ /* Assume default seccomp processes want spec flaw mitigation. */
|
|
+ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
|
|
+ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
|
|
set_tsk_thread_flag(task, TIF_SECCOMP);
|
|
}
|
|
|
|
@@ -322,7 +324,7 @@ static inline pid_t seccomp_can_sync_thr
|
|
* without dropping the locks.
|
|
*
|
|
*/
|
|
-static inline void seccomp_sync_threads(void)
|
|
+static inline void seccomp_sync_threads(unsigned long flags)
|
|
{
|
|
struct task_struct *thread, *caller;
|
|
|
|
@@ -363,7 +365,8 @@ static inline void seccomp_sync_threads(
|
|
* allow one thread to transition the other.
|
|
*/
|
|
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
|
|
- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
|
|
+ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
|
|
+ flags);
|
|
}
|
|
}
|
|
|
|
@@ -486,7 +489,7 @@ static long seccomp_attach_filter(unsign
|
|
|
|
/* Now that the new filter is in place, synchronize to all threads. */
|
|
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
|
|
- seccomp_sync_threads();
|
|
+ seccomp_sync_threads(flags);
|
|
|
|
return 0;
|
|
}
|
|
@@ -835,7 +838,7 @@ static long seccomp_set_mode_strict(void
|
|
#ifdef TIF_NOTSC
|
|
disable_TSC();
|
|
#endif
|
|
- seccomp_assign_mode(current, seccomp_mode);
|
|
+ seccomp_assign_mode(current, seccomp_mode, 0);
|
|
ret = 0;
|
|
|
|
out:
|
|
@@ -893,7 +896,7 @@ static long seccomp_set_mode_filter(unsi
|
|
/* Do not free the successfully attached filter. */
|
|
prepared = NULL;
|
|
|
|
- seccomp_assign_mode(current, seccomp_mode);
|
|
+ seccomp_assign_mode(current, seccomp_mode, flags);
|
|
out:
|
|
spin_unlock_irq(¤t->sighand->siglock);
|
|
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
|
|
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
|
|
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
|
|
@@ -134,11 +134,15 @@ struct seccomp_data {
|
|
#endif
|
|
|
|
#ifndef SECCOMP_FILTER_FLAG_TSYNC
|
|
-#define SECCOMP_FILTER_FLAG_TSYNC 1
|
|
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
|
|
#endif
|
|
|
|
#ifndef SECCOMP_FILTER_FLAG_LOG
|
|
-#define SECCOMP_FILTER_FLAG_LOG 2
|
|
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
|
|
+#endif
|
|
+
|
|
+#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
|
|
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
|
|
#endif
|
|
|
|
#ifndef PTRACE_SECCOMP_GET_METADATA
|
|
@@ -2072,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock)
|
|
TEST(detect_seccomp_filter_flags)
|
|
{
|
|
unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
|
|
- SECCOMP_FILTER_FLAG_LOG };
|
|
+ SECCOMP_FILTER_FLAG_LOG,
|
|
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW };
|
|
unsigned int flag, all_flags;
|
|
int i;
|
|
long ret;
|
|
|
|
/* Test detection of known-good filter flags */
|
|
for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
|
|
+ int bits = 0;
|
|
+
|
|
flag = flags[i];
|
|
+ /* Make sure the flag is a single bit! */
|
|
+ while (flag) {
|
|
+ if (flag & 0x1)
|
|
+ bits ++;
|
|
+ flag >>= 1;
|
|
+ }
|
|
+ ASSERT_EQ(1, bits);
|
|
+ flag = flags[i];
|
|
+
|
|
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
|
|
ASSERT_NE(ENOSYS, errno) {
|
|
TH_LOG("Kernel does not support seccomp syscall!");
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
Date: Fri, 4 May 2018 15:12:06 +0200
|
|
Subject: seccomp: Move speculation mitigation control to arch code
|
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
commit 8bf37d8c067bb7eb8e7c381bdadf9bd89182b6bc upstream
|
|
|
|
The mitigation control is simpler to implement in architecture code as it
avoids the extra function call to check the mode. Aside from that, having an
explicit seccomp enabled mode in the architecture mitigations would require
even more workarounds.
|
|
|
|
Move it into architecture code and provide a weak function in the seccomp
|
|
code. Remove the 'which' argument as this allows the architecture to decide
|
|
which mitigations are relevant for seccomp.
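
The seccomp side now carries only a weak stub that architecture code may
override. The linkage pattern in a nutshell, as a userspace model using the
GCC/Clang weak attribute:

#include <stdio.h>

/* Weak default, like the stub in kernel/seccomp.c: does nothing. */
void __attribute__((weak)) arch_seccomp_spec_mitigate(void)
{
}

/*
 * If another translation unit provides a strong arch_seccomp_spec_mitigate(),
 * the linker prefers it and this stub is never called.
 */
int main(void)
{
	arch_seccomp_spec_mitigate();
	puts("stub or arch override ran");
	return 0;
}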
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
arch/x86/kernel/cpu/bugs.c | 29 ++++++++++++++++++-----------
|
|
include/linux/nospec.h | 2 ++
|
|
kernel/seccomp.c | 15 ++-------------
|
|
3 files changed, 22 insertions(+), 24 deletions(-)
|
|
|
|
--- a/arch/x86/kernel/cpu/bugs.c
|
|
+++ b/arch/x86/kernel/cpu/bugs.c
|
|
@@ -569,6 +569,24 @@ static int ssb_prctl_set(struct task_str
|
|
return 0;
|
|
}
|
|
|
|
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
|
+ unsigned long ctrl)
|
|
+{
|
|
+ switch (which) {
|
|
+ case PR_SPEC_STORE_BYPASS:
|
|
+ return ssb_prctl_set(task, ctrl);
|
|
+ default:
|
|
+ return -ENODEV;
|
|
+ }
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_SECCOMP
|
|
+void arch_seccomp_spec_mitigate(struct task_struct *task)
|
|
+{
|
|
+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
|
|
+}
|
|
+#endif
|
|
+
|
|
static int ssb_prctl_get(struct task_struct *task)
|
|
{
|
|
switch (ssb_mode) {
|
|
@@ -587,17 +605,6 @@ static int ssb_prctl_get(struct task_str
|
|
}
|
|
}
|
|
|
|
-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
|
- unsigned long ctrl)
|
|
-{
|
|
- switch (which) {
|
|
- case PR_SPEC_STORE_BYPASS:
|
|
- return ssb_prctl_set(task, ctrl);
|
|
- default:
|
|
- return -ENODEV;
|
|
- }
|
|
-}
|
|
-
|
|
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
|
|
{
|
|
switch (which) {
|
|
--- a/include/linux/nospec.h
|
|
+++ b/include/linux/nospec.h
|
|
@@ -62,5 +62,7 @@ static inline unsigned long array_index_
|
|
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
|
|
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
|
unsigned long ctrl);
|
|
+/* Speculation control for seccomp enforced mitigation */
|
|
+void arch_seccomp_spec_mitigate(struct task_struct *task);
|
|
|
|
#endif /* _LINUX_NOSPEC_H */
|
|
--- a/kernel/seccomp.c
|
|
+++ b/kernel/seccomp.c
|
|
@@ -229,18 +229,7 @@ static inline bool seccomp_may_assign_mo
|
|
return true;
|
|
}
|
|
|
|
-/*
|
|
- * If a given speculation mitigation is opt-in (prctl()-controlled),
|
|
- * select it, by disabling speculation (enabling mitigation).
|
|
- */
|
|
-static inline void spec_mitigate(struct task_struct *task,
|
|
- unsigned long which)
|
|
-{
|
|
- int state = arch_prctl_spec_ctrl_get(task, which);
|
|
-
|
|
- if (state > 0 && (state & PR_SPEC_PRCTL))
|
|
- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
|
|
-}
|
|
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
|
|
|
|
static inline void seccomp_assign_mode(struct task_struct *task,
|
|
unsigned long seccomp_mode,
|
|
@@ -256,7 +245,7 @@ static inline void seccomp_assign_mode(s
|
|
smp_mb__before_atomic();
|
|
/* Assume default seccomp processes want spec flaw mitigation. */
|
|
if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
|
|
- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
|
|
+ arch_seccomp_spec_mitigate(task);
|
|
set_tsk_thread_flag(task, TIF_SECCOMP);
|
|
}
|
|
|
|
From foo@baz Mon May 21 21:56:07 CEST 2018
|
|
From: Kees Cook <keescook@chromium.org>
|
|
Date: Thu, 3 May 2018 14:37:54 -0700
|
|
Subject: x86/speculation: Make "seccomp" the default mode for Speculative Store Bypass
|
|
|
|
From: Kees Cook <keescook@chromium.org>
|
|
|
|
commit f21b53b20c754021935ea43364dbf53778eeba32 upstream
|
|
|
|
Unless explicitly opted out of, anything running under seccomp will have
|
|
SSB mitigations enabled. Choosing the "prctl" mode will disable this.
|
|
|
|
[ tglx: Adjusted it to the new arch_seccomp_spec_mitigate() mechanism ]
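
The selected mode becomes visible to userspace through the strings in
ssb_strings[], exported by the sysfs file added earlier in this series
(path assumed from that patch):

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Mitigation: ... via prctl and seccomp" */
	fclose(f);
	return 0;
}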

Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
Documentation/admin-guide/kernel-parameters.txt | 26 ++++++++++++-------
arch/x86/include/asm/nospec-branch.h | 1
arch/x86/kernel/cpu/bugs.c | 32 +++++++++++++++++-------
3 files changed, 41 insertions(+), 18 deletions(-)

--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4021,19 +4021,27 @@
This parameter controls whether the Speculative Store
Bypass optimization is used.

- on - Unconditionally disable Speculative Store Bypass
- off - Unconditionally enable Speculative Store Bypass
- auto - Kernel detects whether the CPU model contains an
- implementation of Speculative Store Bypass and
- picks the most appropriate mitigation.
- prctl - Control Speculative Store Bypass per thread
- via prctl. Speculative Store Bypass is enabled
- for a process by default. The state of the control
- is inherited on fork.
+ on - Unconditionally disable Speculative Store Bypass
+ off - Unconditionally enable Speculative Store Bypass
+ auto - Kernel detects whether the CPU model contains an
+ implementation of Speculative Store Bypass and
+ picks the most appropriate mitigation. If the
+ CPU is not vulnerable, "off" is selected. If the
+ CPU is vulnerable the default mitigation is
+ architecture and Kconfig dependent. See below.
+ prctl - Control Speculative Store Bypass per thread
+ via prctl. Speculative Store Bypass is enabled
+ for a process by default. The state of the control
+ is inherited on fork.
+ seccomp - Same as "prctl" above, but all seccomp threads
+ will disable SSB unless they explicitly opt out.

Not specifying this option is equivalent to
spec_store_bypass_disable=auto.

+ Default mitigations:
+ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -233,6 +233,7 @@ enum ssb_mitigation {
SPEC_STORE_BYPASS_NONE,
SPEC_STORE_BYPASS_DISABLE,
SPEC_STORE_BYPASS_PRCTL,
+ SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -416,22 +416,25 @@ enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_AUTO,
SPEC_STORE_BYPASS_CMD_ON,
SPEC_STORE_BYPASS_CMD_PRCTL,
+ SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
[SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
- [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
+ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
const char *option;
enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
- { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
@@ -481,8 +484,15 @@ static enum ssb_mitigation_cmd __init __

switch (cmd) {
case SPEC_STORE_BYPASS_CMD_AUTO:
- /* Choose prctl as the default mode */
- mode = SPEC_STORE_BYPASS_PRCTL;
+ case SPEC_STORE_BYPASS_CMD_SECCOMP:
+ /*
+ * Choose prctl+seccomp as the default mode if seccomp is
+ * enabled.
+ */
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ mode = SPEC_STORE_BYPASS_SECCOMP;
+ else
+ mode = SPEC_STORE_BYPASS_PRCTL;
break;
case SPEC_STORE_BYPASS_CMD_ON:
mode = SPEC_STORE_BYPASS_DISABLE;
@@ -530,12 +540,14 @@ static void ssb_select_mitigation()
}

#undef pr_fmt
+#define pr_fmt(fmt) "Speculation prctl: " fmt

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
bool update;

- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+ ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
return -ENXIO;

switch (ctrl) {
@@ -583,7 +595,8 @@ int arch_prctl_spec_ctrl_set(struct task
#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
- ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

@@ -592,6 +605,7 @@ static int ssb_prctl_get(struct task_str
switch (ssb_mode) {
case SPEC_STORE_BYPASS_DISABLE:
return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_SECCOMP:
case SPEC_STORE_BYPASS_PRCTL:
if (task_spec_ssb_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 9 May 2018 21:41:38 +0200
Subject: x86/bugs: Rename _RDS to _SSBD

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 9f65fb29374ee37856dbad847b4e121aab72b510 upstream

Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTRL[2]
as SSBD (Speculative Store Bypass Disable).

Hence changing it.

It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name
is going to be. Following the rename it would be SSBD_NO but that rolls out
to Speculative Store Bypass Disable No.

Also fixed the missing space in X86_FEATURE_AMD_SSBD.

[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]
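
The renamed helpers keep doing the same bit plumbing. As a standalone
illustration (constants as used in this series), the per-task TIF_SSBD flag
is shifted down into the SSBD bit position of the SPEC_CTRL MSR image:

    #define TIF_SSBD             5   /* per-task thread flag bit */
    #define SPEC_CTRL_SSBD_SHIFT 2   /* SSBD bit in MSR_IA32_SPEC_CTRL */

    /* Shift the TIF bit into MSR position; this only works because
     * TIF_SSBD >= SPEC_CTRL_SSBD_SHIFT, which the BUILD_BUG_ON in the
     * spec-ctrl.h hunk below enforces. */
    static unsigned long long tif_to_spec_ctrl(unsigned long tifn)
    {
        return (tifn & (1UL << TIF_SSBD)) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }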

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 4 ++--
arch/x86/include/asm/msr-index.h | 10 +++++-----
arch/x86/include/asm/spec-ctrl.h | 12 ++++++------
arch/x86/include/asm/thread_info.h | 6 +++---
arch/x86/kernel/cpu/amd.c | 14 +++++++-------
arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++------------------
arch/x86/kernel/cpu/common.c | 2 +-
arch/x86/kernel/cpu/intel.c | 2 +-
arch/x86/kernel/process.c | 8 ++++----
arch/x86/kvm/cpuid.c | 2 +-
arch/x86/kvm/vmx.c | 6 +++---
11 files changed, 51 insertions(+), 51 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -215,7 +215,7 @@
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
+#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -335,7 +335,7 @@
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
+#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */

/*
* BUG word(s)
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,8 +42,8 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
-#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
+#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
@@ -70,10 +70,10 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
-#define ARCH_CAP_RDS_NO (1 << 4) /*
+#define ARCH_CAP_SSBD_NO (1 << 4) /*
* Not susceptible to Speculative Store Bypass
- * attack, so no Reduced Data Speculation control
- * required.
+ * attack, so no Speculative Store Bypass
+ * control required.
*/

#define MSR_IA32_BBL_CR_CTL 0x00000119
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u

/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
-extern u64 x86_amd_ls_cfg_rds_mask;
+extern u64 x86_amd_ls_cfg_ssbd_mask;

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

extern void speculative_store_bypass_update(void);
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,7 +79,7 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_RDS 5 /* Reduced data speculation */
+#define TIF_SSBD 5 /* Reduced data speculation */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -106,7 +106,7 @@ struct thread_info {
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
-#define _TIF_RDS (1 << TIF_RDS)
+#define _TIF_SSBD (1 << TIF_SSBD)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -146,7 +146,7 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -567,12 +567,12 @@ static void bsp_init_amd(struct cpuinfo_
}
/*
* Try to cache the base value so further operations can
- * avoid RMW. If that faults, do not enable RDS.
+ * avoid RMW. If that faults, do not enable SSBD.
*/
if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
- setup_force_cpu_cap(X86_FEATURE_RDS);
- setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
- x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
}
}
}
@@ -920,9 +920,9 @@ static void init_amd(struct cpuinfo_x86
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
- set_cpu_cap(c, X86_FEATURE_RDS);
- set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+ set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
}
}

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -45,10 +45,10 @@ static u64 __ro_after_init x86_spec_ctrl

/*
* AMD specific MSR info for Speculative Store Bypass control.
- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
*/
u64 __ro_after_init x86_amd_ls_cfg_base;
-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

void __init check_bugs(void)
{
@@ -146,7 +146,7 @@ u64 x86_spec_ctrl_get_default(void)
u64 msrval = x86_spec_ctrl_base;

if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
@@ -159,7 +159,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
return;

if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -174,18 +174,18 @@ void x86_spec_ctrl_restore_host(u64 gues
return;

if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
{
- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

- if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

@@ -473,7 +473,7 @@ static enum ssb_mitigation_cmd __init __
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;

- if (!boot_cpu_has(X86_FEATURE_RDS))
+ if (!boot_cpu_has(X86_FEATURE_SSBD))
return mode;

cmd = ssb_parse_cmdline();
@@ -507,7 +507,7 @@ static enum ssb_mitigation_cmd __init __
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
*/
if (mode == SPEC_STORE_BYPASS_DISABLE) {
@@ -518,12 +518,12 @@ static enum ssb_mitigation_cmd __init __
*/
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
- x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+ x86_spec_ctrl_set(SPEC_CTRL_SSBD);
break;
case X86_VENDOR_AMD:
- x86_amd_rds_enable();
+ x86_amd_ssb_disable();
break;
}
}
@@ -556,16 +556,16 @@ static int ssb_prctl_set(struct task_str
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
- update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
+ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
break;
case PR_SPEC_DISABLE:
task_set_spec_ssb_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
break;
case PR_SPEC_FORCE_DISABLE:
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
break;
default:
return -ERANGE;
@@ -635,7 +635,7 @@ void x86_spec_ctrl_setup_ap(void)
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
- x86_amd_rds_enable();
+ x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -950,7 +950,7 @@ static void __init cpu_set_bug_bits(stru
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_RDS_NO))
+ !(ia32_cap & ARCH_CAP_SSBD_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

if (x86_match_cpu(cpu_no_speculation))
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -189,7 +189,7 @@ static void early_init_intel(struct cpui
setup_clear_cpu_cap(X86_FEATURE_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
- setup_clear_cpu_cap(X86_FEATURE_RDS);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
}

/*
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -283,11 +283,11 @@ static __always_inline void __speculativ
{
u64 msr;

- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrl(MSR_AMD64_LS_CFG, msr);
} else {
- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
}
@@ -329,7 +329,7 @@ void __switch_to_xtra(struct task_struct
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

- if ((tifp ^ tifn) & _TIF_RDS)
+ if ((tifp ^ tifn) & _TIF_SSBD)
__speculative_store_bypass_update(tifn);
}

--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -402,7 +402,7 @@ static inline int __do_cpuid_ent(struct

/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
F(ARCH_CAPABILITIES);

/* all calls to cpuid_count() should be made on the same cpu */
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3277,7 +3277,7 @@ static int vmx_get_msr(struct kvm_vcpu *
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
return 1;

msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3399,11 +3399,11 @@ static int vmx_set_msr(struct kvm_vcpu *
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
return 1;

/* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
return 1;

vmx->spec_ctrl = data;
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 9 May 2018 21:41:38 +0200
Subject: proc: Use underscores for SSBD in 'status'

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit e96f46ee8587607a828f783daa6eb5b44d25004d upstream

The style for the fields in the 'status' file is either CamelCase or
words_separated_by_underscores; use the latter for the new entry.

Fixes: fae1fa0fc ("proc: Provide details on speculation flaw mitigations")
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
fs/proc/array.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -348,7 +348,7 @@ static inline void task_seccomp(struct s
#ifdef CONFIG_SECCOMP
seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
#endif
- seq_printf(m, "\nSpeculation Store Bypass:\t");
+ seq_printf(m, "\nSpeculation_Store_Bypass:\t");
switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
case -EINVAL:
seq_printf(m, "unknown");
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Borislav Petkov <bp@suse.de>
Date: Tue, 8 May 2018 15:43:45 +0200
Subject: Documentation/spec_ctrl: Do some minor cleanups

From: Borislav Petkov <bp@suse.de>

commit dd0792699c4058e63c0715d9a7c2d40226fcdddc upstream

Fix some typos, improve formulations, and end sentences with a full stop.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
Documentation/userspace-api/spec_ctrl.rst | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -2,13 +2,13 @@
Speculation Control
===================

-Quite some CPUs have speculation related misfeatures which are in fact
-vulnerabilites causing data leaks in various forms even accross privilege
-domains.
+Quite some CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.

The kernel provides mitigation for such vulnerabilities in various
-forms. Some of these mitigations are compile time configurable and some on
-the kernel command line.
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.

There is also a class of mitigations which are very expensive, but they can
be restricted to a certain set of processes or tasks in controlled
@@ -32,18 +32,18 @@ the following meaning:
Bit Define Description
==== ===================== ===================================================
0 PR_SPEC_PRCTL Mitigation can be controlled per task by
- PR_SET_SPECULATION_CTRL
+ PR_SET_SPECULATION_CTRL.
1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
- disabled
+ disabled.
2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
- enabled
+ enabled.
3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
subsequent prctl(..., PR_SPEC_ENABLE) will fail.
==== ===================== ===================================================

If all bits are 0 the CPU is not affected by the speculation misfeature.

-If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
misfeature will fail.

@@ -61,9 +61,9 @@ Common error codes
Value Meaning
======= =================================================================
EINVAL The prctl is not implemented by the architecture or unused
- prctl(2) arguments are not 0
+ prctl(2) arguments are not 0.

-ENODEV arg2 is selecting a not supported speculation misfeature
+ENODEV arg2 is selecting a not supported speculation misfeature.
======= =================================================================

PR_SET_SPECULATION_CTRL error codes
@@ -74,7 +74,7 @@ Value Meaning
0 Success

ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
- PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
+ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.

ENXIO Control of the selected speculation misfeature is not possible.
See PR_GET_SPECULATION_CTRL.
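
As a usage illustration of the interface documented above (not part of this
patch; the prctl constants match include/uapi/linux/prctl.h as extended by
this series, and the fallback defines are only for older headers):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_PRCTL           (1UL << 0)
    #define PR_SPEC_FORCE_DISABLE   (1UL << 3)
    #endif

    int main(void)
    {
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

        if (state < 0 || !(state & PR_SPEC_PRCTL)) {
            fprintf(stderr, "per-task SSB control not available\n");
            return 1;
        }
        /* Cannot be undone; a later PR_SPEC_ENABLE will fail with EPERM. */
        return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                     PR_SPEC_FORCE_DISABLE, 0, 0) ? 1 : 0;
    }
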
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Jiri Kosina <jkosina@suse.cz>
Date: Thu, 10 May 2018 22:47:18 +0200
Subject: x86/bugs: Fix __ssb_select_mitigation() return type

From: Jiri Kosina <jkosina@suse.cz>

commit d66d8ff3d21667b41eddbe86b35ab411e40d8c5f upstream

__ssb_select_mitigation() returns one of the members of enum ssb_mitigation,
not ssb_mitigation_cmd; fix the prototype to reflect that.

Fixes: 24f7fc83b9204 ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kernel/cpu/bugs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -468,7 +468,7 @@ static enum ssb_mitigation_cmd __init ss
return cmd;
}

-static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Jiri Kosina <jkosina@suse.cz>
Date: Thu, 10 May 2018 22:47:32 +0200
Subject: x86/bugs: Make cpu_show_common() static

From: Jiri Kosina <jkosina@suse.cz>

commit 7bb4d366cba992904bffa4820d24e70a3de93e76 upstream

cpu_show_common() is not used outside of arch/x86/kernel/cpu/bugs.c, so
make it static.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kernel/cpu/bugs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -640,7 +640,7 @@ void x86_spec_ctrl_setup_ap(void)

#ifdef CONFIG_SYSFS

-ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
if (!boot_cpu_has_bug(bug))
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Fri, 11 May 2018 16:50:35 -0400
Subject: x86/bugs: Fix the parameters alignment and missing void

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit ffed645e3be0e32f8e9ab068d257aee8d0fe8eec upstream

Fixes: 7bb4d366c ("x86/bugs: Make cpu_show_common() static")
Fixes: 24f7fc83b ("x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation")
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kernel/cpu/bugs.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -531,7 +531,7 @@ static enum ssb_mitigation __init __ssb_
return mode;
}

-static void ssb_select_mitigation()
+static void ssb_select_mitigation(void)
{
ssb_mode = __ssb_select_mitigation();

@@ -641,7 +641,7 @@ void x86_spec_ctrl_setup_ap(void)
#ifdef CONFIG_SYSFS

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
- char *buf, unsigned int bug)
+ char *buf, unsigned int bug)
{
if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Jim Mattson <jmattson@google.com>
Date: Sun, 13 May 2018 17:33:57 -0400
Subject: x86/cpu: Make alternative_msr_write work for 32-bit code

From: Jim Mattson <jmattson@google.com>

commit 5f2b745f5e1304f438f9b2cd03ebc8120b6e0d3b upstream

Cast val and (val >> 32) to (u32), so that they fit in a
general-purpose register in both 32-bit and 64-bit code.

[ tglx: Made it u32 instead of uintptr_t ]
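
A standalone illustration of the point (not the kernel code): WRMSR takes
the low half of the 64-bit value in EAX and the high half in EDX, so each
operand handed to the inline asm must be a 32-bit quantity on both builds:

    typedef unsigned int u32;
    typedef unsigned long long u64;

    /* Split a 64-bit MSR value into the two 32-bit register operands. */
    static void split_msr_value(u64 val, u32 *lo, u32 *hi)
    {
        *lo = (u32)val;          /* goes into EAX */
        *hi = (u32)(val >> 32);  /* goes into EDX */
    }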

Fixes: c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/nospec-branch.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -265,8 +265,8 @@ void alternative_msr_write(unsigned int
{
asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
: : "c" (msr),
- "a" (val),
- "d" (val >> 32),
+ "a" ((u32)val),
+ "d" ((u32)(val >> 32)),
[feature] "i" (feature)
: "memory");
}
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 May 2018 15:21:01 +0200
Subject: KVM: SVM: Move spec control call after restore of GS

From: Thomas Gleixner <tglx@linutronix.de>

commit 15e6c22fd8e5a42c5ed6d487b7c9fe44c2517765 upstream

svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
to determine the host SSBD state of the thread. 'current' is GS based, but
host GS is not yet restored and the access causes a triple fault.

Move the call after the host GS restore.
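
For background, a hypothetical sketch (not the patched code, and it will
not link outside the kernel) of why ordering matters: on x86-64 the kernel
reaches 'current' through a GS-relative per-CPU load, so it must only run
after the host GS base is back in place:

    struct task_struct;

    /* Illustrative only: the real kernel reads the per-CPU
     * 'current_task' variable through the GS segment base. While the
     * guest GS base is still loaded, this load hits guest-controlled
     * memory instead of the host per-CPU area. */
    static inline struct task_struct *current_sketch(void)
    {
        struct task_struct *t;

        asm volatile("movq %%gs:current_task, %0" : "=r" (t));
        return t;
    }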

Fixes: 885f82bfbc6f ("x86/process: Allow runtime control of Speculative Store Bypass")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kvm/svm.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5495,6 +5495,18 @@ static void svm_vcpu_run(struct kvm_vcpu
#endif
);

+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+ loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+ loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
/*
* We do not use IBRS in the kernel. If this vCPU has used the
* SPEC_CTRL MSR it may have left it on; save the value and
@@ -5515,18 +5527,6 @@ static void svm_vcpu_run(struct kvm_vcpu

x86_spec_ctrl_restore_host(svm->spec_ctrl);

- /* Eliminate branch target predictions from guest mode */
- vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
- loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
reload_tss(vcpu);

local_irq_disable();
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Borislav Petkov <bp@suse.de>
Date: Wed, 2 May 2018 18:15:14 +0200
Subject: x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP

From: Borislav Petkov <bp@suse.de>

commit e7c587da125291db39ddf1f49b18e5970adbac17 upstream

Intel and AMD have different CPUID bits, hence for those use synthetic bits
which get set for the respective vendor's CPUs in init_speculation_control(),
so that debacles like the one the commit message of

c65732e4f721 ("x86/cpu: Restore CPUID_8000_0008_EBX reload")

talks about don't happen anymore.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Jörg Otte <jrg.otte@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20180504161815.GG9257@pd.tnic
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 10 ++++++----
arch/x86/kernel/cpu/common.c | 14 ++++++++++----
arch/x86/kvm/cpuid.c | 10 +++++-----
arch/x86/kvm/svm.c | 6 +++---
arch/x86/kvm/vmx.c | 9 ++-------
5 files changed, 26 insertions(+), 23 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -198,7 +198,6 @@
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
@@ -216,6 +215,9 @@
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -276,9 +278,9 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
-#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */

/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -757,17 +757,23 @@ static void init_speculation_control(str
* and they also have a different bit for STIBP support. Also,
* a hypervisor might have set the individual AMD bits even on
* Intel CPUs, for finer-grained selection of what's available.
- *
- * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
- * features, which are visible in /proc/cpuinfo and used by the
- * kernel. So set those accordingly from the Intel bits.
*/
if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
set_cpu_cap(c, X86_FEATURE_IBRS);
set_cpu_cap(c, X86_FEATURE_IBPB);
}
+
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
set_cpu_cap(c, X86_FEATURE_STIBP);
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS))
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+
+ if (cpu_has(c, X86_FEATURE_AMD_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
}

void get_cpu_cap(struct cpuinfo_x86 *c)
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -374,7 +374,7 @@ static inline int __do_cpuid_ent(struct

/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(IBPB) | F(IBRS);
+ F(AMD_IBPB) | F(AMD_IBRS);

/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -643,10 +643,10 @@ static inline int __do_cpuid_ent(struct
entry->eax = g_phys_as | (virt_as << 8);
entry->edx = 0;
/* IBRS and IBPB aren't necessarily present in hardware cpuid */
- if (boot_cpu_has(X86_FEATURE_IBPB))
- entry->ebx |= F(IBPB);
- if (boot_cpu_has(X86_FEATURE_IBRS))
- entry->ebx |= F(IBRS);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ entry->ebx |= F(AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+ entry->ebx |= F(AMD_IBRS);
entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
break;
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3959,7 +3959,7 @@ static int svm_get_msr(struct kvm_vcpu *
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
return 1;

msr_info->data = svm->spec_ctrl;
@@ -4057,7 +4057,7 @@ static int svm_set_msr(struct kvm_vcpu *
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
return 1;

/* The STIBP bit doesn't fault even if it's not advertised */
@@ -4084,7 +4084,7 @@ static int svm_set_msr(struct kvm_vcpu *
break;
case MSR_IA32_PRED_CMD:
if (!msr->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
+ !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
return 1;

if (data & ~PRED_CMD_IBPB)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3275,9 +3275,7 @@ static int vmx_get_msr(struct kvm_vcpu *
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
return 1;

msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3397,9 +3395,7 @@ static int vmx_set_msr(struct kvm_vcpu *
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
return 1;

/* The STIBP bit doesn't fault even if it's not advertised */
@@ -3429,7 +3425,6 @@ static int vmx_set_msr(struct kvm_vcpu *
break;
case MSR_IA32_PRED_CMD:
if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
return 1;

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 10 May 2018 19:13:18 +0200
Subject: x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS

From: Thomas Gleixner <tglx@linutronix.de>

commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961 upstream

The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
Intel and implied by IBRS or STIBP support on AMD. That's just confusing
and in case an AMD CPU has IBRS not supported because the underlying
problem has been fixed but has another bit valid in the SPEC_CTRL MSR,
the thing falls apart.

Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
availability on both Intel and AMD.

While at it replace the boot_cpu_has() checks with static_cpu_has() where
possible. This prevents late microcode loading from exposing SPEC_CTRL, but
late loading is already very limited as it does not reevaluate the
mitigation options and other bits and pieces. Having static_cpu_has() is
the simplest and least fragile solution.
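
The resulting usage pattern, shown in isolation (it mirrors the hunks
below rather than adding anything new): every MSR_IA32_SPEC_CTRL access is
gated on the synthetic bit, which init_speculation_control() sets for the
Intel SPEC_CTRL enumeration and for the AMD IBRS/STIBP variants alike, so
one test covers both vendors:

    /* Sketch of the guard applied throughout bugs.c: */
    if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
        rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);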

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/kernel/cpu/bugs.c | 18 +++++++++++-------
arch/x86/kernel/cpu/common.c | 9 +++++++--
arch/x86/kernel/cpu/intel.c | 1 +
4 files changed, 20 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -206,6 +206,7 @@
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */

#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -64,7 +64,7 @@ void __init check_bugs(void)
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
* init code as it is not enumerated and depends on the family.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS))
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

/* Select the proper spectre mitigation before patching alternatives */
@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
{
u64 msrval = x86_spec_ctrl_base;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
return msrval;
}
@@ -155,10 +155,12 @@ void x86_spec_ctrl_set_guest(u64 guest_s
{
u64 host = x86_spec_ctrl_base;

- if (!boot_cpu_has(X86_FEATURE_IBRS))
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
return;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ /* Intel controls SSB in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
@@ -170,10 +172,12 @@ void x86_spec_ctrl_restore_host(u64 gues
{
u64 host = x86_spec_ctrl_base;

- if (!boot_cpu_has(X86_FEATURE_IBRS))
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
return;

- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ /* Intel controls SSB in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
@@ -631,7 +635,7 @@ int arch_prctl_spec_ctrl_get(struct task

void x86_spec_ctrl_setup_ap(void)
{
- if (boot_cpu_has(X86_FEATURE_IBRS))
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -761,19 +761,24 @@ static void init_speculation_control(str
if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
set_cpu_cap(c, X86_FEATURE_IBRS);
set_cpu_cap(c, X86_FEATURE_IBPB);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
}

if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
set_cpu_cap(c, X86_FEATURE_STIBP);

- if (cpu_has(c, X86_FEATURE_AMD_IBRS))
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }

if (cpu_has(c, X86_FEATURE_AMD_IBPB))
set_cpu_cap(c, X86_FEATURE_IBPB);

- if (cpu_has(c, X86_FEATURE_AMD_STIBP))
+ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
set_cpu_cap(c, X86_FEATURE_STIBP);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
}

void get_cpu_cap(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -188,6 +188,7 @@ static void early_init_intel(struct cpui
setup_clear_cpu_cap(X86_FEATURE_IBPB);
setup_clear_cpu_cap(X86_FEATURE_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SSBD);
}
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 10 May 2018 20:21:36 +0200
Subject: x86/cpufeatures: Disentangle SSBD enumeration

From: Thomas Gleixner <tglx@linutronix.de>

commit 52817587e706686fcdb27f14c1b000c92f266c96 upstream

The SSBD enumeration is, similarly to the other bits, magically shared
between Intel and AMD, though the mechanisms are different.

Make X86_FEATURE_SSBD synthetic and set it depending on the vendor specific
features or family dependent setup.

Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
controlled via MSR_SPEC_CTRL and fix up the usage sites.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 7 +++----
arch/x86/kernel/cpu/amd.c | 7 +------
arch/x86/kernel/cpu/bugs.c | 10 +++++-----
arch/x86/kernel/cpu/common.c | 3 +++
arch/x86/kernel/cpu/intel.c | 1 +
arch/x86/kernel/process.c | 2 +-
6 files changed, 14 insertions(+), 16 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -207,15 +207,14 @@
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
-
+#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
-
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
@@ -338,7 +337,7 @@
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */

/*
* BUG word(s)
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -570,8 +570,8 @@ static void bsp_init_amd(struct cpuinfo_
* avoid RMW. If that faults, do not enable SSBD.
*/
if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
setup_force_cpu_cap(X86_FEATURE_SSBD);
- setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
}
}
@@ -919,11 +919,6 @@ static void init_amd(struct cpuinfo_x86
/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
-
- if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
- set_cpu_cap(c, X86_FEATURE_SSBD);
- set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
- }
}

#ifdef CONFIG_X86_32
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -159,8 +159,8 @@ void x86_spec_ctrl_set_guest(u64 guest_s
if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
return;

- /* Intel controls SSB in MSR_SPEC_CTRL */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ /* SSBD controlled in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
@@ -176,8 +176,8 @@ void x86_spec_ctrl_restore_host(u64 gues
if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
return;

- /* Intel controls SSB in MSR_SPEC_CTRL */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+ /* SSBD controlled in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
@@ -189,7 +189,7 @@ static void x86_amd_ssb_disable(void)
{
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

- if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -767,6 +767,9 @@ static void init_speculation_control(str
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
set_cpu_cap(c, X86_FEATURE_STIBP);

+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+
if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
set_cpu_cap(c, X86_FEATURE_IBRS);
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -191,6 +191,7 @@ static void early_init_intel(struct cpui
setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
}

/*
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -283,7 +283,7 @@ static __always_inline void __speculativ
{
u64 msr;

- if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrl(MSR_AMD64_LS_CFG, msr);
} else {
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 10 May 2018 16:26:00 +0200
Subject: x86/cpufeatures: Add FEATURE_ZEN

From: Thomas Gleixner <tglx@linutronix.de>

commit d1035d971829dcf80e8686ccde26f94b0a069472 upstream

Add a ZEN feature bit so family-dependent static_cpu_has() optimizations
can be built for ZEN.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/kernel/cpu/amd.c | 1 +
2 files changed, 2 insertions(+)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -218,6 +218,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -812,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x

static void init_amd_zn(struct cpuinfo_x86 *c)
{
+ set_cpu_cap(c, X86_FEATURE_ZEN);
/*
* Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
* all up to and including B1.
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 9 May 2018 21:53:09 +0200
Subject: x86/speculation: Handle HT correctly on AMD

From: Thomas Gleixner <tglx@linutronix.de>

commit 1f50ddb4f4189243c05926b842dc1a0332195f31 upstream

The AMD64_LS_CFG MSR is a per core MSR on Family 17H CPUs. That means when
hyperthreading is enabled the SSBD bit toggle needs to take both siblings
into account. Otherwise the following situation can happen:

CPU0 CPU1

disable SSB
disable SSB
enable SSB <- Enables it for the Core, i.e. for CPU0 as well

So after the SSB enable on CPU1 the task on CPU0 runs with SSB enabled
again.

On Intel the SSBD control is per core as well, but the synchronization
logic is implemented behind the per thread SPEC_CTRL MSR. It works like
this:

CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL

i.e. if one of the threads enables a mitigation then this affects both and
the mitigation is only disabled in the core when both threads disabled it.

Add the necessary synchronization logic for AMD family 17H. Unfortunately
that requires a spinlock to serialize the access to the MSR, but the locks
are only shared between siblings.
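
The serialization boils down to a first-on/last-off refcount per core. A
simplified sketch of that rule (not the kernel code; the real version holds
the shared per-core raw spinlock around the count and the MSR write):

    struct core_ssb_state {
        unsigned int disable_count; /* writers hold the per-core lock */
    };

    static void core_ssbd_enable(struct core_ssb_state *c)
    {
        /* Only the 0 -> 1 transition touches the per-core MSR. */
        if (c->disable_count++ == 0)
            ; /* wrmsrl(MSR_AMD64_LS_CFG, base | ssbd_mask) */
    }

    static void core_ssbd_disable(struct core_ssb_state *c)
    {
        /* Only the 1 -> 0 transition clears it again. */
        if (--c->disable_count == 0)
            ; /* wrmsrl(MSR_AMD64_LS_CFG, base) */
    }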

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/spec-ctrl.h | 6 +
arch/x86/kernel/process.c | 125 +++++++++++++++++++++++++++++++++++++--
arch/x86/kernel/smpboot.c | 5 +
3 files changed, 130 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -33,6 +33,12 @@ static inline u64 ssbd_tif_to_amd_ls_cfg
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
extern void speculative_store_bypass_update(void);

#endif
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -279,22 +279,135 @@ static inline void switch_to_bitmap(stru
}
}

-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+ struct ssb_state *shared_state;
+ raw_spinlock_t lock;
+ unsigned int disable_state;
+ unsigned long local_state;
+};
+
+#define LSTATE_SSB 0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+ struct ssb_state *st = this_cpu_ptr(&ssb_state);
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
+
+ st->local_state = 0;
+
+ /*
+ * Shared state setup happens once on the first bringup
+ * of the CPU. It's not destroyed on CPU hotunplug.
+ */
+ if (st->shared_state)
+ return;
+
+ raw_spin_lock_init(&st->lock);
+
+ /*
+ * Go over HT siblings and check whether one of them has set up the
+ * shared state pointer already.
+ */
+ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+ if (cpu == this_cpu)
+ continue;
+
+ if (!per_cpu(ssb_state, cpu).shared_state)
+ continue;
+
+ /* Link it to the state of the sibling: */
+ st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+ return;
+ }
+
+ /*
+ * First HT sibling to come up on the core. Link shared state of
+ * the first HT sibling to itself. The siblings on the same core
+ * which come up later will see the shared state pointer and link
+ * themselves to the state of this CPU.
+ */
+ st->shared_state = st;
+}
+
+/*
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and the last sibling to disable it disables it for the whole core. This
+ * is how MSR_SPEC_CTRL works in "hardware":
|
|
+ *
|
|
+ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
|
|
+ */
|
|
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
|
|
{
|
|
- u64 msr;
|
|
+ struct ssb_state *st = this_cpu_ptr(&ssb_state);
|
|
+ u64 msr = x86_amd_ls_cfg_base;
|
|
|
|
- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
|
|
- msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
|
|
+ if (!static_cpu_has(X86_FEATURE_ZEN)) {
|
|
+ msr |= ssbd_tif_to_amd_ls_cfg(tifn);
|
|
wrmsrl(MSR_AMD64_LS_CFG, msr);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (tifn & _TIF_SSBD) {
|
|
+ /*
|
|
+ * Since this can race with prctl(), block reentry on the
|
|
+ * same CPU.
|
|
+ */
|
|
+ if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
|
|
+ return;
|
|
+
|
|
+ msr |= x86_amd_ls_cfg_ssbd_mask;
|
|
+
|
|
+ raw_spin_lock(&st->shared_state->lock);
|
|
+ /* First sibling enables SSBD: */
|
|
+ if (!st->shared_state->disable_state)
|
|
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
|
|
+ st->shared_state->disable_state++;
|
|
+ raw_spin_unlock(&st->shared_state->lock);
|
|
} else {
|
|
- msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
|
|
- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
|
|
+ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
|
|
+ return;
|
|
+
|
|
+ raw_spin_lock(&st->shared_state->lock);
|
|
+ st->shared_state->disable_state--;
|
|
+ if (!st->shared_state->disable_state)
|
|
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
|
|
+ raw_spin_unlock(&st->shared_state->lock);
|
|
}
|
|
}
|
|
+#else
|
|
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
|
|
+{
|
|
+ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
|
|
+
|
|
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
|
|
+{
|
|
+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
|
|
+
|
|
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
|
|
+}
|
|
+
|
|
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
|
|
+{
|
|
+ if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
|
|
+ amd_set_core_ssb_state(tifn);
|
|
+ else
|
|
+ intel_set_ssb_state(tifn);
|
|
+}
|
|
|
|
void speculative_store_bypass_update(void)
|
|
{
|
|
+ preempt_disable();
|
|
__speculative_store_bypass_update(current_thread_info()->flags);
|
|
+ preempt_enable();
|
|
}
|
|
|
|
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
|
|
--- a/arch/x86/kernel/smpboot.c
|
|
+++ b/arch/x86/kernel/smpboot.c
|
|
@@ -77,6 +77,7 @@
|
|
#include <asm/i8259.h>
|
|
#include <asm/misc.h>
|
|
#include <asm/qspinlock.h>
|
|
+#include <asm/spec-ctrl.h>
|
|
|
|
/* Number of siblings per CPU package */
|
|
int smp_num_siblings = 1;
|
|
@@ -242,6 +243,8 @@ static void notrace start_secondary(void
|
|
*/
|
|
check_tsc_sync_target();
|
|
|
|
+ speculative_store_bypass_ht_init();
|
|
+
|
|
/*
|
|
* Lock vector_lock, set CPU online and bring the vector
|
|
* allocator online. Online must be set with vector_lock held
|
|
@@ -1257,6 +1260,8 @@ void __init native_smp_prepare_cpus(unsi
|
|
set_mtrr_aps_delayed_init();
|
|
|
|
smp_quirk_init_udelay();
|
|
+
|
|
+ speculative_store_bypass_ht_init();
|
|
}
|
|
|
|
void arch_enable_nonboot_cpus_begin(void)
|
|
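The enable/disable handshake above boils down to a lock-protected
reference count: the first sibling to raise the count performs the
core-wide MSR write, the last one to drop it undoes it. A minimal
user-space sketch of just that pattern (illustrative names only; a
pthread mutex stands in for the raw spinlock and apply_mitigation()
for the wrmsrl(MSR_AMD64_LS_CFG, ...) write):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t core_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int disable_state;	/* siblings that want SSBD on */

static void apply_mitigation(int on)	/* stand-in for the MSR write */
{
	printf("core-wide SSBD %s\n", on ? "on" : "off");
}

static void sibling_enable_ssbd(void)
{
	pthread_mutex_lock(&core_lock);
	if (!disable_state++)		/* first sibling switches it on */
		apply_mitigation(1);
	pthread_mutex_unlock(&core_lock);
}

static void sibling_disable_ssbd(void)
{
	pthread_mutex_lock(&core_lock);
	if (!--disable_state)		/* last sibling switches it off */
		apply_mitigation(0);
	pthread_mutex_unlock(&core_lock);
}

int main(void)
{
	sibling_enable_ssbd();		/* thread 0: MSR write, SSBD on  */
	sibling_enable_ssbd();		/* thread 1: count only          */
	sibling_disable_ssbd();		/* thread 0: count only          */
	sibling_disable_ssbd();		/* thread 1: MSR write, SSBD off */
	return 0;
}

The local_state bit in the real code additionally blocks reentry on the
same CPU, which is what closes the race with prctl() noted in the patch.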
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 9 May 2018 23:01:01 +0200
Subject: x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL

From: Thomas Gleixner <tglx@linutronix.de>

commit ccbcd2674472a978b48c91c1fbfb66c0ff959f24 upstream

AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care
about the bit position of the SSBD bit and thus facilitate migration.
Also, the sibling coordination on Family 17H CPUs can only be done on
the host.

Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
extra argument for the VIRT_SPEC_CTRL MSR.

Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
data structure which is going to be used in later patches for the actual
implementation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/spec-ctrl.h |    9 ++++++---
 arch/x86/kernel/cpu/bugs.c       |   20 ++++++++++++++++++--
 arch/x86/kvm/svm.c               |   11 +++++++++--
 arch/x86/kvm/vmx.c               |    5 +++--
 4 files changed, 36 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -10,10 +10,13 @@
  * the guest has, while on VMEXIT we restore the host view. This
  * would be easier if SPEC_CTRL were architecturally maskable or
  * shadowable for guests but this is not (currently) the case.
- * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
 */
-extern void x86_spec_ctrl_set_guest(u64);
-extern void x86_spec_ctrl_restore_host(u64);
+extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
+				    u64 guest_virt_spec_ctrl);
+extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
+				       u64 guest_virt_spec_ctrl);

 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -151,7 +151,15 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 {
 	u64 host = x86_spec_ctrl_base;

@@ -168,7 +176,15 @@ void x86_spec_ctrl_set_guest(u64 guest_s
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
 {
 	u64 host = x86_spec_ctrl_base;

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -192,6 +192,12 @@ struct vcpu_svm {
 	} host;

 	u64 spec_ctrl;
+	/*
+	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+	 * translated into the appropriate L2_CFG bits on the host to
+	 * perform speculative control.
+	 */
+	u64 virt_spec_ctrl;

 	u32 *msrpm;

@@ -1910,6 +1916,7 @@ static void svm_vcpu_reset(struct kvm_vc

 	vcpu->arch.microcode_version = 0x01000065;
 	svm->spec_ctrl = 0;
+	svm->virt_spec_ctrl = 0;

 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -5401,7 +5408,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_spec_ctrl_set_guest(svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5525,7 +5532,7 @@ static void svm_vcpu_run(struct kvm_vcpu
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

-	x86_spec_ctrl_restore_host(svm->spec_ctrl);
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

 	reload_tss(vcpu);

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9465,9 +9465,10 @@ static void __noclone vmx_vcpu_run(struc
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_spec_ctrl_set_guest(vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);

 	vmx->__launched = vmx->loaded_vmcs->launched;
+
 	asm(
 		/* Store host registers */
 		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -9603,7 +9604,7 @@ static void __noclone vmx_vcpu_run(struc
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

-	x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);

 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Tom Lendacky <thomas.lendacky@amd.com>
Date: Thu, 17 May 2018 17:09:18 +0200
Subject: x86/speculation: Add virtualized speculative store bypass disable support

From: Tom Lendacky <thomas.lendacky@amd.com>

commit 11fb0683493b2da112cd64c9dada221b52463bf7 upstream

Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD). To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/include/asm/msr-index.h   |    2 ++
 arch/x86/kernel/cpu/bugs.c         |    4 +++-
 arch/x86/kernel/process.c          |   13 ++++++++++++-
 4 files changed, 18 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -282,6 +282,7 @@
 #define X86_FEATURE_AMD_IBPB	(13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS	(13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP	(13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */

 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -347,6 +347,8 @@
 #define MSR_AMD64_SEV_ENABLED_BIT	0
 #define MSR_AMD64_SEV_ENABLED		BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)

+#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -205,7 +205,9 @@ static void x86_amd_ssb_disable(void)
 {
 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

-	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }

--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -388,6 +388,15 @@ static __always_inline void amd_set_core
 }
 #endif

+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+	/*
+	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+	 * so ssbd_tif_to_spec_ctrl() just works.
+	 */
+	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
 static __always_inline void intel_set_ssb_state(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
@@ -397,7 +406,9 @@ static __always_inline void intel_set_ss

 static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
 {
-	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		amd_set_ssb_virt_state(tifn);
+	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 		amd_set_core_ssb_state(tifn);
 	else
 		intel_set_ssb_state(tifn);
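For reference, the new CPUID bit is plain leaf 0x80000008 EBX[25] and can
be probed from user space; the MSR itself (0xc001011f) is only accessible
from ring 0 or via the msr driver. A small detection-only sketch using
GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() validates the extended leaf before reading it */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("VIRT_SSBD %s\n", (ebx & (1u << 25)) ? "present" : "absent");
	return 0;
}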
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 10 May 2018 20:31:44 +0200
Subject: x86/speculation: Rework speculative_store_bypass_update()

From: Thomas Gleixner <tglx@linutronix.de>

commit 0270be3e34efb05a88bc4c422572ece038ef3608 upstream

The upcoming support for the virtual SPEC_CTRL MSR on AMD needs to reuse
speculative_store_bypass_update() to avoid code duplication. Add an
argument for supplying a thread info (TIF) value and create a wrapper
speculative_store_bypass_update_current() which is used at the existing
call site.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/spec-ctrl.h |    7 ++++++-
 arch/x86/kernel/cpu/bugs.c       |    2 +-
 arch/x86/kernel/process.c        |    4 ++--
 3 files changed, 9 insertions(+), 4 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -42,6 +42,11 @@ extern void speculative_store_bypass_ht_
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif

-extern void speculative_store_bypass_update(void);
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+	speculative_store_bypass_update(current_thread_info()->flags);
+}

 #endif
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -598,7 +598,7 @@ static int ssb_prctl_set(struct task_str
 	 * mitigation until it is next scheduled.
 	 */
 	if (task == current && update)
-		speculative_store_bypass_update();
+		speculative_store_bypass_update_current();

 	return 0;
 }
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -414,10 +414,10 @@ static __always_inline void __speculativ
 		intel_set_ssb_state(tifn);
 }

-void speculative_store_bypass_update(void)
+void speculative_store_bypass_update(unsigned long tif)
 {
 	preempt_disable();
-	__speculative_store_bypass_update(current_thread_info()->flags);
+	__speculative_store_bypass_update(tif);
 	preempt_enable();
 }

From foo@baz Mon May 21 21:56:07 CEST 2018
From: Borislav Petkov <bp@suse.de>
Date: Sat, 12 May 2018 00:14:51 +0200
Subject: x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}

From: Borislav Petkov <bp@suse.de>

commit cc69b34989210f067b2c51d5539b5f96ebcc3a01 upstream

Function bodies are very similar and are going to grow more almost
identical code. Add a bool arg to determine whether SPEC_CTRL is being set
for the guest or restored to the host.

No functional changes.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/spec-ctrl.h |   33 ++++++++++++++++++---
 arch/x86/kernel/cpu/bugs.c       |   60 +++++++++------------------------------
 2 files changed, 44 insertions(+), 49 deletions(-)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -13,10 +13,35 @@
  * Takes the guest view of SPEC_CTRL MSR as a parameter and also
  * the guest's version of VIRT_SPEC_CTRL, if emulated.
 */
-extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
-				    u64 guest_virt_spec_ctrl);
-extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
-				       u64 guest_virt_spec_ctrl);
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *				(may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}

 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -151,55 +151,25 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

-/**
- * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
- * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
- * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
- *				(may get translated to MSR_AMD64_LS_CFG bits)
- *
- * Avoids writing to the MSR if the content/bits are the same
- */
-void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-	u64 host = x86_spec_ctrl_base;
+	struct thread_info *ti = current_thread_info();
+	u64 msr, host = x86_spec_ctrl_base;

 	/* Is MSR_SPEC_CTRL implemented ? */
-	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		return;
-
-	/* SSBD controlled in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-
-	if (host != guest_spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
-
-/**
- * x86_spec_ctrl_restore_host - Restore host speculation control registers
- * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
- * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
- *				(may get translated to MSR_AMD64_LS_CFG bits)
- *
- * Avoids writing to the MSR if the content/bits are the same
- */
-void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
-{
-	u64 host = x86_spec_ctrl_base;
-
-	/* Is MSR_SPEC_CTRL implemented ? */
-	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		return;
-
-	/* SSBD controlled in MSR_SPEC_CTRL */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-
-	if (host != guest_spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, host);
+	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/* SSBD controlled in MSR_SPEC_CTRL */
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+			host |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+		if (host != guest_spec_ctrl) {
+			msr = setguest ? guest_spec_ctrl : host;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		}
+	}
 }
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

 static void x86_amd_ssb_disable(void)
 {
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 12 May 2018 20:49:16 +0200
Subject: x86/bugs: Expose x86_spec_ctrl_base directly

From: Thomas Gleixner <tglx@linutronix.de>

commit fa8ac4988249c38476f6ad678a4848a736373403 upstream

x86_spec_ctrl_base is the system wide default value for the SPEC_CTRL MSR.
x86_spec_ctrl_get_default() returns x86_spec_ctrl_base and was intended to
prevent modification of that variable. However, the variable is read-only
after init and already globally visible.

Remove the function and export the variable instead.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/nospec-branch.h |   16 +++++-----------
 arch/x86/include/asm/spec-ctrl.h     |    3 ---
 arch/x86/kernel/cpu/bugs.c           |   11 +----------
 3 files changed, 6 insertions(+), 24 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };

-/*
- * The Intel specification for the SPEC_CTRL MSR requires that we
- * preserve any already set reserved bits at boot time (e.g. for
- * future additions that this kernel is not currently aware of).
- * We then set any additional mitigation bits that we want
- * ourselves and always use this as the base for SPEC_CTRL.
- * We also use this when handling guest entry/exit as below.
- */
 extern void x86_spec_ctrl_set(u64);
-extern u64 x86_spec_ctrl_get_default(void);

 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
@@ -278,6 +269,9 @@ static inline void indirect_branch_predi
 	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }

+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -286,7 +280,7 @@ static inline void indirect_branch_predi
 */
 #define firmware_restrict_branch_speculation_start()		\
 do {								\
-	u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;	\
+	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;		\
 								\
 	preempt_disable();					\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,		\
@@ -295,7 +289,7 @@ do {								\

 #define firmware_restrict_branch_speculation_end()		\
 do {								\
-	u64 val = x86_spec_ctrl_get_default();			\
+	u64 val = x86_spec_ctrl_base;				\
 								\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,		\
 			      X86_FEATURE_USE_IBRS_FW);		\
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -47,9 +47,6 @@ void x86_spec_ctrl_restore_host(u64 gues
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_ssbd_mask;

-/* The Intel SPEC CTRL MSR base value cache */
-extern u64 x86_spec_ctrl_base;
-
 static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -36,6 +36,7 @@ static void __init ssb_select_mitigation
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
 u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

 /*
 * The vendor and possibly platform specific bits which can be modified in
@@ -141,16 +142,6 @@ void x86_spec_ctrl_set(u64 val)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

-u64 x86_spec_ctrl_get_default(void)
-{
-	u64 msrval = x86_spec_ctrl_base;
-
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
-		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-	return msrval;
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 12 May 2018 20:53:14 +0200
Subject: x86/bugs: Remove x86_spec_ctrl_set()

From: Thomas Gleixner <tglx@linutronix.de>

commit 4b59bdb569453a60b752b274ca61f009e37f4dae upstream

x86_spec_ctrl_set() is only used in bugs.c and the extra mask checks there
provide no real value as both call sites can just write x86_spec_ctrl_base
to MSR_SPEC_CTRL. x86_spec_ctrl_base is valid and does not need any extra
masking or checking.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/nospec-branch.h |    2 --
 arch/x86/kernel/cpu/bugs.c           |   13 ++-----------
 2 files changed, 2 insertions(+), 13 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,8 +217,6 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };

-extern void x86_spec_ctrl_set(u64);
-
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
 	SPEC_STORE_BYPASS_NONE,
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -133,15 +133,6 @@ static const char *spectre_v2_strings[]
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;

-void x86_spec_ctrl_set(u64 val)
-{
-	if (val & x86_spec_ctrl_mask)
-		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
-	else
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
-}
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
@@ -503,7 +494,7 @@ static enum ssb_mitigation __init __ssb_
 	case X86_VENDOR_INTEL:
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
 		x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
-		x86_spec_ctrl_set(SPEC_CTRL_SSBD);
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 		break;
 	case X86_VENDOR_AMD:
 		x86_amd_ssb_disable();
@@ -615,7 +606,7 @@ int arch_prctl_spec_ctrl_get(struct task
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 12 May 2018 20:10:00 +0200
Subject: x86/bugs: Rework spec_ctrl base and mask logic

From: Thomas Gleixner <tglx@linutronix.de>

commit be6fcb5478e95bb1c91f489121238deb3abca46a upstream

x86_spec_ctrl_mask is intended to mask out bits from an MSR_SPEC_CTRL value
which are not to be modified. However the implementation is not really used
and the bitmask was inverted to make a check easier, which was removed in
"x86/bugs: Remove x86_spec_ctrl_set()"

Aside from that, it is missing the STIBP bit if it is supported by the
platform, so if the mask were used in x86_virt_spec_ctrl() then it
would prevent a guest from setting STIBP.

Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to
sanitize the value which is supplied by the guest.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/cpu/bugs.c |   26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -42,7 +42,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
-static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

 /*
 * AMD specific MSR info for Speculative Store Bypass control.
@@ -68,6 +68,10 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

+	/* Allow STIBP in MSR_SPEC_CTRL if supported */
+	if (boot_cpu_has(X86_FEATURE_STIBP))
+		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
 	/* Select the proper spectre mitigation before patching alternatives */
 	spectre_v2_select_mitigation();

@@ -136,18 +140,26 @@ static enum spectre_v2_mitigation spectr
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
+	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
 	struct thread_info *ti = current_thread_info();
-	u64 msr, host = x86_spec_ctrl_base;

 	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+		/*
+		 * Restrict guest_spec_ctrl to supported values. Clear the
+		 * modifiable bits in the host base value and or the
+		 * modifiable bits from the guest value.
+		 */
+		guestval = hostval & ~x86_spec_ctrl_mask;
+		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
 		/* SSBD controlled in MSR_SPEC_CTRL */
 		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-			host |= ssbd_tif_to_spec_ctrl(ti->flags);
+			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

-		if (host != guest_spec_ctrl) {
-			msr = setguest ? guest_spec_ctrl : host;
-			wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		if (hostval != guestval) {
+			msrval = setguest ? guestval : hostval;
+			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 		}
 	}
 }
@@ -493,7 +505,7 @@ static enum ssb_mitigation __init __ssb_
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-		x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 		break;
 	case X86_VENDOR_AMD:
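The sanitizing step added above is plain bit arithmetic: keep the host's
non-modifiable bits, merge in only the guest's modifiable ones. A
standalone example with the architectural bit positions (IBRS = bit 0,
STIBP = bit 1, SSBD = bit 2); the sample register values are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask  = 0x1 | 0x2 | 0x4; /* bits the guest may modify */
	uint64_t host  = 0x4;             /* host base with SSBD set   */
	uint64_t guest = 0x8 | 0x1;       /* IBRS plus an unknown bit  */

	uint64_t guestval = (host & ~mask) | (guest & mask);

	/* the unknown 0x8 is dropped, IBRS is granted: prints 0x1 */
	printf("sanitized guest SPEC_CTRL = %#llx\n",
	       (unsigned long long)guestval);
	return 0;
}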
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 10 May 2018 20:42:48 +0200
Subject: x86/speculation, KVM: Implement support for VIRT_SPEC_CTRL/LS_CFG

From: Thomas Gleixner <tglx@linutronix.de>

commit 47c61b3955cf712cadfc25635bf9bc174af030ea upstream

Add the necessary logic for supporting the emulated VIRT_SPEC_CTRL MSR to
x86_virt_spec_ctrl(). If either X86_FEATURE_LS_CFG_SSBD or
X86_FEATURE_VIRT_SSBD is set then use the new guest_virt_spec_ctrl
argument to check whether the state must be modified on the host. The
update reuses speculative_store_bypass_update() so the ZEN-specific sibling
coordination can be reused.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/spec-ctrl.h |    6 ++++++
 arch/x86/kernel/cpu/bugs.c       |   30 ++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,6 +53,12 @@ static inline u64 ssbd_tif_to_spec_ctrl(
 	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }

+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
 static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
 	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -162,6 +162,36 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 		}
 	}
+
+	/*
+	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
+	 */
+	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+		return;
+
+	/*
+	 * If the host has SSBD mitigation enabled, force it in the host's
+	 * virtual MSR value. If its not permanently enabled, evaluate
+	 * current's TIF_SSBD thread flag.
+	 */
+	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+		hostval = SPEC_CTRL_SSBD;
+	else
+		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+	/* Sanitize the guest value */
+	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+	if (hostval != guestval) {
+		unsigned long tif;
+
+		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+				 ssbd_spec_ctrl_to_tif(hostval);
+
+		speculative_store_bypass_update(tif);
+	}
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

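ssbd_spec_ctrl_to_tif() is the mirror of ssbd_tif_to_spec_ctrl(): it only
relocates the SSBD bit between the SPEC_CTRL layout and the TIF layout. A
self-contained check with the 4.17-era constants (TIF_SSBD = 5,
SPEC_CTRL_SSBD_SHIFT = 2), shown purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)
#define TIF_SSBD		5

static unsigned long ssbd_spec_ctrl_to_tif(uint64_t spec_ctrl)
{
	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	/* 0x4 (SSBD in SPEC_CTRL) moves to bit 5: prints 0x20 */
	printf("%#lx\n", ssbd_spec_ctrl_to_tif(SPEC_CTRL_SSBD));
	return 0;
}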
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Tom Lendacky <thomas.lendacky@amd.com>
Date: Thu, 10 May 2018 22:06:39 +0200
Subject: KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD

From: Tom Lendacky <thomas.lendacky@amd.com>

commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream

Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM. This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.

[ tglx: Folded the migration fixup from Paolo Bonzini ]

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/kvm_host.h |    2 +-
 arch/x86/kernel/cpu/common.c    |    3 ++-
 arch/x86/kvm/cpuid.c            |   11 +++++++++--
 arch/x86/kvm/svm.c              |   21 +++++++++++++++++++--
 arch/x86/kvm/vmx.c              |   18 +++++++++++++++---
 arch/x86/kvm/x86.c              |   13 ++++---------
 6 files changed, 50 insertions(+), 18 deletions(-)

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -933,7 +933,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
-	bool (*cpu_has_high_real_mode_segbase)(void);
+	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);

 	int (*vm_init)(struct kvm *kvm);
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -767,7 +767,8 @@ static void init_speculation_control(str
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);

-	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
 		set_cpu_cap(c, X86_FEATURE_SSBD);

 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -374,7 +374,7 @@ static inline int __do_cpuid_ent(struct

 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(AMD_IBPB) | F(AMD_IBRS);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);

 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -642,13 +642,20 @@ static inline int __do_cpuid_ent(struct
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
-		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
+		/*
+		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+		 * hardware cpuid
+		 */
 		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
 			entry->ebx |= F(AMD_IBPB);
 		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
 			entry->ebx |= F(AMD_IBRS);
+		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
 	case 0x80000019:
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3971,6 +3971,13 @@ static int svm_get_msr(struct kvm_vcpu *

 		msr_info->data = svm->spec_ctrl;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+			return 1;
+
+		msr_info->data = svm->virt_spec_ctrl;
+		break;
 	case MSR_F15H_IC_CFG: {

 		int family, model;
@@ -4105,6 +4112,16 @@ static int svm_set_msr(struct kvm_vcpu *
 			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+			return 1;
+
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
@@ -5635,7 +5652,7 @@ static bool svm_cpu_has_accelerated_tpr(
 	return false;
 }

-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
 	return true;
 }
@@ -6859,7 +6876,7 @@ static struct kvm_x86_ops svm_x86_ops __
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+	.has_emulated_msr = svm_has_emulated_msr,

 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9231,9 +9231,21 @@ static void vmx_handle_external_intr(str
 }
 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);

-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-	return enable_unrestricted_guest || emulate_invalid_guest_state;
+	switch (index) {
+	case MSR_IA32_SMBASE:
+		/*
+		 * We cannot do SMM unless we can run the guest in big
+		 * real mode.
+		 */
+		return enable_unrestricted_guest || emulate_invalid_guest_state;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		/* This is AMD only. */
+		return false;
+	default:
+		return true;
+	}
 }

 static bool vmx_mpx_supported(void)
@@ -12297,7 +12309,7 @@ static struct kvm_x86_ops vmx_x86_ops __
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
-	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+	.has_emulated_msr = vmx_has_emulated_msr,

 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1045,6 +1045,7 @@ static u32 emulated_msrs[] = {
 	MSR_SMI_COUNT,
 	MSR_PLATFORM_INFO,
 	MSR_MISC_FEATURES_ENABLES,
+	MSR_AMD64_VIRT_SPEC_CTRL,
 };

 static unsigned num_emulated_msrs;
@@ -2843,7 +2844,7 @@ int kvm_vm_ioctl_check_extension(struct
 	 * fringe case that is not enabled except via specific settings
 	 * of the module parameters.
 	 */
-		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
 		break;
 	case KVM_CAP_VAPIC:
 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
@@ -4522,14 +4523,8 @@ static void kvm_init_msr_list(void)
 	num_msrs_to_save = j;

 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		switch (emulated_msrs[i]) {
-		case MSR_IA32_SMBASE:
-			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-				continue;
-			break;
-		default:
-			break;
-		}
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+			continue;

 		if (j < i)
 			emulated_msrs[j] = emulated_msrs[i];
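The kvm_init_msr_list() change replaces the ad-hoc switch with a generic
predicate filter: compact the array in place, keeping only MSRs the
backend claims to emulate. A user-space sketch of that filtering; the
predicate mimics a VMX-like backend rejecting the AMD-only MSR, and the
MSR numbers are the architectural ones:

#include <stdio.h>

#define MSR_IA32_SMBASE			0x0000009e
#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f

static int has_emulated_msr(unsigned int index)
{
	return index != MSR_AMD64_VIRT_SPEC_CTRL; /* AMD only, drop on VMX */
}

int main(void)
{
	unsigned int msrs[] = { MSR_IA32_SMBASE, MSR_AMD64_VIRT_SPEC_CTRL };
	unsigned int i, j;

	for (i = j = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++) {
		if (!has_emulated_msr(msrs[i]))
			continue;
		msrs[j++] = msrs[i];	/* compact in place */
	}
	printf("%u of %zu MSRs kept\n", j, sizeof(msrs) / sizeof(msrs[0]));
	return 0;
}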
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 16 May 2018 23:18:09 -0400
Subject: x86/bugs: Rename SSBD_NO to SSB_NO

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 240da953fcc6a9008c92fae5b1f727ee5ed167ab upstream

The "336996 Speculative Execution Side Channel Mitigations" document from
May defines this as SSB_NO, so let's sync up.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/msr-index.h |    2 +-
 arch/x86/kernel/cpu/common.c     |    2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -70,7 +70,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
-#define ARCH_CAP_SSBD_NO		(1 << 4)   /*
+#define ARCH_CAP_SSB_NO			(1 << 4)   /*
						    * Not susceptible to Speculative Store Bypass
						    * attack, so no Speculative Store Bypass
						    * control required.
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -965,7 +965,7 @@ static void __init cpu_set_bug_bits(stru
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	    !(ia32_cap & ARCH_CAP_SSBD_NO))
+	    !(ia32_cap & ARCH_CAP_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

 	if (x86_match_cpu(cpu_no_speculation))
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Alexei Starovoitov <ast@kernel.org>
Date: Tue, 15 May 2018 09:27:05 -0700
Subject: bpf: Prevent memory disambiguation attack

From: Alexei Starovoitov <ast@kernel.org>

commit af86ca4e3088fe5eacf2f7e58c01fa68ca067672 upstream

Detect code patterns where malicious 'speculative store bypass' can be used
and sanitize such patterns.

 39: (bf) r3 = r10
 40: (07) r3 += -216
 41: (79) r8 = *(u64 *)(r7 +0)   // slow read
 42: (7a) *(u64 *)(r10 -72) = 0  // verifier inserts this instruction
 43: (7b) *(u64 *)(r8 +0) = r3   // this store becomes slow due to r8
 44: (79) r1 = *(u64 *)(r6 +0)   // cpu speculatively executes this load
 45: (71) r2 = *(u8 *)(r1 +0)    // speculatively arbitrary 'load byte'
                                 // is now sanitized

Above code after x86 JIT becomes:
 e5: mov    %rbp,%rdx
 e8: add    $0xffffffffffffff28,%rdx
 ef: mov    0x0(%r13),%r14
 f3: movq   $0x0,-0x48(%rbp)
 fb: mov    %rdx,0x0(%r14)
 ff: mov    0x0(%rbx),%rdi
103: movzbq 0x0(%rdi),%rsi

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 include/linux/bpf_verifier.h |    1 
 kernel/bpf/verifier.c        |   59 ++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 57 insertions(+), 3 deletions(-)

--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -146,6 +146,7 @@ struct bpf_insn_aux_data {
 		s32 call_imm;		/* saved imm field of call insn */
 	};
 	int ctx_field_size;	/* the ctx field size for load insn, maybe 0 */
+	int sanitize_stack_off; /* stack slot to be cleared */
 	bool seen;		/* this insn was processed by the verifier */
 };

--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -970,7 +970,7 @@ static bool register_is_null(struct bpf_
 */
 static int check_stack_write(struct bpf_verifier_env *env,
			     struct bpf_func_state *state, /* func where register points to */
-			     int off, int size, int value_regno)
+			     int off, int size, int value_regno, int insn_idx)
 {
 	struct bpf_func_state *cur; /* state of the current function */
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
@@ -1009,8 +1009,33 @@ static int check_stack_write(struct bpf_
 		state->stack[spi].spilled_ptr = cur->regs[value_regno];
 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

-		for (i = 0; i < BPF_REG_SIZE; i++)
+		for (i = 0; i < BPF_REG_SIZE; i++) {
+			if (state->stack[spi].slot_type[i] == STACK_MISC &&
+			    !env->allow_ptr_leaks) {
+				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
+				int soff = (-spi - 1) * BPF_REG_SIZE;
+
+				/* detected reuse of integer stack slot with a pointer
+				 * which means either llvm is reusing stack slot or
+				 * an attacker is trying to exploit CVE-2018-3639
+				 * (speculative store bypass)
+				 * Have to sanitize that slot with preemptive
+				 * store of zero.
+				 */
+				if (*poff && *poff != soff) {
+					/* disallow programs where single insn stores
+					 * into two different stack slots, since verifier
+					 * cannot sanitize them
+					 */
+					verbose(env,
+						"insn %d cannot access two stack slots fp%d and fp%d",
+						insn_idx, *poff, soff);
+					return -EINVAL;
+				}
+				*poff = soff;
+			}
 			state->stack[spi].slot_type[i] = STACK_SPILL;
+		}
 	} else {
 		u8 type = STACK_MISC;

@@ -1685,7 +1710,7 @@ static int check_mem_access(struct bpf_v

 		if (t == BPF_WRITE)
 			err = check_stack_write(env, state, off, size,
-						value_regno);
+						value_regno, insn_idx);
 		else
 			err = check_stack_read(env, state, off, size,
					       value_regno);
@@ -5156,6 +5181,34 @@ static int convert_ctx_accesses(struct b
 		else
 			continue;

+		if (type == BPF_WRITE &&
+		    env->insn_aux_data[i + delta].sanitize_stack_off) {
+			struct bpf_insn patch[] = {
+				/* Sanitize suspicious stack slot with zero.
+				 * There are no memory dependencies for this store,
+				 * since it's only using frame pointer and immediate
+				 * constant of zero
+				 */
+				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+					   env->insn_aux_data[i + delta].sanitize_stack_off,
+					   0),
+				/* the original STX instruction will immediately
+				 * overwrite the same stack slot with appropriate value
+				 */
+				*insn,
+			};
+
+			cnt = ARRAY_SIZE(patch);
+			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
 			continue;
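The shape of the attack can be rendered in plain C, assuming the spill
address arrives via a dependent load that the CPU is slow to resolve;
whether the reload actually mis-speculates depends on the
microarchitecture, so this is an illustration of the pattern, not a
working exploit:

#include <stdint.h>

/* The stack slot (*slot) still holds an attacker-chosen integer from an
 * earlier, legitimate use. The pointer spill below goes through an
 * address obtained by a dependent load (insn 41 above), so the CPU may
 * not have resolved the store (insn 43) when the reload (insn 44)
 * executes, and the reload can speculatively return the stale integer,
 * which is then dereferenced (insn 45). */
uint8_t ssb_pattern(uint8_t ***slot_addr_mem, uint8_t *good_ptr)
{
	uint8_t **slot = *slot_addr_mem;	/* slow read             */

	*slot = good_ptr;			/* slow-to-resolve store */
	return **slot;				/* reload + dereference  */
}

The zero-store the verifier inserts is frame-pointer relative, so its
address resolves immediately; a bypassing reload then forwards the zero
instead of the stale attacker-controlled value.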