kernel/patch-5.13-redhat.patch


Documentation/admin-guide/kdump/kdump.rst | 11 +
Makefile | 1 +
arch/arm/Kconfig | 4 +-
arch/arm64/Kconfig | 3 +-
.../boot/dts/rockchip/rk3399-pinebook-pro.dts | 2 +-
arch/arm64/kernel/acpi.c | 2 +-
arch/s390/include/asm/ipl.h | 1 +
arch/s390/kernel/ipl.c | 5 +
arch/s390/kernel/setup.c | 4 +
arch/x86/kernel/setup.c | 22 +-
drivers/acpi/apei/hest.c | 8 +
drivers/acpi/device_pm.c | 32 ++
drivers/acpi/internal.h | 9 +
drivers/acpi/irq.c | 17 +-
drivers/acpi/scan.c | 9 +
drivers/acpi/x86/s2idle.c | 157 ++++--
drivers/acpi/x86/utils.c | 25 +
drivers/ata/libahci.c | 18 +
drivers/char/ipmi/ipmi_dmi.c | 15 +
drivers/char/ipmi/ipmi_msghandler.c | 16 +-
drivers/firmware/efi/Makefile | 1 +
drivers/firmware/efi/efi.c | 124 +++--
drivers/firmware/efi/secureboot.c | 38 ++
drivers/gpu/drm/i915/display/intel_dp.c | 12 +-
drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 11 +
drivers/hid/hid-rmi.c | 64 ---
drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 +
drivers/input/rmi4/rmi_driver.c | 124 +++--
drivers/iommu/iommu.c | 22 +
drivers/net/wireguard/main.c | 6 +
drivers/nvme/host/pci.c | 28 +-
drivers/pci/quirks.c | 24 +
drivers/platform/x86/amd-pmc.c | 204 +++++++-
drivers/scsi/smartpqi/smartpqi_init.c | 16 +
drivers/usb/core/hub.c | 7 +
include/linux/acpi.h | 5 +
include/linux/efi.h | 22 +-
include/linux/lsm_hook_defs.h | 2 +
include/linux/lsm_hooks.h | 6 +
include/linux/rmi.h | 1 +
include/linux/security.h | 5 +
init/Kconfig | 2 +-
kernel/crash_core.c | 28 +-
kernel/module_signing.c | 9 +-
security/integrity/platform_certs/load_uefi.c | 6 +-
security/lockdown/Kconfig | 13 +
security/lockdown/lockdown.c | 1 +
security/security.c | 6 +
sound/soc/intel/boards/sof_pcm512x.c | 13 +-
sound/soc/intel/boards/sof_rt5682.c | 14 +
sound/soc/intel/boards/sof_sdw.c | 12 +
sound/soc/intel/boards/sof_sdw_max98373.c | 4 +-
sound/soc/sof/sof-pci-dev.c | 9 +
tools/testing/selftests/bpf/Makefile | 1 -
tools/testing/selftests/bpf/prog_tests/atomics.c | 246 ----------
.../testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 280 -----------
.../testing/selftests/bpf/prog_tests/kfunc_call.c | 59 ---
.../selftests/bpf/prog_tests/linked_funcs.c | 42 --
.../testing/selftests/bpf/prog_tests/linked_maps.c | 30 --
.../testing/selftests/bpf/prog_tests/linked_vars.c | 43 --
.../selftests/bpf/prog_tests/static_linked.c | 40 --
tools/testing/selftests/bpf/progs/bpf_cubic.c | 545 ---------------------
tools/testing/selftests/bpf/progs/bpf_dctcp.c | 224 ---------
.../testing/selftests/bpf/progs/kfunc_call_test.c | 47 --
.../selftests/bpf/progs/kfunc_call_test_subprog.c | 42 --
tools/testing/selftests/bpf/progs/linked_funcs1.c | 73 ---
tools/testing/selftests/bpf/progs/linked_funcs2.c | 73 ---
tools/testing/selftests/bpf/progs/linked_maps1.c | 82 ----
tools/testing/selftests/bpf/progs/linked_maps2.c | 76 ---
tools/testing/selftests/bpf/progs/linked_vars1.c | 54 --
tools/testing/selftests/bpf/progs/linked_vars2.c | 55 ---
.../selftests/bpf/progs/test_static_linked1.c | 30 --
.../selftests/bpf/progs/test_static_linked2.c | 31 --
73 files changed, 941 insertions(+), 2351 deletions(-)
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 75a9dd98e76e..3ff3291551f9 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -285,6 +285,17 @@ This would mean:
2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
3) if the RAM size is larger than 2G, then reserve 128M
+Or you can use crashkernel=auto if you have enough memory. The threshold
+is 2G on x86_64, arm64, ppc64 and ppc64le. The threshold is 4G for s390x.
+If your system memory is less than the threshold, crashkernel=auto will not
+reserve memory.
+
+The automatically reserved memory size varies based on architecture.
+The size changes according to the system memory size, as shown below:
+ x86_64: 1G-64G:160M,64G-1T:256M,1T-:512M
+ s390x: 4G-64G:160M,64G-1T:256M,1T-:512M
+ arm64: 2G-:512M
+ ppc64: 2G-4G:384M,4G-16G:512M,16G-64G:1G,64G-128G:2G,128G-:4G
Boot into System Kernel
diff --git a/Makefile b/Makefile
index ddbd64b92a72..6e74913db692 100644
--- a/Makefile
+++ b/Makefile
@@ -508,6 +508,7 @@ KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE
KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
-Werror=implicit-function-declaration -Werror=implicit-int \
+ -Wno-address-of-packed-member \
-Werror=return-type -Wno-format-security \
-std=gnu89
KBUILD_CPPFLAGS := -D__KERNEL__
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 24804f11302d..fd5ff3fa0d5b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1484,9 +1484,9 @@ config HIGHMEM
If unsure, say n.
config HIGHPTE
- bool "Allocate 2nd-level pagetables from highmem" if EXPERT
+ bool "Allocate 2nd-level pagetables from highmem"
depends on HIGHMEM
- default y
+ default n
help
The VM uses one page of physical memory for each page table.
For systems with a lot of processes, this can use a lot of
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9f1d8566bbf9..ebb24a713210 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -921,7 +921,7 @@ endchoice
config ARM64_FORCE_52BIT
bool "Force 52-bit virtual addresses for userspace"
- depends on ARM64_VA_BITS_52 && EXPERT
+ depends on ARM64_VA_BITS_52
help
For systems with 52-bit userspace VAs enabled, the kernel will attempt
to maintain compatibility with older software by providing 48-bit VAs
@@ -1165,6 +1165,7 @@ config XEN
config FORCE_MAX_ZONEORDER
int
default "14" if ARM64_64K_PAGES
+ default "13" if (ARCH_THUNDER && !ARM64_64K_PAGES && !RHEL_DIFFERENCES)
default "12" if ARM64_16K_PAGES
default "11"
help
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 2b5f001ff4a6..dcdb3cd75be7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -386,7 +386,7 @@ mains_charger: dc-charger {
};
&cdn_dp {
- status = "okay";
+ status = "disabled";
};
&cpu_b0 {
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index cada0b816c8a..0fc840d6b0fb 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -40,7 +40,7 @@ int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
static bool param_acpi_off __initdata;
-static bool param_acpi_on __initdata;
+static bool param_acpi_on __initdata = true;
static bool param_acpi_force __initdata;
static int __init parse_acpi(char *arg)
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index a9e2c7295b35..6ff11f3a2d47 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -127,6 +127,7 @@ int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
unsigned char flags, unsigned short cert);
int ipl_report_add_certificate(struct ipl_report *report, void *key,
unsigned long addr, unsigned long len);
+bool ipl_get_secureboot(void);
/*
* DIAG 308 support
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index dba04fbc37a2..f809ab6441fd 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2215,3 +2215,8 @@ int ipl_report_free(struct ipl_report *report)
}
#endif
+
+bool ipl_get_secureboot(void)
+{
+ return !!ipl_secure_flag;
+}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 93538e63fa03..60e4b4275dc4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -49,6 +49,7 @@
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
+#include <linux/security.h>
#include <linux/hugetlb.h>
#include <asm/boot_data.h>
@@ -1114,6 +1115,9 @@ void __init setup_arch(char **cmdline_p)
log_component_list();
+ if (ipl_get_secureboot())
+ security_lock_kernel_down("Secure IPL mode", LOCKDOWN_INTEGRITY_MAX);
+
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1e720626069a..8419cc38a146 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -18,6 +18,7 @@
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
+#include <linux/security.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>
@@ -962,6 +963,13 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_init();
+ efi_set_secure_boot(boot_params.secure_boot);
+
+#ifdef CONFIG_LOCK_DOWN_IN_EFI_SECURE_BOOT
+ if (efi_enabled(EFI_SECURE_BOOT))
+ security_lock_kernel_down("EFI Secure Boot mode", LOCKDOWN_INTEGRITY_MAX);
+#endif
+
dmi_setup();
/*
@@ -1126,19 +1134,7 @@ void __init setup_arch(char **cmdline_p)
/* Allocate bigger log buffer */
setup_log_buf(1);
- if (efi_enabled(EFI_BOOT)) {
- switch (boot_params.secure_boot) {
- case efi_secureboot_mode_disabled:
- pr_info("Secure boot disabled\n");
- break;
- case efi_secureboot_mode_enabled:
- pr_info("Secure boot enabled\n");
- break;
- default:
- pr_info("Secure boot could not be determined\n");
- break;
- }
- }
+ efi_set_secure_boot(boot_params.secure_boot);
reserve_initrd();
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 277f00b288d1..adbce15c273d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -94,6 +94,14 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
if (hest_disable || !hest_tab)
return -EINVAL;
+#ifdef CONFIG_ARM64
+ /* Ignore broken firmware */
+ if (!strncmp(hest_tab->header.oem_id, "HPE ", 6) &&
+ !strncmp(hest_tab->header.oem_table_id, "ProLiant", 8) &&
+ MIDR_IMPLEMENTOR(read_cpuid_id()) == ARM_CPU_IMP_APM)
+ return -EINVAL;
+#endif
+
hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
for (i = 0; i < hest_tab->error_source_count; i++) {
len = hest_esrc_len(hest_hdr);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 9d2d3b9bb8b5..0cfdef2fc3ad 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1338,4 +1338,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_storage_d3 - Check if D3 should be used in the suspend path
+ * @dev: Device to check
+ *
+ * Return %true if the platform firmware wants @dev to be programmed
+ * into D3hot or D3cold (if supported) in the suspend path, or %false
+ * when there is no specific preference. On some platforms, if this
+ * hint is ignored, @dev may remain unresponsive after suspending the
+ * platform as a whole.
+ *
+ * Although the property has "storage" in its name, it is actually
+ * applied to the PCIe slot, so the same platform restrictions will
+ * likely apply to any non-storage device plugged into that slot.
+ */
+bool acpi_storage_d3(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ u8 val;
+
+ if (force_storage_d3())
+ return true;
+
+ if (!adev)
+ return false;
+ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+ &val))
+ return false;
+ return val == 1;
+}
+EXPORT_SYMBOL_GPL(acpi_storage_d3);
+
#endif /* CONFIG_PM */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e21611c9a170..7ac01b03ba67 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
static inline void suspend_nvs_restore(void) {}
#endif
+#ifdef CONFIG_X86
+bool force_storage_d3(void);
+#else
+static inline bool force_storage_d3(void)
+{
+ return false;
+}
+#endif
+
/*--------------------------------------------------------------------------
Device properties
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index c68e694fca26..146cba5ae5bc 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -130,6 +130,7 @@ struct acpi_irq_parse_one_ctx {
unsigned int index;
unsigned long *res_flags;
struct irq_fwspec *fwspec;
+ bool skip_producer_check;
};
/**
@@ -201,7 +202,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq;
- if (eirq->producer_consumer == ACPI_PRODUCER)
+ if (!ctx->skip_producer_check &&
+ eirq->producer_consumer == ACPI_PRODUCER)
return AE_OK;
if (ctx->index >= eirq->interrupt_count) {
ctx->index -= eirq->interrupt_count;
@@ -236,8 +238,19 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
static int acpi_irq_parse_one(acpi_handle handle, unsigned int index,
struct irq_fwspec *fwspec, unsigned long *flags)
{
- struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec };
+ struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec, false };
+ /*
+ * Firmware on arm64-based HPE m400 platform incorrectly marks
+ * its UART interrupt as ACPI_PRODUCER rather than ACPI_CONSUMER.
+ * Don't do the producer/consumer check for that device.
+ */
+ if (IS_ENABLED(CONFIG_ARM64)) {
+ struct acpi_device *adev = acpi_bus_get_acpi_device(handle);
+
+ if (adev && !strcmp(acpi_device_hid(adev), "APMC0D08"))
+ ctx.skip_producer_check = true;
+ }
acpi_walk_resources(handle, METHOD_NAME__CRS, acpi_irq_parse_one_cb, &ctx);
return ctx.rc;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 438df8da6d12..d5343c0075f8 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1641,6 +1641,15 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
return false;
+ /*
+ * Firmware on some arm64 X-Gene platforms will make the UART
+ * device appear as both a UART and a slave of that UART. Just
+ * bail out here for X-Gene UARTs.
+ */
+ if (IS_ENABLED(CONFIG_ARM64) &&
+ !strcmp(acpi_device_hid(device), "APMC0D08"))
+ return false;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list,
acpi_check_serial_bus_slave,
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 2d7ddb8a8cb6..1c507804fb10 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
{"", },
};
+/* Microsoft platform agnostic UUID */
+#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
+
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
@@ -39,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
+#define ACPI_LPS0_MS_ENTRY 7
+#define ACPI_LPS0_MS_EXIT 8
/* AMD */
#define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
@@ -49,7 +54,10 @@ static const struct acpi_device_id lps0_device_ids[] = {
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
-static char lps0_dsm_func_mask;
+static int lps0_dsm_func_mask;
+
+static guid_t lps0_dsm_guid_microsoft;
+static int lps0_dsm_func_mask_microsoft;
/* Device constraint entry structure */
struct lpi_device_info {
@@ -70,15 +78,7 @@ struct lpi_constraints {
int min_dstate;
};
-/* AMD */
-/* Device constraint entry structure */
-struct lpi_device_info_amd {
- int revision;
- int count;
- union acpi_object *package;
-};
-
-/* Constraint package structure */
+/* AMD Constraint package structure */
struct lpi_device_constraint_amd {
char *name;
int enabled;
@@ -96,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
int i, j, k;
out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
- return;
-
acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
out_obj ? "successful" : "failed");
+ if (!out_obj)
+ return;
+
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
@@ -317,14 +317,15 @@ static void lpi_check_constraints(void)
}
}
-static void acpi_sleep_run_lps0_dsm(unsigned int func)
+static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
{
union acpi_object *out_obj;
- if (!(lps0_dsm_func_mask & (1 << func)))
+ if (!(func_mask & (1 << func)))
return;
- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
+ out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
+ rev_id, func, NULL);
ACPI_FREE(out_obj);
acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
@@ -336,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
}
+static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
+{
+ union acpi_object *obj;
+ int ret = -EINVAL;
+
+ guid_parse(uuid, dsm_guid);
+ obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
+
+ /* Check if the _DSM is present and as expected. */
+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
+ obj->buffer.length > sizeof(u32)) {
+ acpi_handle_debug(handle,
+ "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
+ goto out;
+ }
+
+ ret = *(int *)obj->buffer.pointer;
+ acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
static int lps0_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
- union acpi_object *out_obj;
-
if (lps0_device_handle)
return 0;
@@ -348,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
return 0;
if (acpi_s2idle_vendor_amd()) {
- guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
+ /* AMD0004, AMDI0005:
+ * - Should use rev_id 0x0
+ * - function mask > 0x3: Should use AMD method, but has off by one bug
+ * - function mask = 0x3: Should use Microsoft method
+ * AMDI0006:
+ * - should use rev_id 0x0
+ * - function mask = 0x3: Should use Microsoft method
+ */
+ const char *hid = acpi_device_hid(adev);
rev_id = 0;
+ lps0_dsm_func_mask = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+ &lps0_dsm_guid_microsoft);
+ if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+ !strcmp(hid, "AMDI0005"))) {
+ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
+ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
+ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+ }
} else {
- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
rev_id = 1;
+ lps0_dsm_func_mask = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
- /* Check if the _DSM is present and as expected. */
- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
- acpi_handle_debug(adev->handle,
- "_DSM function 0 evaluation failed\n");
- return 0;
- }
-
- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
-
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
- lps0_dsm_func_mask);
+ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
+ return 0; //function evaluation failed
lps0_device_handle = adev->handle;
@@ -386,11 +417,15 @@ static int lps0_device_attach(struct acpi_device *adev,
mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
- * EC GPE to be enabled while suspended for certain wakeup devices to
- * work, so mark it as wakeup-capable.
+ * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
+ * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
+ * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
+ *
+ * Only enable on !AMD as enabling this universally causes problems for a number
+ * of AMD based systems.
*/
- acpi_ec_mark_gpe_for_wake();
+ if (!acpi_s2idle_vendor_amd())
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
@@ -408,12 +443,23 @@ int acpi_s2idle_prepare_late(void)
if (pm_debug_messages_on)
lpi_check_constraints();
- if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
+ if (lps0_dsm_func_mask_microsoft > 0) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ } else if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
} else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
return 0;
@@ -424,12 +470,23 @@ void acpi_s2idle_restore_early(void)
if (!lps0_device_handle || sleep_no_lps0)
return;
- if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+ if (lps0_dsm_func_mask_microsoft > 0) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ } else if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
+ lps0_dsm_func_mask, lps0_dsm_guid);
} else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index bdc1ba00aee9..f22f23933063 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
return ret;
}
+
+/*
+ * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+ *
+ * This is "typically" accomplished using the `StorageD3Enable`
+ * property in the _DSD that is checked via the `acpi_storage_d3` function
+ * but this property was introduced after many of these systems launched
+ * and most OEM systems don't have it in their BIOS.
+ *
+ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+ * a hardcoded allowlist for D3 support, which was used for these platforms.
+ *
+ * This allows quirking on Linux in a similar fashion.
+ */
+static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
+ {}
+};
+
+bool force_storage_d3(void)
+{
+ return x86_match_cpu(storage_d3_cpu_ids);
+}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fec2e9754aed..bea4e2973259 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -671,6 +671,24 @@ int ahci_stop_engine(struct ata_port *ap)
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
+#ifdef CONFIG_ARM64
+ /* Rev Ax of Cavium CN99XX needs a hack for port stop */
+ if (dev_is_pci(ap->host->dev) &&
+ to_pci_dev(ap->host->dev)->vendor == 0x14e4 &&
+ to_pci_dev(ap->host->dev)->device == 0x9027 &&
+ midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN),
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(0, MIDR_REVISION_MASK))) {
+ tmp = readl(hpriv->mmio + 0x8000);
+ udelay(100);
+ writel(tmp | (1 << 26), hpriv->mmio + 0x8000);
+ udelay(100);
+ writel(tmp & ~(1 << 26), hpriv->mmio + 0x8000);
+ dev_warn(ap->host->dev, "CN99XX SATA reset workaround applied\n");
+ }
+#endif
+
/* wait for engine to stop. This could be as long as 500 msec */
tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index bbf7029e224b..cf7faa970dd6 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -215,6 +215,21 @@ static int __init scan_for_dmi_ipmi(void)
{
const struct dmi_device *dev = NULL;
+#ifdef CONFIG_ARM64
+ /* RHEL-only
+ * If this is ARM-based HPE m400, return now, because that platform
+ * reports the host-side ipmi address as intel port-io space, which
+ * does not exist in the ARM architecture.
+ */
+ const char *dmistr = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ if (dmistr && (strcmp("ProLiant m400 Server", dmistr) == 0)) {
+ pr_debug("%s does not support host ipmi\n", dmistr);
+ return 0;
+ }
+ /* END RHEL-only */
+#endif
+
while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
dmi_decode_ipmi((const struct dmi_header *) dev->device_data);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8a0e97b33cae..32e4b183d102 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -34,6 +34,7 @@
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
+#include <linux/dmi.h>
#include <linux/delay.h>
#define IPMI_DRIVER_VERSION "39.2"
@@ -5159,8 +5160,21 @@ static int __init ipmi_init_msghandler_mod(void)
{
int rv;
- pr_info("version " IPMI_DRIVER_VERSION "\n");
+#ifdef CONFIG_ARM64
+ /* RHEL-only
+ * If this is ARM-based HPE m400, return now, because that platform
+ * reports the host-side ipmi address as intel port-io space, which
+ * does not exist in the ARM architecture.
+ */
+ const char *dmistr = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (dmistr && (strcmp("ProLiant m400 Server", dmistr) == 0)) {
+ pr_debug("%s does not support host ipmi\n", dmistr);
+ return -ENOSYS;
+ }
+ /* END RHEL-only */
+#endif
+ pr_info("version " IPMI_DRIVER_VERSION "\n");
mutex_lock(&ipmi_interfaces_mutex);
rv = ipmi_register_driver();
mutex_unlock(&ipmi_interfaces_mutex);
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 467e94259679..9b6f5b8e5397 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
+obj-$(CONFIG_EFI) += secureboot.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 847f33ffc4ae..363037f8eaf8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -31,6 +31,7 @@
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>
+#include <linux/bsearch.h>
#include <asm/early_ioremap.h>
@@ -841,40 +842,101 @@ int efi_mem_type(unsigned long phys_addr)
}
#endif
+struct efi_error_code {
+ efi_status_t status;
+ int errno;
+ const char *description;
+};
+
+static const struct efi_error_code efi_error_codes[] = {
+ { EFI_SUCCESS, 0, "Success"},
+#if 0
+ { EFI_LOAD_ERROR, -EPICK_AN_ERRNO, "Load Error"},
+#endif
+ { EFI_INVALID_PARAMETER, -EINVAL, "Invalid Parameter"},
+ { EFI_UNSUPPORTED, -ENOSYS, "Unsupported"},
+ { EFI_BAD_BUFFER_SIZE, -ENOSPC, "Bad Buffer Size"},
+ { EFI_BUFFER_TOO_SMALL, -ENOSPC, "Buffer Too Small"},
+ { EFI_NOT_READY, -EAGAIN, "Not Ready"},
+ { EFI_DEVICE_ERROR, -EIO, "Device Error"},
+ { EFI_WRITE_PROTECTED, -EROFS, "Write Protected"},
+ { EFI_OUT_OF_RESOURCES, -ENOMEM, "Out of Resources"},
+#if 0
+ { EFI_VOLUME_CORRUPTED, -EPICK_AN_ERRNO, "Volume Corrupt"},
+ { EFI_VOLUME_FULL, -EPICK_AN_ERRNO, "Volume Full"},
+ { EFI_NO_MEDIA, -EPICK_AN_ERRNO, "No Media"},
+ { EFI_MEDIA_CHANGED, -EPICK_AN_ERRNO, "Media changed"},
+#endif
+ { EFI_NOT_FOUND, -ENOENT, "Not Found"},
+#if 0
+ { EFI_ACCESS_DENIED, -EPICK_AN_ERRNO, "Access Denied"},
+ { EFI_NO_RESPONSE, -EPICK_AN_ERRNO, "No Response"},
+ { EFI_NO_MAPPING, -EPICK_AN_ERRNO, "No mapping"},
+ { EFI_TIMEOUT, -EPICK_AN_ERRNO, "Time out"},
+ { EFI_NOT_STARTED, -EPICK_AN_ERRNO, "Not started"},
+ { EFI_ALREADY_STARTED, -EPICK_AN_ERRNO, "Already started"},
+#endif
+ { EFI_ABORTED, -EINTR, "Aborted"},
+#if 0
+ { EFI_ICMP_ERROR, -EPICK_AN_ERRNO, "ICMP Error"},
+ { EFI_TFTP_ERROR, -EPICK_AN_ERRNO, "TFTP Error"},
+ { EFI_PROTOCOL_ERROR, -EPICK_AN_ERRNO, "Protocol Error"},
+ { EFI_INCOMPATIBLE_VERSION, -EPICK_AN_ERRNO, "Incompatible Version"},
+#endif
+ { EFI_SECURITY_VIOLATION, -EACCES, "Security Policy Violation"},
+#if 0
+ { EFI_CRC_ERROR, -EPICK_AN_ERRNO, "CRC Error"},
+ { EFI_END_OF_MEDIA, -EPICK_AN_ERRNO, "End of Media"},
+ { EFI_END_OF_FILE, -EPICK_AN_ERRNO, "End of File"},
+ { EFI_INVALID_LANGUAGE, -EPICK_AN_ERRNO, "Invalid Languages"},
+ { EFI_COMPROMISED_DATA, -EPICK_AN_ERRNO, "Compromised Data"},
+
+ // warnings
+ { EFI_WARN_UNKOWN_GLYPH, -EPICK_AN_ERRNO, "Warning Unknown Glyph"},
+ { EFI_WARN_DELETE_FAILURE, -EPICK_AN_ERRNO, "Warning Delete Failure"},
+ { EFI_WARN_WRITE_FAILURE, -EPICK_AN_ERRNO, "Warning Write Failure"},
+ { EFI_WARN_BUFFER_TOO_SMALL, -EPICK_AN_ERRNO, "Warning Buffer Too Small"},
+#endif
+};
+
+static int
+efi_status_cmp_bsearch(const void *key, const void *item)
+{
+ u64 status = (u64)(uintptr_t)key;
+ struct efi_error_code *code = (struct efi_error_code *)item;
+
+ if (status < code->status)
+ return -1;
+ if (status > code->status)
+ return 1;
+ return 0;
+}
+
int efi_status_to_err(efi_status_t status)
{
- int err;
-
- switch (status) {
- case EFI_SUCCESS:
- err = 0;
- break;
- case EFI_INVALID_PARAMETER:
- err = -EINVAL;
- break;
- case EFI_OUT_OF_RESOURCES:
- err = -ENOSPC;
- break;
- case EFI_DEVICE_ERROR:
- err = -EIO;
- break;
- case EFI_WRITE_PROTECTED:
- err = -EROFS;
- break;
- case EFI_SECURITY_VIOLATION:
- err = -EACCES;
- break;
- case EFI_NOT_FOUND:
- err = -ENOENT;
- break;
- case EFI_ABORTED:
- err = -EINTR;
- break;
- default:
- err = -EINVAL;
- }
+ struct efi_error_code *found;
+ size_t num = sizeof(efi_error_codes) / sizeof(struct efi_error_code);
- return err;
+ found = bsearch((void *)(uintptr_t)status, efi_error_codes,
+ num, sizeof(struct efi_error_code),
+ efi_status_cmp_bsearch);
+ if (!found)
+ return -EINVAL;
+ return found->errno;
+}
+
+const char *
+efi_status_to_str(efi_status_t status)
+{
+ struct efi_error_code *found;
+ size_t num = sizeof(efi_error_codes) / sizeof(struct efi_error_code);
+
+ found = bsearch((void *)(uintptr_t)status, efi_error_codes,
+ num, sizeof(struct efi_error_code),
+ efi_status_cmp_bsearch);
+ if (!found)
+ return "Unknown error code";
+ return found->description;
}
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
diff --git a/drivers/firmware/efi/secureboot.c b/drivers/firmware/efi/secureboot.c
new file mode 100644
index 000000000000..de0a3714a5d4
--- /dev/null
+++ b/drivers/firmware/efi/secureboot.c
@@ -0,0 +1,38 @@
+/* Core kernel secure boot support.
+ *
+ * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+
+/*
+ * Decide what to do when UEFI secure boot mode is enabled.
+ */
+void __init efi_set_secure_boot(enum efi_secureboot_mode mode)
+{
+ if (efi_enabled(EFI_BOOT)) {
+ switch (mode) {
+ case efi_secureboot_mode_disabled:
+ pr_info("Secure boot disabled\n");
+ break;
+ case efi_secureboot_mode_enabled:
+ set_bit(EFI_SECURE_BOOT, &efi.flags);
+ pr_info("Secure boot enabled\n");
+ break;
+ default:
+ pr_warn("Secure boot could not be determined (mode %u)\n",
+ mode);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ddee6d4f07cf..282a1c0f654a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1318,14 +1318,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
- if (intel_dp->use_max_params) {
+ if (intel_dp->use_max_params ||
+ intel_dp->dpcd[DP_DPCD_REV] <= DP_DPCD_REV_11) {
/*
* Use the maximum clock and number of lanes the eDP panel
* advertizes being capable of in case the initial fast
- * optimal params failed us. The panels are generally
- * designed to support only a single clock and lane
- * configuration, and typically on older panels these
- * values correspond to the native resolution of the panel.
+ * optimal params failed us or the panel is DP 1.1 or earlier.
+ * The panels are generally designed to support only a single
+ * clock and lane configuration, and typically on older panels
+ * these values correspond to the native resolution of the
+ * panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 212bd87c0c4a..b8b066938e48 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -114,6 +114,17 @@ static int rockchip_drm_bind(struct device *dev)
struct rockchip_drm_private *private;
int ret;
+ /* Remove existing drivers that may own the framebuffer memory. */
+ ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
+ "rockchip-drm-fb",
+ false);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "Failed to remove existing framebuffers - %d.\n",
+ ret);
+ return ret;
+ }
+
drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
if (IS_ERR(drm_dev))
return PTR_ERR(drm_dev);
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 311eee599ce9..2460c6bd46f8 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -322,19 +322,12 @@ static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
struct rmi_device *rmi_dev = hdata->xport.rmi_dev;
- unsigned long flags;
if (!(test_bit(RMI_STARTED, &hdata->flags)))
return 0;
- local_irq_save(flags);
-
rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2);
- generic_handle_irq(hdata->rmi_irq);
-
- local_irq_restore(flags);
-
return 1;
}
@@ -591,56 +584,6 @@ static const struct rmi_transport_ops hid_rmi_ops = {
.reset = rmi_hid_reset,
};
-static void rmi_irq_teardown(void *data)
-{
- struct rmi_data *hdata = data;
- struct irq_domain *domain = hdata->domain;
-
- if (!domain)
- return;
-
- irq_dispose_mapping(irq_find_mapping(domain, 0));
-
- irq_domain_remove(domain);
- hdata->domain = NULL;
- hdata->rmi_irq = 0;
-}
-
-static int rmi_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw_irq_num)
-{
- irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops rmi_irq_ops = {
- .map = rmi_irq_map,
-};
-
-static int rmi_setup_irq_domain(struct hid_device *hdev)
-{
- struct rmi_data *hdata = hid_get_drvdata(hdev);
- int ret;
-
- hdata->domain = irq_domain_create_linear(hdev->dev.fwnode, 1,
- &rmi_irq_ops, hdata);
- if (!hdata->domain)
- return -ENOMEM;
-
- ret = devm_add_action_or_reset(&hdev->dev, &rmi_irq_teardown, hdata);
- if (ret)
- return ret;
-
- hdata->rmi_irq = irq_create_mapping(hdata->domain, 0);
- if (hdata->rmi_irq <= 0) {
- hid_err(hdev, "Can't allocate an IRQ\n");
- return hdata->rmi_irq < 0 ? hdata->rmi_irq : -ENXIO;
- }
-
- return 0;
-}
-
static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct rmi_data *data = NULL;
@@ -713,18 +656,11 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
mutex_init(&data->page_mutex);
- ret = rmi_setup_irq_domain(hdev);
- if (ret) {
- hid_err(hdev, "failed to allocate IRQ domain\n");
- return ret;
- }
-
if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)
rmi_hid_pdata.gpio_data.disable = true;
data->xport.dev = hdev->dev.parent;
data->xport.pdata = rmi_hid_pdata;
- data->xport.pdata.irq = data->rmi_irq;
data->xport.proto_name = "hid";
data->xport.ops = &hid_rmi_ops;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index db881993c211..92a1ebb9cea3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
@@ -2076,6 +2077,16 @@ static const struct amba_id etm4_ids[] = {
{},
};
+static const struct dmi_system_id broken_coresight[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HPE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Apollo 70"),
+ },
+ },
+ { } /* terminating entry */
+};
+
MODULE_DEVICE_TABLE(amba, etm4_ids);
static struct amba_driver etm4x_amba_driver = {
@@ -2109,6 +2120,11 @@ static int __init etm4x_init(void)
{
int ret;
+ if (dmi_check_system(broken_coresight)) {
+ pr_info("ETM4 disabled due to firmware bug\n");
+ return 0;
+ }
+
ret = etm4_pm_setup();
/* etm4_pm_setup() does its own cleanup - exit on error */
@@ -2135,6 +2151,9 @@ static int __init etm4x_init(void)
static void __exit etm4x_exit(void)
{
+ if (dmi_check_system(broken_coresight))
+ return;
+
amba_driver_unregister(&etm4x_amba_driver);
platform_driver_unregister(&etm4_platform_driver);
etm4_pm_clear();
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 258d5fe3d395..f7298e3dc8f3 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -182,34 +182,47 @@ void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
attn_data.data = fifo_data;
kfifo_put(&drvdata->attn_fifo, attn_data);
+
+ schedule_work(&drvdata->attn_work);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);
-static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+static void attn_callback(struct work_struct *work)
{
- struct rmi_device *rmi_dev = dev_id;
- struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_driver_data *drvdata = container_of(work,
+ struct rmi_driver_data,
+ attn_work);
struct rmi4_attn_data attn_data = {0};
int ret, count;
count = kfifo_get(&drvdata->attn_fifo, &attn_data);
- if (count) {
- *(drvdata->irq_status) = attn_data.irq_status;
- drvdata->attn_data = attn_data;
- }
+ if (!count)
+ return;
- ret = rmi_process_interrupt_requests(rmi_dev);
+ *(drvdata->irq_status) = attn_data.irq_status;
+ drvdata->attn_data = attn_data;
+
+ ret = rmi_process_interrupt_requests(drvdata->rmi_dev);
if (ret)
- rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+ rmi_dbg(RMI_DEBUG_CORE, &drvdata->rmi_dev->dev,
"Failed to process interrupt request: %d\n", ret);
- if (count) {
- kfree(attn_data.data);
- drvdata->attn_data.data = NULL;
- }
+ kfree(attn_data.data);
+ drvdata->attn_data.data = NULL;
if (!kfifo_is_empty(&drvdata->attn_fifo))
- return rmi_irq_fn(irq, dev_id);
+ schedule_work(&drvdata->attn_work);
+}
+
+static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+{
+ struct rmi_device *rmi_dev = dev_id;
+ int ret;
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
return IRQ_HANDLED;
}
@@ -217,7 +230,6 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
static int rmi_irq_init(struct rmi_device *rmi_dev)
{
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
- struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
int irq_flags = irq_get_trigger_type(pdata->irq);
int ret;
@@ -235,8 +247,6 @@ static int rmi_irq_init(struct rmi_device *rmi_dev)
return ret;
}
- data->enabled = true;
-
return 0;
}
@@ -886,23 +896,27 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
if (data->enabled)
goto out;
- enable_irq(irq);
- data->enabled = true;
- if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
- retval = disable_irq_wake(irq);
- if (retval)
- dev_warn(&rmi_dev->dev,
- "Failed to disable irq for wake: %d\n",
- retval);
- }
+ if (irq) {
+ enable_irq(irq);
+ data->enabled = true;
+ if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = disable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to disable irq for wake: %d\n",
+ retval);
+ }
- /*
- * Call rmi_process_interrupt_requests() after enabling irq,
- * otherwise we may lose interrupt on edge-triggered systems.
- */
- irq_flags = irq_get_trigger_type(pdata->irq);
- if (irq_flags & IRQ_TYPE_EDGE_BOTH)
- rmi_process_interrupt_requests(rmi_dev);
+ /*
+ * Call rmi_process_interrupt_requests() after enabling irq,
+ * otherwise we may lose interrupt on edge-triggered systems.
+ */
+ irq_flags = irq_get_trigger_type(pdata->irq);
+ if (irq_flags & IRQ_TYPE_EDGE_BOTH)
+ rmi_process_interrupt_requests(rmi_dev);
+ } else {
+ data->enabled = true;
+ }
out:
mutex_unlock(&data->enabled_mutex);
@@ -922,20 +936,22 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
goto out;
data->enabled = false;
- disable_irq(irq);
- if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
- retval = enable_irq_wake(irq);
- if (retval)
- dev_warn(&rmi_dev->dev,
- "Failed to enable irq for wake: %d\n",
- retval);
- }
-
- /* make sure the fifo is clean */
- while (!kfifo_is_empty(&data->attn_fifo)) {
- count = kfifo_get(&data->attn_fifo, &attn_data);
- if (count)
- kfree(attn_data.data);
+ if (irq) {
+ disable_irq(irq);
+ if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = enable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to enable irq for wake: %d\n",
+ retval);
+ }
+ } else {
+ /* make sure the fifo is clean */
+ while (!kfifo_is_empty(&data->attn_fifo)) {
+ count = kfifo_get(&data->attn_fifo, &attn_data);
+ if (count)
+ kfree(attn_data.data);
+ }
}
out:
@@ -981,6 +997,8 @@ static int rmi_driver_remove(struct device *dev)
irq_domain_remove(data->irqdomain);
data->irqdomain = NULL;
+ cancel_work_sync(&data->attn_work);
+
rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
@@ -1219,9 +1237,15 @@ static int rmi_driver_probe(struct device *dev)
}
}
- retval = rmi_irq_init(rmi_dev);
- if (retval < 0)
- goto err_destroy_functions;
+ if (pdata->irq) {
+ retval = rmi_irq_init(rmi_dev);
+ if (retval < 0)
+ goto err_destroy_functions;
+ }
+
+ data->enabled = true;
+
+ INIT_WORK(&data->attn_work, attn_callback);
if (data->f01_container->dev.driver) {
/* Driver already bound, so enable ATTN now. */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index db966a7841fe..8ae7a008b06f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "iommu: " fmt
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
@@ -3039,6 +3040,27 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+#ifdef CONFIG_ARM64
+static int __init iommu_quirks(void)
+{
+ const char *vendor, *name;
+
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ name = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ if (vendor &&
+ (strncmp(vendor, "GIGABYTE", 8) == 0 && name &&
+ (strncmp(name, "R120", 4) == 0 ||
+ strncmp(name, "R270", 4) == 0))) {
+ pr_warn("Gigabyte %s detected, force iommu passthrough mode", name);
+ iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
+ }
+
+ return 0;
+}
+arch_initcall(iommu_quirks);
+#endif
+
/*
* Changes the default domain of an iommu group that has *only* one device
*
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 75dbe77b0b4b..4bd6dd722f44 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -12,6 +12,7 @@
#include <uapi/linux/wireguard.h>
+#include <linux/fips.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/genetlink.h>
@@ -21,6 +22,11 @@ static int __init mod_init(void)
{
int ret;
+#ifdef CONFIG_RHEL_DIFFERENCES
+ if (fips_enabled)
+ return -EOPNOTSUPP;
+#endif
+
ret = wg_allowedips_slab_init();
if (ret < 0)
goto err_allowedips;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d963f25fc7ae..66455e2261d0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2880,32 +2880,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
- u8 val;
-
- /*
- * Look for _DSD property specifying that the storage device on the port
- * must use D3 to support deep platform power savings during
- * suspend-to-idle.
- */
-
- if (!adev)
- return false;
- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
- &val))
- return false;
- return val == 1;
-}
-#else
-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
- return false;
-}
-#endif /* CONFIG_ACPI */
-
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
@@ -2955,7 +2929,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
/*
* Some systems use a bios work around to ask for D3 on
* platforms that support kernel managed suspend.
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7b1c81b899cd..5904ece27f64 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4231,6 +4231,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
quirk_bridge_cavm_thrx2_pcie_root);
+/*
+ * PCI BAR 5 is not setup correctly for the on-board AHCI controller
+ * on Broadcom's Vulcan processor. Added a quirk to fix BAR 5 by
+ * using BAR 4's resources which are populated correctly and NOT
+ * actually used by the AHCI controller.
+ */
+static void quirk_fix_vulcan_ahci_bars(struct pci_dev *dev)
+{
+ struct resource *r = &dev->resource[4];
+
+ if (!(r->flags & IORESOURCE_MEM) || (r->start == 0))
+ return;
+
+ /* Set BAR5 resource to BAR4 */
+ dev->resource[5] = *r;
+
+ /* Update BAR5 in pci config space */
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, r->start);
+
+ /* Clear BAR4's resource */
+ memset(r, 0, sizeof(*r));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9027, quirk_fix_vulcan_ahci_bars);
+
/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index ca95c2a52e26..e38e8288468f 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -46,26 +46,70 @@
#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMC_RESULT_FAILED 0xFF
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
/* List of supported CPU ids */
#define AMD_CPU_ID_RV 0x15D0
#define AMD_CPU_ID_RN 0x1630
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC 0x14B5
#define PMC_MSG_DELAY_MIN_US 100
#define RESPONSE_REGISTER_LOOP_MAX 200
+#define SOC_SUBSYSTEM_IP_MAX 12
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
enum amd_pmc_def {
MSG_TEST = 0x01,
MSG_OS_HINT_PCO,
MSG_OS_HINT_RN,
};
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+ {"DISPLAY", BIT(0)},
+ {"CPU", BIT(1)},
+ {"GFX", BIT(2)},
+ {"VDD", BIT(3)},
+ {"ACP", BIT(4)},
+ {"VCN", BIT(5)},
+ {"ISP", BIT(6)},
+ {"NBIO", BIT(7)},
+ {"DF", BIT(8)},
+ {"USB0", BIT(9)},
+ {"USB1", BIT(10)},
+ {"LAPIC", BIT(11)},
+ {}
+};
+
struct amd_pmc_dev {
void __iomem *regbase;
- void __iomem *smu_base;
+ void __iomem *smu_virt_addr;
+ void __iomem *fch_virt_addr;
u32 base_addr;
u32 cpu_id;
+ u32 active_ips;
struct device *dev;
struct mutex lock; /* generic mutex lock */
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -74,6 +118,7 @@ struct amd_pmc_dev {
};
static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -85,13 +130,76 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_cyclecount;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
+ struct amd_pmc_dev *dev = s->private;
+ struct smu_metrics table;
+ int idx;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -EINVAL;
+
+ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+ seq_puts(s, "\n=== SMU Statistics ===\n");
+ seq_printf(s, "Table Version: %d\n", table.table_version);
+ seq_printf(s, "Hint Count: %d\n", table.hint_count);
+ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+ seq_puts(s, "\n=== Active time (in us) ===\n");
+ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+ table.timecondition_notmet_lastcapture[idx]);
+ }
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ u64 entry_time, exit_time, residency;
+
+ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+ /* It's in 48MHz. We need to convert it */
+ residency = (exit_time - entry_time) / 48;
+
+ seq_puts(s, "=== S0ix statistics ===\n");
+ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+ seq_printf(s, "Residency Time: %lld\n", residency);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
debugfs_remove_recursive(dev->dbgfs_dir);
@@ -102,6 +210,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
&smu_fw_info_fops);
+ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+ &s0ix_stats_fops);
}
#else
static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -113,6 +223,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
}
#endif /* CONFIG_DEBUG_FS */
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+ u32 phys_addr_low, phys_addr_hi;
+ u64 smu_phys_addr;
+
+ if (dev->cpu_id == AMD_CPU_ID_PCO)
+ return -EINVAL;
+
+ /* Get Active devices list from SMU */
+ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+ /* Get dram address */
+ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+ if (!dev->smu_virt_addr)
+ return -ENOMEM;
+
+ /* Start the logging */
+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+ return 0;
+}
+
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
u32 value;
@@ -127,10 +263,9 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
{
int rc;
- u8 msg;
u32 val;
mutex_lock(&dev->lock);
@@ -150,8 +285,8 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
/* Write message ID to message ID register */
- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
+
/* Wait until we get a valid response */
rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
val, val != 0, PMC_MSG_DELAY_MIN_US,
@@ -163,6 +298,12 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
switch (val) {
case AMD_PMC_RESULT_OK:
+
+ if (ret) {
+ /* PMFW may take longer time to return back the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+ }
break;
case AMD_PMC_RESULT_CMD_REJECT_BUSY:
dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
@@ -182,32 +323,54 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
out_unlock:
mutex_unlock(&dev->lock);
+ amd_pmc_dump_registers(dev);
return rc;
}
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+ switch (dev->cpu_id) {
+ case AMD_CPU_ID_PCO:
+ return MSG_OS_HINT_PCO;
+ case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_YC:
+ return MSG_OS_HINT_RN;
+ }
+ return -EINVAL;
+}
+
static int __maybe_unused amd_pmc_suspend(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
+ u8 msg;
+
+ /* Reset and Start SMU logging - to monitor the s0i3 stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
- rc = amd_pmc_send_cmd(pdev, 1);
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "suspend failed\n");
- amd_pmc_dump_registers(pdev);
- return 0;
+ return rc;
}
static int __maybe_unused amd_pmc_resume(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
+ u8 msg;
+
+ /* Let SMU know that we are looking for stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
- rc = amd_pmc_send_cmd(pdev, 0);
+ msg = amd_pmc_get_os_hint(pdev);
+ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "resume failed\n");
- amd_pmc_dump_registers(pdev);
return 0;
}
@@ -216,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
};
static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -227,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = &pmc;
struct pci_dev *rdev;
- u32 base_addr_lo;
- u32 base_addr_hi;
- u64 base_addr;
+ u32 base_addr_lo, base_addr_hi;
+ u64 base_addr, fch_phys_addr;
int err;
u32 val;
@@ -279,7 +442,19 @@ static int amd_pmc_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
- amd_pmc_dump_registers(dev);
+ mutex_init(&dev->lock);
+
+ /* Use FCH registers to get the S0ix stats */
+ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+ if (!dev->fch_virt_addr)
+ return -ENOMEM;
+
+ /* Use SMU to get the s0i3 debug stats */
+ err = amd_pmc_setup_smu_logging(dev);
+ if (err)
+ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
- mutex_init(&dev->lock);
platform_set_drvdata(pdev, dev);
@@ -298,6 +474,8 @@ static int amd_pmc_remove(struct platform_device *pdev)
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0005", 0},
+ {"AMDI0006", 0},
+ {"AMDI0007", 0},
{"AMD0004", 0},
{ }
};
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 5db16509b6e1..5b8b5e3edd39 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -8808,6 +8808,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd22c)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004c)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
@@ -9064,6 +9076,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADVANTECH, 0x8312)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_DELL, 0x1fe0)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4e123336e410..22d98f536d90 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5635,6 +5635,13 @@ static void hub_event(struct work_struct *work)
(u16) hub->change_bits[0],
(u16) hub->event_bits[0]);
+ /* Don't disconnect USB-SATA on TrimSlice */
+ if (strcmp(dev_name(hdev->bus->controller), "tegra-ehci.0") == 0) {
+ if ((hdev->state == USB_STATE_CONFIGURED) && (hub->change_bits[0] == 0) &&
+ (hub->event_bits[0] == 0x2))
+ hub->event_bits[0] = 0;
+ }
+
/* Lock the device, then check to see if we were
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c60745f657e9..dd0dafd21e33 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
#else
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return 0;
}
+static inline bool acpi_storage_d3(struct device *dev)
+{
+ return false;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6b5d36babfcc..fd4a5d66a9d0 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -43,6 +43,8 @@
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_IS_ERROR(x) ((x) & (1UL << (BITS_PER_LONG-1)))
+
typedef unsigned long efi_status_t;
typedef u8 efi_bool_t;
typedef u16 efi_char16_t; /* UNICODE character */
@@ -782,6 +784,14 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
+#define EFI_SECURE_BOOT 13 /* Are we in Secure Boot mode? */
+
+enum efi_secureboot_mode {
+ efi_secureboot_mode_unset,
+ efi_secureboot_mode_unknown,
+ efi_secureboot_mode_disabled,
+ efi_secureboot_mode_enabled,
+};
#ifdef CONFIG_EFI
/*
@@ -793,6 +803,8 @@ static inline bool efi_enabled(int feature)
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+extern void __init efi_set_secure_boot(enum efi_secureboot_mode mode);
+
bool __pure __efi_soft_reserve_enabled(void);
static inline bool __pure efi_soft_reserve_enabled(void)
@@ -813,6 +825,8 @@ static inline bool efi_enabled(int feature)
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
+static inline void efi_set_secure_boot(enum efi_secureboot_mode mode) {}
+
static inline bool efi_soft_reserve_enabled(void)
{
return false;
@@ -825,6 +839,7 @@ static inline bool efi_rt_services_supported(unsigned int mask)
#endif
extern int efi_status_to_err(efi_status_t status);
+extern const char *efi_status_to_str(efi_status_t status);
/*
* Variable Attributes
@@ -1077,13 +1092,6 @@ static inline bool efi_runtime_disabled(void) { return true; }
extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
extern unsigned long efi_call_virt_save_flags(void);
-enum efi_secureboot_mode {
- efi_secureboot_mode_unset,
- efi_secureboot_mode_unknown,
- efi_secureboot_mode_disabled,
- efi_secureboot_mode_enabled,
-};
-
static inline
enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
{
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 04c01794de83..26f8df026fa9 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -395,6 +395,8 @@ LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
#endif /* CONFIG_BPF_SYSCALL */
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
+LSM_HOOK(int, 0, lock_kernel_down, const char *where, enum lockdown_reason level)
+
#ifdef CONFIG_PERF_EVENTS
LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 5c4c5c0602cb..753b53038690 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1545,6 +1545,12 @@
*
* @what: kernel feature being accessed
*
+ * @lock_kernel_down:
+ * Put the kernel into lockdown mode.
+ *
+ * @where: Where the lockdown request originates from (e.g. a command line option)
+ * @level: The lockdown level (can only increase)
+ *
* Security hooks for perf events
*
* @perf_event_open:
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index ab7eea01ab42..fff7c5f737fc 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -364,6 +364,7 @@ struct rmi_driver_data {
struct rmi4_attn_data attn_data;
DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16);
+ struct work_struct attn_work;
};
int rmi_register_transport_device(struct rmi_transport_dev *xport);
diff --git a/include/linux/security.h b/include/linux/security.h
index 0acd1b68bf30..a02717d46d6d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -472,6 +472,7 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
+int security_lock_kernel_down(const char *where, enum lockdown_reason level);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -1348,6 +1349,10 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int security_lock_kernel_down(const char *where, enum lockdown_reason level)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
diff --git a/init/Kconfig b/init/Kconfig
index a61c92066c2e..94107b1d0e3e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1622,7 +1622,7 @@ config AIO
this option saves about 7k.
config IO_URING
- bool "Enable IO uring support" if EXPERT
+ bool "Enable IO uring support"
select IO_WQ
default y
help
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 684a6061a13a..220579c0e963 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -7,6 +7,7 @@
#include <linux/crash_core.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
+#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/sections.h>
@@ -41,6 +42,15 @@ static int __init parse_crashkernel_mem(char *cmdline,
unsigned long long *crash_base)
{
char *cur = cmdline, *tmp;
+ unsigned long long total_mem = system_ram;
+
+ /*
+ * Firmware sometimes reserves some memory regions for its own use,
+ * so we get less than the actual system memory size.
+ * Work around this by rounding the total size up to 128M, which is
+ * enough for most test cases.
+ */
+ total_mem = roundup(total_mem, SZ_128M);
/* for each entry of the comma-separated list */
do {
@@ -85,13 +95,13 @@ static int __init parse_crashkernel_mem(char *cmdline,
return -EINVAL;
}
cur = tmp;
- if (size >= system_ram) {
+ if (size >= total_mem) {
pr_warn("crashkernel: invalid size\n");
return -EINVAL;
}
/* match ? */
- if (system_ram >= start && system_ram < end) {
+ if (total_mem >= start && total_mem < end) {
*crash_size = size;
break;
}
@@ -250,6 +260,20 @@ static int __init __parse_crashkernel(char *cmdline,
if (suffix)
return parse_crashkernel_suffix(ck_cmdline, crash_size,
suffix);
+
+ if (strncmp(ck_cmdline, "auto", 4) == 0) {
+#ifdef CONFIG_X86_64
+ ck_cmdline = "1G-64G:160M,64G-1T:256M,1T-:512M";
+#elif defined(CONFIG_S390)
+ ck_cmdline = "4G-64G:160M,64G-1T:256M,1T-:512M";
+#elif defined(CONFIG_ARM64)
+ ck_cmdline = "2G-:512M";
+#elif defined(CONFIG_PPC64)
+ ck_cmdline = "2G-4G:384M,4G-16G:512M,16G-64G:1G,64G-128G:2G,128G-:4G";
+#endif
+ pr_info("Using crashkernel=auto, the size chosen is a best effort estimation.\n");
+ }
+
/*
* if the commandline contains a ':', then that's the extended
* syntax -- if not, it must be the classic syntax
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index 8723ae70ea1f..fb2d773498c2 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -38,8 +38,15 @@ int mod_verify_sig(const void *mod, struct load_info *info)
modlen -= sig_len + sizeof(ms);
info->len = modlen;
- return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
+ ret = verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_MODULE_SIGNATURE,
NULL, NULL);
+ if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
+ ret = verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
+ VERIFY_USE_PLATFORM_KEYRING,
+ VERIFYING_MODULE_SIGNATURE,
+ NULL, NULL);
+ }
+ return ret;
}
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index f290f78c3f30..d3e7ae04f5be 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -46,7 +46,8 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
return NULL;
if (*status != EFI_BUFFER_TOO_SMALL) {
- pr_err("Couldn't get size: 0x%lx\n", *status);
+ pr_err("Couldn't get size: %s (0x%lx)\n",
+ efi_status_to_str(*status), *status);
return NULL;
}
@@ -57,7 +58,8 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
*status = efi.get_variable(name, guid, NULL, &lsize, db);
if (*status != EFI_SUCCESS) {
kfree(db);
- pr_err("Error reading db var: 0x%lx\n", *status);
+ pr_err("Error reading db var: %s (0x%lx)\n",
+ efi_status_to_str(*status), *status);
return NULL;
}
diff --git a/security/lockdown/Kconfig b/security/lockdown/Kconfig
index e84ddf484010..d0501353a4b9 100644
--- a/security/lockdown/Kconfig
+++ b/security/lockdown/Kconfig
@@ -16,6 +16,19 @@ config SECURITY_LOCKDOWN_LSM_EARLY
subsystem is fully initialised. If enabled, lockdown will
unconditionally be called before any other LSMs.
+config LOCK_DOWN_IN_EFI_SECURE_BOOT
+ bool "Lock down the kernel in EFI Secure Boot mode"
+ default n
+ depends on EFI && SECURITY_LOCKDOWN_LSM_EARLY
+ help
+ UEFI Secure Boot provides a mechanism for ensuring that the firmware
+ will only load signed bootloaders and kernels. Secure Boot mode may
+ be determined from EFI variables provided by the system firmware if
+ not indicated by the boot parameters.
+
+ Enabling this option results in kernel lockdown being triggered
+ when EFI Secure Boot is enabled.
+
choice
prompt "Kernel default lockdown mode"
default LOCK_DOWN_KERNEL_FORCE_NONE
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
index 87cbdc64d272..18555cf18da7 100644
--- a/security/lockdown/lockdown.c
+++ b/security/lockdown/lockdown.c
@@ -73,6 +73,7 @@ static int lockdown_is_locked_down(enum lockdown_reason what)
static struct security_hook_list lockdown_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(locked_down, lockdown_is_locked_down),
+ LSM_HOOK_INIT(lock_kernel_down, lock_kernel_down),
};
static int __init lockdown_lsm_init(void)
diff --git a/security/security.c b/security/security.c
index 0d626c0dafcc..61696b1f5d3f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2599,6 +2599,12 @@ int security_locked_down(enum lockdown_reason what)
}
EXPORT_SYMBOL(security_locked_down);
+int security_lock_kernel_down(const char *where, enum lockdown_reason level)
+{
+ return call_int_hook(lock_kernel_down, 0, where, level);
+}
+EXPORT_SYMBOL(security_lock_kernel_down);
+
#ifdef CONFIG_PERF_EVENTS
int security_perf_event_open(struct perf_event_attr *attr, int type)
{
diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
index 8620d4f38493..335c212c1961 100644
--- a/sound/soc/intel/boards/sof_pcm512x.c
+++ b/sound/soc/intel/boards/sof_pcm512x.c
@@ -26,11 +26,16 @@
#define SOF_PCM512X_SSP_CODEC(quirk) ((quirk) & GENMASK(3, 0))
#define SOF_PCM512X_SSP_CODEC_MASK (GENMASK(3, 0))
+#define SOF_PCM512X_ENABLE_SSP_CAPTURE BIT(4)
+#define SOF_PCM512X_ENABLE_DMIC BIT(5)
#define IDISP_CODEC_MASK 0x4
/* Default: SSP5 */
-static unsigned long sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(5);
+static unsigned long sof_pcm512x_quirk =
+ SOF_PCM512X_SSP_CODEC(5) |
+ SOF_PCM512X_ENABLE_SSP_CAPTURE |
+ SOF_PCM512X_ENABLE_DMIC;
static bool is_legacy_cpu;
@@ -245,8 +250,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
links[id].dpcm_playback = 1;
/*
* capture only supported with specific versions of the Hifiberry DAC+
- * links[id].dpcm_capture = 1;
*/
+ if (sof_pcm512x_quirk & SOF_PCM512X_ENABLE_SSP_CAPTURE)
+ links[id].dpcm_capture = 1;
links[id].no_pcm = 1;
links[id].cpus = &cpus[id];
links[id].num_cpus = 1;
@@ -381,6 +387,9 @@ static int sof_audio_probe(struct platform_device *pdev)
ssp_codec = sof_pcm512x_quirk & SOF_PCM512X_SSP_CODEC_MASK;
+ if (!(sof_pcm512x_quirk & SOF_PCM512X_ENABLE_DMIC))
+ dmic_be_num = 0;
+
/* compute number of dai links */
sof_audio_card_pcm512x.num_links = 1 + dmic_be_num + hdmi_num;
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 78262c659983..f4b25ae7fd5a 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -155,6 +155,20 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
SOF_RT5682_SSP_AMP(2) |
SOF_RT5682_NUM_HDMIDEV(4)),
},
+ {
+ .callback = sof_rt5682_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-ADL_MAX98373_ALC5682I_I2S"),
+ },
+ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_MAX98373_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(2) |
+ SOF_RT5682_NUM_HDMIDEV(4)),
+ },
{}
};
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 3ca7e1ab48ab..3635d8002ecf 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -128,6 +128,18 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
SOF_RT711_JD_SRC_JD2 |
SOF_RT715_DAI_ID_FIX),
},
+ {
+ /* Dell XPS 9710 */
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5D")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
{
.callback = sof_sdw_quirk_cb,
.matches = {
diff --git a/sound/soc/intel/boards/sof_sdw_max98373.c b/sound/soc/intel/boards/sof_sdw_max98373.c
index 25daef910aee..25f9065b627c 100644
--- a/sound/soc/intel/boards/sof_sdw_max98373.c
+++ b/sound/soc/intel/boards/sof_sdw_max98373.c
@@ -90,7 +90,7 @@ static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enabl
static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
{
- int ret = 0;
+ int ret;
/* according to soc_pcm_prepare dai link prepare is called first */
ret = sdw_prepare(substream);
@@ -102,7 +102,7 @@ static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream)
{
- int ret = 0;
+ int ret;
/* according to soc_pcm_hw_free dai link free is called first */
ret = sdw_hw_free(substream);
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 3489dc1b48f6..2bdf3cf37422 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -50,6 +50,15 @@ static const struct dmi_system_id sof_tplg_table[] = {
},
.driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
},
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-ADL_MAX98373_ALC5682I_I2S"),
+ },
+ .driver_data = "sof-adl-rt5682-ssp0-max98373-ssp2.tplg",
+ },
{}
};
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 511259c2c6c5..bd2ca0032883 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -409,7 +409,6 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
- $(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
$(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
deleted file mode 100644
index 21efe7bbf10d..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/atomics.c
+++ /dev/null
@@ -1,246 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <test_progs.h>
-
-#include "atomics.skel.h"
-
-static void test_add(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.add);
- if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.add);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run add",
- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
- ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
-
- ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
- ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");
-
- ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
- ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
-
- ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
-
-cleanup:
- bpf_link__destroy(link);
-}
-
-static void test_sub(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.sub);
- if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.sub);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run sub",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
- ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
-
- ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
- ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");
-
- ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
- ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
-
- ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
-
-cleanup:
- bpf_link__destroy(link);
-}
-
-static void test_and(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.and);
- if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.and);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run and",
- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
- ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
-
- ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
- ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
-
- ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
-cleanup:
- bpf_link__destroy(link);
-}
-
-static void test_or(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.or);
- if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.or);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run or",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
- ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
-
- ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
- ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
-
- ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
-cleanup:
- bpf_link__destroy(link);
-}
-
-static void test_xor(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.xor);
- if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.xor);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run xor",
- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
- ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
-
- ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
- ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
-
- ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value");
-cleanup:
- bpf_link__destroy(link);
-}
-
-static void test_cmpxchg(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.cmpxchg);
- if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.cmpxchg);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run add",
- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
- ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
- ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed");
-
- ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value");
- ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
- ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
-
-cleanup:
- bpf_link__destroy(link);
-}
-
-static void test_xchg(struct atomics *skel)
-{
- int err, prog_fd;
- __u32 duration = 0, retval;
- struct bpf_link *link;
-
- link = bpf_program__attach(skel->progs.xchg);
- if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link)))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.xchg);
- err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
- NULL, NULL, &retval, &duration);
- if (CHECK(err || retval, "test_run add",
- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
- goto cleanup;
-
- ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
- ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
-
- ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
- ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
-
-cleanup:
- bpf_link__destroy(link);
-}
-
-void test_atomics(void)
-{
- struct atomics *skel;
- __u32 duration = 0;
-
- skel = atomics__open_and_load();
- if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
- return;
-
- if (skel->data->skip_tests) {
- printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
- __func__);
- test__skip();
- goto cleanup;
- }
-
- if (test__start_subtest("add"))
- test_add(skel);
- if (test__start_subtest("sub"))
- test_sub(skel);
- if (test__start_subtest("and"))
- test_and(skel);
- if (test__start_subtest("or"))
- test_or(skel);
- if (test__start_subtest("xor"))
- test_xor(skel);
- if (test__start_subtest("cmpxchg"))
- test_cmpxchg(skel);
- if (test__start_subtest("xchg"))
- test_xchg(skel);
-
-cleanup:
- atomics__destroy(skel);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
deleted file mode 100644
index e25917f04602..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Facebook */
-
-#include <linux/err.h>
-#include <netinet/tcp.h>
-#include <test_progs.h>
-#include "bpf_dctcp.skel.h"
-#include "bpf_cubic.skel.h"
-#include "bpf_tcp_nogpl.skel.h"
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
-static const unsigned int total_bytes = 10 * 1024 * 1024;
-static const struct timeval timeo_sec = { .tv_sec = 10 };
-static const size_t timeo_optlen = sizeof(timeo_sec);
-static int expected_stg = 0xeB9F;
-static int stop, duration;
-
-static int settimeo(int fd)
-{
- int err;
-
- err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
- timeo_optlen);
- if (CHECK(err == -1, "setsockopt(fd, SO_RCVTIMEO)", "errno:%d\n",
- errno))
- return -1;
-
- err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
- timeo_optlen);
- if (CHECK(err == -1, "setsockopt(fd, SO_SNDTIMEO)", "errno:%d\n",
- errno))
- return -1;
-
- return 0;
-}
-
-static int settcpca(int fd, const char *tcp_ca)
-{
- int err;
-
- err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
- if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
- errno))
- return -1;
-
- return 0;
-}
-
-static void *server(void *arg)
-{
- int lfd = (int)(long)arg, err = 0, fd;
- ssize_t nr_sent = 0, bytes = 0;
- char batch[1500];
-
- fd = accept(lfd, NULL, NULL);
- while (fd == -1) {
- if (errno == EINTR)
- continue;
- err = -errno;
- goto done;
- }
-
- if (settimeo(fd)) {
- err = -errno;
- goto done;
- }
-
- while (bytes < total_bytes && !READ_ONCE(stop)) {
- nr_sent = send(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
- if (nr_sent == -1 && errno == EINTR)
- continue;
- if (nr_sent == -1) {
- err = -errno;
- break;
- }
- bytes += nr_sent;
- }
-
- CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
- bytes, total_bytes, nr_sent, errno);
-
-done:
- if (fd != -1)
- close(fd);
- if (err) {
- WRITE_ONCE(stop, 1);
- return ERR_PTR(err);
- }
- return NULL;
-}
-
-static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
-{
- struct sockaddr_in6 sa6 = {};
- ssize_t nr_recv = 0, bytes = 0;
- int lfd = -1, fd = -1;
- pthread_t srv_thread;
- socklen_t addrlen = sizeof(sa6);
- void *thread_ret;
- char batch[1500];
- int err;
-
- WRITE_ONCE(stop, 0);
-
- lfd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
- return;
- fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
- close(lfd);
- return;
- }
-
- if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) ||
- settimeo(lfd) || settimeo(fd))
- goto done;
-
- /* bind, listen and start server thread to accept */
- sa6.sin6_family = AF_INET6;
- sa6.sin6_addr = in6addr_loopback;
- err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
- if (CHECK(err == -1, "bind", "errno:%d\n", errno))
- goto done;
- err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
- if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
- goto done;
- err = listen(lfd, 1);
- if (CHECK(err == -1, "listen", "errno:%d\n", errno))
- goto done;
-
- if (sk_stg_map) {
- err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
- &expected_stg, BPF_NOEXIST);
- if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
- "err:%d errno:%d\n", err, errno))
- goto done;
- }
-
- /* connect to server */
- err = connect(fd, (struct sockaddr *)&sa6, addrlen);
- if (CHECK(err == -1, "connect", "errno:%d\n", errno))
- goto done;
-
- if (sk_stg_map) {
- int tmp_stg;
-
- err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
- &tmp_stg);
- if (CHECK(!err || errno != ENOENT,
- "bpf_map_lookup_elem(sk_stg_map)",
- "err:%d errno:%d\n", err, errno))
- goto done;
- }
-
- err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
- if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
- goto done;
-
- /* recv total_bytes */
- while (bytes < total_bytes && !READ_ONCE(stop)) {
- nr_recv = recv(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
- if (nr_recv == -1 && errno == EINTR)
- continue;
- if (nr_recv == -1)
- break;
- bytes += nr_recv;
- }
-
- CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
- bytes, total_bytes, nr_recv, errno);
-
- WRITE_ONCE(stop, 1);
- pthread_join(srv_thread, &thread_ret);
- CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
- PTR_ERR(thread_ret));
-done:
- close(lfd);
- close(fd);
-}
-
-static void test_cubic(void)
-{
- struct bpf_cubic *cubic_skel;
- struct bpf_link *link;
-
- cubic_skel = bpf_cubic__open_and_load();
- if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n"))
- return;
-
- link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n",
- PTR_ERR(link))) {
- bpf_cubic__destroy(cubic_skel);
- return;
- }
-
- do_test("bpf_cubic", NULL);
-
- bpf_link__destroy(link);
- bpf_cubic__destroy(cubic_skel);
-}
-
-static void test_dctcp(void)
-{
- struct bpf_dctcp *dctcp_skel;
- struct bpf_link *link;
-
- dctcp_skel = bpf_dctcp__open_and_load();
- if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n"))
- return;
-
- link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n",
- PTR_ERR(link))) {
- bpf_dctcp__destroy(dctcp_skel);
- return;
- }
-
- do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
- CHECK(dctcp_skel->bss->stg_result != expected_stg,
- "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
- dctcp_skel->bss->stg_result, expected_stg);
-
- bpf_link__destroy(link);
- bpf_dctcp__destroy(dctcp_skel);
-}
-
-static char *err_str;
-static bool found;
-
-static int libbpf_debug_print(enum libbpf_print_level level,
- const char *format, va_list args)
-{
- char *log_buf;
-
- if (level != LIBBPF_WARN ||
- strcmp(format, "libbpf: \n%s\n")) {
- vprintf(format, args);
- return 0;
- }
-
- log_buf = va_arg(args, char *);
- if (!log_buf)
- goto out;
- if (err_str && strstr(log_buf, err_str) != NULL)
- found = true;
-out:
- printf(format, log_buf);
- return 0;
-}
-
-static void test_invalid_license(void)
-{
- libbpf_print_fn_t old_print_fn;
- struct bpf_tcp_nogpl *skel;
-
- err_str = "struct ops programs must have a GPL compatible license";
- found = false;
- old_print_fn = libbpf_set_print(libbpf_debug_print);
-
- skel = bpf_tcp_nogpl__open_and_load();
- ASSERT_NULL(skel, "bpf_tcp_nogpl");
- ASSERT_EQ(found, true, "expected_err_msg");
-
- bpf_tcp_nogpl__destroy(skel);
- libbpf_set_print(old_print_fn);
-}
-
-void test_bpf_tcp_ca(void)
-{
- if (test__start_subtest("dctcp"))
- test_dctcp();
- if (test__start_subtest("cubic"))
- test_cubic();
- if (test__start_subtest("invalid_license"))
- test_invalid_license();
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
deleted file mode 100644
index 7fc0951ee75f..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-#include <test_progs.h>
-#include <network_helpers.h>
-#include "kfunc_call_test.skel.h"
-#include "kfunc_call_test_subprog.skel.h"
-
-static void test_main(void)
-{
- struct kfunc_call_test *skel;
- int prog_fd, retval, err;
-
- skel = kfunc_call_test__open_and_load();
- if (!ASSERT_OK_PTR(skel, "skel"))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, (__u32 *)&retval, NULL);
- ASSERT_OK(err, "bpf_prog_test_run(test1)");
- ASSERT_EQ(retval, 12, "test1-retval");
-
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test2);
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, (__u32 *)&retval, NULL);
- ASSERT_OK(err, "bpf_prog_test_run(test2)");
- ASSERT_EQ(retval, 3, "test2-retval");
-
- kfunc_call_test__destroy(skel);
-}
-
-static void test_subprog(void)
-{
- struct kfunc_call_test_subprog *skel;
- int prog_fd, retval, err;
-
- skel = kfunc_call_test_subprog__open_and_load();
- if (!ASSERT_OK_PTR(skel, "skel"))
- return;
-
- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, (__u32 *)&retval, NULL);
- ASSERT_OK(err, "bpf_prog_test_run(test1)");
- ASSERT_EQ(retval, 10, "test1-retval");
- ASSERT_NEQ(skel->data->active_res, -1, "active_res");
- ASSERT_EQ(skel->data->sk_state, BPF_TCP_CLOSE, "sk_state");
-
- kfunc_call_test_subprog__destroy(skel);
-}
-
-void test_kfunc_call(void)
-{
- if (test__start_subtest("main"))
- test_main();
-
- if (test__start_subtest("subprog"))
- test_subprog();
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
deleted file mode 100644
index e9916f2817ec..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include <test_progs.h>
-#include <sys/syscall.h>
-#include "linked_funcs.skel.h"
-
-void test_linked_funcs(void)
-{
- int err;
- struct linked_funcs *skel;
-
- skel = linked_funcs__open();
- if (!ASSERT_OK_PTR(skel, "skel_open"))
- return;
-
- skel->rodata->my_tid = syscall(SYS_gettid);
- skel->bss->syscall_id = SYS_getpgid;
-
- err = linked_funcs__load(skel);
- if (!ASSERT_OK(err, "skel_load"))
- goto cleanup;
-
- err = linked_funcs__attach(skel);
- if (!ASSERT_OK(err, "skel_attach"))
- goto cleanup;
-
- /* trigger */
- syscall(SYS_getpgid);
-
- ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1");
- ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1");
- ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1");
-
- ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2");
- ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2");
- /* output_weak2 should never be updated */
- ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2");
-
-cleanup:
- linked_funcs__destroy(skel);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_maps.c b/tools/testing/selftests/bpf/prog_tests/linked_maps.c
deleted file mode 100644
index 85dcaaaf2775..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/linked_maps.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include <test_progs.h>
-#include <sys/syscall.h>
-#include "linked_maps.skel.h"
-
-void test_linked_maps(void)
-{
- int err;
- struct linked_maps *skel;
-
- skel = linked_maps__open_and_load();
- if (!ASSERT_OK_PTR(skel, "skel_open"))
- return;
-
- err = linked_maps__attach(skel);
- if (!ASSERT_OK(err, "skel_attach"))
- goto cleanup;
-
- /* trigger */
- syscall(SYS_getpgid);
-
- ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1");
- ASSERT_EQ(skel->bss->output_second1, 2, "output_second1");
- ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1");
-
-cleanup:
- linked_maps__destroy(skel);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_vars.c b/tools/testing/selftests/bpf/prog_tests/linked_vars.c
deleted file mode 100644
index 267166abe4c1..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/linked_vars.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include <test_progs.h>
-#include <sys/syscall.h>
-#include "linked_vars.skel.h"
-
-void test_linked_vars(void)
-{
- int err;
- struct linked_vars *skel;
-
- skel = linked_vars__open();
- if (!ASSERT_OK_PTR(skel, "skel_open"))
- return;
-
- skel->bss->input_bss1 = 1000;
- skel->bss->input_bss2 = 2000;
- skel->bss->input_bss_weak = 3000;
-
- err = linked_vars__load(skel);
- if (!ASSERT_OK(err, "skel_load"))
- goto cleanup;
-
- err = linked_vars__attach(skel);
- if (!ASSERT_OK(err, "skel_attach"))
- goto cleanup;
-
- /* trigger */
- syscall(SYS_getpgid);
-
- ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1");
- ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2");
- /* 10 comes from "winner" input_data_weak in first obj file */
- ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_bss1");
- ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_bss2");
- /* 100 comes from "winner" input_rodata_weak in first obj file */
- ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_weak1");
- ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_weak2");
-
-cleanup:
- linked_vars__destroy(skel);
-}
diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c b/tools/testing/selftests/bpf/prog_tests/static_linked.c
deleted file mode 100644
index 46556976dccc..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/static_linked.c
+++ /dev/null
@@ -1,40 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Facebook */
-
-#include <test_progs.h>
-#include "test_static_linked.skel.h"
-
-void test_static_linked(void)
-{
- int err;
- struct test_static_linked* skel;
-
- skel = test_static_linked__open();
- if (!ASSERT_OK_PTR(skel, "skel_open"))
- return;
-
- skel->rodata->rovar1 = 1;
- skel->bss->static_var1 = 2;
- skel->bss->static_var11 = 3;
-
- skel->rodata->rovar2 = 4;
- skel->bss->static_var2 = 5;
- skel->bss->static_var22 = 6;
-
- err = test_static_linked__load(skel);
- if (!ASSERT_OK(err, "skel_load"))
- goto cleanup;
-
- err = test_static_linked__attach(skel);
- if (!ASSERT_OK(err, "skel_attach"))
- goto cleanup;
-
- /* trigger */
- usleep(1);
-
- ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1");
- ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2");
-
-cleanup:
- test_static_linked__destroy(skel);
-}
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
deleted file mode 100644
index f62df4d023f9..000000000000
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ /dev/null
@@ -1,545 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-/* WARNING: This implemenation is not necessarily the same
- * as the tcp_cubic.c. The purpose is mainly for testing
- * the kernel BPF logic.
- *
- * Highlights:
- * 1. CONFIG_HZ .kconfig map is used.
- * 2. In bictcp_update(), calculation is changed to use usec
- * resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
- * Thus, usecs_to_jiffies() is not used in the bpf_cubic.c.
- * 3. In bitctcp_update() [under tcp_friendliness], the original
- * "while (ca->ack_cnt > delta)" loop is changed to the equivalent
- * "ca->ack_cnt / delta" operation.
- */
-
-#include <linux/bpf.h>
-#include <linux/stddef.h>
-#include <linux/tcp.h>
-#include "bpf_tcp_helpers.h"
-
-char _license[] SEC("license") = "GPL";
-
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
- * max_cwnd = snd_cwnd * beta
- */
-#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
-
-/* Two methods of hybrid slow start */
-#define HYSTART_ACK_TRAIN 0x1
-#define HYSTART_DELAY 0x2
-
-/* Number of delay samples for detecting the increase of delay */
-#define HYSTART_MIN_SAMPLES 8
-#define HYSTART_DELAY_MIN (4000U) /* 4ms */
-#define HYSTART_DELAY_MAX (16000U) /* 16 ms */
-#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
-
-static int fast_convergence = 1;
-static const int beta = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
-static int initial_ssthresh;
-static const int bic_scale = 41;
-static int tcp_friendliness = 1;
-
-static int hystart = 1;
-static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
-static int hystart_low_window = 16;
-static int hystart_ack_delta_us = 2000;
-
-static const __u32 cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */
-static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
- / (BICTCP_BETA_SCALE - beta);
-/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
- * so K = cubic_root( (wmax-cwnd)*rtt/c )
- * the unit of K is bictcp_HZ=2^10, not HZ
- *
- * c = bic_scale >> 10
- * rtt = 100ms
- *
- * the following code has been designed and tested for
- * cwnd < 1 million packets
- * RTT < 100 seconds
- * HZ < 1,000,00 (corresponding to 10 nano-second)
- */
-
-/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
-static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
- / (bic_scale * 10);
-
-/* BIC TCP Parameters */
-struct bictcp {
- __u32 cnt; /* increase cwnd by 1 after ACKs */
- __u32 last_max_cwnd; /* last maximum snd_cwnd */
- __u32 last_cwnd; /* the last snd_cwnd */
- __u32 last_time; /* time when updated last_cwnd */
- __u32 bic_origin_point;/* origin point of bic function */
- __u32 bic_K; /* time to origin point
- from the beginning of the current epoch */
- __u32 delay_min; /* min delay (usec) */
- __u32 epoch_start; /* beginning of an epoch */
- __u32 ack_cnt; /* number of acks */
- __u32 tcp_cwnd; /* estimated tcp cwnd */
- __u16 unused;
- __u8 sample_cnt; /* number of samples to decide curr_rtt */
- __u8 found; /* the exit point is found? */
- __u32 round_start; /* beginning of each round */
- __u32 end_seq; /* end_seq of the round */
- __u32 last_ack; /* last time when the ACK spacing is close */
- __u32 curr_rtt; /* the minimum rtt of current round */
-};
-
-static inline void bictcp_reset(struct bictcp *ca)
-{
- ca->cnt = 0;
- ca->last_max_cwnd = 0;
- ca->last_cwnd = 0;
- ca->last_time = 0;
- ca->bic_origin_point = 0;
- ca->bic_K = 0;
- ca->delay_min = 0;
- ca->epoch_start = 0;
- ca->ack_cnt = 0;
- ca->tcp_cwnd = 0;
- ca->found = 0;
-}
-
-extern unsigned long CONFIG_HZ __kconfig;
-#define HZ CONFIG_HZ
-#define USEC_PER_MSEC 1000UL
-#define USEC_PER_SEC 1000000UL
-#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
-
-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
-{
- return dividend / divisor;
-}
-
-#define div64_ul div64_u64
-
-#define BITS_PER_U64 (sizeof(__u64) * 8)
-static __always_inline int fls64(__u64 x)
-{
- int num = BITS_PER_U64 - 1;
-
- if (x == 0)
- return 0;
-
- if (!(x & (~0ull << (BITS_PER_U64-32)))) {
- num -= 32;
- x <<= 32;
- }
- if (!(x & (~0ull << (BITS_PER_U64-16)))) {
- num -= 16;
- x <<= 16;
- }
- if (!(x & (~0ull << (BITS_PER_U64-8)))) {
- num -= 8;
- x <<= 8;
- }
- if (!(x & (~0ull << (BITS_PER_U64-4)))) {
- num -= 4;
- x <<= 4;
- }
- if (!(x & (~0ull << (BITS_PER_U64-2)))) {
- num -= 2;
- x <<= 2;
- }
- if (!(x & (~0ull << (BITS_PER_U64-1))))
- num -= 1;
-
- return num + 1;
-}
-
-static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
-{
- return tcp_sk(sk)->tcp_mstamp;
-}
-
-static __always_inline void bictcp_hystart_reset(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
-
- ca->round_start = ca->last_ack = bictcp_clock_us(sk);
- ca->end_seq = tp->snd_nxt;
- ca->curr_rtt = ~0U;
- ca->sample_cnt = 0;
-}
-
-/* "struct_ops/" prefix is not a requirement
- * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
- * as long as it is used in one of the func ptr
- * under SEC(".struct_ops").
- */
-SEC("struct_ops/bpf_cubic_init")
-void BPF_PROG(bpf_cubic_init, struct sock *sk)
-{
- struct bictcp *ca = inet_csk_ca(sk);
-
- bictcp_reset(ca);
-
- if (hystart)
- bictcp_hystart_reset(sk);
-
- if (!hystart && initial_ssthresh)
- tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
-}
-
-/* No prefix in SEC will also work.
- * The remaining tcp-cubic functions have an easier way.
- */
-SEC("no-sec-prefix-bictcp_cwnd_event")
-void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
-{
- if (event == CA_EVENT_TX_START) {
- struct bictcp *ca = inet_csk_ca(sk);
- __u32 now = tcp_jiffies32;
- __s32 delta;
-
- delta = now - tcp_sk(sk)->lsndtime;
-
- /* We were application limited (idle) for a while.
- * Shift epoch_start to keep cwnd growth to cubic curve.
- */
- if (ca->epoch_start && delta > 0) {
- ca->epoch_start += delta;
- if (after(ca->epoch_start, now))
- ca->epoch_start = now;
- }
- return;
- }
-}
-
-/*
- * cbrt(x) MSB values for x MSB values in [0..63].
- * Precomputed then refined by hand - Willy Tarreau
- *
- * For x in [0..63],
- * v = cbrt(x << 18) - 1
- * cbrt(x) = (v[x] + 10) >> 6
- */
-static const __u8 v[] = {
- /* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118,
- /* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156,
- /* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179,
- /* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199,
- /* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215,
- /* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229,
- /* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242,
- /* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254,
-};
-
-/* calculate the cubic root of x using a table lookup followed by one
- * Newton-Raphson iteration.
- * Avg err ~= 0.195%
- */
-static __always_inline __u32 cubic_root(__u64 a)
-{
- __u32 x, b, shift;
-
- if (a < 64) {
- /* a in [0..63] */
- return ((__u32)v[(__u32)a] + 35) >> 6;
- }
-
- b = fls64(a);
- b = ((b * 84) >> 8) - 1;
- shift = (a >> (b * 3));
-
- /* it is needed for verifier's bound check on v */
- if (shift >= 64)
- return 0;
-
- x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;
-
- /*
- * Newton-Raphson iteration
- * 2
- * x = ( 2 * x + a / x ) / 3
- * k+1 k k
- */
- x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
- x = ((x * 341) >> 10);
- return x;
-}
-
-/*
- * Compute congestion window to use.
- */
-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
- __u32 acked)
-{
- __u32 delta, bic_target, max_cnt;
- __u64 offs, t;
-
- ca->ack_cnt += acked; /* count the number of ACKed packets */
-
- if (ca->last_cwnd == cwnd &&
- (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
- return;
-
- /* The CUBIC function can update ca->cnt at most once per jiffy.
- * On all cwnd reduction events, ca->epoch_start is set to 0,
- * which will force a recalculation of ca->cnt.
- */
- if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
- goto tcp_friendliness;
-
- ca->last_cwnd = cwnd;
- ca->last_time = tcp_jiffies32;
-
- if (ca->epoch_start == 0) {
- ca->epoch_start = tcp_jiffies32; /* record beginning */
- ca->ack_cnt = acked; /* start counting */
- ca->tcp_cwnd = cwnd; /* syn with cubic */
-
- if (ca->last_max_cwnd <= cwnd) {
- ca->bic_K = 0;
- ca->bic_origin_point = cwnd;
- } else {
- /* Compute new K based on
- * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
- */
- ca->bic_K = cubic_root(cube_factor
- * (ca->last_max_cwnd - cwnd));
- ca->bic_origin_point = ca->last_max_cwnd;
- }
- }
-
- /* cubic function - calc*/
- /* calculate c * time^3 / rtt,
- * while considering overflow in calculation of time^3
- * (so time^3 is done by using 64 bit)
- * and without the support of division of 64bit numbers
- * (so all divisions are done by using 32 bit)
- * also NOTE the unit of those veriables
- * time = (t - K) / 2^bictcp_HZ
- * c = bic_scale >> 10
- * rtt = (srtt >> 3) / HZ
- * !!! The following code does not have overflow problems,
- * if the cwnd < 1 million packets !!!
- */
-
- t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
- t += ca->delay_min;
- /* change the unit from usec to bictcp_HZ */
- t <<= BICTCP_HZ;
- t /= USEC_PER_SEC;
-
- if (t < ca->bic_K) /* t - K */
- offs = ca->bic_K - t;
- else
- offs = t - ca->bic_K;
-
- /* c/rtt * (t-K)^3 */
- delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
- if (t < ca->bic_K) /* below origin*/
- bic_target = ca->bic_origin_point - delta;
- else /* above origin*/
- bic_target = ca->bic_origin_point + delta;
-
- /* cubic function - calc bictcp_cnt*/
- if (bic_target > cwnd) {
- ca->cnt = cwnd / (bic_target - cwnd);
- } else {
- ca->cnt = 100 * cwnd; /* very small increment*/
- }
-
- /*
- * The initial growth of cubic function may be too conservative
- * when the available bandwidth is still unknown.
- */
- if (ca->last_max_cwnd == 0 && ca->cnt > 20)
- ca->cnt = 20; /* increase cwnd 5% per RTT */
-
-tcp_friendliness:
- /* TCP Friendly */
- if (tcp_friendliness) {
- __u32 scale = beta_scale;
- __u32 n;
-
- /* update tcp cwnd */
- delta = (cwnd * scale) >> 3;
- if (ca->ack_cnt > delta && delta) {
- n = ca->ack_cnt / delta;
- ca->ack_cnt -= n * delta;
- ca->tcp_cwnd += n;
- }
-
- if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */
- delta = ca->tcp_cwnd - cwnd;
- max_cnt = cwnd / delta;
- if (ca->cnt > max_cnt)
- ca->cnt = max_cnt;
- }
- }
-
- /* The maximum rate of cwnd increase CUBIC allows is 1 packet per
- * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
- */
- ca->cnt = max(ca->cnt, 2U);
-}
-
-/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
-void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
-
- if (!tcp_is_cwnd_limited(sk))
- return;
-
- if (tcp_in_slow_start(tp)) {
- if (hystart && after(ack, ca->end_seq))
- bictcp_hystart_reset(sk);
- acked = tcp_slow_start(tp, acked);
- if (!acked)
- return;
- }
- bictcp_update(ca, tp->snd_cwnd, acked);
- tcp_cong_avoid_ai(tp, ca->cnt, acked);
-}
-
-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
-
- ca->epoch_start = 0; /* end of epoch */
-
- /* Wmax and fast convergence */
- if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
- ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
- / (2 * BICTCP_BETA_SCALE);
- else
- ca->last_max_cwnd = tp->snd_cwnd;
-
- return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
-}
-
-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
-{
- if (new_state == TCP_CA_Loss) {
- bictcp_reset(inet_csk_ca(sk));
- bictcp_hystart_reset(sk);
- }
-}
-
-#define GSO_MAX_SIZE 65536
-
-/* Account for TSO/GRO delays.
- * Otherwise short RTT flows could get too small ssthresh, since during
- * slow start we begin with small TSO packets and ca->delay_min would
- * not account for long aggregation delay when TSO packets get bigger.
- * Ideally even with a very small RTT we would like to have at least one
- * TSO packet being sent and received by GRO, and another one in qdisc layer.
- * We apply another 100% factor because @rate is doubled at this point.
- * We cap the cushion to 1ms.
- */
-static __always_inline __u32 hystart_ack_delay(struct sock *sk)
-{
- unsigned long rate;
-
- rate = sk->sk_pacing_rate;
- if (!rate)
- return 0;
- return min((__u64)USEC_PER_MSEC,
- div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
-}
-
-static __always_inline void hystart_update(struct sock *sk, __u32 delay)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
- __u32 threshold;
-
- if (hystart_detect & HYSTART_ACK_TRAIN) {
- __u32 now = bictcp_clock_us(sk);
-
- /* first detection parameter - ack-train detection */
- if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
- ca->last_ack = now;
-
- threshold = ca->delay_min + hystart_ack_delay(sk);
-
- /* Hystart ack train triggers if we get ack past
- * ca->delay_min/2.
- * Pacing might have delayed packets up to RTT/2
- * during slow start.
- */
- if (sk->sk_pacing_status == SK_PACING_NONE)
- threshold >>= 1;
-
- if ((__s32)(now - ca->round_start) > threshold) {
- ca->found = 1;
- tp->snd_ssthresh = tp->snd_cwnd;
- }
- }
- }
-
- if (hystart_detect & HYSTART_DELAY) {
- /* obtain the minimum delay of more than sampling packets */
- if (ca->curr_rtt > delay)
- ca->curr_rtt = delay;
- if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
- ca->sample_cnt++;
- } else {
- if (ca->curr_rtt > ca->delay_min +
- HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
- ca->found = 1;
- tp->snd_ssthresh = tp->snd_cwnd;
- }
- }
- }
-}
-
-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
- const struct ack_sample *sample)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
- __u32 delay;
-
- /* Some calls are for duplicates without timetamps */
- if (sample->rtt_us < 0)
- return;
-
- /* Discard delay samples right after fast recovery */
- if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
- return;
-
- delay = sample->rtt_us;
- if (delay == 0)
- delay = 1;
-
- /* first time call or link delay decreases */
- if (ca->delay_min == 0 || ca->delay_min > delay)
- ca->delay_min = delay;
-
- /* hystart triggers when cwnd is larger than some threshold */
- if (!ca->found && tcp_in_slow_start(tp) && hystart &&
- tp->snd_cwnd >= hystart_low_window)
- hystart_update(sk, delay);
-}
-
-extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
-
-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
-{
- return tcp_reno_undo_cwnd(sk);
-}
-
-SEC(".struct_ops")
-struct tcp_congestion_ops cubic = {
- .init = (void *)bpf_cubic_init,
- .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
- .cong_avoid = (void *)bpf_cubic_cong_avoid,
- .set_state = (void *)bpf_cubic_state,
- .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
- .cwnd_event = (void *)bpf_cubic_cwnd_event,
- .pkts_acked = (void *)bpf_cubic_acked,
- .name = "bpf_cubic",
-};
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
deleted file mode 100644
index fd42247da8b4..000000000000
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Facebook */
-
-/* WARNING: This implemenation is not necessarily the same
- * as the tcp_dctcp.c. The purpose is mainly for testing
- * the kernel BPF logic.
- */
-
-#include <stddef.h>
-#include <linux/bpf.h>
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/tcp.h>
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-#include "bpf_tcp_helpers.h"
-
-char _license[] SEC("license") = "GPL";
-
-int stg_result = 0;
-
-struct {
- __uint(type, BPF_MAP_TYPE_SK_STORAGE);
- __uint(map_flags, BPF_F_NO_PREALLOC);
- __type(key, int);
- __type(value, int);
-} sk_stg_map SEC(".maps");
-
-#define DCTCP_MAX_ALPHA 1024U
-
-struct dctcp {
- __u32 old_delivered;
- __u32 old_delivered_ce;
- __u32 prior_rcv_nxt;
- __u32 dctcp_alpha;
- __u32 next_seq;
- __u32 ce_state;
- __u32 loss_cwnd;
-};
-
-static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
-static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
-
-static __always_inline void dctcp_reset(const struct tcp_sock *tp,
- struct dctcp *ca)
-{
- ca->next_seq = tp->snd_nxt;
-
- ca->old_delivered = tp->delivered;
- ca->old_delivered_ce = tp->delivered_ce;
-}
-
-SEC("struct_ops/dctcp_init")
-void BPF_PROG(dctcp_init, struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct dctcp *ca = inet_csk_ca(sk);
- int *stg;
-
- ca->prior_rcv_nxt = tp->rcv_nxt;
- ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
- ca->loss_cwnd = 0;
- ca->ce_state = 0;
-
- stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
- if (stg) {
- stg_result = *stg;
- bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
- }
- dctcp_reset(tp, ca);
-}
-
-SEC("struct_ops/dctcp_ssthresh")
-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
-{
- struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- ca->loss_cwnd = tp->snd_cwnd;
- return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
-}
-
-SEC("struct_ops/dctcp_update_alpha")
-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct dctcp *ca = inet_csk_ca(sk);
-
- /* Expired RTT */
- if (!before(tp->snd_una, ca->next_seq)) {
- __u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
- __u32 alpha = ca->dctcp_alpha;
-
- /* alpha = (1 - g) * alpha + g * F */
-
- alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
- if (delivered_ce) {
- __u32 delivered = tp->delivered - ca->old_delivered;
-
- /* If dctcp_shift_g == 1, a 32bit value would overflow
- * after 8 M packets.
- */
- delivered_ce <<= (10 - dctcp_shift_g);
- delivered_ce /= max(1U, delivered);
-
- alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
- }
- ca->dctcp_alpha = alpha;
- dctcp_reset(tp, ca);
- }
-}
-
-static __always_inline void dctcp_react_to_loss(struct sock *sk)
-{
- struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- ca->loss_cwnd = tp->snd_cwnd;
- tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
-}
-
-SEC("struct_ops/dctcp_state")
-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
-{
- if (new_state == TCP_CA_Recovery &&
- new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
- dctcp_react_to_loss(sk);
- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
- * one loss-adjustment per RTT.
- */
-}
-
-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (ce_state == 1)
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
- else
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-}
-
-/* Minimal DCTCP CE state machine:
- *
- * S: 0 <- last pkt was non-CE
- * 1 <- last pkt was CE
- */
-static __always_inline
-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
- __u32 *prior_rcv_nxt, __u32 *ce_state)
-{
- __u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
-
- if (*ce_state != new_ce_state) {
- /* CE state has changed, force an immediate ACK to
- * reflect the new CE state. If an ACK was delayed,
- * send that first to reflect the prior CE state.
- */
- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
- dctcp_ece_ack_cwr(sk, *ce_state);
- bpf_tcp_send_ack(sk, *prior_rcv_nxt);
- }
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
- }
- *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
- *ce_state = new_ce_state;
- dctcp_ece_ack_cwr(sk, new_ce_state);
-}
-
-SEC("struct_ops/dctcp_cwnd_event")
-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
-{
- struct dctcp *ca = inet_csk_ca(sk);
-
- switch (ev) {
- case CA_EVENT_ECN_IS_CE:
- case CA_EVENT_ECN_NO_CE:
- dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
- break;
- case CA_EVENT_LOSS:
- dctcp_react_to_loss(sk);
- break;
- default:
- /* Don't care for the rest. */
- break;
- }
-}
-
-SEC("struct_ops/dctcp_cwnd_undo")
-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
-{
- const struct dctcp *ca = inet_csk_ca(sk);
-
- return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
-extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
-
-SEC("struct_ops/dctcp_reno_cong_avoid")
-void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
-{
- tcp_reno_cong_avoid(sk, ack, acked);
-}
-
-SEC(".struct_ops")
-struct tcp_congestion_ops dctcp_nouse = {
- .init = (void *)dctcp_init,
- .set_state = (void *)dctcp_state,
- .flags = TCP_CONG_NEEDS_ECN,
- .name = "bpf_dctcp_nouse",
-};
-
-SEC(".struct_ops")
-struct tcp_congestion_ops dctcp = {
- .init = (void *)dctcp_init,
- .in_ack_event = (void *)dctcp_update_alpha,
- .cwnd_event = (void *)dctcp_cwnd_event,
- .ssthresh = (void *)dctcp_ssthresh,
- .cong_avoid = (void *)dctcp_cong_avoid,
- .undo_cwnd = (void *)dctcp_cwnd_undo,
- .set_state = (void *)dctcp_state,
- .flags = TCP_CONG_NEEDS_ECN,
- .name = "bpf_dctcp",
-};
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
deleted file mode 100644
index 470f8723e463..000000000000
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include "bpf_tcp_helpers.h"
-
-extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
- __u32 c, __u64 d) __ksym;
-
-SEC("classifier")
-int kfunc_call_test2(struct __sk_buff *skb)
-{
- struct bpf_sock *sk = skb->sk;
-
- if (!sk)
- return -1;
-
- sk = bpf_sk_fullsock(sk);
- if (!sk)
- return -1;
-
- return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
-}
-
-SEC("classifier")
-int kfunc_call_test1(struct __sk_buff *skb)
-{
- struct bpf_sock *sk = skb->sk;
- __u64 a = 1ULL << 32;
- __u32 ret;
-
- if (!sk)
- return -1;
-
- sk = bpf_sk_fullsock(sk);
- if (!sk)
- return -1;
-
- a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
- ret = a >> 32; /* ret should be 2 */
- ret += (__u32)a; /* ret should be 12 */
-
- return ret;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
deleted file mode 100644
index b2dcb7d9cb03..000000000000
--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include "bpf_tcp_helpers.h"
-
-extern const int bpf_prog_active __ksym;
-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
- __u32 c, __u64 d) __ksym;
-extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
-int active_res = -1;
-int sk_state = -1;
-
-int __noinline f1(struct __sk_buff *skb)
-{
- struct bpf_sock *sk = skb->sk;
- int *active;
-
- if (!sk)
- return -1;
-
- sk = bpf_sk_fullsock(sk);
- if (!sk)
- return -1;
-
- active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
- bpf_get_smp_processor_id());
- if (active)
- active_res = *active;
-
- sk_state = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state;
-
- return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
-}
-
-SEC("classifier")
-int kfunc_call_test1(struct __sk_buff *skb)
-{
- return f1(skb);
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
deleted file mode 100644
index b964ec1390c2..000000000000
--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-/* weak and shared between two files */
-const volatile int my_tid __weak;
-long syscall_id __weak;
-
-int output_val1;
-int output_ctx1;
-int output_weak1;
-
-/* same "subprog" name in all files, but it's ok because they all are static */
-static __noinline int subprog(int x)
-{
- /* but different formula */
- return x * 1;
-}
-
-/* Global functions can't be void */
-int set_output_val1(int x)
-{
- output_val1 = x + subprog(x);
- return x;
-}
-
-/* This function can't be verified as global, as it assumes raw_tp/sys_enter
- * context and accesses syscall id (second argument). So we mark it as
- * __hidden, so that libbpf will mark it as static in the final object file,
- * right before verifying it in the kernel.
- *
- * But we don't mark it as __hidden here, rather at extern site. __hidden is
- * "contaminating" visibility, so it will get propagated from either extern or
- * actual definition (including from the losing __weak definition).
- */
-void set_output_ctx1(__u64 *ctx)
-{
- output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */
-}
-
-/* this weak instance should win because it's the first one */
-__weak int set_output_weak(int x)
-{
- output_weak1 = x;
- return x;
-}
-
-extern int set_output_val2(int x);
-
-/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
-__hidden extern void set_output_ctx2(__u64 *ctx);
-
-SEC("raw_tp/sys_enter")
-int BPF_PROG(handler1, struct pt_regs *regs, long id)
-{
- if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
- return 0;
-
- set_output_val2(1000);
- set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
-
- /* keep input value the same across both files to avoid dependency on
- * handler call order; differentiate by output_weak1 vs output_weak2.
- */
- set_output_weak(42);
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
deleted file mode 100644
index 575e958e60b7..000000000000
--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-/* weak and shared between both files */
-const volatile int my_tid __weak;
-long syscall_id __weak;
-
-int output_val2;
-int output_ctx2;
-int output_weak2; /* should stay zero */
-
-/* same "subprog" name in all files, but it's ok because they all are static */
-static __noinline int subprog(int x)
-{
- /* but different formula */
- return x * 2;
-}
-
-/* Global functions can't be void */
-int set_output_val2(int x)
-{
- output_val2 = 2 * x + 2 * subprog(x);
- return 2 * x;
-}
-
-/* This function can't be verified as global, as it assumes raw_tp/sys_enter
- * context and accesses syscall id (second argument). So we mark it as
- * __hidden, so that libbpf will mark it as static in the final object file,
- * right before verifying it in the kernel.
- *
- * But we don't mark it as __hidden here, rather at extern site. __hidden is
- * "contaminating" visibility, so it will get propagated from either extern or
- * actual definition (including from the losing __weak definition).
- */
-void set_output_ctx2(__u64 *ctx)
-{
- output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */
-}
-
-/* this weak instance should lose, because it will be processed second */
-__weak int set_output_weak(int x)
-{
- output_weak2 = x;
- return 2 * x;
-}
-
-extern int set_output_val1(int x);
-
-/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
-__hidden extern void set_output_ctx1(__u64 *ctx);
-
-SEC("raw_tp/sys_enter")
-int BPF_PROG(handler2, struct pt_regs *regs, long id)
-{
- if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
- return 0;
-
- set_output_val1(2000);
- set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
-
- /* keep input value the same across both files to avoid dependency on
- * handler call order; differentiate by output_weak1 vs output_weak2.
- */
- set_output_weak(42);
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c
deleted file mode 100644
index 52291515cc72..000000000000
--- a/tools/testing/selftests/bpf/progs/linked_maps1.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-struct my_key { long x; };
-struct my_value { long x; };
-
-struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, struct my_key);
- __type(value, struct my_value);
- __uint(max_entries, 16);
-} map1 SEC(".maps");
-
- /* Matches map2 definition in linked_maps2.c. Order of the attributes doesn't
- * matter.
- */
-typedef struct {
- __uint(max_entries, 8);
- __type(key, int);
- __type(value, int);
- __uint(type, BPF_MAP_TYPE_ARRAY);
-} map2_t;
-
-extern map2_t map2 SEC(".maps");
-
-/* This should be the winning map definition, but we have no way of verifying,
- * so we just make sure that it links and works without errors
- */
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, int);
- __type(value, int);
- __uint(max_entries, 16);
-} map_weak __weak SEC(".maps");
-
-int output_first1;
-int output_second1;
-int output_weak1;
-
-SEC("raw_tp/sys_enter")
-int BPF_PROG(handler_enter1)
-{
- /* update values with key = 1 */
- int key = 1, val = 1;
- struct my_key key_struct = { .x = 1 };
- struct my_value val_struct = { .x = 1000 };
-
- bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
- bpf_map_update_elem(&map2, &key, &val, 0);
- bpf_map_update_elem(&map_weak, &key, &val, 0);
-
- return 0;
-}
-
-SEC("raw_tp/sys_exit")
-int BPF_PROG(handler_exit1)
-{
- /* lookup values with key = 2, set in another file */
- int key = 2, *val;
- struct my_key key_struct = { .x = 2 };
- struct my_value *value_struct;
-
- value_struct = bpf_map_lookup_elem(&map1, &key_struct);
- if (value_struct)
- output_first1 = value_struct->x;
-
- val = bpf_map_lookup_elem(&map2, &key);
- if (val)
- output_second1 = *val;
-
- val = bpf_map_lookup_elem(&map_weak, &key);
- if (val)
- output_weak1 = *val;
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_maps2.c b/tools/testing/selftests/bpf/progs/linked_maps2.c
deleted file mode 100644
index 0693687474ed..000000000000
--- a/tools/testing/selftests/bpf/progs/linked_maps2.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-/* modifiers and typedefs are ignored when comparing key/value types */
-typedef struct my_key { long x; } key_type;
-typedef struct my_value { long x; } value_type;
-
-extern struct {
- __uint(max_entries, 16);
- __type(key, key_type);
- __type(value, value_type);
- __uint(type, BPF_MAP_TYPE_HASH);
-} map1 SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, int);
- __type(value, int);
- __uint(max_entries, 8);
-} map2 SEC(".maps");
-
-/* this definition will lose, but it has to exactly match the winner */
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, int);
- __type(value, int);
- __uint(max_entries, 16);
-} map_weak __weak SEC(".maps");
-
-int output_first2;
-int output_second2;
-int output_weak2;
-
-SEC("raw_tp/sys_enter")
-int BPF_PROG(handler_enter2)
-{
- /* update values with key = 2 */
- int key = 2, val = 2;
- key_type key_struct = { .x = 2 };
- value_type val_struct = { .x = 2000 };
-
- bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
- bpf_map_update_elem(&map2, &key, &val, 0);
- bpf_map_update_elem(&map_weak, &key, &val, 0);
-
- return 0;
-}
-
-SEC("raw_tp/sys_exit")
-int BPF_PROG(handler_exit2)
-{
- /* lookup values with key = 1, set in another file */
- int key = 1, *val;
- key_type key_struct = { .x = 1 };
- value_type *value_struct;
-
- value_struct = bpf_map_lookup_elem(&map1, &key_struct);
- if (value_struct)
- output_first2 = value_struct->x;
-
- val = bpf_map_lookup_elem(&map2, &key);
- if (val)
- output_second2 = *val;
-
- val = bpf_map_lookup_elem(&map_weak, &key);
- if (val)
- output_weak2 = *val;
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_vars1.c b/tools/testing/selftests/bpf/progs/linked_vars1.c
deleted file mode 100644
index ef9e9d0bb0ca..000000000000
--- a/tools/testing/selftests/bpf/progs/linked_vars1.c
+++ /dev/null
@@ -1,54 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-extern int LINUX_KERNEL_VERSION __kconfig;
-/* this weak extern will be strict due to the other file's strong extern */
-extern bool CONFIG_BPF_SYSCALL __kconfig __weak;
-extern const void bpf_link_fops __ksym __weak;
-
-int input_bss1;
-int input_data1 = 1;
-const volatile int input_rodata1 = 11;
-
-int input_bss_weak __weak;
-/* these two definitions should win */
-int input_data_weak __weak = 10;
-const volatile int input_rodata_weak __weak = 100;
-
-extern int input_bss2;
-extern int input_data2;
-extern const int input_rodata2;
-
-int output_bss1;
-int output_data1;
-int output_rodata1;
-
-long output_sink1;
-
-static __noinline int get_bss_res(void)
-{
- /* just make sure all the relocations work against .text as well */
- return input_bss1 + input_bss2 + input_bss_weak;
-}
-
-SEC("raw_tp/sys_enter")
-int BPF_PROG(handler1)
-{
- output_bss1 = get_bss_res();
- output_data1 = input_data1 + input_data2 + input_data_weak;
- output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak;
-
- /* make sure we actually use above special externs, otherwise compiler
- * will optimize them out
- */
- output_sink1 = LINUX_KERNEL_VERSION
- + CONFIG_BPF_SYSCALL
- + (long)&bpf_link_fops;
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_vars2.c b/tools/testing/selftests/bpf/progs/linked_vars2.c
deleted file mode 100644
index e4f5bd388a3c..000000000000
--- a/tools/testing/selftests/bpf/progs/linked_vars2.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include "vmlinux.h"
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-
-extern int LINUX_KERNEL_VERSION __kconfig;
-/* when an extern is defined as both strong and weak, resulting symbol will be strong */
-extern bool CONFIG_BPF_SYSCALL __kconfig;
-extern const void __start_BTF __ksym;
-
-int input_bss2;
-int input_data2 = 2;
-const volatile int input_rodata2 = 22;
-
-int input_bss_weak __weak;
-/* these two weak variables should lose */
-int input_data_weak __weak = 20;
-const volatile int input_rodata_weak __weak = 200;
-
-extern int input_bss1;
-extern int input_data1;
-extern const int input_rodata1;
-
-int output_bss2;
-int output_data2;
-int output_rodata2;
-
-int output_sink2;
-
-static __noinline int get_data_res(void)
-{
- /* just make sure all the relocations work against .text as well */
- return input_data1 + input_data2 + input_data_weak;
-}
-
-SEC("raw_tp/sys_enter")
-int BPF_PROG(handler2)
-{
- output_bss2 = input_bss1 + input_bss2 + input_bss_weak;
- output_data2 = get_data_res();
- output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak;
-
- /* make sure we actually use above special externs, otherwise compiler
- * will optimize them out
- */
- output_sink2 = LINUX_KERNEL_VERSION
- + CONFIG_BPF_SYSCALL
- + (long)&__start_BTF;
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c
deleted file mode 100644
index ea1a6c4c7172..000000000000
--- a/tools/testing/selftests/bpf/progs/test_static_linked1.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-/* 8-byte aligned .bss */
-static volatile long static_var1;
-static volatile int static_var11;
-int var1 = 0;
-/* 4-byte aligned .rodata */
-const volatile int rovar1;
-
-/* same "subprog" name in both files */
-static __noinline int subprog(int x)
-{
- /* but different formula */
- return x * 2;
-}
-
-SEC("raw_tp/sys_enter")
-int handler1(const void *ctx)
-{
- var1 = subprog(rovar1) + static_var1 + static_var11;
-
- return 0;
-}
-
-char LICENSE[] SEC("license") = "GPL";
-int VERSION SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c
deleted file mode 100644
index 54d8d1ab577c..000000000000
--- a/tools/testing/selftests/bpf/progs/test_static_linked2.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2021 Facebook */
-
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-/* 4-byte aligned .bss */
-static volatile int static_var2;
-static volatile int static_var22;
-int var2 = 0;
-/* 8-byte aligned .rodata */
-const volatile long rovar2;
-
-/* same "subprog" name in both files */
-static __noinline int subprog(int x)
-{
- /* but different formula */
- return x * 3;
-}
-
-SEC("raw_tp/sys_enter")
-int handler2(const void *ctx)
-{
- var2 = subprog(rovar2) + static_var2 + static_var22;
-
- return 0;
-}
-
-/* different name and/or type of the variable doesn't matter */
-char _license[] SEC("license") = "GPL";
-int _version SEC("version") = 1;
From 04fe164eacef2d9e6faac240e1c5537db88299ed Mon Sep 17 00:00:00 2001
From: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Date: Thu, 1 Apr 2021 20:02:37 +0300
Subject: [PATCH] drm/i915/display/psr: Disable DC3CO when the PSR2 is used
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Due to the changed sequence of activating/deactivating DC3CO, disable
DC3CO until the changed dc3co activating/deactivating sequence is applied.

References: https://gitlab.freedesktop.org/drm/intel/-/issues/3134
Signed-off-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210401170237.40472-1-gwan-gyeong.mun@intel.com
---
drivers/gpu/drm/i915/display/intel_psr.c | 7 +++++++
1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8ada4f829cab..4cec6b4d7fb9 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -654,6 +654,13 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 exit_scanlines;
 
+	/*
+	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
+	 * disable DC3CO until the changed dc3co activating/deactivating sequence
+	 * is applied. B.Specs:49196
+	 */
+	return;
+
 	/*
 	 * DMC's DC3CO exit mechanism has an issue with Selective Fecth
 	 * TODO: when the issue is addressed, this restriction should be removed.
--
GitLab