 Documentation/arm64/arm-acpi.txt                 | 323 ++++++++++
 Documentation/kernel-parameters.txt              |   3 +-
 arch/arm/kvm/mmu.c                               |   4 +
 arch/arm64/Kconfig                               |   9 +
 arch/arm64/Makefile                              |   1 +
 arch/arm64/include/asm/acenv.h                   |  18 +
 arch/arm64/include/asm/acpi.h                    | 102 +++
 arch/arm64/include/asm/cpu_ops.h                 |   1 +
 arch/arm64/include/asm/elf.h                     |   3 +-
 arch/arm64/include/asm/pci.h                     |  51 ++
 arch/arm64/include/asm/psci.h                    |   3 +-
 arch/arm64/include/asm/smp.h                     |  10 +-
 arch/arm64/kernel/Makefile                       |   4 +-
 arch/arm64/kernel/acpi.c                         | 398 ++++++++++++
 arch/arm64/kernel/cpu_ops.c                      |   8 +-
 arch/arm64/kernel/efi.c                          |  37 ++
 arch/arm64/kernel/pci.c                          |  97 ++-
 arch/arm64/kernel/psci.c                         |  78 ++-
 arch/arm64/kernel/setup.c                        |  44 +-
 arch/arm64/kernel/smp.c                          |   2 +-
 arch/arm64/kernel/smp_parking_protocol.c         | 110 ++++
 arch/arm64/kernel/time.c                         |   7 +
 arch/arm64/mm/dma-mapping.c                      | 112 ++++
 arch/arm64/pci/Makefile                          |   2 +
 arch/arm64/pci/mmconfig.c                        | 292 +++++++++
 arch/arm64/pci/pci.c                             | 461 ++++++++++++++
 drivers/acpi/Kconfig                             |   6 +-
 drivers/acpi/Makefile                            |   6 +-
 drivers/acpi/bus.c                               |   3 +
 drivers/acpi/internal.h                          |   5 +
 drivers/acpi/osl.c                               |   6 +-
 drivers/acpi/processor_core.c                    |  37 ++
 drivers/acpi/sleep-arm.c                         |  28 +
 drivers/acpi/tables.c                            |  48 +-
 drivers/acpi/utils.c                             |  26 +
 drivers/ata/Kconfig                              |   2 +-
 drivers/ata/ahci_platform.c                      |  13 +
 drivers/ata/ahci_xgene.c                         |  30 +-
 drivers/clocksource/arm_arch_timer.c             | 136 +++-
 drivers/input/keyboard/gpio_keys_polled.c        |   1 +
 drivers/iommu/arm-smmu.c                         |   8 +-
 drivers/irqchip/irq-gic-v3.c                     |  10 +
 drivers/irqchip/irq-gic.c                        | 118 ++++
 drivers/irqchip/irqchip.c                        |   3 +
 drivers/leds/leds-gpio.c                         |   1 +
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c         |  16 +-
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c         |   3 +
 drivers/net/ethernet/amd/xgbe/xgbe-main.c        | 276 ++--
 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c        |  20 +-
 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c         |   4 +-
 drivers/net/ethernet/amd/xgbe/xgbe.h             |  13 +
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c   |  69 +-
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c |  80 ++-
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h |   1 +
 drivers/net/ethernet/smsc/smc91x.c               |  10 +
 drivers/net/phy/amd-xgbe-phy.c                   | 777 ++++++++++++-----------
 drivers/pci/host/pci-xgene.c                     | 167 +++++
 drivers/pnp/resource.c                           |   2 +
 drivers/tty/Kconfig                              |   6 +
 drivers/tty/Makefile                             |   1 +
 drivers/tty/sbsauart.c                           | 358 +++++++++++
 drivers/tty/serial/8250/8250_dw.c                |   9 +
 drivers/virtio/virtio_mmio.c                     |  12 +-
 include/acpi/acpi_bus.h                          |   2 +
 include/acpi/acpi_io.h                           |   6 +
 include/asm-generic/vmlinux.lds.h                |   7 +
 include/kvm/arm_vgic.h                           |  20 +-
 include/linux/acpi.h                             |   1 +
 include/linux/clocksource.h                      |   6 +
 include/linux/irqchip/arm-gic-acpi.h             |  31 +
 include/linux/irqchip/arm-gic.h                  |   2 +
 include/linux/pci.h                              |  37 +-
 virt/kvm/arm/arch_timer.c                        | 107 ++--
 virt/kvm/arm/vgic-v2.c                           |  86 ++-
 virt/kvm/arm/vgic-v3.c                           |   8 +-
 virt/kvm/arm/vgic.c                              |  30 +-
 76 files changed, 4208 insertions(+), 626 deletions(-)

diff --git a/Documentation/arm64/arm-acpi.txt b/Documentation/arm64/arm-acpi.txt
new file mode 100644
index 0000000..17cf96d
--- /dev/null
+++ b/Documentation/arm64/arm-acpi.txt
@@ -0,0 +1,323 @@
+ACPI on ARMv8 Servers
+---------------------
+ACPI can be used for ARMv8 general purpose servers designed to follow
+the ARM SBSA (Server Base System Architecture) specification, currently
+available to those with an ARM login at http://silver.arm.com.
+
+The ARMv8 kernel implements the reduced hardware model of ACPI version
+5.1 and its corresponding errata. Links to the specification and all
+external documents it refers to are managed by the UEFI Forum. The
+specification is available at http://www.uefi.org/specifications and
+external documents can be found via http://www.uefi.org/acpi.
+
+If an ARMv8 system does not meet the requirements of the SBSA, or cannot
+be described using the mechanisms defined in the required ACPI specifications,
+then it is likely that Device Tree (DT) is more suitable than ACPI for the
+hardware.
+
+
+Relationship with Device Tree
+-----------------------------
+ACPI support in drivers and subsystems for ARMv8 should never be mutually
+exclusive with DT support at compile time.
+
+At boot time the kernel will only use one description method depending on
+parameters passed from the bootloader (including kernel bootargs).
+
+Regardless of whether DT or ACPI is used, the kernel must always be capable
+of booting with either scheme (in kernels with both schemes enabled at compile
+time).
+
+When booting using ACPI tables, the /chosen node in DT will still be parsed
+to extract the kernel command line and initrd path. No other section of the
+DT will be used.
+
+
+Booting using ACPI tables
+-------------------------
+The only defined method for passing ACPI tables to the kernel on ARMv8
+is via the UEFI system configuration table.
+
+Processing of ACPI tables may be disabled by passing acpi=off on the kernel
+command line; this is the default behavior. If acpi=force is used, the kernel
+will ONLY use device configuration information contained in the ACPI tables.
+
+In order for the kernel to load and use ACPI tables, the UEFI implementation
+MUST set the ACPI_20_TABLE_GUID to point to the RSDP table (the table with
+the ACPI signature "RSD PTR "). If this pointer is incorrect and acpi=force
+is used, the kernel will disable ACPI and try to use DT to boot.
+
+If the pointer to the RSDP table is correct, the table will be mapped into
+the kernel by the ACPI core, using the address provided by UEFI.
+
+The ACPI core will then locate and map in all other ACPI tables provided by
+using the addresses in the RSDP table to find the XSDT (eXtended System
+Description Table). The XSDT in turn provides the addresses to all other
+ACPI tables provided by the system firmware; the ACPI core will then traverse
+this table and map in the tables listed.
+
+The ACPI core will ignore any provided RSDT (Root System Description Table).
+RSDTs have been deprecated and are ignored on arm64 since they only allow
+for 32-bit addresses.
+
+Further, the ACPI core will only use the 64-bit address fields in the FADT
+(Fixed ACPI Description Table). Any 32-bit address fields in the FADT will
+be ignored on arm64.
+
+Hardware reduced mode (see Section 4.1 of the ACPI 5.1 specification) will
+be enforced by the ACPI core on arm64. Doing so allows the ACPI core to
+run less complex code since it no longer has to provide support for legacy
+hardware from other architectures.
+
+For the ACPI core to operate properly, and in turn provide the information
+the kernel needs to configure devices, it expects to find the following
+tables (all section numbers refer to the ACPI 5.1 specification):
+
+  -- RSDP (Root System Description Pointer), section 5.2.5
+
+  -- XSDT (eXtended System Description Table), section 5.2.8
+
+  -- FACS (Firmware ACPI Control Structure), section 5.2.10
+
+  -- FADT (Fixed ACPI Description Table), section 5.2.9
+
+  -- DSDT (Differentiated System Description Table), section
+     5.2.11.1
+
+  -- MADT (Multiple APIC Description Table), section 5.2.12
+
+  -- GTDT (Generic Timer Description Table), section 5.2.24
+
+  -- If PCI is supported, the MCFG (Memory mapped ConFiGuration
+     Table), section 5.2.6, specifically Table 5-31.
+
+If the above tables are not all present, the kernel may or may not be
+able to boot properly since it may not be able to configure all of the
+devices available.
+
+
+ACPI Detection
+--------------
+Drivers should determine their probe() type by checking for a null
+value for ACPI_HANDLE, or checking .of_node, or other information in
+the device structure. This is detailed further in the "Driver
+Recommendations" section.
+
+In non-driver code, if the presence of ACPI needs to be detected at
+runtime, then check the value of acpi_disabled. If CONFIG_ACPI is not
+set, acpi_disabled will always be 1.
+
+
+Device Enumeration
+------------------
+Device descriptions in ACPI should use standard recognized ACPI interfaces.
+These can contain less information than is typically provided via a Device
+Tree description for the same device. This is also one of the reasons that
+ACPI can be useful -- the driver takes into account that it may have less
+detailed information about the device and uses sensible defaults instead.
+If done properly in the driver, the hardware can change and improve over
+time without the driver having to change at all.
+
+Clocks provide an excellent example. In DT, clocks need to be specified
+and the drivers need to take them into account. In ACPI, the assumption
+is that UEFI will leave the device in a reasonable default state, including
+any clock settings. If for some reason the driver needs to change a clock
+value, this can be done in an ACPI method; all the driver needs to do is
+invoke the method and not concern itself with what the method needs to do
+to change the clock. Changing the hardware can then take place over time
+by changing what the ACPI method does, and not the driver.
+
+ACPI drivers should only look at one specific ASL object -- the _DSD object
+-- for device driver parameters (known in DT as "bindings", or "Device
+Properties" in ACPI). Not all DT bindings will be recognized. The UEFI
+Forum provides a mechanism for registering such bindings [URL TBD by ASWG]
+so that they may be used on any operating system supporting ACPI. Device
+properties that have not been registered with the UEFI Forum should not be
+used.
+
+Drivers should look for device properties in the _DSD object ONLY; the _DSD
+object is described in the ACPI specification section 6.2.5, but more
+specifically, use the _DSD Device Properties UUID:
+
+  -- UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301
+
+  -- http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+
+The kernel has an interface for looking up device properties in a manner
+independent of whether DT or ACPI is being used and that interface should
+be used; it can eliminate some duplication of code paths in driver probing
+functions and discourage divergence between DT bindings and ACPI device
+properties.
+
+ACPI tables are described with a formal language called ASL, the ACPI
+Source Language (section 19 of the specification). This means that there
+are always multiple ways to describe the same thing -- including device
+properties. For example, device properties could use an ASL construct
+that looks like this: Name(KEY0, "value0"). An ACPI device driver would
+then retrieve the value of the property by evaluating the KEY0 object.
+However, using Name() this way has multiple problems: (1) ACPI limits
+names ("KEY0") to four characters unlike DT; (2) there is no industry
+wide registry that maintains a list of names, minimizing re-use; (3)
+there is also no registry for the definition of property values ("value0"),
+again making re-use difficult; and (4) how does one maintain backward
+compatibility as new hardware comes out? The _DSD method was created
+to solve precisely these sorts of problems; Linux drivers should ALWAYS
+use the _DSD method for device properties and nothing else.
+
+The _DSM object (ACPI Section 9.14.1) could also be used for conveying
+device properties to a driver. Linux drivers should only expect it to
+be used if _DSD cannot represent the data required, and there is no way
+to create a new UUID for the _DSD object. Note that there is even less
+regulation of the use of _DSM than there is of _DSD. Drivers that depend
+on the contents of _DSM objects will be more difficult to maintain over
+time because of this.
+
+The _DSD object is a very flexible mechanism in ACPI, as are the registered
+Device Properties. This flexibility allows _DSD to cover more than just the
+generic server case and care should be taken in device drivers not to expect
+it to replicate highly specific embedded behaviour from DT.
+
+Both DT bindings and ACPI device properties for device drivers have review
+processes. Use them. And, before creating new device properties, check to
+be sure that they have not been defined before and either registered in the
+Linux kernel documentation or the UEFI Forum. If the device driver supports
+ACPI and DT, please make sure the device properties are consistent in both
+places.
+
+
+Programmable Power Control Resources
+------------------------------------
+Programmable power control resources include such resources as voltage/current
+providers (regulators) and clock sources.
+
+The kernel assumes that power control of these resources is represented with
+Power Resource Objects (ACPI section 7.1). The ACPI core will then handle
+correctly enabling and disabling resources as they are needed. In order to
+get that to work, ACPI assumes each device has defined D-states and that these
+can be controlled through the optional ACPI methods _PS0, _PS1, _PS2, and _PS3;
+in ACPI, _PS0 is the method to invoke to turn a device full on, and _PS3 is for
+turning a device full off.
+
+The kernel ACPI code will also assume that the _PS? methods follow the normal
+ACPI rules for such methods:
+
+  -- If either _PS0 or _PS3 is implemented, then the other method must also
+     be implemented.
+
+  -- If a device requires usage or setup of a power resource when on, the ASL
+     should ensure that it is allocated/enabled using the _PS0 method.
+
+  -- Resources allocated or enabled in the _PS0 method should be disabled
+     or de-allocated in the _PS3 method.
+
+  -- Firmware will leave the resources in a reasonable state before handing
+     over control to the kernel.
+
+Such code in _PS? methods will of course be very platform specific. But,
+this allows the driver to abstract out the interface for operating the device
+and avoid having to read special non-standard values from ACPI tables. Further,
+abstracting the use of these resources allows the hardware to change over time
+without requiring updates to the driver.
+
+
+Clocks
+------
+ACPI makes the assumption that clocks are initialized by the firmware --
+UEFI, in this case -- to some working value before control is handed over
+to the kernel. This has implications for devices such as UARTs, or SoC
+driven LCD displays, for example.
+
+When the kernel boots, the clock is assumed to be set to a reasonable
+working value. If for some reason the frequency needs to change -- e.g.,
+throttling for power management -- the device driver should expect that
+process to be abstracted out into some ACPI method that can be invoked
+(please see the ACPI specification for further recommendations on standard
+methods to be expected). If it is not, there is no direct way for ACPI to
+control the clocks.
+
+
+Driver Recommendations
+----------------------
+DO NOT remove any DT handling when adding ACPI support for a driver. The
+same device may be used on many different systems.
+
+DO try to structure the driver so that it is data driven. That is, set up
+a struct containing internal per-device state based on defaults and whatever
+else must be discovered by the driver probe function. Then, have the rest
+of the driver operate off of the contents of that struct. Doing so should
+allow most divergence between ACPI and DT functionality to be kept local to
+the probe function instead of being scattered throughout the driver. For
+example:
+
+static int device_probe_dt(struct platform_device *pdev)
+{
+        /* DT specific functionality */
+        ...
+}
+
+static int device_probe_acpi(struct platform_device *pdev)
+{
+        /* ACPI specific functionality */
+        ...
+}
+
+static int device_probe(struct platform_device *pdev)
+{
+        ...
+        struct device_node *node = pdev->dev.of_node;
+        ...
+
+        if (node)
+                ret = device_probe_dt(pdev);
+        else if (ACPI_HANDLE(&pdev->dev))
+                ret = device_probe_acpi(pdev);
+        else
+                /* other initialization */
+                ...
+        /* Continue with any generic probe operations */
+        ...
+}
+
+DO keep the MODULE_DEVICE_TABLE entries together in the driver to make it
+clear the different names the driver is probed for, both from DT and from
+ACPI:
+
+static struct of_device_id virtio_mmio_match[] = {
+        { .compatible = "virtio,mmio", },
+        { }
+};
+MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+        { "LNRO0005", },
+        { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+
+
+ASWG
+----
+The following areas are not yet fully defined for ARM in the 5.1 version
+of the ACPI specification and are expected to be worked through in the
+UEFI ACPI Specification Working Group (ASWG):
+
+  -- ACPI based CPU topology
+  -- ACPI based Power management
+  -- CPU idle control based on PSCI
+  -- CPU performance control (CPPC)
+  -- ACPI based SMMU
+  -- ITS support for GIC in MADT
+
+Participation in this group is open to all UEFI members. Please see
+http://www.uefi.org/workinggroup for details on group membership.
+
+It is the intent of the ARMv8 ACPI kernel code to follow the ACPI specification
+as closely as possible, and to only implement functionality that complies with
+the released standards from UEFI ASWG. As a practical matter, there will be
+vendors that provide bad ACPI tables or violate the standards in some way.
+If this is because of errors, quirks and fixups may be necessary, but will
+be avoided if possible. If there are features missing from ACPI that preclude
+it from being used on a platform, ECRs (Engineering Change Requests) should be
+submitted to ASWG and go through the normal approval process; for those that
+are not UEFI members, many other members of the Linux community are and would
+likely be willing to assist in submitting ECRs.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
|
|
index 4df73da..4adfd50 100644
|
|
--- a/Documentation/kernel-parameters.txt
|
|
+++ b/Documentation/kernel-parameters.txt
|
|
@@ -165,7 +165,7 @@ multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
|
|
bytes respectively. Such letter suffixes can also be entirely omitted.
|
|
|
|
|
|
- acpi= [HW,ACPI,X86]
|
|
+ acpi= [HW,ACPI,X86,ARM64]
|
|
Advanced Configuration and Power Interface
|
|
Format: { force | off | strict | noirq | rsdt }
|
|
force -- enable ACPI if default was off
|
|
@@ -175,6 +175,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|
strictly ACPI specification compliant.
|
|
rsdt -- prefer RSDT over (default) XSDT
|
|
copy_dsdt -- copy DSDT to memory
|
|
+ For ARM64, ONLY "acpi=off" or "acpi=force" are available
|
|
|
|
See also Documentation/power/runtime_pm.txt, pci=noacpi
|
|
|
|
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
|
|
index 1dc9778..0b88d36 100644
|
|
--- a/arch/arm/kvm/mmu.c
|
|
+++ b/arch/arm/kvm/mmu.c
|
|
@@ -1315,6 +1315,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|
(KVM_PHYS_SIZE >> PAGE_SHIFT))
|
|
return -EFAULT;
|
|
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
+ stage2_flush_memslot(kvm, memslot);
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
+
|
|
/*
|
|
* A memory region could potentially cover multiple VMAs, and any holes
|
|
* between them, so iterate over all of them to find out if we can map
|
|
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
|
|
index b1f9a20..d60e537 100644
|
|
--- a/arch/arm64/Kconfig
|
|
+++ b/arch/arm64/Kconfig
|
|
@@ -5,6 +5,7 @@ config ARM64
|
|
select ARCH_HAS_GCOV_PROFILE_ALL
|
|
select ARCH_HAS_SG_CHAIN
|
|
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
|
|
+ select ACPI_REDUCED_HARDWARE_ONLY if ACPI
|
|
select ARCH_USE_CMPXCHG_LOCKREF
|
|
select ARCH_SUPPORTS_ATOMIC_RMW
|
|
select ARCH_WANT_OPTIONAL_GPIOLIB
|
|
@@ -193,6 +194,9 @@ config PCI_DOMAINS_GENERIC
|
|
config PCI_SYSCALL
|
|
def_bool PCI
|
|
|
|
+config PCI_MMCONFIG
|
|
+ def_bool y if PCI && ACPI
|
|
+
|
|
source "drivers/pci/Kconfig"
|
|
source "drivers/pci/pcie/Kconfig"
|
|
source "drivers/pci/hotplug/Kconfig"
|
|
@@ -384,6 +388,9 @@ config SMP
|
|
|
|
If you don't know what to do here, say N.
|
|
|
|
+config ARM_PARKING_PROTOCOL
|
|
+ def_bool y if SMP
|
|
+
|
|
config SCHED_MC
|
|
bool "Multi-core scheduler support"
|
|
depends on SMP
|
|
@@ -646,6 +653,8 @@ source "drivers/Kconfig"
|
|
|
|
source "drivers/firmware/Kconfig"
|
|
|
|
+source "drivers/acpi/Kconfig"
|
|
+
|
|
source "fs/Kconfig"
|
|
|
|
source "arch/arm64/kvm/Kconfig"
|
|
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
|
|
index 1c43cec..ab3b0b1 100644
|
|
--- a/arch/arm64/Makefile
|
|
+++ b/arch/arm64/Makefile
|
|
@@ -49,6 +49,7 @@ core-$(CONFIG_NET) += arch/arm64/net/
|
|
core-$(CONFIG_KVM) += arch/arm64/kvm/
|
|
core-$(CONFIG_XEN) += arch/arm64/xen/
|
|
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
|
|
+drivers-$(CONFIG_PCI) += arch/arm64/pci/
|
|
libs-y := arch/arm64/lib/ $(libs-y)
|
|
libs-y += $(LIBGCC)
|
|
libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
|
|
diff --git a/arch/arm64/include/asm/acenv.h b/arch/arm64/include/asm/acenv.h
|
|
new file mode 100644
|
|
index 0000000..b49166f
|
|
--- /dev/null
|
|
+++ b/arch/arm64/include/asm/acenv.h
|
|
@@ -0,0 +1,18 @@
|
|
+/*
|
|
+ * ARM64 specific ACPICA environments and implementation
|
|
+ *
|
|
+ * Copyright (C) 2014, Linaro Ltd.
|
|
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
|
|
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ */
|
|
+
|
|
+#ifndef _ASM_ACENV_H
|
|
+#define _ASM_ACENV_H
|
|
+
|
|
+/* It is required unconditionally by ACPI core, update it when needed. */
|
|
+
|
|
+#endif /* _ASM_ACENV_H */
|
|
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
|
|
new file mode 100644
|
|
index 0000000..6e692f4
|
|
--- /dev/null
|
|
+++ b/arch/arm64/include/asm/acpi.h
|
|
@@ -0,0 +1,102 @@
|
|
+/*
|
|
+ * Copyright (C) 2013-2014, Linaro Ltd.
|
|
+ * Author: Al Stone <al.stone@linaro.org>
|
|
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
|
|
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation;
|
|
+ */
|
|
+
|
|
+#ifndef _ASM_ACPI_H
|
|
+#define _ASM_ACPI_H
|
|
+
|
|
+#include <asm/smp_plat.h>
|
|
+
|
|
+/* Basic configuration for ACPI */
|
|
+#ifdef CONFIG_ACPI
|
|
+#define acpi_strict 1 /* No out-of-spec workarounds on ARM64 */
|
|
+extern int acpi_disabled;
|
|
+extern int acpi_noirq;
|
|
+extern int acpi_pci_disabled;
|
|
+
|
|
+/* 1 to indicate PSCI 0.2+ is implemented */
|
|
+static inline bool acpi_psci_present(void)
|
|
+{
|
|
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
|
|
+}
|
|
+
|
|
+/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
|
|
+static inline bool acpi_psci_use_hvc(void)
|
|
+{
|
|
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
|
|
+}
|
|
+
|
|
+static inline void disable_acpi(void)
|
|
+{
|
|
+ acpi_disabled = 1;
|
|
+ acpi_pci_disabled = 1;
|
|
+ acpi_noirq = 1;
|
|
+}
|
|
+
|
|
+static inline void enable_acpi(void)
|
|
+{
|
|
+ acpi_disabled = 0;
|
|
+ acpi_pci_disabled = 0;
|
|
+ acpi_noirq = 0;
|
|
+}
|
|
+
|
|
+/* MPIDR value provided in GICC structure is 64 bits, but the
|
|
+ * existing apic_id (CPU hardware ID) used in the ACPI processor
+ * driver is 32-bit; to conform to the same datatype we need
|
|
+ * to repack the GICC structure MPIDR.
|
|
+ *
|
|
+ * Only 32 bits of MPIDR are used:
|
|
+ *
|
|
+ * Bits [0:7] Aff0;
|
|
+ * Bits [8:15] Aff1;
|
|
+ * Bits [16:23] Aff2;
|
|
+ * Bits [32:39] Aff3;
|
|
+ */
|
|
+static inline u32 pack_mpidr(u64 mpidr)
|
|
+{
|
|
+ return (u32) ((mpidr & 0xff00000000) >> 8) | mpidr;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * The ACPI processor driver for ACPI core code needs this macro
|
|
+ * to find out whether this cpu was already mapped (mapping from CPU hardware
|
|
+ * ID to CPU logical ID) or not.
|
|
+ *
|
|
+ * cpu_logical_map(cpu) is the mapping of MPIDR and the logical cpu,
|
|
+ * and MPIDR is the cpu hardware ID we needed to pack.
|
|
+ */
|
|
+#define cpu_physical_id(cpu) pack_mpidr(cpu_logical_map(cpu))
|
|
+
|
|
+/*
|
|
+ * It is used by the ACPI core in kdump to boot a UP system with an SMP
+ * kernel; with this check the ACPI core will not override the CPU index
+ * obtained from the GICC with 0, and will not print an error message.
+ * Since the MADT must provide at least one GICC structure for GIC
+ * initialization, a CPU will always be available in the MADT on ARM64.
|
|
+ */
|
|
+static inline bool acpi_has_cpu_in_madt(void)
|
|
+{
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
|
|
+void __init acpi_smp_init_cpus(void);
|
|
+
|
|
+extern int acpi_get_cpu_parked_address(int cpu, u64 *addr);
|
|
+
|
|
+#else
|
|
+static inline void disable_acpi(void) { }
|
|
+static inline bool acpi_psci_present(void) { return false; }
|
|
+static inline bool acpi_psci_use_hvc(void) { return false; }
|
|
+static inline void acpi_smp_init_cpus(void) { }
|
|
+static inline int acpi_get_cpu_parked_address(int cpu, u64 *addr) { return -EOPNOTSUPP; }
|
|
+#endif /* CONFIG_ACPI */
|
|
+
|
|
+#endif /*_ASM_ACPI_H*/
|
|
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
|
|
index 6f8e2ef..978f567 100644
|
|
--- a/arch/arm64/include/asm/cpu_ops.h
|
|
+++ b/arch/arm64/include/asm/cpu_ops.h
|
|
@@ -64,6 +64,7 @@ struct cpu_operations {
|
|
};
|
|
|
|
extern const struct cpu_operations *cpu_ops[NR_CPUS];
|
|
+const struct cpu_operations *cpu_get_ops(const char *name);
|
|
int __init cpu_read_ops(struct device_node *dn, int cpu);
|
|
void __init cpu_read_bootcpu_ops(void);
|
|
|
|
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
|
|
index 1f65be3..c0f89a0 100644
|
|
--- a/arch/arm64/include/asm/elf.h
|
|
+++ b/arch/arm64/include/asm/elf.h
|
|
@@ -114,7 +114,8 @@ typedef struct user_fpsimd_state elf_fpregset_t;
|
|
*/
|
|
#define elf_check_arch(x) ((x)->e_machine == EM_AARCH64)
|
|
|
|
-#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X)
|
|
+#define elf_read_implies_exec(ex,stk) (test_thread_flag(TIF_32BIT) \
|
|
+ ? (stk == EXSTACK_ENABLE_X) : 0)
|
|
|
|
#define CORE_DUMP_USE_REGSET
|
|
#define ELF_EXEC_PAGESIZE PAGE_SIZE
|
|
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
|
|
index 872ba93..2f287a6 100644
|
|
--- a/arch/arm64/include/asm/pci.h
|
|
+++ b/arch/arm64/include/asm/pci.h
|
|
@@ -33,5 +33,56 @@ static inline int pci_proc_domain(struct pci_bus *bus)
|
|
}
|
|
#endif /* CONFIG_PCI */
|
|
|
|
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
|
|
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
|
|
+
|
|
+#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20)
|
|
+
|
|
+struct acpi_device;
|
|
+
|
|
+struct pci_sysdata {
|
|
+ int domain; /* PCI domain */
|
|
+ int node; /* NUMA node */
|
|
+ struct acpi_device *companion; /* ACPI companion device */
|
|
+ void *iommu; /* IOMMU private data */
|
|
+};
|
|
+
|
|
+struct acpi_pci_root;
|
|
+struct pci_mmcfg_region;
|
|
+
|
|
+typedef int (*acpi_mcfg_fixup_t)(struct acpi_pci_root *root,
|
|
+ struct pci_mmcfg_region *cfg);
|
|
+
|
|
+struct pci_mmcfg_region {
|
|
+ struct list_head list;
|
|
+ struct resource res;
|
|
+ int (*read)(struct pci_mmcfg_region *cfg, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 *value);
|
|
+ int (*write)(struct pci_mmcfg_region *cfg, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 value);
|
|
+ acpi_mcfg_fixup_t fixup;
|
|
+ void *data;
|
|
+ u64 address;
|
|
+ char __iomem *virt;
|
|
+ u16 segment;
|
|
+ u8 start_bus;
|
|
+ u8 end_bus;
|
|
+ char name[PCI_MMCFG_RESOURCE_NAME_LEN];
|
|
+};
|
|
+
|
|
+struct acpi_mcfg_fixup {
|
|
+ char oem_id[7];
|
|
+ char oem_table_id[9];
|
|
+ acpi_mcfg_fixup_t hook;
|
|
+};
|
|
+
|
|
+/* Designate a routine to fix up buggy MCFG */
|
|
+#define DECLARE_ACPI_MCFG_FIXUP(oem_id, table_id, hook) \
|
|
+ static const struct acpi_mcfg_fixup __acpi_fixup_##hook __used \
|
|
+ __attribute__((__section__(".acpi_fixup_mcfg"), aligned((sizeof(void *))))) \
|
|
+ = { {oem_id}, {table_id}, hook };
|
|
+
|
|
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
|
|
+
|
|
#endif /* __KERNEL__ */
|
|
#endif /* __ASM_PCI_H */
|
|
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
|
|
index e5312ea..2454bc5 100644
|
|
--- a/arch/arm64/include/asm/psci.h
|
|
+++ b/arch/arm64/include/asm/psci.h
|
|
@@ -14,6 +14,7 @@
|
|
#ifndef __ASM_PSCI_H
|
|
#define __ASM_PSCI_H
|
|
|
|
-int psci_init(void);
|
|
+int psci_dt_init(void);
|
|
+int psci_acpi_init(void);
|
|
|
|
#endif /* __ASM_PSCI_H */
|
|
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
|
|
index 780f82c..3411561 100644
|
|
--- a/arch/arm64/include/asm/smp.h
|
|
+++ b/arch/arm64/include/asm/smp.h
|
|
@@ -39,9 +39,10 @@ extern void show_ipi_list(struct seq_file *p, int prec);
|
|
extern void handle_IPI(int ipinr, struct pt_regs *regs);
|
|
|
|
/*
|
|
- * Setup the set of possible CPUs (via set_cpu_possible)
|
|
+ * Discover the set of possible CPUs and determine their
|
|
+ * SMP operations.
|
|
*/
|
|
-extern void smp_init_cpus(void);
|
|
+extern void of_smp_init_cpus(void);
|
|
|
|
/*
|
|
* Provide a function to raise an IPI cross call on CPUs in callmap.
|
|
@@ -51,6 +52,11 @@ extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
|
|
extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
|
|
|
|
/*
|
|
+ * Provide a function to signal a parked secondary CPU.
|
|
+ */
|
|
+extern void set_smp_boot_wakeup_call(void (*)(int cpu));
|
|
+
|
|
+/*
|
|
* Called from the secondary holding pen, this is the secondary CPU entry point.
|
|
*/
|
|
asmlinkage void secondary_start_kernel(void);
|
|
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
|
|
index eaa77ed..63ffe3c 100644
|
|
--- a/arch/arm64/kernel/Makefile
|
|
+++ b/arch/arm64/kernel/Makefile
|
|
@@ -23,7 +23,8 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
|
|
../../arm/kernel/opcodes.o
|
|
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
|
|
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
|
|
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
|
|
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o \
|
|
+ smp_parking_protocol.o
|
|
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
|
|
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
|
|
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
|
|
@@ -33,6 +34,7 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
|
|
arm64-obj-$(CONFIG_KGDB) += kgdb.o
|
|
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
|
|
arm64-obj-$(CONFIG_PCI) += pci.o
|
|
+arm64-obj-$(CONFIG_ACPI) += acpi.o
|
|
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
|
|
|
|
obj-y += $(arm64-obj-y) vdso/
|
|
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
|
|
new file mode 100644
|
|
index 0000000..06a96be
|
|
--- /dev/null
|
|
+++ b/arch/arm64/kernel/acpi.c
|
|
@@ -0,0 +1,398 @@
|
|
+/*
|
|
+ * ARM64 Specific Low-Level ACPI Boot Support
|
|
+ *
|
|
+ * Copyright (C) 2013-2014, Linaro Ltd.
|
|
+ * Author: Al Stone <al.stone@linaro.org>
|
|
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
|
|
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
|
|
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
|
|
+ * Author: Naresh Bhat <naresh.bhat@linaro.org>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ */
|
|
+
|
|
+#define pr_fmt(fmt) "ACPI: " fmt
|
|
+
|
|
+#include <linux/init.h>
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/cpumask.h>
|
|
+#include <linux/memblock.h>
|
|
+#include <linux/irq.h>
|
|
+#include <linux/irqdomain.h>
|
|
+#include <linux/bootmem.h>
|
|
+#include <linux/smp.h>
|
|
+#include <linux/irqchip/arm-gic-acpi.h>
|
|
+
|
|
+#include <asm/cputype.h>
|
|
+#include <asm/cpu_ops.h>
|
|
+
|
|
+int acpi_noirq; /* skip ACPI IRQ initialization */
|
|
+int acpi_disabled;
|
|
+EXPORT_SYMBOL(acpi_disabled);
|
|
+
|
|
+int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
|
|
+EXPORT_SYMBOL(acpi_pci_disabled);
|
|
+
|
|
+static int enabled_cpus; /* Processors (GICC) with enabled flag in MADT */
|
|
+
|
|
+static char *boot_method;
|
|
+static u64 parked_address[NR_CPUS];
|
|
+
|
|
+/*
|
|
+ * Since we're on ARM, the default interrupt routing model
|
|
+ * clearly has to be GIC.
|
|
+ */
|
|
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_GIC;
|
|
+
|
|
+/*
|
|
+ * __acpi_map_table() will be called before page_init(), so early_ioremap()
|
|
+ * or early_memremap() should be called here for ACPI table mapping.
|
|
+ */
|
|
+char *__init __acpi_map_table(unsigned long phys, unsigned long size)
|
|
+{
|
|
+ if (!phys || !size)
|
|
+ return NULL;
|
|
+
|
|
+ return early_memremap(phys, size);
|
|
+}
|
|
+
|
|
+void __init __acpi_unmap_table(char *map, unsigned long size)
|
|
+{
|
|
+ if (!map || !size)
|
|
+ return;
|
|
+
|
|
+ early_memunmap(map, size);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * acpi_map_gic_cpu_interface - generates a logical cpu number
|
|
+ * and map to MPIDR represented by GICC structure
|
|
+ * @mpidr: CPU's hardware id to register, MPIDR represented in MADT
|
|
+ * @enabled: this cpu is enabled or not
|
|
+ *
|
|
+ * Returns the logical cpu number which maps to MPIDR
|
|
+ */
|
|
+static int acpi_map_gic_cpu_interface(u64 mpidr, u64 parked_addr, u8 enabled)
|
|
+{
|
|
+ int cpu;
|
|
+
|
|
+ if (mpidr == INVALID_HWID) {
|
|
+ pr_info("Skip MADT cpu entry with invalid MPIDR\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ total_cpus++;
|
|
+ if (!enabled)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (enabled_cpus >= NR_CPUS) {
|
|
+ pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
|
|
+ NR_CPUS, total_cpus, mpidr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* No need to check duplicate MPIDRs for the first CPU */
|
|
+ if (enabled_cpus) {
|
|
+ /*
|
|
+ * Duplicate MPIDRs are a recipe for disaster. Scan
|
|
+ * all initialized entries and check for
|
|
+ * duplicates. If any is found just ignore the CPU.
|
|
+ */
|
|
+ for_each_possible_cpu(cpu) {
|
|
+ if (cpu_logical_map(cpu) == mpidr) {
|
|
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
|
|
+ mpidr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* allocate a logical cpu id for the newcomer */
|
|
+ cpu = cpumask_next_zero(-1, cpu_possible_mask);
|
|
+ } else {
|
|
+ /*
|
|
+ * First GICC entry must be the BSP, as the ACPI spec says
|
|
+ * in section 5.2.12.15
|
|
+ */
|
|
+ if (cpu_logical_map(0) != mpidr) {
|
|
+ pr_err("First GICC entry with MPIDR 0x%llx is not BSP\n",
|
|
+ mpidr);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * boot_cpu_init() already holds bit 0 in cpu_present_mask
|
|
+ * for BSP, no need to allocate again.
|
|
+ */
|
|
+ cpu = 0;
|
|
+ }
|
|
+
|
|
+ parked_address[cpu] = parked_addr;
|
|
+
|
|
+ /* CPU 0 was already initialized */
|
|
+ if (cpu) {
|
|
+ cpu_ops[cpu] = cpu_get_ops(boot_method);
|
|
+ if (!cpu_ops[cpu])
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (cpu_ops[cpu]->cpu_init(NULL, cpu))
|
|
+ return -EOPNOTSUPP;
|
|
+
|
|
+ /* map the logical cpu id to cpu MPIDR */
|
|
+ cpu_logical_map(cpu) = mpidr;
|
|
+
|
|
+ set_cpu_possible(cpu, true);
|
|
+ } else {
|
|
+ /* get cpu0's ops, no need to return if ops is null */
|
|
+ cpu_ops[0] = cpu_get_ops(boot_method);
|
|
+ }
|
|
+
|
|
+ enabled_cpus++;
|
|
+ return cpu;
|
|
+}
|
|
+
|
|
+static int __init
|
|
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
|
|
+ const unsigned long end)
|
|
+{
|
|
+ struct acpi_madt_generic_interrupt *processor;
|
|
+
|
|
+ processor = (struct acpi_madt_generic_interrupt *)header;
|
|
+
|
|
+ if (BAD_MADT_ENTRY(processor, end))
|
|
+ return -EINVAL;
|
|
+
|
|
+ acpi_table_print_madt_entry(header);
|
|
+
|
|
+ acpi_map_gic_cpu_interface(processor->arm_mpidr & MPIDR_HWID_BITMASK,
|
|
+ processor->parked_address, processor->flags & ACPI_MADT_ENABLED);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Parse GIC cpu interface entries in MADT for SMP init */
|
|
+void __init acpi_smp_init_cpus(void)
|
|
+{
|
|
+ int count;
|
|
+
|
|
+ /*
|
|
+ * do a partial walk of MADT to determine how many CPUs
|
|
+ * we have including disabled CPUs, and get information
|
|
+ * we need for SMP init
|
|
+ */
|
|
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
|
|
+ acpi_parse_gic_cpu_interface, 0);
|
|
+
|
|
+ if (!count) {
|
|
+ pr_err("No GIC CPU interface entries present\n");
|
|
+ return;
|
|
+ } else if (count < 0) {
|
|
+ pr_err("Error parsing GIC CPU interface entry\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Make boot-up look pretty */
|
|
+ pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
|
|
+}
|
|
+
|
|
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
|
|
+{
|
|
+ *irq = irq_find_mapping(NULL, gsi);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
|
|
+
|
|
+/*
|
|
+ * success: return IRQ number (>0)
|
|
+ * failure: return =< 0
|
|
+ */
|
|
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
|
|
+{
|
|
+ unsigned int irq;
|
|
+ unsigned int irq_type;
|
|
+
|
|
+ /*
|
|
+ * ACPI has no bindings to indicate SPI or PPI, so we
|
|
+ * use different mappings from DT in ACPI.
|
|
+ *
|
|
+ * For FDT
|
|
+ * PPI interrupt: in the range [0, 15];
|
|
+ * SPI interrupt: in the range [0, 987];
|
|
+ *
|
|
+ * For ACPI, GSI should be unique so using
|
|
+ * the hwirq directly for the mapping:
|
|
+ * PPI interrupt: in the range [16, 31];
|
|
+ * SPI interrupt: in the range [32, 1019];
|
|
+ */
|
|
+
|
|
+ if (trigger == ACPI_EDGE_SENSITIVE &&
|
|
+ polarity == ACPI_ACTIVE_LOW)
|
|
+ irq_type = IRQ_TYPE_EDGE_FALLING;
|
|
+ else if (trigger == ACPI_EDGE_SENSITIVE &&
|
|
+ polarity == ACPI_ACTIVE_HIGH)
|
|
+ irq_type = IRQ_TYPE_EDGE_RISING;
|
|
+ else if (trigger == ACPI_LEVEL_SENSITIVE &&
|
|
+ polarity == ACPI_ACTIVE_LOW)
|
|
+ irq_type = IRQ_TYPE_LEVEL_LOW;
|
|
+ else if (trigger == ACPI_LEVEL_SENSITIVE &&
|
|
+ polarity == ACPI_ACTIVE_HIGH)
|
|
+ irq_type = IRQ_TYPE_LEVEL_HIGH;
|
|
+ else
|
|
+ irq_type = IRQ_TYPE_NONE;
|
|
+
|
|
+ /*
|
|
+ * Since only one GIC is supported in ACPI 5.0, we can
|
|
+ * create mapping refer to the default domain
|
|
+ */
|
|
+ irq = irq_create_mapping(NULL, gsi);
|
|
+ if (!irq)
|
|
+ return irq;
|
|
+
|
|
+ /* Set irq type if specified and different than the current one */
|
|
+ if (irq_type != IRQ_TYPE_NONE &&
|
|
+ irq_type != irq_get_trigger_type(irq))
|
|
+ irq_set_irq_type(irq, irq_type);
|
|
+ return irq;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
|
|
+
|
|
+void acpi_unregister_gsi(u32 gsi)
|
|
+{
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
|
|
+
|
|
+static int __init acpi_parse_fadt(struct acpi_table_header *table)
|
|
+{
|
|
+ struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table;
|
|
+
|
|
+ /*
|
|
+ * Revision in table header is the FADT Major revision,
|
|
+ * and there is a minor revision of FADT which was introduced
|
|
+ * by ACPI 5.1, we only deal with ACPI 5.1 or newer revision
|
|
+ * to get arm boot flags, or we will disable ACPI.
|
|
+ */
|
|
+ if (table->revision > 5 ||
|
|
+ (table->revision == 5 && fadt->minor_revision >= 1)) {
|
|
+ /*
|
|
+ * ACPI 5.1 only has two explicit methods to boot up SMP,
|
|
+ * PSCI and Parking protocol, but the Parking protocol is
|
|
+ * only specified for ARMv7 now, so make PSCI as the only
|
|
+ * way for the SMP boot protocol before some updates for
|
|
+ * the ACPI spec or the Parking protocol spec.
|
|
+ */
|
|
+ if (acpi_psci_present())
|
|
+ boot_method = "psci";
|
|
+ else if (IS_ENABLED(CONFIG_ARM_PARKING_PROTOCOL))
|
|
+ boot_method = "parking-protocol";
|
|
+
|
|
+ if (!boot_method)
|
|
+ pr_warn("No boot method, will not bring up secondary CPUs\n");
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
+ pr_warn("Unsupported FADT revision %d.%d, should be 5.1+, will disable ACPI\n",
|
|
+ table->revision, fadt->minor_revision);
|
|
+ disable_acpi();
|
|
+
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * acpi_boot_table_init() called from setup_arch(), always.
|
|
+ * 1. find RSDP and get its address, and then find XSDT
|
|
+ * 2. extract all tables and checksum them all
+ * 3. check the ACPI FADT revision
|
|
+ *
|
|
+ * We can parse ACPI boot-time tables such as MADT after
|
|
+ * this function is called.
|
|
+ */
|
|
+void __init acpi_boot_table_init(void)
|
|
+{
|
|
+ /* If acpi_disabled, bail out */
|
|
+ if (acpi_disabled)
|
|
+ return;
|
|
+
|
|
+ /* Initialize the ACPI boot-time table parser. */
|
|
+ if (acpi_table_init()) {
|
|
+ disable_acpi();
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
|
|
+ pr_err("Can't find FADT or error happened during parsing FADT\n");
|
|
+}
|
|
+
|
|
+void __init acpi_gic_init(void)
|
|
+{
|
|
+ struct acpi_table_header *table;
|
|
+ acpi_status status;
|
|
+ acpi_size tbl_size;
|
|
+ int err;
|
|
+
|
|
+ status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
|
|
+ if (ACPI_FAILURE(status)) {
|
|
+ const char *msg = acpi_format_exception(status);
|
|
+
|
|
+ pr_err("Failed to get MADT table, %s\n", msg);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ err = gic_v2_acpi_init(table);
|
|
+ if (err)
|
|
+ pr_err("Failed to initialize GIC IRQ controller");
|
|
+
|
|
+ early_acpi_os_unmap_memory((char *)table, tbl_size);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Parked Address in ACPI GIC structure will be used as the CPU
|
|
+ * release address
|
|
+ */
|
|
+int acpi_get_cpu_parked_address(int cpu, u64 *addr)
|
|
+{
|
|
+ if (!addr || !parked_address[cpu])
|
|
+ return -EINVAL;
|
|
+
|
|
+ *addr = parked_address[cpu];
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __init parse_acpi(char *arg)
|
|
+{
|
|
+ if (!arg)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
|
|
+ if (strcmp(arg, "off") == 0)
|
|
+ disable_acpi();
|
|
+ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
|
|
+ enable_acpi();
|
|
+ else
|
|
+ return -EINVAL; /* Core will print when we return error */
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+early_param("acpi", parse_acpi);
|
|
+
|
|
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
|
|
+{
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
|
|
+{
|
|
+ /* TBD */
|
|
+ return -EINVAL;
|
|
+}
|
|
+EXPORT_SYMBOL(acpi_register_ioapic);
|
|
+
|
|
+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
|
|
+{
|
|
+ /* TBD */
|
|
+ return -EINVAL;
|
|
+}
|
|
+EXPORT_SYMBOL(acpi_unregister_ioapic);
|
|
+
|
|
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
|
|
index cce9524..1d90f31 100644
|
|
--- a/arch/arm64/kernel/cpu_ops.c
|
|
+++ b/arch/arm64/kernel/cpu_ops.c
|
|
@@ -23,19 +23,23 @@
|
|
#include <linux/string.h>
|
|
|
|
extern const struct cpu_operations smp_spin_table_ops;
|
|
+extern const struct cpu_operations smp_parking_protocol_ops;
|
|
extern const struct cpu_operations cpu_psci_ops;
|
|
|
|
const struct cpu_operations *cpu_ops[NR_CPUS];
|
|
|
|
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
|
|
+static const struct cpu_operations *supported_cpu_ops[] = {
|
|
#ifdef CONFIG_SMP
|
|
&smp_spin_table_ops,
|
|
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
|
|
+ &smp_parking_protocol_ops,
|
|
+#endif
|
|
#endif
|
|
&cpu_psci_ops,
|
|
NULL,
|
|
};
|
|
|
|
-static const struct cpu_operations * __init cpu_get_ops(const char *name)
|
|
+const struct cpu_operations *cpu_get_ops(const char *name)
|
|
{
|
|
const struct cpu_operations **ops = supported_cpu_ops;
|
|
|
|
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
|
|
index 6fac253..f9de195 100644
|
|
--- a/arch/arm64/kernel/efi.c
|
|
+++ b/arch/arm64/kernel/efi.c
|
|
@@ -484,3 +484,40 @@ static int __init arm64_dmi_init(void)
|
|
return 0;
|
|
}
|
|
core_initcall(arm64_dmi_init);
|
|
+
|
|
+/*
|
|
+ * If nothing else is handling pm_power_off, use EFI
|
|
+ *
|
|
+ * When Guenter Roeck's power-off handler call chain patches land,
|
|
+ * we just need to return true unconditionally.
|
|
+ */
|
|
+bool efi_poweroff_required(void)
|
|
+{
|
|
+ return pm_power_off == NULL;
|
|
+}
|
|
+
|
|
+static int arm64_efi_restart(struct notifier_block *this,
|
|
+ unsigned long mode, void *cmd)
|
|
+{
|
|
+ efi_reboot(reboot_mode, cmd);
|
|
+ return NOTIFY_DONE;
|
|
+}
|
|
+
|
|
+static struct notifier_block arm64_efi_restart_nb = {
|
|
+ .notifier_call = arm64_efi_restart,
|
|
+ .priority = INT_MAX,
|
|
+};
|
|
+
|
|
+static int __init arm64_register_efi_restart(void)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
|
|
+ ret = register_restart_handler(&arm64_efi_restart_nb);
|
|
+ if (ret)
|
|
+ pr_err("%s: cannot register restart handler, %d\n",
|
|
+ __func__, ret);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+late_initcall(arm64_register_efi_restart);
|
|
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
|
|
index ce5836c..978cd21 100644
|
|
--- a/arch/arm64/kernel/pci.c
|
|
+++ b/arch/arm64/kernel/pci.c
|
|
@@ -17,6 +17,8 @@
|
|
#include <linux/of_pci.h>
|
|
#include <linux/of_platform.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/pci-acpi.h>
|
|
|
|
#include <asm/pci-bridge.h>
|
|
|
|
@@ -37,34 +39,99 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
|
|
return res->start;
|
|
}
|
|
|
|
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
|
|
+{
|
|
+ struct pci_sysdata *sd;
|
|
+
|
|
+ if (!acpi_disabled) {
|
|
+ sd = bridge->bus->sysdata;
|
|
+ ACPI_COMPANION_SET(&bridge->dev, sd->companion);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/*
|
|
* Try to assign the IRQ number from DT when adding a new device
|
|
*/
|
|
int pcibios_add_device(struct pci_dev *dev)
|
|
{
|
|
- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
|
|
+ if (acpi_disabled)
|
|
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
|
|
|
|
return 0;
|
|
}
|
|
|
|
+void pcibios_add_bus(struct pci_bus *bus)
|
|
+{
|
|
+ if (!acpi_disabled)
|
|
+ acpi_pci_add_bus(bus);
|
|
+}
|
|
|
|
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
|
|
-static bool dt_domain_found = false;
|
|
+void pcibios_remove_bus(struct pci_bus *bus)
|
|
+{
|
|
+ if (!acpi_disabled)
|
|
+ acpi_pci_remove_bus(bus);
|
|
+}
|
|
+
|
|
+int pcibios_enable_irq(struct pci_dev *dev)
|
|
+{
|
|
+ if (!acpi_disabled && !pci_dev_msi_enabled(dev))
|
|
+ acpi_pci_irq_enable(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pcibios_disable_irq(struct pci_dev *dev)
|
|
+{
|
|
+ if (!acpi_disabled && !pci_dev_msi_enabled(dev))
|
|
+ acpi_pci_irq_disable(dev);
|
|
+ return 0;
|
|
+}
|
|
|
|
+int pcibios_enable_device(struct pci_dev *dev, int bars)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ err = pci_enable_resources(dev, bars);
|
|
+ if (err < 0)
|
|
+ return err;
|
|
+
|
|
+ if (!pci_dev_msi_enabled(dev))
|
|
+ return pcibios_enable_irq(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
|
|
void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
|
|
{
|
|
- int domain = of_get_pci_domain_nr(parent->of_node);
|
|
-
|
|
- if (domain >= 0) {
|
|
- dt_domain_found = true;
|
|
- } else if (dt_domain_found == true) {
|
|
- dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
|
|
- parent->of_node->full_name);
|
|
- return;
|
|
- } else {
|
|
- domain = pci_get_new_domain_nr();
|
|
- }
|
|
+ int domain = -1;
|
|
|
|
- bus->domain_nr = domain;
|
|
+ if (acpi_disabled)
|
|
+ domain = of_get_pci_domain_nr(parent->of_node);
|
|
+ else {
|
|
+ struct pci_sysdata *sd = bus->sysdata;
|
|
+
|
|
+ domain = sd->domain;
|
|
+ }
|
|
+ if (domain >= 0)
|
|
+ bus->domain_nr = domain;
|
|
}
|
|
#endif
|
|
+
|
|
+static int __init pcibios_assign_resources(void)
|
|
+{
|
|
+ struct pci_bus *root_bus;
|
|
+
|
|
+ if (acpi_disabled)
|
|
+ return 0;
|
|
+
|
|
+ list_for_each_entry(root_bus, &pci_root_buses, node) {
|
|
+ pcibios_resource_survey_bus(root_bus);
|
|
+ pci_assign_unassigned_root_bus_resources(root_bus);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+/*
|
|
+ * fs_initcall comes after subsys_initcall, so we know acpi scan
|
|
+ * has run.
|
|
+ */
|
|
+fs_initcall(pcibios_assign_resources);
|
|
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
|
|
index f1dbca7..dbb3945 100644
|
|
--- a/arch/arm64/kernel/psci.c
|
|
+++ b/arch/arm64/kernel/psci.c
|
|
@@ -15,6 +15,7 @@
|
|
|
|
#define pr_fmt(fmt) "psci: " fmt
|
|
|
|
+#include <linux/acpi.h>
|
|
#include <linux/init.h>
|
|
#include <linux/of.h>
|
|
#include <linux/smp.h>
|
|
@@ -24,6 +25,7 @@
|
|
#include <linux/slab.h>
|
|
#include <uapi/linux/psci.h>
|
|
|
|
+#include <asm/acpi.h>
|
|
#include <asm/compiler.h>
|
|
#include <asm/cpu_ops.h>
|
|
#include <asm/errno.h>
|
|
@@ -304,6 +306,33 @@ static void psci_sys_poweroff(void)
|
|
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
|
|
}
|
|
|
|
+static void psci_0_2_set_functions(void)
|
|
+{
|
|
+ pr_info("Using standard PSCI v0.2 function IDs\n");
|
|
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
|
|
+ psci_ops.cpu_suspend = psci_cpu_suspend;
|
|
+
|
|
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
|
|
+ psci_ops.cpu_off = psci_cpu_off;
|
|
+
|
|
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
|
|
+ psci_ops.cpu_on = psci_cpu_on;
|
|
+
|
|
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
|
|
+ psci_ops.migrate = psci_migrate;
|
|
+
|
|
+ psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
|
|
+ psci_ops.affinity_info = psci_affinity_info;
|
|
+
|
|
+ psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
|
|
+ PSCI_0_2_FN_MIGRATE_INFO_TYPE;
|
|
+ psci_ops.migrate_info_type = psci_migrate_info_type;
|
|
+
|
|
+ arm_pm_restart = psci_sys_reset;
|
|
+
|
|
+ pm_power_off = psci_sys_poweroff;
|
|
+}
|
|
+
|
|
/*
|
|
* PSCI Function IDs for v0.2+ are well defined so use
|
|
* standard values.
|
|
@@ -337,29 +366,7 @@ static int __init psci_0_2_init(struct device_node *np)
|
|
}
|
|
}
|
|
|
|
- pr_info("Using standard PSCI v0.2 function IDs\n");
|
|
- psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
|
|
- psci_ops.cpu_suspend = psci_cpu_suspend;
|
|
-
|
|
- psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
|
|
- psci_ops.cpu_off = psci_cpu_off;
|
|
-
|
|
- psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
|
|
- psci_ops.cpu_on = psci_cpu_on;
|
|
-
|
|
- psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
|
|
- psci_ops.migrate = psci_migrate;
|
|
-
|
|
- psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
|
|
- psci_ops.affinity_info = psci_affinity_info;
|
|
-
|
|
- psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
|
|
- PSCI_0_2_FN_MIGRATE_INFO_TYPE;
|
|
- psci_ops.migrate_info_type = psci_migrate_info_type;
|
|
-
|
|
- arm_pm_restart = psci_sys_reset;
|
|
-
|
|
- pm_power_off = psci_sys_poweroff;
|
|
+ psci_0_2_set_functions();
|
|
|
|
out_put_node:
|
|
of_node_put(np);
|
|
@@ -412,7 +419,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
|
|
{},
|
|
};
|
|
|
|
-int __init psci_init(void)
|
|
+int __init psci_dt_init(void)
|
|
{
|
|
struct device_node *np;
|
|
const struct of_device_id *matched_np;
|
|
@@ -427,6 +434,29 @@ int __init psci_init(void)
|
|
return init_fn(np);
|
|
}
|
|
|
|
+/*
|
|
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
|
|
+ * explicitly clarified in SBBR
|
|
+ */
|
|
+int __init psci_acpi_init(void)
|
|
+{
|
|
+ if (!acpi_psci_present()) {
|
|
+ pr_info("is not implemented in ACPI.\n");
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
+ pr_info("probing for conduit method from ACPI.\n");
|
|
+
|
|
+ if (acpi_psci_use_hvc())
|
|
+ invoke_psci_fn = __invoke_psci_fn_hvc;
|
|
+ else
|
|
+ invoke_psci_fn = __invoke_psci_fn_smc;
|
|
+
|
|
+ psci_0_2_set_functions();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
#ifdef CONFIG_SMP
|
|
|
|
static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
|
|
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
|
|
index 20fe2932ad0c..cf4ab5661088 100644
|
|
--- a/arch/arm64/kernel/setup.c
|
|
+++ b/arch/arm64/kernel/setup.c
|
|
@@ -43,6 +43,7 @@
|
|
#include <linux/of_fdt.h>
|
|
#include <linux/of_platform.h>
|
|
#include <linux/efi.h>
|
|
+#include <linux/acpi.h>
|
|
#include <linux/personality.h>
|
|
|
|
#include <asm/fixmap.h>
|
|
@@ -61,6 +62,7 @@
|
|
#include <asm/memblock.h>
|
|
#include <asm/psci.h>
|
|
#include <asm/efi.h>
|
|
+#include <asm/acpi.h>
|
|
|
|
unsigned int processor_id;
|
|
EXPORT_SYMBOL(processor_id);
|
|
@@ -387,6 +389,8 @@ void __init setup_arch(char **cmdline_p)
|
|
early_fixmap_init();
|
|
early_ioremap_init();
|
|
|
|
+ disable_acpi();
|
|
+
|
|
parse_early_param();
|
|
|
|
/*
|
|
@@ -398,19 +402,29 @@ void __init setup_arch(char **cmdline_p)
|
|
efi_init();
|
|
arm64_memblock_init();
|
|
|
|
+ /* Parse the ACPI tables for possible boot-time configuration */
|
|
+ acpi_boot_table_init();
|
|
+
|
|
paging_init();
|
|
request_standard_resources();
|
|
|
|
efi_idmap_init();
|
|
early_ioremap_reset();
|
|
|
|
- unflatten_device_tree();
|
|
-
|
|
- psci_init();
|
|
+ if (acpi_disabled) {
|
|
+ unflatten_device_tree();
|
|
+ psci_dt_init();
|
|
+ cpu_read_bootcpu_ops();
|
|
+#ifdef CONFIG_SMP
|
|
+ of_smp_init_cpus();
|
|
+#endif
|
|
+ } else {
|
|
+ psci_acpi_init();
|
|
+ acpi_smp_init_cpus();
|
|
+ }
|
|
|
|
cpu_read_bootcpu_ops();
|
|
#ifdef CONFIG_SMP
|
|
- smp_init_cpus();
|
|
smp_build_mpidr_hash();
|
|
#endif
|
|
|
|
@@ -565,3 +579,25 @@ const struct seq_operations cpuinfo_op = {
|
|
.stop = c_stop,
|
|
.show = c_show
|
|
};
|
|
+
|
|
+/*
|
|
+ * Temporary hack to avoid need for console= on command line
|
|
+ */
|
|
+static int __init arm64_console_setup(void)
|
|
+{
|
|
+ /* Allow cmdline to override our assumed preferences */
|
|
+ if (console_set_on_cmdline)
|
|
+ return 0;
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_SBSAUART_TTY))
|
|
+ add_preferred_console("ttySBSA", 0, "115200");
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_SERIAL_AMBA_PL011))
|
|
+ add_preferred_console("ttyAMA", 0, "115200");
|
|
+
|
|
+ if (IS_ENABLED(CONFIG_SERIAL_8250))
|
|
+ add_preferred_console("ttyS", 0, "115200");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+early_initcall(arm64_console_setup);
|
|
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
|
|
index 7ae6ee0..5aaf5a4 100644
|
|
--- a/arch/arm64/kernel/smp.c
|
|
+++ b/arch/arm64/kernel/smp.c
|
|
@@ -323,7 +323,7 @@ void __init smp_prepare_boot_cpu(void)
|
|
* cpu logical map array containing MPIDR values related to logical
|
|
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
|
|
*/
|
|
-void __init smp_init_cpus(void)
|
|
+void __init of_smp_init_cpus(void)
|
|
{
|
|
struct device_node *dn = NULL;
|
|
unsigned int i, cpu = 1;
|
|
diff --git a/arch/arm64/kernel/smp_parking_protocol.c b/arch/arm64/kernel/smp_parking_protocol.c
|
|
new file mode 100644
|
|
index 0000000..e1153ce
|
|
--- /dev/null
|
|
+++ b/arch/arm64/kernel/smp_parking_protocol.c
|
|
@@ -0,0 +1,110 @@
|
|
+/*
|
|
+ * Parking Protocol SMP initialisation
|
|
+ *
|
|
+ * Based largely on spin-table method.
|
|
+ *
|
|
+ * Copyright (C) 2013 ARM Ltd.
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+#include <linux/delay.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/smp.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/acpi.h>
|
|
+
|
|
+#include <asm/cacheflush.h>
|
|
+#include <asm/cpu_ops.h>
|
|
+#include <asm/cputype.h>
|
|
+#include <asm/smp_plat.h>
|
|
+
|
|
+static phys_addr_t cpu_mailbox_addr[NR_CPUS];
|
|
+
|
|
+static void (*__smp_boot_wakeup)(int cpu);
|
|
+
|
|
+void set_smp_boot_wakeup_call(void (*fn)(int cpu))
|
|
+{
|
|
+ __smp_boot_wakeup = fn;
|
|
+}
|
|
+
|
|
+static int smp_parking_protocol_cpu_init(struct device_node *dn,
|
|
+ unsigned int cpu)
|
|
+{
|
|
+ /*
|
|
+ * Determine the mailbox address.
|
|
+ */
|
|
+ if (!acpi_get_cpu_parked_address(cpu, &cpu_mailbox_addr[cpu])) {
|
|
+ pr_info("%s: ACPI parked addr=%llx\n",
|
|
+ __func__, cpu_mailbox_addr[cpu]);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ pr_err("CPU %d: missing or invalid parking protocol mailbox\n", cpu);
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int smp_parking_protocol_cpu_prepare(unsigned int cpu)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct parking_protocol_mailbox {
|
|
+ __le32 cpu_id;
|
|
+ __le32 reserved;
|
|
+ __le64 entry_point;
|
|
+};
|
|
+
|
|
+static int smp_parking_protocol_cpu_boot(unsigned int cpu)
|
|
+{
|
|
+ struct parking_protocol_mailbox __iomem *mailbox;
|
|
+
|
|
+ if (!cpu_mailbox_addr[cpu] || !__smp_boot_wakeup)
|
|
+ return -ENODEV;
|
|
+
|
|
+ /*
|
|
+ * The mailbox may or may not be inside the linear mapping.
|
|
+ * As ioremap_cache will either give us a new mapping or reuse the
|
|
+ * existing linear mapping, we can use it to cover both cases. In
|
|
+ * either case the memory will be MT_NORMAL.
|
|
+ */
|
|
+ mailbox = ioremap_cache(cpu_mailbox_addr[cpu], sizeof(*mailbox));
|
|
+ if (!mailbox)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /*
|
|
+ * We write the entry point and cpu id as LE regardless of the
|
|
+ * native endianness of the kernel. Therefore, any boot-loaders
|
|
+ * that read this address need to convert this address to the
|
|
+ * boot-loader's endianness before jumping.
|
|
+ */
|
|
+ writeq(__pa(secondary_entry), &mailbox->entry_point);
|
|
+ writel(cpu, &mailbox->cpu_id);
|
|
+ __flush_dcache_area(mailbox, sizeof(*mailbox));
|
|
+ __smp_boot_wakeup(cpu);
|
|
+
|
|
+ /* temp hack for broken firmware */
|
|
+ sev();
|
|
+
|
|
+ iounmap(mailbox);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const struct cpu_operations smp_parking_protocol_ops = {
|
|
+ .name = "parking-protocol",
|
|
+ .cpu_init = smp_parking_protocol_cpu_init,
|
|
+ .cpu_prepare = smp_parking_protocol_cpu_prepare,
|
|
+ .cpu_boot = smp_parking_protocol_cpu_boot,
|
|
+};
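
(Illustrative note, not part of the patch: the mailbox write in
smp_parking_protocol_cpu_boot() assumes firmware on the parked CPU is
spinning on the same structure, roughly as sketched below. parked_address,
my_cpu_id and jump_to() are hypothetical names; the real loop is defined by
the ARM Parking Protocol document, not by this series.)

    /* Hypothetical firmware-side view of the mailbox */
    struct parking_protocol_mailbox *mb = (void *)parked_address;

    do {
            wfe();                            /* woken by the SGI/sev() above */
    } while (le32_to_cpu(mb->cpu_id) != my_cpu_id);

    jump_to(le64_to_cpu(mb->entry_point));    /* enters secondary_entry */
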
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
|
|
index 1a7125c..42f9195 100644
|
|
--- a/arch/arm64/kernel/time.c
|
|
+++ b/arch/arm64/kernel/time.c
|
|
@@ -35,6 +35,7 @@
|
|
#include <linux/delay.h>
|
|
#include <linux/clocksource.h>
|
|
#include <linux/clk-provider.h>
|
|
+#include <linux/acpi.h>
|
|
|
|
#include <clocksource/arm_arch_timer.h>
|
|
|
|
@@ -72,6 +73,12 @@ void __init time_init(void)
|
|
|
|
tick_setup_hrtimer_broadcast();
|
|
|
|
+ /*
|
|
+ * Since only one of ACPI or FDT will be available in the system,
|
|
+ * we can use acpi_generic_timer_init() here safely
|
|
+ */
|
|
+ acpi_generic_timer_init();
|
|
+
|
|
arch_timer_rate = arch_timer_get_rate();
|
|
if (!arch_timer_rate)
|
|
panic("Unable to initialise architected timer.\n");
|
|
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
|
|
index d920942..cf890e3 100644
|
|
--- a/arch/arm64/mm/dma-mapping.c
|
|
+++ b/arch/arm64/mm/dma-mapping.c
|
|
@@ -23,8 +23,14 @@
|
|
#include <linux/genalloc.h>
|
|
#include <linux/dma-mapping.h>
|
|
#include <linux/dma-contiguous.h>
|
|
+#include <linux/of.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/platform_device.h>
|
|
#include <linux/vmalloc.h>
|
|
#include <linux/swiotlb.h>
|
|
+#include <linux/amba/bus.h>
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/pci.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
@@ -423,10 +429,116 @@ out:
|
|
return -ENOMEM;
|
|
}
|
|
|
|
+#ifdef CONFIG_PCI
|
|
+static void arm64_of_set_dma_ops(void *_dev)
|
|
+{
|
|
+ struct device *dev = _dev;
|
|
+
|
|
+ /*
|
|
+ * PCI devices won't have an of_node but the bridge will.
|
|
+ * Search up the device chain until we find an of_node
|
|
+ * to check.
|
|
+ */
|
|
+ while (dev) {
|
|
+ if (dev->of_node) {
|
|
+ if (of_dma_is_coherent(dev->of_node))
|
|
+ set_dma_ops(_dev, &coherent_swiotlb_dma_ops);
|
|
+ break;
|
|
+ }
|
|
+ dev = dev->parent;
|
|
+ }
|
|
+}
|
|
+#else
|
|
+static inline void arm64_of_set_dma_ops(void *_dev) {}
|
|
+#endif
|
|
+
|
|
+
|
|
+#ifdef CONFIG_ACPI
|
|
+static void arm64_acpi_set_dma_ops(void *_dev)
|
|
+{
|
|
+ struct device *dev = _dev;
|
|
+
|
|
+ /*
|
|
+ * Kernel defaults to noncoherent ops but ACPI 5.1 spec says arm64
|
|
+ * defaults to coherent. For PCI devices, the _CCA is only a default
|
|
+ * setting. Individual devices on a PCIe bus may set transaction
|
|
+ * ordering and caching attributes individually. Such drivers will
|
|
+ * also be responsible for using the correct DMA ops for the cache
|
|
+ * coherence used.
|
|
+ *
|
|
+ * PCI devices won't have a handle but the bridge will.
|
|
+ * Search up the device chain until we find an ACPI handle
|
|
+ * to check.
|
|
+ */
|
|
+ while (dev) {
|
|
+ if (ACPI_HANDLE(dev)) {
|
|
+ acpi_status status;
|
|
+ int coherent;
|
|
+ struct dma_map_ops *ops;
|
|
+
|
|
+ status = acpi_check_coherency(ACPI_HANDLE(dev),
|
|
+ &coherent);
|
|
+ if (ACPI_FAILURE(status) || coherent)
|
|
+ ops = &coherent_swiotlb_dma_ops;
|
|
+ else
|
|
+ ops = &noncoherent_swiotlb_dma_ops;
|
|
+
|
|
+ set_dma_ops(_dev, ops);
|
|
+ break;
|
|
+ }
|
|
+ dev = dev->parent;
|
|
+ }
|
|
+}
|
|
+#else
|
|
+static inline void arm64_acpi_set_dma_ops(void *_dev) {}
|
|
+#endif
|
|
+
|
|
+static int dma_bus_notifier(struct notifier_block *nb,
|
|
+ unsigned long event, void *_dev)
|
|
+{
|
|
+ if (event != BUS_NOTIFY_ADD_DEVICE)
|
|
+ return NOTIFY_DONE;
|
|
+
|
|
+ if (acpi_disabled)
|
|
+ arm64_of_set_dma_ops(_dev);
|
|
+ else
|
|
+ arm64_acpi_set_dma_ops(_dev);
|
|
+
|
|
+ return NOTIFY_OK;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_ACPI
|
|
+static struct notifier_block platform_bus_nb = {
|
|
+ .notifier_call = dma_bus_notifier,
|
|
+};
|
|
+
|
|
+static struct notifier_block amba_bus_nb = {
|
|
+ .notifier_call = dma_bus_notifier,
|
|
+};
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PCI
|
|
+static struct notifier_block pci_bus_nb = {
|
|
+ .notifier_call = dma_bus_notifier,
|
|
+};
|
|
+#endif
|
|
+
|
|
static int __init swiotlb_late_init(void)
|
|
{
|
|
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
|
|
|
|
+ /*
|
|
+ * These must be registered before of_platform_populate().
|
|
+ */
|
|
+#ifdef CONFIG_ACPI
|
|
+ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
|
|
+ bus_register_notifier(&amba_bustype, &amba_bus_nb);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PCI
|
|
+ bus_register_notifier(&pci_bus_type, &pci_bus_nb);
|
|
+#endif
|
|
+
|
|
dma_ops = &noncoherent_swiotlb_dma_ops;
|
|
|
|
return swiotlb_late_init_with_default_size(swiotlb_size);
|
|
diff --git a/arch/arm64/pci/Makefile b/arch/arm64/pci/Makefile
|
|
new file mode 100644
|
|
index 0000000..7038b51
|
|
--- /dev/null
|
|
+++ b/arch/arm64/pci/Makefile
|
|
@@ -0,0 +1,2 @@
|
|
+obj-y += pci.o
|
|
+obj-$(CONFIG_ACPI) += mmconfig.o
|
|
diff --git a/arch/arm64/pci/mmconfig.c b/arch/arm64/pci/mmconfig.c
|
|
new file mode 100644
|
|
index 0000000..e83e0d5
|
|
--- /dev/null
|
|
+++ b/arch/arm64/pci/mmconfig.c
|
|
@@ -0,0 +1,292 @@
|
|
+/*
|
|
+ * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
|
|
+ *
|
|
+ * Borrowed heavily from x86
|
|
+ */
|
|
+
|
|
+#include <linux/pci.h>
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/bitmap.h>
|
|
+#include <linux/dmi.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/rculist.h>
|
|
+#include <linux/rcupdate.h>
|
|
+
|
|
+#define PREFIX "PCI: "
|
|
+
|
|
+/* Indicate if the mmcfg resources have been placed into the resource table. */
|
|
+static bool pci_mmcfg_running_state;
|
|
+static bool pci_mmcfg_arch_init_failed;
|
|
+static DEFINE_MUTEX(pci_mmcfg_lock);
|
|
+
|
|
+LIST_HEAD(pci_mmcfg_list);
|
|
+
|
|
+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+
|
|
+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
|
|
+ if (cfg->segment == segment &&
|
|
+ cfg->start_bus <= bus && bus <= cfg->end_bus)
|
|
+ return cfg;
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void __iomem *mcfg_ioremap(struct pci_mmcfg_region *cfg)
|
|
+{
|
|
+ void __iomem *addr;
|
|
+ u64 start, size;
|
|
+ int num_buses;
|
|
+
|
|
+ start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
|
|
+ num_buses = cfg->end_bus - cfg->start_bus + 1;
|
|
+ size = PCI_MMCFG_BUS_OFFSET(num_buses);
|
|
+ addr = ioremap_nocache(start, size);
|
|
+ if (addr)
|
|
+ addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
|
|
+ return addr;
|
|
+}
|
|
+
|
|
+void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
|
|
+{
|
|
+ if (cfg && cfg->virt) {
|
|
+ iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
|
|
+ cfg->virt = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
+void __init pci_mmcfg_arch_free(void)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+
|
|
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
|
|
+ pci_mmcfg_arch_unmap(cfg);
|
|
+}
|
|
+
|
|
+int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
|
|
+{
|
|
+ cfg->virt = mcfg_ioremap(cfg);
|
|
+ if (!cfg->virt) {
|
|
+ pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
|
|
+{
|
|
+ if (cfg->res.parent)
|
|
+ release_resource(&cfg->res);
|
|
+ list_del(&cfg->list);
|
|
+ kfree(cfg);
|
|
+}
|
|
+
|
|
+static void __init free_all_mmcfg(void)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg, *tmp;
|
|
+
|
|
+ pci_mmcfg_arch_free();
|
|
+ list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
|
|
+ pci_mmconfig_remove(cfg);
|
|
+}
|
|
+
|
|
+static void list_add_sorted(struct pci_mmcfg_region *new)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+
|
|
+ /* keep list sorted by segment and starting bus number */
|
|
+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
|
|
+ if (cfg->segment > new->segment ||
|
|
+ (cfg->segment == new->segment &&
|
|
+ cfg->start_bus >= new->start_bus)) {
|
|
+ list_add_tail_rcu(&new->list, &cfg->list);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ list_add_tail_rcu(&new->list, &pci_mmcfg_list);
|
|
+}
|
|
+
|
|
+static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
|
|
+ int end, u64 addr)
|
|
+{
|
|
+ struct pci_mmcfg_region *new;
|
|
+ struct resource *res;
|
|
+
|
|
+ if (addr == 0)
|
|
+ return NULL;
|
|
+
|
|
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
|
|
+ if (!new)
|
|
+ return NULL;
|
|
+
|
|
+ new->address = addr;
|
|
+ new->segment = segment;
|
|
+ new->start_bus = start;
|
|
+ new->end_bus = end;
|
|
+
|
|
+ res = &new->res;
|
|
+ res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
|
|
+ res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
|
|
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
|
|
+ snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
|
|
+ "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
|
|
+ res->name = new->name;
|
|
+
|
|
+ return new;
|
|
+}
|
|
+
|
|
+static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
|
|
+ int end, u64 addr)
|
|
+{
|
|
+ struct pci_mmcfg_region *new;
|
|
+
|
|
+ new = pci_mmconfig_alloc(segment, start, end, addr);
|
|
+ if (new) {
|
|
+ mutex_lock(&pci_mmcfg_lock);
|
|
+ list_add_sorted(new);
|
|
+ mutex_unlock(&pci_mmcfg_lock);
|
|
+
|
|
+ pr_info(PREFIX
|
|
+ "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
|
|
+ "(base %#lx)\n",
|
|
+ segment, start, end, &new->res, (unsigned long)addr);
|
|
+ }
|
|
+
|
|
+ return new;
|
|
+}
|
|
+
|
|
+extern struct acpi_mcfg_fixup __start_acpi_mcfg_fixups[];
|
|
+extern struct acpi_mcfg_fixup __end_acpi_mcfg_fixups[];
|
|
+
|
|
+static int __init pci_parse_mcfg(struct acpi_table_header *header)
|
|
+{
|
|
+ struct acpi_table_mcfg *mcfg;
|
|
+ struct acpi_mcfg_allocation *cfg_table, *cfg;
|
|
+ struct acpi_mcfg_fixup *fixup;
|
|
+ struct pci_mmcfg_region *new;
|
|
+ unsigned long i;
|
|
+ int entries;
|
|
+
|
|
+ if (!header)
|
|
+ return -EINVAL;
|
|
+
|
|
+ mcfg = (struct acpi_table_mcfg *)header;
|
|
+
|
|
+ /* how many config structures do we have */
|
|
+ free_all_mmcfg();
|
|
+ entries = 0;
|
|
+ i = header->length - sizeof(struct acpi_table_mcfg);
|
|
+ while (i >= sizeof(struct acpi_mcfg_allocation)) {
|
|
+ entries++;
|
|
+ i -= sizeof(struct acpi_mcfg_allocation);
|
|
+ }
|
|
+ if (entries == 0) {
|
|
+ pr_err(PREFIX "MMCONFIG has no entries\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ fixup = __start_acpi_mcfg_fixups;
|
|
+ while (fixup < __end_acpi_mcfg_fixups) {
|
|
+ if (!strncmp(fixup->oem_id, header->oem_id, 6) &&
|
|
+ !strncmp(fixup->oem_table_id, header->oem_table_id, 8))
|
|
+ break;
|
|
+ ++fixup;
|
|
+ }
|
|
+
|
|
+ cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
|
|
+ for (i = 0; i < entries; i++) {
|
|
+ cfg = &cfg_table[i];
|
|
+
|
|
+ new = pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
|
|
+ cfg->end_bus_number, cfg->address);
|
|
+ if (!new) {
|
|
+ pr_warn(PREFIX "no memory for MCFG entries\n");
|
|
+ free_all_mmcfg();
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ if (fixup < __end_acpi_mcfg_fixups)
|
|
+ new->fixup = fixup->hook;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int __init pci_mmcfg_arch_init(void)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+
|
|
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
|
|
+ if (pci_mmcfg_arch_map(cfg)) {
|
|
+ pci_mmcfg_arch_free();
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static void __init __pci_mmcfg_init(int early)
|
|
+{
|
|
+ if (list_empty(&pci_mmcfg_list)) {
|
|
+ pr_info("No MCFG table found!\n");
|
|
+ pci_mmcfg_arch_init_failed = true;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!pci_mmcfg_arch_init()) {
|
|
+ pr_info("pci_mmcfg_arch_init failed!\n");
|
|
+ free_all_mmcfg();
|
|
+ pci_mmcfg_arch_init_failed = true;
|
|
+ }
|
|
+}
|
|
+
|
|
+void __init pci_mmcfg_early_init(void)
|
|
+{
|
|
+ acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
|
|
+
|
|
+ __pci_mmcfg_init(1);
|
|
+}
|
|
+
|
|
+static int __init pci_mmcfg_init(void)
|
|
+{
|
|
+ pci_mmcfg_early_init();
|
|
+ return 0;
|
|
+}
|
|
+arch_initcall(pci_mmcfg_init);
|
|
+
|
|
+void __init pci_mmcfg_late_init(void)
|
|
+{
|
|
+ /* MMCONFIG hasn't been enabled yet, try again */
|
|
+ if (pci_mmcfg_arch_init_failed) {
|
|
+ acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
|
|
+ __pci_mmcfg_init(0);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int __init pci_mmcfg_late_insert_resources(void)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+
|
|
+ pci_mmcfg_running_state = true;
|
|
+
|
|
+ /*
|
|
+ * Attempt to insert the mmcfg resources but not with the busy flag
|
|
+ * marked so it won't cause request errors when __request_region is
|
|
+ * called.
|
|
+ */
|
|
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
|
|
+ if (!cfg->res.parent)
|
|
+ insert_resource(&iomem_resource, &cfg->res);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Perform MMCONFIG resource insertion after PCI initialization to allow for
|
|
+ * misprogrammed MCFG tables that state larger sizes but actually conflict
|
|
+ * with other system resources.
|
|
+ */
|
|
+late_initcall(pci_mmcfg_late_insert_resources);
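
(Illustrative note, not part of the patch: MMCONFIG/ECAM places each
function's 4 KiB of config space at a fixed offset from the region base, so
a register address can be computed directly. A minimal sketch, assuming
PCI_MMCFG_BUS_OFFSET(bus) expands to (bus) << 20 as pci_dev_base() in
arch/arm64/pci/pci.c below expects:)

    /* ECAM offset of bus 1, device 2, function 0, register 0x10 (BAR0) */
    unsigned int devfn = PCI_DEVFN(2, 0);                    /* 0x10     */
    unsigned long off  = (1UL << 20) | (devfn << 12) | 0x10; /* 0x110010 */
    /* the register is then read at cfg->virt + off */
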
diff --git a/arch/arm64/pci/pci.c b/arch/arm64/pci/pci.c
|
|
new file mode 100644
|
|
index 0000000..0166475
|
|
--- /dev/null
|
|
+++ b/arch/arm64/pci/pci.c
|
|
@@ -0,0 +1,461 @@
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/of_address.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/pci.h>
|
|
+
|
|
+struct pci_root_info {
|
|
+ struct acpi_device *bridge;
|
|
+ char name[16];
|
|
+ unsigned int res_num;
|
|
+ struct resource *res;
|
|
+ resource_size_t *res_offset;
|
|
+ struct pci_sysdata sd;
|
|
+ u16 segment;
|
|
+ u8 start_bus;
|
|
+ u8 end_bus;
|
|
+};
|
|
+
|
|
+static char __iomem *pci_dev_base(struct pci_mmcfg_region *cfg,
|
|
+ unsigned int bus, unsigned int devfn)
|
|
+{
|
|
+ return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
|
|
+}
|
|
+
|
|
+static int __raw_pci_read(struct pci_mmcfg_region *cfg, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 *value)
|
|
+{
|
|
+ char __iomem *addr = pci_dev_base(cfg, bus, devfn) + (reg & ~3);
|
|
+ int shift = (reg & 3) * 8;
|
|
+ u32 v;
|
|
+
|
|
+ v = readl(addr) >> shift;
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ *value = v & 0xff;
|
|
+ break;
|
|
+ case 2:
|
|
+ *value = v & 0xffff;
|
|
+ break;
|
|
+ case 4:
|
|
+ *value = v;
|
|
+ break;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __raw_pci_write(struct pci_mmcfg_region *cfg, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 value)
|
|
+{
|
|
+ char __iomem *addr = pci_dev_base(cfg, bus, devfn) + (reg & ~3);
|
|
+ int mask = 0, shift = (reg & 3) * 8;
|
|
+ u32 v;
|
|
+
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ mask = 0xff << shift;
|
|
+ break;
|
|
+ case 2:
|
|
+ mask = 0xffff << shift;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (mask) {
|
|
+ v = readl(addr) & ~mask;
|
|
+ writel(v | (value << shift), addr);
|
|
+ } else
|
|
+ writel(value, addr);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * raw_pci_read/write - Platform-specific PCI config space access.
|
|
+ */
|
|
+int raw_pci_read(unsigned int domain, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 *val)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+ int ret;
|
|
+
|
|
+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
|
|
+err: *val = -1;
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ rcu_read_lock();
|
|
+ cfg = pci_mmconfig_lookup(domain, bus);
|
|
+ if (!cfg || !cfg->virt) {
|
|
+ rcu_read_unlock();
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (cfg->read)
|
|
+ ret = (*cfg->read)(cfg, bus, devfn, reg, len, val);
|
|
+ else
|
|
+ ret = __raw_pci_read(cfg, bus, devfn, reg, len, val);
|
|
+
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int raw_pci_write(unsigned int domain, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 val)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg;
|
|
+ int ret;
|
|
+
|
|
+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
|
|
+ return -EINVAL;
|
|
+
|
|
+ rcu_read_lock();
|
|
+ cfg = pci_mmconfig_lookup(domain, bus);
|
|
+ if (!cfg || !cfg->virt) {
|
|
+ rcu_read_unlock();
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (cfg->write)
|
|
+ ret = (*cfg->write)(cfg, bus, devfn, reg, len, val);
|
|
+ else
|
|
+ ret = __raw_pci_write(cfg, bus, devfn, reg, len, val);
|
|
+
|
|
+ rcu_read_unlock();
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_ACPI
|
|
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
|
|
+ int size, u32 *value)
|
|
+{
|
|
+ return raw_pci_read(pci_domain_nr(bus), bus->number,
|
|
+ devfn, where, size, value);
|
|
+}
|
|
+
|
|
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
|
|
+ int size, u32 value)
|
|
+{
|
|
+ return raw_pci_write(pci_domain_nr(bus), bus->number,
|
|
+ devfn, where, size, value);
|
|
+}
|
|
+
|
|
+struct pci_ops pci_root_ops = {
|
|
+ .read = pci_read,
|
|
+ .write = pci_write,
|
|
+};
|
|
+
|
|
+static acpi_status resource_to_addr(struct acpi_resource *resource,
|
|
+ struct acpi_resource_address64 *addr)
|
|
+{
|
|
+ acpi_status status;
|
|
+
|
|
+ memset(addr, 0, sizeof(*addr));
|
|
+ switch (resource->type) {
|
|
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
|
|
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
|
|
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
|
|
+ status = acpi_resource_to_address64(resource, addr);
|
|
+ if (ACPI_SUCCESS(status) &&
|
|
+ (addr->resource_type == ACPI_MEMORY_RANGE ||
|
|
+ addr->resource_type == ACPI_IO_RANGE) &&
|
|
+ addr->address_length > 0) {
|
|
+ return AE_OK;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ return AE_ERROR;
|
|
+}
|
|
+
|
|
+static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
|
|
+{
|
|
+ struct pci_root_info *info = data;
|
|
+ struct acpi_resource_address64 addr;
|
|
+ acpi_status status;
|
|
+
|
|
+ status = resource_to_addr(acpi_res, &addr);
|
|
+ if (ACPI_SUCCESS(status))
|
|
+ info->res_num++;
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
|
|
+{
|
|
+ struct pci_root_info *info = data;
|
|
+ struct resource *res;
|
|
+ struct acpi_resource_address64 addr;
|
|
+ acpi_status status;
|
|
+ unsigned long flags;
|
|
+ u64 start, end;
|
|
+
|
|
+ status = resource_to_addr(acpi_res, &addr);
|
|
+ if (!ACPI_SUCCESS(status))
|
|
+ return AE_OK;
|
|
+
|
|
+ if (addr.resource_type == ACPI_MEMORY_RANGE) {
|
|
+ flags = IORESOURCE_MEM;
|
|
+ if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
|
|
+ flags |= IORESOURCE_PREFETCH;
|
|
+ } else if (addr.resource_type == ACPI_IO_RANGE) {
|
|
+ flags = IORESOURCE_IO;
|
|
+ } else
|
|
+ return AE_OK;
|
|
+
|
|
+ start = addr.minimum + addr.translation_offset;
|
|
+ end = addr.maximum + addr.translation_offset;
|
|
+
|
|
+ res = &info->res[info->res_num];
|
|
+ res->name = info->name;
|
|
+ res->flags = flags;
|
|
+ res->start = start;
|
|
+ res->end = end;
|
|
+
|
|
+ if (flags & IORESOURCE_IO) {
|
|
+ unsigned long port;
|
|
+ int err;
|
|
+
|
|
+ err = pci_register_io_range(start, addr.address_length);
|
|
+ if (err)
|
|
+ return AE_OK;
|
|
+
|
|
+ port = pci_address_to_pio(start);
|
|
+ if (port == (unsigned long)-1) {
|
|
+ res->start = -1;
|
|
+ res->end = -1;
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ res->start = port;
|
|
+ res->end = res->start + addr.address_length - 1;
|
|
+
|
|
+ if (pci_remap_iospace(res, start) < 0)
|
|
+ return AE_OK;
|
|
+
|
|
+ info->res_offset[info->res_num] = 0;
|
|
+ } else
|
|
+ info->res_offset[info->res_num] = addr.translation_offset;
|
|
+
|
|
+ info->res_num++;
|
|
+
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
|
|
+{
|
|
+ int i, j;
|
|
+ struct resource *res1, *res2;
|
|
+
|
|
+ for (i = 0; i < info->res_num; i++) {
|
|
+ res1 = &info->res[i];
|
|
+ if (!(res1->flags & type))
|
|
+ continue;
|
|
+
|
|
+ for (j = i + 1; j < info->res_num; j++) {
|
|
+ res2 = &info->res[j];
|
|
+ if (!(res2->flags & type))
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * I don't like throwing away windows because then
|
|
+ * our resources no longer match the ACPI _CRS, but
|
|
+ * the kernel resource tree doesn't allow overlaps.
|
|
+ */
|
|
+ if (resource_overlaps(res1, res2)) {
|
|
+ res2->start = min(res1->start, res2->start);
|
|
+ res2->end = max(res1->end, res2->end);
|
|
+ dev_info(&info->bridge->dev,
|
|
+ "host bridge window expanded to %pR; %pR ignored\n",
|
|
+ res2, res1);
|
|
+ res1->flags = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static void add_resources(struct pci_root_info *info,
|
|
+ struct list_head *resources)
|
|
+{
|
|
+ int i;
|
|
+ struct resource *res, *root, *conflict;
|
|
+
|
|
+ coalesce_windows(info, IORESOURCE_MEM);
|
|
+ coalesce_windows(info, IORESOURCE_IO);
|
|
+
|
|
+ for (i = 0; i < info->res_num; i++) {
|
|
+ res = &info->res[i];
|
|
+
|
|
+ if (res->flags & IORESOURCE_MEM)
|
|
+ root = &iomem_resource;
|
|
+ else if (res->flags & IORESOURCE_IO)
|
|
+ root = &ioport_resource;
|
|
+ else
|
|
+ continue;
|
|
+
|
|
+ conflict = insert_resource_conflict(root, res);
|
|
+ if (conflict)
|
|
+ dev_info(&info->bridge->dev,
|
|
+ "ignoring host bridge window %pR (conflicts with %s %pR)\n",
|
|
+ res, conflict->name, conflict);
|
|
+ else
|
|
+ pci_add_resource_offset(resources, res,
|
|
+ info->res_offset[i]);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void free_pci_root_info_res(struct pci_root_info *info)
|
|
+{
|
|
+ kfree(info->res);
|
|
+ info->res = NULL;
|
|
+ kfree(info->res_offset);
|
|
+ info->res_offset = NULL;
|
|
+ info->res_num = 0;
|
|
+}
|
|
+
|
|
+static void __release_pci_root_info(struct pci_root_info *info)
|
|
+{
|
|
+ int i;
|
|
+ struct resource *res;
|
|
+
|
|
+ for (i = 0; i < info->res_num; i++) {
|
|
+ res = &info->res[i];
|
|
+
|
|
+ if (!res->parent)
|
|
+ continue;
|
|
+
|
|
+ if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
|
|
+ continue;
|
|
+
|
|
+ release_resource(res);
|
|
+ }
|
|
+
|
|
+ free_pci_root_info_res(info);
|
|
+
|
|
+ kfree(info);
|
|
+}
|
|
+
|
|
+static void release_pci_root_info(struct pci_host_bridge *bridge)
|
|
+{
|
|
+ struct pci_root_info *info = bridge->release_data;
|
|
+
|
|
+ __release_pci_root_info(info);
|
|
+}
|
|
+
|
|
+static void probe_pci_root_info(struct pci_root_info *info,
|
|
+ struct acpi_device *device,
|
|
+ int busnum, int domain)
|
|
+{
|
|
+ size_t size;
|
|
+
|
|
+ sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
|
|
+ info->bridge = device;
|
|
+
|
|
+ info->res_num = 0;
|
|
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
|
|
+ info);
|
|
+ if (!info->res_num)
|
|
+ return;
|
|
+
|
|
+ size = sizeof(*info->res) * info->res_num;
|
|
+ info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node);
|
|
+ if (!info->res) {
|
|
+ info->res_num = 0;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ size = sizeof(*info->res_offset) * info->res_num;
|
|
+ info->res_num = 0;
|
|
+ info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
|
|
+ if (!info->res_offset) {
|
|
+ kfree(info->res);
|
|
+ info->res = NULL;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
|
|
+ info);
|
|
+}
|
|
+
|
|
+/* Root bridge scanning */
|
|
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
|
|
+{
|
|
+ struct acpi_device *device = root->device;
|
|
+ struct pci_mmcfg_region *mcfg;
|
|
+ struct pci_root_info *info;
|
|
+ int domain = root->segment;
|
|
+ int busnum = root->secondary.start;
|
|
+ LIST_HEAD(resources);
|
|
+ struct pci_bus *bus;
|
|
+ struct pci_sysdata *sd;
|
|
+ int node;
|
|
+
|
|
+ /* we need mmconfig */
|
|
+ mcfg = pci_mmconfig_lookup(domain, busnum);
|
|
+ if (!mcfg) {
|
|
+ pr_err("pci_bus %04x:%02x has no MCFG table\n",
|
|
+ domain, busnum);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ /* temporary hack */
|
|
+ if (mcfg->fixup)
|
|
+ (*mcfg->fixup)(root, mcfg);
|
|
+
|
|
+ if (domain && !pci_domains_supported) {
|
|
+ pr_warn("PCI %04x:%02x: multiple domains not supported.\n",
|
|
+ domain, busnum);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ node = NUMA_NO_NODE;
|
|
+
|
|
+ info = kzalloc_node(sizeof(*info), GFP_KERNEL, node);
|
|
+ if (!info) {
|
|
+ pr_warn("PCI %04x:%02x: ignored (out of memory)\n",
|
|
+ domain, busnum);
|
|
+ return NULL;
|
|
+ }
|
|
+ info->segment = domain;
|
|
+ info->start_bus = busnum;
|
|
+ info->end_bus = root->secondary.end;
|
|
+
|
|
+ sd = &info->sd;
|
|
+ sd->domain = domain;
|
|
+ sd->node = node;
|
|
+ sd->companion = device;
|
|
+
|
|
+ probe_pci_root_info(info, device, busnum, domain);
|
|
+
|
|
+ /* insert busn res at first */
|
|
+ pci_add_resource(&resources, &root->secondary);
|
|
+
|
|
+ /* then _CRS resources */
|
|
+ add_resources(info, &resources);
|
|
+
|
|
+ bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd, &resources);
|
|
+ if (bus) {
|
|
+ pci_scan_child_bus(bus);
|
|
+ pci_set_host_bridge_release(to_pci_host_bridge(bus->bridge),
|
|
+ release_pci_root_info, info);
|
|
+ } else {
|
|
+ pci_free_resource_list(&resources);
|
|
+ __release_pci_root_info(info);
|
|
+ }
|
|
+
|
|
+ /* After the PCI-E bus has been walked and all devices discovered,
|
|
+ * configure any settings of the fabric that might be necessary.
|
|
+ */
|
|
+ if (bus) {
|
|
+ struct pci_bus *child;
|
|
+
|
|
+ list_for_each_entry(child, &bus->children, node)
|
|
+ pcie_bus_configure_settings(child);
|
|
+ }
|
|
+
|
|
+ if (bus && node != NUMA_NO_NODE)
|
|
+ dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
|
|
+
|
|
+ return bus;
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_ACPI */
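
(Illustrative note, not part of the patch: a quick sketch of how the raw
accessors above are exercised; a dword read at PCI_VENDOR_ID returns the
vendor ID in the low 16 bits and the device ID in the high 16 bits.)

    u32 id;

    /* read the vendor/device ID of 0000:00:00.0 through the ECAM mapping */
    if (!raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id))
            pr_info("host bridge: vendor %04x device %04x\n",
                    id & 0xffff, id >> 16);
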
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
|
|
index 8951cef..63aa47c 100644
|
|
--- a/drivers/acpi/Kconfig
|
|
+++ b/drivers/acpi/Kconfig
|
|
@@ -5,8 +5,7 @@
|
|
menuconfig ACPI
|
|
bool "ACPI (Advanced Configuration and Power Interface) Support"
|
|
depends on !IA64_HP_SIM
|
|
- depends on IA64 || X86
|
|
- depends on PCI
|
|
+ depends on ((IA64 || X86) && PCI) || ARM64
|
|
select PNP
|
|
default y
|
|
help
|
|
@@ -163,6 +162,7 @@ config ACPI_PROCESSOR
|
|
tristate "Processor"
|
|
select THERMAL
|
|
select CPU_IDLE
|
|
+ depends on X86 || IA64
|
|
default y
|
|
help
|
|
This driver installs ACPI as the idle handler for Linux and uses
|
|
@@ -263,7 +263,7 @@ config ACPI_DEBUG
|
|
|
|
config ACPI_PCI_SLOT
|
|
bool "PCI slot detection driver"
|
|
- depends on SYSFS
|
|
+ depends on SYSFS && PCI
|
|
default n
|
|
help
|
|
This driver creates entries in /sys/bus/pci/slots/ for all PCI
|
|
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
|
|
index f74317c..c346011 100644
|
|
--- a/drivers/acpi/Makefile
|
|
+++ b/drivers/acpi/Makefile
|
|
@@ -23,7 +23,11 @@ acpi-y += nvs.o
|
|
|
|
# Power management related files
|
|
acpi-y += wakeup.o
|
|
+ifeq ($(ARCH), arm64)
|
|
+acpi-y += sleep-arm.o
|
|
+else # X86, IA64
|
|
acpi-y += sleep.o
|
|
+endif
|
|
acpi-y += device_pm.o
|
|
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
|
|
|
|
@@ -39,7 +43,7 @@ acpi-y += processor_core.o
|
|
acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
|
|
acpi-y += ec.o
|
|
acpi-$(CONFIG_ACPI_DOCK) += dock.o
|
|
-acpi-y += pci_root.o pci_link.o pci_irq.o
|
|
+acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
|
|
acpi-y += acpi_lpss.o
|
|
acpi-y += acpi_platform.o
|
|
acpi-y += acpi_pnp.o
|
|
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
|
|
index 8b67bd0..c412fdb 100644
|
|
--- a/drivers/acpi/bus.c
|
|
+++ b/drivers/acpi/bus.c
|
|
@@ -448,6 +448,9 @@ static int __init acpi_bus_init_irq(void)
|
|
case ACPI_IRQ_MODEL_IOSAPIC:
|
|
message = "IOSAPIC";
|
|
break;
|
|
+ case ACPI_IRQ_MODEL_GIC:
|
|
+ message = "GIC";
|
|
+ break;
|
|
case ACPI_IRQ_MODEL_PLATFORM:
|
|
message = "platform specific model";
|
|
break;
|
|
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
|
|
index 163e82f..c5ff8ba 100644
|
|
--- a/drivers/acpi/internal.h
|
|
+++ b/drivers/acpi/internal.h
|
|
@@ -26,8 +26,13 @@
|
|
acpi_status acpi_os_initialize1(void);
|
|
int init_acpi_device_notify(void);
|
|
int acpi_scan_init(void);
|
|
+#ifdef CONFIG_PCI
|
|
void acpi_pci_root_init(void);
|
|
void acpi_pci_link_init(void);
|
|
+#else
|
|
+static inline void acpi_pci_root_init(void) {}
|
|
+static inline void acpi_pci_link_init(void) {}
|
|
+#endif
|
|
void acpi_processor_init(void);
|
|
void acpi_platform_init(void);
|
|
void acpi_pnp_init(void);
|
|
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
|
|
index f9eeae8..581b9f7 100644
|
|
--- a/drivers/acpi/osl.c
|
|
+++ b/drivers/acpi/osl.c
|
|
@@ -336,11 +336,11 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
|
|
return NULL;
|
|
}
|
|
|
|
-#ifndef CONFIG_IA64
|
|
-#define should_use_kmap(pfn) page_is_ram(pfn)
|
|
-#else
|
|
+#if defined(CONFIG_IA64) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
|
|
/* ioremap will take care of cache attributes */
|
|
#define should_use_kmap(pfn) 0
|
|
+#else
|
|
+#define should_use_kmap(pfn) page_is_ram(pfn)
|
|
#endif
|
|
|
|
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
|
|
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
|
|
index 342942f..734c029 100644
|
|
--- a/drivers/acpi/processor_core.c
|
|
+++ b/drivers/acpi/processor_core.c
|
|
@@ -64,6 +64,38 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
|
|
return 0;
|
|
}
|
|
|
|
+/*
|
|
+ * On ARM platforms, the MPIDR value is the hardware ID, analogous to the
+ * APIC ID on Intel platforms.
|
|
+ */
|
|
+static int map_gicc_mpidr(struct acpi_subtable_header *entry,
|
|
+ int device_declaration, u32 acpi_id, int *mpidr)
|
|
+{
|
|
+ struct acpi_madt_generic_interrupt *gicc =
|
|
+ container_of(entry, struct acpi_madt_generic_interrupt, header);
|
|
+
|
|
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
|
|
+ return -ENODEV;
|
|
+
|
|
+ /* In the GIC interrupt model, logical processors are
|
|
+ * required to have a Processor Device object in the DSDT,
|
|
+ * so we should check device_declaration here
|
|
+ */
|
|
+ if (device_declaration && (gicc->uid == acpi_id)) {
|
|
+ /*
|
|
+ * Only bits [0:7] Aff0, bits [8:15] Aff1, bits [16:23] Aff2
|
|
+ * and bits [32:39] Aff3 are meaningful, so pack the Affx
|
|
+ * fields into a single 32 bit identifier to accommodate the
|
|
+ * acpi processor drivers.
|
|
+ */
|
|
+ *mpidr = ((gicc->arm_mpidr & 0xff00000000) >> 8)
|
|
+ | gicc->arm_mpidr;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
static int map_madt_entry(int type, u32 acpi_id)
|
|
{
|
|
unsigned long madt_end, entry;
|
|
@@ -99,6 +131,9 @@ static int map_madt_entry(int type, u32 acpi_id)
|
|
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
|
|
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
|
|
break;
|
|
+ } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
|
|
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
|
|
+ break;
|
|
}
|
|
entry += header->length;
|
|
}
|
|
@@ -131,6 +166,8 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
|
map_lsapic_id(header, type, acpi_id, &phys_id);
|
|
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
|
|
map_x2apic_id(header, type, acpi_id, &phys_id);
|
|
+ else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
|
|
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
|
|
|
|
exit:
|
|
kfree(buffer.pointer);
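
(Illustrative note, not part of the patch: a worked example of the Affx
packing performed by map_gicc_mpidr() above, assuming the MT and U bits of
the MPIDR are zero.)

    /* arm_mpidr = 0x0000000100000203: Aff3=0x01 Aff2=0x00 Aff1=0x02 Aff0=0x03 */
    u64 arm_mpidr = 0x0000000100000203ULL;
    u32 packed    = ((arm_mpidr & 0xff00000000ULL) >> 8) | (u32)arm_mpidr;
    /* packed == 0x01000203: Aff3 is moved down into bits [31:24] */
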
diff --git a/drivers/acpi/sleep-arm.c b/drivers/acpi/sleep-arm.c
|
|
new file mode 100644
|
|
index 0000000..54578ef
|
|
--- /dev/null
|
|
+++ b/drivers/acpi/sleep-arm.c
|
|
@@ -0,0 +1,28 @@
|
|
+/*
|
|
+ * ARM64 Specific Sleep Functionality
|
|
+ *
|
|
+ * Copyright (C) 2013-2014, Linaro Ltd.
|
|
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License version 2 as
|
|
+ * published by the Free Software Foundation.
|
|
+ */
|
|
+
|
|
+#include <linux/acpi.h>
|
|
+
|
|
+/*
|
|
+ * Currently the ACPI 5.1 standard does not define S states in a
|
|
+ * manner which is usable for ARM64. These two stubs are sufficient
|
|
+ * to let the system initialise and device PM work.
|
|
+ */
|
|
+u32 acpi_target_system_state(void)
|
|
+{
|
|
+ return ACPI_STATE_S0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(acpi_target_system_state);
|
|
+
|
|
+int __init acpi_sleep_init(void)
|
|
+{
|
|
+ return -ENOSYS;
|
|
+}
|
|
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
|
|
index 93b8152..122b48f 100644
|
|
--- a/drivers/acpi/tables.c
|
|
+++ b/drivers/acpi/tables.c
|
|
@@ -183,6 +183,49 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
|
|
}
|
|
break;
|
|
|
|
+ case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
|
|
+ {
|
|
+ struct acpi_madt_generic_interrupt *p =
|
|
+ (struct acpi_madt_generic_interrupt *)header;
|
|
+ pr_info("GICC (acpi_id[0x%04x] address[%p] MPDIR[0x%llx] %s)\n",
|
|
+ p->uid, (void *)(unsigned long)p->base_address,
|
|
+ p->arm_mpidr,
|
|
+ (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
|
|
+
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR:
|
|
+ {
|
|
+ struct acpi_madt_generic_distributor *p =
|
|
+ (struct acpi_madt_generic_distributor *)header;
|
|
+ pr_info("GIC Distributor (gic_id[0x%04x] address[%p] gsi_base[%d])\n",
|
|
+ p->gic_id,
|
|
+ (void *)(unsigned long)p->base_address,
|
|
+ p->global_irq_base);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case ACPI_MADT_TYPE_GENERIC_MSI_FRAME:
|
|
+ {
|
|
+ struct acpi_madt_generic_msi_frame *p =
|
|
+ (struct acpi_madt_generic_msi_frame *)header;
|
|
+ pr_info("GIC MSI Frame (msi_fame_id[%d] address[%p])\n",
|
|
+ p->msi_frame_id,
|
|
+ (void *)(unsigned long)p->base_address);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR:
|
|
+ {
|
|
+ struct acpi_madt_generic_redistributor *p =
|
|
+ (struct acpi_madt_generic_redistributor *)header;
|
|
+ pr_info("GIC Redistributor (address[%p] region_size[0x%x])\n",
|
|
+ (void *)(unsigned long)p->base_address,
|
|
+ p->length);
|
|
+ }
|
|
+ break;
|
|
+
|
|
default:
|
|
pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
|
|
header->type);
|
|
@@ -210,7 +253,7 @@ acpi_parse_entries(char *id, unsigned long table_size,
|
|
return -EINVAL;
|
|
|
|
if (!table_header) {
|
|
- pr_warn("%4.4s not present\n", id);
|
|
+ pr_warn("Table header not present\n");
|
|
return -ENODEV;
|
|
}
|
|
|
|
@@ -246,7 +289,8 @@ acpi_parse_entries(char *id, unsigned long table_size,
|
|
|
|
if (max_entries && count > max_entries) {
|
|
pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
|
|
- id, entry_id, count - max_entries, count);
|
|
+ table_header->signature, entry_id, count - max_entries,
|
|
+ count);
|
|
}
|
|
|
|
return count;
|
|
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
|
|
index cd49a39..7f68f96 100644
|
|
--- a/drivers/acpi/utils.c
|
|
+++ b/drivers/acpi/utils.c
|
|
@@ -712,3 +712,29 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
|
|
return false;
|
|
}
|
|
EXPORT_SYMBOL(acpi_check_dsm);
|
|
+
|
|
+/**
|
|
+ * acpi_check_coherency - check for memory coherency of a device
|
|
+ * @handle: ACPI device handle
|
|
+ * @val: Pointer to returned value
|
|
+ *
|
|
+ * Search a device and its parents for a _CCA method and return
|
|
+ * its value.
|
|
+ */
|
|
+acpi_status acpi_check_coherency(acpi_handle handle, int *val)
|
|
+{
|
|
+ unsigned long long data;
|
|
+ acpi_status status;
|
|
+
|
|
+ do {
|
|
+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
|
|
+ if (!ACPI_FAILURE(status)) {
|
|
+ *val = data;
|
|
+ break;
|
|
+ }
|
|
+ status = acpi_get_parent(handle, &handle);
|
|
+ } while (!ACPI_FAILURE(status));
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL(acpi_check_coherency);
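
(Illustrative note, not part of the patch: a minimal driver-side sketch of
the helper, mirroring what the arm64 DMA bus notifier earlier in this series
does; pdev stands for some hypothetical platform device.)

    int coherent;
    acpi_status status;

    status = acpi_check_coherency(ACPI_HANDLE(&pdev->dev), &coherent);
    if (ACPI_SUCCESS(status) && !coherent)
            dev_info(&pdev->dev, "_CCA reports non-coherent DMA\n");
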
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
|
|
index a3a1360..edca892 100644
|
|
--- a/drivers/ata/Kconfig
|
|
+++ b/drivers/ata/Kconfig
|
|
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
|
|
|
|
config ATA_ACPI
|
|
bool "ATA ACPI Support"
|
|
- depends on ACPI && PCI
|
|
+ depends on ACPI
|
|
default y
|
|
help
|
|
This option adds support for ATA-related ACPI objects.
|
|
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
|
|
index 18d5398..999e577 100644
|
|
--- a/drivers/ata/ahci_platform.c
|
|
+++ b/drivers/ata/ahci_platform.c
|
|
@@ -20,6 +20,9 @@
|
|
#include <linux/platform_device.h>
|
|
#include <linux/libata.h>
|
|
#include <linux/ahci_platform.h>
|
|
+#ifdef CONFIG_ATA_ACPI
|
|
+#include <linux/acpi.h>
|
|
+#endif
|
|
#include "ahci.h"
|
|
|
|
static const struct ata_port_info ahci_port_info = {
|
|
@@ -71,12 +74,22 @@ static const struct of_device_id ahci_of_match[] = {
|
|
};
|
|
MODULE_DEVICE_TABLE(of, ahci_of_match);
|
|
|
|
+#ifdef CONFIG_ATA_ACPI
|
|
+static const struct acpi_device_id ahci_acpi_match[] = {
|
|
+ { "AMDI0600", 0 }, /* AMD Seattle AHCI */
|
|
+ { },
|
|
+};
|
|
+#endif
|
|
+
|
|
static struct platform_driver ahci_driver = {
|
|
.probe = ahci_probe,
|
|
.remove = ata_platform_remove_one,
|
|
.driver = {
|
|
.name = "ahci",
|
|
.of_match_table = ahci_of_match,
|
|
+#ifdef CONFIG_ATA_ACPI
|
|
+ .acpi_match_table = ACPI_PTR(ahci_acpi_match),
|
|
+#endif
|
|
.pm = &ahci_pm_ops,
|
|
},
|
|
};
|
|
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
|
|
index feeb8f1..8f82267 100644
|
|
--- a/drivers/ata/ahci_xgene.c
|
|
+++ b/drivers/ata/ahci_xgene.c
|
|
@@ -28,6 +28,7 @@
|
|
#include <linux/of_address.h>
|
|
#include <linux/of_irq.h>
|
|
#include <linux/phy/phy.h>
|
|
+#include <linux/acpi.h>
|
|
#include "ahci.h"
|
|
|
|
/* Max # of disk per a controller */
|
|
@@ -148,14 +150,6 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
|
|
return rc;
|
|
}
|
|
|
|
-static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
|
|
-{
|
|
- void __iomem *diagcsr = ctx->csr_diag;
|
|
-
|
|
- return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
|
|
- readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
|
|
-}
|
|
-
|
|
/**
|
|
* xgene_ahci_read_id - Read ID data from the specified device
|
|
* @dev: device
|
|
@@ -501,11 +495,6 @@ static int xgene_ahci_probe(struct platform_device *pdev)
|
|
return -ENODEV;
|
|
}
|
|
|
|
- if (xgene_ahci_is_memram_inited(ctx)) {
|
|
- dev_info(dev, "skip clock and PHY initialization\n");
|
|
- goto skip_clk_phy;
|
|
- }
|
|
-
|
|
/* Due to errata, HW requires full toggle transition */
|
|
rc = ahci_platform_enable_clks(hpriv);
|
|
if (rc)
|
|
@@ -518,7 +507,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
|
|
|
|
/* Configure the host controller */
|
|
xgene_ahci_hw_init(hpriv);
|
|
-skip_clk_phy:
|
|
+
|
|
hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
|
|
|
|
rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
|
|
@@ -533,6 +522,16 @@ disable_resources:
|
|
return rc;
|
|
}
|
|
|
|
+#ifdef CONFIG_ACPI
|
|
+static const struct acpi_device_id xgene_ahci_acpi_match[] = {
|
|
+ { "APMC0D00", },
|
|
+ { "APMC0D0D", },
|
|
+ { "APMC0D09", },
|
|
+ { }
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
|
|
+#endif
|
|
+
|
|
static const struct of_device_id xgene_ahci_of_match[] = {
|
|
{.compatible = "apm,xgene-ahci"},
|
|
{},
|
|
@@ -545,6 +544,7 @@ static struct platform_driver xgene_ahci_driver = {
|
|
.driver = {
|
|
.name = "xgene-ahci",
|
|
.of_match_table = xgene_ahci_of_match,
|
|
+ .acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
|
|
},
|
|
};
|
|
|
|
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
|
|
index 095c177..fd5ebbf 100644
|
|
--- a/drivers/clocksource/arm_arch_timer.c
|
|
+++ b/drivers/clocksource/arm_arch_timer.c
|
|
@@ -21,6 +21,7 @@
|
|
#include <linux/io.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/sched_clock.h>
|
|
+#include <linux/acpi.h>
|
|
|
|
#include <asm/arch_timer.h>
|
|
#include <asm/virt.h>
|
|
@@ -61,7 +62,8 @@ enum ppi_nr {
|
|
MAX_TIMER_PPI
|
|
};
|
|
|
|
-static int arch_timer_ppi[MAX_TIMER_PPI];
|
|
+int arch_timer_ppi[MAX_TIMER_PPI];
|
|
+EXPORT_SYMBOL(arch_timer_ppi);
|
|
|
|
static struct clock_event_device __percpu *arch_timer_evt;
|
|
|
|
@@ -370,8 +372,12 @@ arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
|
|
if (arch_timer_rate)
|
|
return;
|
|
|
|
- /* Try to determine the frequency from the device tree or CNTFRQ */
|
|
- if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
|
|
+ /*
|
|
+ * Try to determine the frequency from the device tree or CNTFRQ;
|
|
+ * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
|
|
+ */
|
|
+ if (!acpi_disabled ||
|
|
+ of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
|
|
if (cntbase)
|
|
arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
|
|
else
|
|
@@ -690,28 +696,8 @@ static void __init arch_timer_common_init(void)
|
|
arch_timer_arch_init();
|
|
}
|
|
|
|
-static void __init arch_timer_init(struct device_node *np)
|
|
+static void __init arch_timer_init(void)
|
|
{
|
|
- int i;
|
|
-
|
|
- if (arch_timers_present & ARCH_CP15_TIMER) {
|
|
- pr_warn("arch_timer: multiple nodes in dt, skipping\n");
|
|
- return;
|
|
- }
|
|
-
|
|
- arch_timers_present |= ARCH_CP15_TIMER;
|
|
- for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
|
|
- arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
|
|
- arch_timer_detect_rate(NULL, np);
|
|
-
|
|
- /*
|
|
- * If we cannot rely on firmware initializing the timer registers then
|
|
- * we should use the physical timers instead.
|
|
- */
|
|
- if (IS_ENABLED(CONFIG_ARM) &&
|
|
- of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
|
|
- arch_timer_use_virtual = false;
|
|
-
|
|
/*
|
|
* If HYP mode is available, we know that the physical timer
|
|
* has been configured to be accessible from PL1. Use it, so
|
|
@@ -730,13 +716,39 @@ static void __init arch_timer_init(struct device_node *np)
|
|
}
|
|
}
|
|
|
|
- arch_timer_c3stop = !of_property_read_bool(np, "always-on");
|
|
-
|
|
arch_timer_register();
|
|
arch_timer_common_init();
|
|
}
|
|
-CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
|
|
-CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
|
|
+
|
|
+static void __init arch_timer_of_init(struct device_node *np)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * If we cannot rely on firmware initializing the timer registers then
|
|
+ * we should use the physical timers instead.
|
|
+ */
|
|
+ if (IS_ENABLED(CONFIG_ARM) &&
|
|
+ of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
|
|
+ arch_timer_use_virtual = false;
|
|
+
|
|
+ if (arch_timers_present & ARCH_CP15_TIMER) {
|
|
+ pr_warn("arch_timer: multiple nodes in dt, skipping\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ arch_timers_present |= ARCH_CP15_TIMER;
|
|
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
|
|
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
|
|
+
|
|
+ arch_timer_detect_rate(NULL, np);
|
|
+
|
|
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
|
|
+
|
|
+ arch_timer_init();
|
|
+}
|
|
+CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
|
|
+CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
|
|
|
|
static void __init arch_timer_mem_init(struct device_node *np)
|
|
{
|
|
@@ -803,3 +815,71 @@ static void __init arch_timer_mem_init(struct device_node *np)
|
|
}
|
|
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
|
|
arch_timer_mem_init);
|
|
+
|
|
+#ifdef CONFIG_ACPI
|
|
+static int __init
|
|
+map_generic_timer_interrupt(u32 interrupt, u32 flags)
|
|
+{
|
|
+ int trigger, polarity;
|
|
+
|
|
+ if (!interrupt)
|
|
+ return 0;
|
|
+
|
|
+ trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
|
|
+ : ACPI_LEVEL_SENSITIVE;
|
|
+
|
|
+ polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
|
|
+ : ACPI_ACTIVE_HIGH;
|
|
+
|
|
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
|
|
+}
|
|
+
|
|
+/* Initialize per-processor generic timer */
|
|
+static int __init arch_timer_acpi_init(struct acpi_table_header *table)
|
|
+{
|
|
+ struct acpi_table_gtdt *gtdt;
|
|
+
|
|
+ if (arch_timers_present & ARCH_CP15_TIMER) {
|
|
+ pr_warn("arch_timer: already initialized, skipping\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ gtdt = container_of(table, struct acpi_table_gtdt, header);
|
|
+
|
|
+ arch_timers_present |= ARCH_CP15_TIMER;
|
|
+
|
|
+ arch_timer_ppi[PHYS_SECURE_PPI] =
|
|
+ map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
|
|
+ gtdt->secure_el1_flags);
|
|
+
|
|
+ arch_timer_ppi[PHYS_NONSECURE_PPI] =
|
|
+ map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
|
|
+ gtdt->non_secure_el1_flags);
|
|
+
|
|
+ arch_timer_ppi[VIRT_PPI] =
|
|
+ map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
|
|
+ gtdt->virtual_timer_flags);
|
|
+
|
|
+ arch_timer_ppi[HYP_PPI] =
|
|
+ map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
|
|
+ gtdt->non_secure_el2_flags);
|
|
+
|
|
+ /* Get the frequency from CNTFRQ */
|
|
+ arch_timer_detect_rate(NULL, NULL);
|
|
+
|
|
+ /* Always-on capability */
|
|
+ arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
|
|
+
|
|
+ arch_timer_init();
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* Initialize all the generic timers presented in GTDT */
|
|
+void __init acpi_generic_timer_init(void)
|
|
+{
|
|
+ if (acpi_disabled)
|
|
+ return;
|
|
+
|
|
+ acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init);
|
|
+}
|
|
+#endif
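
(Illustrative note, not part of the patch: the GTDT flag decoding done by
map_generic_timer_interrupt() above boils down to the two ACPI flag bits, as
in this sketch using the same ACPICA constants.)

    /* both bits set describes an edge-triggered, active-low timer PPI */
    u32 flags    = ACPI_GTDT_INTERRUPT_MODE | ACPI_GTDT_INTERRUPT_POLARITY;
    int trigger  = (flags & ACPI_GTDT_INTERRUPT_MODE) ?
                            ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
    int polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ?
                            ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
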
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
|
|
index 90df4df..c9c1c8c 100644
|
|
--- a/drivers/input/keyboard/gpio_keys_polled.c
|
|
+++ b/drivers/input/keyboard/gpio_keys_polled.c
|
|
@@ -297,6 +297,7 @@ static struct platform_driver gpio_keys_polled_driver = {
|
|
.probe = gpio_keys_polled_probe,
|
|
.driver = {
|
|
.name = DRV_NAME,
|
|
+ .owner = THIS_MODULE,
|
|
.of_match_table = gpio_keys_polled_of_match,
|
|
},
|
|
};
|
|
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
|
|
index 6cd47b7..09f904a 100644
|
|
--- a/drivers/iommu/arm-smmu.c
|
|
+++ b/drivers/iommu/arm-smmu.c
|
|
@@ -451,7 +451,10 @@ static struct device_node *dev_get_dev_node(struct device *dev)
|
|
|
|
while (!pci_is_root_bus(bus))
|
|
bus = bus->parent;
|
|
- return bus->bridge->parent->of_node;
|
|
+ if (bus->bridge->parent)
|
|
+ return bus->bridge->parent->of_node;
|
|
+ else
|
|
+ return NULL;
|
|
}
|
|
|
|
return dev->of_node;
|
|
@@ -567,6 +570,9 @@ static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
|
|
struct arm_smmu_master *master = NULL;
|
|
struct device_node *dev_node = dev_get_dev_node(dev);
|
|
|
|
+ if (!dev_node)
|
|
+ return NULL;
|
|
+
|
|
spin_lock(&arm_smmu_devices_lock);
|
|
list_for_each_entry(smmu, &arm_smmu_devices, list) {
|
|
master = find_smmu_master(smmu, dev_node);
|
|
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
|
|
index 1a146cc..ab7dfe2 100644
|
|
--- a/drivers/irqchip/irq-gic-v3.c
|
|
+++ b/drivers/irqchip/irq-gic-v3.c
|
|
@@ -520,9 +520,19 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
|
|
isb();
|
|
}
|
|
|
|
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
|
|
+static void gic_wakeup_parked_cpu(int cpu)
|
|
+{
|
|
+ gic_raise_softirq(cpumask_of(cpu), 0);
|
|
+}
|
|
+#endif
|
|
+
|
|
static void gic_smp_init(void)
|
|
{
|
|
set_smp_cross_call(gic_raise_softirq);
|
|
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
|
|
+ set_smp_boot_wakeup_call(gic_wakeup_parked_cpu);
|
|
+#endif
|
|
register_cpu_notifier(&gic_cpu_notifier);
|
|
}
|
|
|
|
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
|
|
index d617ee5..da0cd51 100644
|
|
--- a/drivers/irqchip/irq-gic.c
|
|
+++ b/drivers/irqchip/irq-gic.c
|
|
@@ -33,12 +33,14 @@
|
|
#include <linux/of.h>
|
|
#include <linux/of_address.h>
|
|
#include <linux/of_irq.h>
|
|
+#include <linux/acpi.h>
|
|
#include <linux/irqdomain.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/percpu.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/irqchip/chained_irq.h>
|
|
#include <linux/irqchip/arm-gic.h>
|
|
+#include <linux/irqchip/arm-gic-acpi.h>
|
|
|
|
#include <asm/cputype.h>
|
|
#include <asm/irq.h>
|
|
@@ -641,6 +643,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
|
|
|
|
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
|
|
}
|
|
+
|
|
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
|
|
+static void gic_wakeup_parked_cpu(int cpu)
|
|
+{
|
|
+ gic_raise_softirq(cpumask_of(cpu), GIC_DIST_SOFTINT_NSATT);
|
|
+}
|
|
+#endif
|
|
#endif
|
|
|
|
#ifdef CONFIG_BL_SWITCHER
|
|
@@ -1025,6 +1034,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
|
|
#ifdef CONFIG_SMP
|
|
set_smp_cross_call(gic_raise_softirq);
|
|
register_cpu_notifier(&gic_cpu_notifier);
|
|
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
|
|
+ set_smp_boot_wakeup_call(gic_wakeup_parked_cpu);
|
|
+#endif
|
|
#endif
|
|
set_handle_irq(gic_handle_irq);
|
|
}
|
|
@@ -1083,3 +1095,109 @@ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
|
|
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
|
|
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_ACPI
|
|
+static phys_addr_t dist_phy_base, cpu_phy_base;
|
|
+static int cpu_base_assigned;
|
|
+
|
|
+static int __init
|
|
+gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
|
|
+ const unsigned long end)
|
|
+{
|
|
+ struct acpi_madt_generic_interrupt *processor;
|
|
+ phys_addr_t gic_cpu_base;
|
|
+
|
|
+ processor = (struct acpi_madt_generic_interrupt *)header;
|
|
+
|
|
+ if (BAD_MADT_ENTRY(processor, end))
|
|
+ return -EINVAL;
|
|
+
|
|
+ /*
|
|
+ * There is no support for non-banked GICv1/2 register in ACPI spec.
|
|
+ * All CPU interface addresses have to be the same.
|
|
+ */
|
|
+ gic_cpu_base = processor->base_address;
|
|
+ if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
|
|
+ return -EFAULT;
|
|
+
|
|
+ cpu_phy_base = gic_cpu_base;
|
|
+ cpu_base_assigned = 1;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __init
|
|
+gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
|
|
+ const unsigned long end)
|
|
+{
|
|
+ struct acpi_madt_generic_distributor *dist;
|
|
+
|
|
+ dist = (struct acpi_madt_generic_distributor *)header;
|
|
+
|
|
+ if (BAD_MADT_ENTRY(dist, end))
|
|
+ return -EINVAL;
|
|
+
|
|
+ dist_phy_base = dist->base_address;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int __init
|
|
+gic_v2_acpi_init(struct acpi_table_header *table)
|
|
+{
|
|
+ void __iomem *cpu_base, *dist_base;
|
|
+ int count;
|
|
+
|
|
+ /* Collect CPU base addresses */
|
|
+ count = acpi_parse_entries(ACPI_SIG_MADT,
|
|
+ sizeof(struct acpi_table_madt),
|
|
+ gic_acpi_parse_madt_cpu, table,
|
|
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
|
|
+ if (count < 0) {
|
|
+ pr_err("Error during GICC entries parsing\n");
|
|
+ return -EFAULT;
|
|
+ } else if (!count) {
|
|
+ pr_err("No valid GICC entries exist\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Find distributor base address. We expect one distributor entry since
|
|
+ * ACPI 5.1 spec neither support multi-GIC instances nor GIC cascade.
|
|
+ */
|
|
+ count = acpi_parse_entries(ACPI_SIG_MADT,
|
|
+ sizeof(struct acpi_table_madt),
|
|
+ gic_acpi_parse_madt_distributor, table,
|
|
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
|
|
+ if (count <= 0) {
|
|
+ pr_err("Error during GICD entries parsing\n");
|
|
+ return -EFAULT;
|
|
+ } else if (!count) {
|
|
+ pr_err("No valid GICD entries exist\n");
|
|
+ return -EINVAL;
|
|
+ } else if (count > 1) {
|
|
+ pr_err("More than one GICD entry detected\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
|
|
+ if (!cpu_base) {
|
|
+ pr_err("Unable to map GICC registers\n");
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
|
|
+ if (!dist_base) {
|
|
+ pr_err("Unable to map GICD registers\n");
|
|
+ iounmap(cpu_base);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Initialize GIC instance zero (no multi-GIC support). Also, set GIC
|
|
+ * as default IRQ domain to allow for GSI registration and GSI to IRQ
|
|
+ * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
|
|
+ */
|
|
+ gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
|
|
+ irq_set_default_host(gic_data[0].domain);
|
|
+ return 0;
|
|
+}
|
|
+#endif
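
(Illustrative note, not part of the patch: with the GIC registered as the
default IRQ domain above, a later GSI registration resolves straight to a
Linux IRQ number. GSI 42, my_handler and "my-dev" are arbitrary examples.)

    int irq, ret;

    irq = acpi_register_gsi(NULL, 42, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
    if (irq >= 0)
            ret = request_irq(irq, my_handler, 0, "my-dev", NULL);
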
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
|
|
index 0fe2f71..9106c6d 100644
|
|
--- a/drivers/irqchip/irqchip.c
|
|
+++ b/drivers/irqchip/irqchip.c
|
|
@@ -11,6 +11,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/of_irq.h>
|
|
#include <linux/irqchip.h>
|
|
+#include <linux/irqchip/arm-gic-acpi.h>
|
|
|
|
/*
|
|
* This special of_device_id is the sentinel at the end of the
|
|
@@ -26,4 +27,6 @@ extern struct of_device_id __irqchip_of_table[];
|
|
void __init irqchip_init(void)
|
|
{
|
|
of_irq_init(__irqchip_of_table);
|
|
+
|
|
+ acpi_gic_init();
|
|
}
|
|
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
|
|
index 7ea1ea42..5fb4440 100644
|
|
--- a/drivers/leds/leds-gpio.c
|
|
+++ b/drivers/leds/leds-gpio.c
|
|
@@ -291,6 +291,7 @@ static struct platform_driver gpio_led_driver = {
|
|
.remove = gpio_led_remove,
|
|
.driver = {
|
|
.name = "leds-gpio",
|
|
+ .owner = THIS_MODULE,
|
|
.of_match_table = of_gpio_leds_match,
|
|
},
|
|
};
|
|
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66..3957e63 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -130,7 +130,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,

DBGPR("-->xgbe_usec_to_riwt\n");

- rate = clk_get_rate(pdata->sysclk);
+ rate = pdata->sysclk_rate;

/*
* Convert the input usec value to the watchdog timer value. Each
@@ -153,7 +153,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,

DBGPR("-->xgbe_riwt_to_usec\n");

- rate = clk_get_rate(pdata->sysclk);
+ rate = pdata->sysclk_rate;

/*
* Convert the input watchdog timer value to the usec value. Each
@@ -854,6 +854,18 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

+ if (XGBE_SEATTLE_A0) {
+ /* The PCS implementation has reversed the devices in
+ * package registers so we need to change 05 to 06 and
+ * 06 to 05 if being read (these registers are readonly
+ * so no need to do this in the write function)
+ */
+ if ((mmd_address & 0xffff) == 0x05)
+ mmd_address = (mmd_address & ~0xffff) | 0x06;
+ else if ((mmd_address & 0xffff) == 0x06)
+ mmd_address = (mmd_address & ~0xffff) | 0x05;
+ }
+
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index dbd3850..74be78e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -124,6 +124,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/clk.h>
+#include <linux/acpi.h>

#include "xgbe.h"
#include "xgbe-common.h"
@@ -161,6 +162,205 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}

+static int xgbe_map_resources(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *pdev = pdata->pdev;
+ struct device *dev = pdata->dev;
+ struct resource *res;
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ return PTR_ERR(pdata->xgmac_regs);
+ }
+ DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ return PTR_ERR(pdata->xpcs_regs);
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ struct acpi_device *adev = pdata->adev;
+ struct device *dev = pdata->dev;
+ const union acpi_object *property;
+ acpi_status status;
+ u64 cca;
+ unsigned int i;
+ int ret;
+
+ /* Map the memory resources */
+ ret = xgbe_map_resources(pdata);
+ if (ret)
+ return ret;
+
+ /* Obtain the system clock setting */
+ ret = acpi_dev_get_property(adev, XGBE_ACPI_DMA_FREQ, ACPI_TYPE_INTEGER,
+ &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s acpi property\n",
+ XGBE_ACPI_DMA_FREQ);
+ return ret;
+ }
+ pdata->sysclk_rate = property->integer.value;
+
+ /* Obtain the PTP clock setting */
+ ret = acpi_dev_get_property(adev, XGBE_ACPI_PTP_FREQ, ACPI_TYPE_INTEGER,
+ &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s acpi property\n",
+ XGBE_ACPI_PTP_FREQ);
+ return ret;
+ }
+ pdata->ptpclk_rate = property->integer.value;
+
+ /* Retrieve the MAC address */
+ ret = acpi_dev_get_property_array(adev, XGBE_ACPI_MAC_ADDR,
+ ACPI_TYPE_INTEGER, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s acpi property\n",
+ XGBE_ACPI_MAC_ADDR);
+ return ret;
+ }
+ if (property->package.count != 6) {
+ dev_err(dev, "invalid %s acpi property\n",
+ XGBE_ACPI_MAC_ADDR);
+ return -EINVAL;
+ }
+ for (i = 0; i < property->package.count; i++) {
+ union acpi_object *obj = &property->package.elements[i];
+
+ pdata->mac_addr[i] = (u8)obj->integer.value;
+ }
+ if (!is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s acpi property\n",
+ XGBE_ACPI_MAC_ADDR);
+ return -EINVAL;
+ }
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ ret = acpi_dev_get_property(adev, XGBE_ACPI_PHY_MODE, ACPI_TYPE_STRING,
+ &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s acpi property\n",
+ XGBE_ACPI_PHY_MODE);
+ return ret;
+ }
+ if (strcmp(property->string.pointer,
+ phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ dev_err(dev, "invalid %s acpi property\n",
+ XGBE_ACPI_PHY_MODE);
+ return -EINVAL;
+ }
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+#ifndef METHOD_NAME__CCA
+#define METHOD_NAME__CCA "_CCA"
+#endif
+ /* Set the device cache coherency values */
+ if (acpi_has_method(adev->handle, METHOD_NAME__CCA)) {
+ status = acpi_evaluate_integer(adev->handle, METHOD_NAME__CCA,
+ NULL, &cca);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "error obtaining acpi _CCA method\n");
+ return -EINVAL;
+ }
+ } else {
+ cca = 0;
+ }
+
+ if (cca) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ const u8 *mac_addr;
+ int ret;
+
+ /* Map the memory resources */
+ ret = xgbe_map_resources(pdata);
+ if (ret)
+ return ret;
+
+ /* Obtain the system clock setting */
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ return PTR_ERR(pdata->sysclk);
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ return PTR_ERR(pdata->ptpclk);
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ /* Retrieve the MAC address */
+ mac_addr = of_get_mac_address(dev->of_node);
+ if (!mac_addr) {
+ dev_err(dev, "invalid mac address for this device\n");
+ return -EINVAL;
+ }
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ pdata->phy_mode = of_get_phy_mode(dev->of_node);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+ dev_err(dev, "invalid phy-mode specified for this device\n");
+ return -EINVAL;
+ }
+
+ /* Set the device cache coherency values */
+ if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /*CONFIG_OF */
+
static int xgbe_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
@@ -186,6 +386,7 @@ static int xgbe_probe(struct platform_device *pdev)
pdata = netdev_priv(netdev);
pdata->netdev = netdev;
pdata->pdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
pdata->dev = dev;
platform_set_drvdata(pdev, netdev);

@@ -212,40 +413,13 @@ static int xgbe_probe(struct platform_device *pdev)
goto err_io;
}

- /* Obtain the system clock setting */
- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
- if (IS_ERR(pdata->sysclk)) {
- dev_err(dev, "dma devm_clk_get failed\n");
- ret = PTR_ERR(pdata->sysclk);
- goto err_io;
- }
-
- /* Obtain the PTP clock setting */
- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
- if (IS_ERR(pdata->ptpclk)) {
- dev_err(dev, "ptp devm_clk_get failed\n");
- ret = PTR_ERR(pdata->ptpclk);
- goto err_io;
- }
-
- /* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->xgmac_regs)) {
- dev_err(dev, "xgmac ioremap failed\n");
- ret = PTR_ERR(pdata->xgmac_regs);
- goto err_io;
- }
- DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->xpcs_regs)) {
- dev_err(dev, "xpcs ioremap failed\n");
- ret = PTR_ERR(pdata->xpcs_regs);
+ /* Obtain device settings */
+ if (pdata->adev && !acpi_disabled)
+ ret = xgbe_acpi_support(pdata);
+ else
+ ret = xgbe_of_support(pdata);
+ if (ret)
goto err_io;
- }
- DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);

/* Set the DMA mask */
if (!dev->dma_mask)
@@ -275,10 +449,12 @@ static int xgbe_probe(struct platform_device *pdev)
dev_err(dev, "platform_get_irq 0 failed\n");
goto err_io;
}
+
pdata->dev_irq = ret;

netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
@@ -291,23 +467,6 @@ static int xgbe_probe(struct platform_device *pdev)
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);

- /* Retrieve the MAC address */
- mac_addr = of_get_mac_address(dev->of_node);
- if (!mac_addr) {
- dev_err(dev, "invalid mac address for this device\n");
- ret = -EINVAL;
- goto err_io;
- }
- memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
-
- /* Retrieve the PHY mode - it must be "xgmii" */
- pdata->phy_mode = of_get_phy_mode(dev->of_node);
- if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
- dev_err(dev, "invalid phy-mode specified for this device\n");
- ret = -EINVAL;
- goto err_io;
- }
-
/* Set default configuration data */
xgbe_default_config(pdata);

@@ -491,10 +650,22 @@ static int xgbe_resume(struct device *dev)
}
#endif /* CONFIG_PM */

+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+ { "AMDI8000", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v0a", },
{ .compatible = "amd,xgbe-seattle-v1a", },
{},
};
+#endif

MODULE_DEVICE_TABLE(of, xgbe_of_match);
static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
@@ -502,7 +673,12 @@ static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
static struct platform_driver xgbe_driver = {
.driver = {
.name = "amd-xgbe",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
+#endif
.pm = &xgbe_pm_ops,
},
.probe = xgbe_probe,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 363b210..5d2c89b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -119,6 +119,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/acpi.h>

#include "xgbe.h"
#include "xgbe-common.h"
@@ -205,25 +206,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)

int xgbe_mdio_register(struct xgbe_prv_data *pdata)
{
- struct device_node *phy_node;
struct mii_bus *mii;
struct phy_device *phydev;
int ret = 0;

DBGPR("-->xgbe_mdio_register\n");

- /* Retrieve the phy-handle */
- phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
- if (!phy_node) {
- dev_err(pdata->dev, "unable to parse phy-handle\n");
- return -EINVAL;
- }
-
mii = mdiobus_alloc();
if (mii == NULL) {
dev_err(pdata->dev, "mdiobus_alloc failed\n");
- ret = -ENOMEM;
- goto err_node_get;
+ return -ENOMEM;
}

/* Register on the MDIO bus (don't probe any PHYs) */
@@ -252,12 +244,9 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));

- of_node_get(phy_node);
- phydev->dev.of_node = phy_node;
ret = phy_device_register(phydev);
if (ret) {
dev_err(pdata->dev, "phy_device_register failed\n");
- of_node_put(phy_node);
goto err_phy_device;
}

@@ -283,8 +272,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)

pdata->phydev = phydev;

- of_node_put(phy_node);
-
DBGPHY_REGS(pdata);

DBGPR("<--xgbe_mdio_register\n");
@@ -300,9 +287,6 @@ err_mdiobus_register:
err_mdiobus_alloc:
mdiobus_free(mii);

-err_node_get:
- of_node_put(phy_node);
-
return ret;
}

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a1bf9d1c..fa67203 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -239,7 +239,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
info->owner = THIS_MODULE;
- info->max_adj = clk_get_rate(pdata->ptpclk);
+ info->max_adj = pdata->ptpclk_rate;
info->adjfreq = xgbe_adjfreq;
info->adjtime = xgbe_adjtime;
info->gettime = xgbe_gettime;
@@ -260,7 +260,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
*/
dividend = 50000000;
dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);

/* Setup the timecounter */
cc->read = xgbe_cc_read;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9ec762..6f3a39e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -187,6 +187,12 @@
#define XGBE_PTP_CLOCK "ptp_clk"
#define XGBE_DMA_IRQS "amd,per-channel-interrupt"

+/* ACPI property names */
+#define XGBE_ACPI_MAC_ADDR "mac-address"
+#define XGBE_ACPI_PHY_MODE "phy-mode"
+#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+
/* Timestamp support - values based on 50MHz PTP clock
* 50MHz => 20 nsec
*/
@@ -201,8 +207,11 @@
#define XGBE_FIFO_SIZE_B(x) (x)
#define XGBE_FIFO_SIZE_KB(x) (x * 1024)

+#define XGBE_TC_CNT 2
#define XGBE_TC_MIN_QUANTUM 10

+#define XGBE_SEATTLE_A0 ((read_cpuid_id() & 0x00f0000f) == 0)
+
/* Helper macro for descriptor handling
* Always use XGBE_GET_DESC_DATA to access the descriptor data
* since the index is free-running and needs to be and-ed
@@ -650,6 +659,7 @@ struct xgbe_hw_features {
struct xgbe_prv_data {
struct net_device *netdev;
struct platform_device *pdev;
+ struct acpi_device *adev;
struct device *dev;

/* XGMAC/XPCS related mmio registers */
@@ -739,6 +749,7 @@ struct xgbe_prv_data {
unsigned int phy_rx_pause;

/* Netdev related settings */
+ unsigned char mac_addr[MAX_ADDR_LEN];
netdev_features_t netdev_features;
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
@@ -748,7 +759,9 @@ struct xgbe_prv_data {

/* Device clocks */
struct clk *sysclk;
+ unsigned long sysclk_rate;
struct clk *ptpclk;
+ unsigned long ptpclk_rate;

/* Timestamp support */
spinlock_t tstamp_lock;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 7ba83ff..29aad5e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -663,15 +663,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
struct phy_device *phy_dev;
struct device *dev = &pdata->pdev->dev;

- phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (!phy_np) {
- netdev_dbg(ndev, "No phy-handle found\n");
- return -ENODEV;
+ if (dev->of_node) {
+ phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!phy_np) {
+ netdev_dbg(ndev, "No phy-handle found in DT\n");
+ return -ENODEV;
+ }
+ pdata->phy_dev = of_phy_find_device(phy_np);
}

- phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
- 0, pdata->phy_mode);
- if (!phy_dev) {
+ phy_dev = pdata->phy_dev;
+
+ if (phy_dev == NULL ||
+ phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+ pdata->phy_mode)) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
@@ -681,11 +686,52 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
~SUPPORTED_100baseT_Half &
~SUPPORTED_1000baseT_Half;
phy_dev->advertising = phy_dev->supported;
- pdata->phy_dev = phy_dev;

return 0;
}

+#ifdef CONFIG_ACPI
+static int xgene_acpi_mdiobus_register(struct xgene_enet_pdata *pdata,
+ struct mii_bus *mdio)
+{
+ struct device *dev = &pdata->pdev->dev;
+ struct phy_device *phy;
+ int i, ret;
+ u32 phy_id;
+
+ /* Mask out all PHYs from auto probing. */
+ mdio->phy_mask = ~0;
+
+ /* Clear all the IRQ properties */
+ if (mdio->irq)
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdio->irq[i] = PHY_POLL;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(mdio);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+ if (ret)
+ return -EINVAL;
+
+ phy = get_phy_device(mdio, phy_id, true);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ ret = phy_device_register(phy);
+ if (ret)
+ phy_device_free(phy);
+ else
+ pdata->phy_dev = phy;
+
+ return ret;
+}
+#else
+#define xgene_acpi_mdiobus_register(a, b) -1
+#endif
+
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
struct net_device *ndev = pdata->ndev;
@@ -702,7 +748,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
}
}

- if (!mdio_np) {
+ if (dev->of_node && !mdio_np) {
netdev_dbg(ndev, "No mdio node in the dts\n");
return -ENXIO;
}
@@ -720,7 +766,10 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
mdio_bus->priv = pdata;
mdio_bus->parent = &ndev->dev;

- ret = of_mdiobus_register(mdio_bus, mdio_np);
+ if (dev->of_node)
+ ret = of_mdiobus_register(mdio_bus, mdio_np);
+ else
+ ret = xgene_acpi_mdiobus_register(pdata, mdio_bus);
if (ret) {
netdev_err(ndev, "Failed to register MDIO bus\n");
mdiobus_free(mdio_bus);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 83a5028..f66598a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -746,6 +746,42 @@ static const struct net_device_ops xgene_ndev_ops = {
.ndo_set_mac_address = xgene_enet_set_mac_address,
};

+#ifdef CONFIG_ACPI
+static int acpi_get_mac_address(struct device *dev,
+ unsigned char *addr)
+{
+ int ret;
+
+ ret = device_property_read_u8_array(dev, "mac-address", addr, 6);
+ if (ret)
+ return 0;
+
+ return 6;
+}
+
+static int acpi_get_phy_mode(struct device *dev)
+{
+ int i, ret, phy_mode;
+ char *modestr;
+
+ ret = device_property_read_string(dev, "phy-mode", &modestr);
+ if (ret)
+ return -1;
+
+ phy_mode = -1;
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+ if (!strcasecmp(modestr, phy_modes(i))) {
+ phy_mode = i;
+ break;
+ }
+ }
+ return phy_mode;
+}
+#else
+#define acpi_get_mac_address(a, b, c) 0
+#define acpi_get_phy_mode(a) -1
+#endif
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -761,6 +797,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
ndev = pdata->ndev;

res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Resource enet_csr not defined\n");
+ return -ENODEV;
+ }
pdata->base_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->base_addr)) {
dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
@@ -768,6 +810,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}

res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "Resource ring_csr not defined\n");
+ return -ENODEV;
+ }
pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ring_csr_addr)) {
dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
@@ -775,6 +823,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}

res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(dev, "Resource ring_cmd not defined\n");
+ return -ENODEV;
+ }
pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ring_cmd_addr)) {
dev_err(dev, "Unable to retrieve ENET Ring command region\n");
@@ -792,11 +846,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
mac = of_get_mac_address(dev->of_node);
if (mac)
memcpy(ndev->dev_addr, mac, ndev->addr_len);
- else
+ else if (!acpi_get_mac_address(dev, ndev->dev_addr))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ if (pdata->phy_mode < 0)
+ pdata->phy_mode = acpi_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
@@ -809,11 +865,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
}

pdata->clk = devm_clk_get(&pdev->dev, NULL);
- ret = IS_ERR(pdata->clk);
if (IS_ERR(pdata->clk)) {
- dev_err(&pdev->dev, "can't get clock\n");
- ret = PTR_ERR(pdata->clk);
- return ret;
+ /*
+ * Not necessarily an error. Firmware may have
+ * set up the clock already.
+ */
+ pdata->clk = NULL;
}

base_addr = pdata->base_addr;
@@ -863,7 +920,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
pdata->mac_ops->init(pdata);

- return ret;
+ return 0;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
@@ -924,7 +981,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}

- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
netdev_err(ndev, "No usable DMA configuration\n");
goto err;
@@ -972,6 +1029,14 @@ static int xgene_enet_remove(struct platform_device *pdev)
return 0;
}

+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+ { "APMC0D05", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
static struct of_device_id xgene_enet_match[] = {
{.compatible = "apm,xgene-enet",},
{},
@@ -983,6 +1048,7 @@ static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
.of_match_table = xgene_enet_match,
+ .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
.remove = xgene_enet_remove,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index f9958fa..0e06cad 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -31,6 +31,7 @@
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
+#include <linux/acpi.h>
#include "xgene_enet_hw.h"

#define XGENE_DRV_VERSION "v1.0"
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f9..944b177 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -82,6 +82,7 @@ static const char version[] =
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/acpi.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2467,6 +2468,14 @@ static struct dev_pm_ops smc_drv_pm_ops = {
.resume = smc_drv_resume,
};

+#ifdef CONFIG_ACPI
+static const struct acpi_device_id smc91x_acpi_match[] = {
+ { "LNRO0003", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match);
+#endif
+
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
.remove = smc_drv_remove,
@@ -2474,6 +2483,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
.of_match_table = of_match_ptr(smc91x_match),
+ .acpi_match_table = ACPI_PTR(smc91x_acpi_match),
},
};

diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 903dc3d..fcc4fc7 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -74,15 +74,19 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/uaccess.h>
+#include <linux/acpi.h>
+

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("1.0.0-a");
+MODULE_VERSION("0.0.0-a");
MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");

-#define XGBE_PHY_ID 0x000162d0
+#define XGBE_PHY_ID 0x7996ced0
#define XGBE_PHY_MASK 0xfffffff0

+#define XGBE_PHY_SERDES_RETRY 32
+#define XGBE_PHY_CHANNEL_PROPERTY "amd,serdes-channel"
#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"

#define XGBE_AN_INT_CMPLT 0x01
@@ -99,11 +103,9 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
#endif
-
#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
#endif
-
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -111,93 +113,14 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK 0x8001
#endif
-
#ifndef MDIO_AN_INT
#define MDIO_AN_INT 0x8002
#endif

-#ifndef MDIO_AN_KR_CTRL
-#define MDIO_AN_KR_CTRL 0x8003
-#endif
-
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif

-#ifndef MDIO_KR_CTRL_PDETECT
-#define MDIO_KR_CTRL_PDETECT 0x01
-#endif
-
-/* SerDes integration register offsets */
-#define SIR0_KR_RT_1 0x002c
-#define SIR0_STATUS 0x0040
-#define SIR1_SPEED 0x0000
-
-/* SerDes integration register entry bit positions and sizes */
-#define SIR0_KR_RT_1_RESET_INDEX 11
-#define SIR0_KR_RT_1_RESET_WIDTH 1
-#define SIR0_STATUS_RX_READY_INDEX 0
-#define SIR0_STATUS_RX_READY_WIDTH 1
-#define SIR0_STATUS_TX_READY_INDEX 8
-#define SIR0_STATUS_TX_READY_WIDTH 1
-#define SIR1_SPEED_DATARATE_INDEX 4
-#define SIR1_SPEED_DATARATE_WIDTH 2
-#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
-#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
-#define SIR1_SPEED_PLLSEL_INDEX 3
-#define SIR1_SPEED_PLLSEL_WIDTH 1
-#define SIR1_SPEED_RATECHANGE_INDEX 6
-#define SIR1_SPEED_RATECHANGE_WIDTH 1
-#define SIR1_SPEED_TXAMP_INDEX 8
-#define SIR1_SPEED_TXAMP_WIDTH 4
-#define SIR1_SPEED_WORDMODE_INDEX 0
-#define SIR1_SPEED_WORDMODE_WIDTH 3
-
-#define SPEED_10000_CDR 0x7
-#define SPEED_10000_PLL 0x1
-#define SPEED_10000_RATE 0x0
-#define SPEED_10000_TXAMP 0xa
-#define SPEED_10000_WORD 0x7
-
-#define SPEED_2500_CDR 0x2
-#define SPEED_2500_PLL 0x0
-#define SPEED_2500_RATE 0x1
-#define SPEED_2500_TXAMP 0xf
-#define SPEED_2500_WORD 0x1
-
-#define SPEED_1000_CDR 0x2
-#define SPEED_1000_PLL 0x0
-#define SPEED_1000_RATE 0x3
-#define SPEED_1000_TXAMP 0xf
-#define SPEED_1000_WORD 0x1
-
-/* SerDes RxTx register offsets */
-#define RXTX_REG20 0x0050
-#define RXTX_REG114 0x01c8
-
-/* SerDes RxTx register entry bit positions and sizes */
-#define RXTX_REG20_BLWC_ENA_INDEX 2
-#define RXTX_REG20_BLWC_ENA_WIDTH 1
-#define RXTX_REG114_PQ_REG_INDEX 9
-#define RXTX_REG114_PQ_REG_WIDTH 7
-
-#define RXTX_10000_BLWC 0
-#define RXTX_10000_PQ 0x1e
-
-#define RXTX_2500_BLWC 1
-#define RXTX_2500_PQ 0xa
-
-#define RXTX_1000_BLWC 1
-#define RXTX_1000_PQ 0xa
-
-/* Bit setting and getting macros
- * The get macro will extract the current bit field value from within
- * the variable
- *
- * The set macro will clear the current bit field value within the
- * variable and then set the bit field of the variable to the
- * specified value
- */
#define GET_BITS(_var, _index, _width) \
(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

@@ -207,70 +130,12 @@ do { \
(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
} while (0)

-#define XSIR_GET_BITS(_var, _prefix, _field) \
- GET_BITS((_var), \
- _prefix##_##_field##_INDEX, \
- _prefix##_##_field##_WIDTH)
-
-#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
- SET_BITS((_var), \
- _prefix##_##_field##_INDEX, \
- _prefix##_##_field##_WIDTH, (_val))
-
-/* Macros for reading or writing SerDes integration registers
- * The ioread macros will get bit fields or full values using the
- * register definitions formed using the input names
- *
- * The iowrite macros will set bit fields or full values using the
- * register definitions formed using the input names
- */
-#define XSIR0_IOREAD(_priv, _reg) \
- ioread16((_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XSIR0_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XSIR0_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XSIR0_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-#define XSIR1_IOREAD(_priv, _reg) \
- ioread16((_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XSIR1_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
+#define XCMU_IOREAD(_priv, _reg) \
+ ioread16((_priv)->cmu_regs + _reg)

-#define XSIR1_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->sir1_regs + _reg)
+#define XCMU_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->cmu_regs + _reg)

-#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XSIR1_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-/* Macros for reading or writing SerDes RxTx registers
- * The ioread macros will get bit fields or full values using the
- * register definitions formed using the input names
- *
- * The iowrite macros will set bit fields or full values using the
- * register definitions formed using the input names
- */
#define XRXTX_IOREAD(_priv, _reg) \
ioread16((_priv)->rxtx_regs + _reg)

@@ -291,6 +156,78 @@ do { \
XRXTX_IOWRITE((_priv), _reg, reg_val); \
} while (0)

+/* SerDes CMU register offsets */
+#define CMU_REG15 0x003c
+#define CMU_REG16 0x0040
+
+/* SerDes CMU register entry bit positions and sizes */
+#define CMU_REG16_TX_RATE_CHANGE_BASE 15
+#define CMU_REG16_RX_RATE_CHANGE_BASE 14
+#define CMU_REG16_RATE_CHANGE_DECR 2
+
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG2 0x0008
+#define RXTX_REG3 0x000c
+#define RXTX_REG5 0x0014
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG53 0x00d4
+#define RXTX_REG114 0x01c8
+#define RXTX_REG115 0x01cc
+#define RXTX_REG142 0x0238
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG2_RESETB_INDEX 15
+#define RXTX_REG2_RESETB_WIDTH 1
+#define RXTX_REG3_TX_DATA_RATE_INDEX 14
+#define RXTX_REG3_TX_DATA_RATE_WIDTH 2
+#define RXTX_REG3_TX_WORD_MODE_INDEX 11
+#define RXTX_REG3_TX_WORD_MODE_WIDTH 3
+#define RXTX_REG5_TXAMP_CNTL_INDEX 7
+#define RXTX_REG5_TXAMP_CNTL_WIDTH 4
+#define RXTX_REG6_RX_DATA_RATE_INDEX 9
+#define RXTX_REG6_RX_DATA_RATE_WIDTH 2
+#define RXTX_REG6_RX_WORD_MODE_INDEX 11
+#define RXTX_REG6_RX_WORD_MODE_WIDTH 3
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG53_RX_PLLSELECT_INDEX 15
+#define RXTX_REG53_RX_PLLSELECT_WIDTH 1
+#define RXTX_REG53_TX_PLLSELECT_INDEX 14
+#define RXTX_REG53_TX_PLLSELECT_WIDTH 1
+#define RXTX_REG53_PI_SPD_SEL_CDR_INDEX 10
+#define RXTX_REG53_PI_SPD_SEL_CDR_WIDTH 4
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG115_FORCE_LAT_CAL_START_INDEX 2
+#define RXTX_REG115_FORCE_LAT_CAL_START_WIDTH 1
+#define RXTX_REG115_FORCE_SUM_CAL_START_INDEX 1
+#define RXTX_REG115_FORCE_SUM_CAL_START_WIDTH 1
+#define RXTX_REG142_SUM_CALIB_DONE_INDEX 15
+#define RXTX_REG142_SUM_CALIB_DONE_WIDTH 1
+#define RXTX_REG142_SUM_CALIB_ERR_INDEX 14
+#define RXTX_REG142_SUM_CALIB_ERR_WIDTH 1
+#define RXTX_REG142_LAT_CALIB_DONE_INDEX 11
+#define RXTX_REG142_LAT_CALIB_DONE_WIDTH 1
+
+#define RXTX_FULL_RATE 0x0
+#define RXTX_HALF_RATE 0x1
+#define RXTX_FIFTH_RATE 0x3
+#define RXTX_66BIT_WORD 0x7
+#define RXTX_10BIT_WORD 0x1
+#define RXTX_10G_TX_AMP 0xa
+#define RXTX_1G_TX_AMP 0xf
+#define RXTX_10G_CDR 0x7
+#define RXTX_1G_CDR 0x2
+#define RXTX_10G_PLL 0x1
+#define RXTX_1G_PLL 0x0
+#define RXTX_10G_PQ 0x1e
+#define RXTX_1G_PQ 0xa
+
+
+DEFINE_SPINLOCK(cmu_lock);
+
enum amd_xgbe_phy_an {
AMD_XGBE_AN_READY = 0,
AMD_XGBE_AN_START,
@@ -316,29 +253,31 @@ enum amd_xgbe_phy_mode {
};

enum amd_xgbe_phy_speedset {
- AMD_XGBE_PHY_SPEEDSET_1000_10000,
+ AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
AMD_XGBE_PHY_SPEEDSET_2500_10000,
};

struct amd_xgbe_phy_priv {
struct platform_device *pdev;
+ struct acpi_device *adev;
struct device *dev;

struct phy_device *phydev;

/* SerDes related mmio resources */
struct resource *rxtx_res;
- struct resource *sir0_res;
- struct resource *sir1_res;
+ struct resource *cmu_res;

/* SerDes related mmio registers */
void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
- void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
- void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+ void __iomem *cmu_regs; /* SerDes CMU CSRs */
+
+ unsigned int serdes_channel;
+ unsigned int speed_set;

/* Maintain link status for re-starting auto-negotiation */
unsigned int link;
- unsigned int speed_set;
+ enum amd_xgbe_phy_mode mode;

/* Auto-negotiation state machine support */
struct mutex an_mutex;
@@ -348,7 +287,6 @@ struct amd_xgbe_phy_priv {
enum amd_xgbe_phy_rx kx_state;
struct work_struct an_work;
struct workqueue_struct *an_workqueue;
- unsigned int parallel_detect;
};

static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
@@ -401,33 +339,51 @@ static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u16 val, mask;
+
+ /* Assert Rx and Tx ratechange in CMU_reg16 */
+ val = XCMU_IOREAD(priv, CMU_REG16);

- /* Assert Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
+ mask = (1 << (CMU_REG16_TX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR))) |
+ (1 << (CMU_REG16_RX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR)));
+ val |= mask;
+
+ XCMU_IOWRITE(priv, CMU_REG16, val);
}

static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u16 val, mask;
unsigned int wait;
- u16 status;

- /* Release Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
+ /* Release Rx and Tx ratechange for proper channel in CMU_reg16 */
+ val = XCMU_IOREAD(priv, CMU_REG16);
+
+ mask = (1 << (CMU_REG16_TX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR))) |
+ (1 << (CMU_REG16_RX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR)));
+ val &= ~mask;

- /* Wait for Rx and Tx ready */
+ XCMU_IOWRITE(priv, CMU_REG16, val);
+
+ /* Wait for Rx and Tx ready in CMU_reg15 */
+ mask = (1 << priv->serdes_channel) |
+ (1 << (priv->serdes_channel + 8));
wait = XGBE_PHY_RATECHANGE_COUNT;
while (wait--) {
- usleep_range(50, 75);
+ udelay(50);

- status = XSIR0_IOREAD(priv, SIR0_STATUS);
- if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
- XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ val = XCMU_IOREAD(priv, CMU_REG15);
+ if ((val & mask) == mask)
return;
}

netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
- status);
+ val);
}

static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -435,8 +391,8 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
struct amd_xgbe_phy_priv *priv = phydev->priv;
int ret;

- /* Enable KR training */
- ret = amd_xgbe_an_enable_kr_training(phydev);
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
if (ret < 0)
return ret;

@@ -462,19 +418,32 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
return ret;

/* Set SerDes to 10G speed */
+ spin_lock(&cmu_lock);
+
amd_xgbe_phy_serdes_start_ratechange(phydev);

- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_DATA_RATE, RXTX_FULL_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_WORD_MODE, RXTX_66BIT_WORD);

- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL, RXTX_10G_TX_AMP);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_DATA_RATE, RXTX_FULL_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_WORD_MODE, RXTX_66BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, 0);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, RX_PLLSELECT, RXTX_10G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, TX_PLLSELECT, RXTX_10G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, PI_SPD_SEL_CDR, RXTX_10G_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10G_PQ);

amd_xgbe_phy_serdes_complete_ratechange(phydev);

+ spin_unlock(&cmu_lock);
+
+ priv->mode = AMD_XGBE_MODE_KR;
+
return 0;
}

@@ -510,19 +479,32 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
return ret;

/* Set SerDes to 2.5G speed */
+ spin_lock(&cmu_lock);
+
amd_xgbe_phy_serdes_start_ratechange(phydev);

- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_DATA_RATE, RXTX_HALF_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_WORD_MODE, RXTX_10BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL, RXTX_1G_TX_AMP);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_DATA_RATE, RXTX_HALF_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_WORD_MODE, RXTX_10BIT_WORD);

- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, 1);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, RX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, TX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, PI_SPD_SEL_CDR, RXTX_1G_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1G_PQ);

amd_xgbe_phy_serdes_complete_ratechange(phydev);

+ spin_unlock(&cmu_lock);
+
+ priv->mode = AMD_XGBE_MODE_KX;
+
return 0;
}

@@ -558,47 +540,33 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
return ret;

/* Set SerDes to 1G speed */
+ spin_lock(&cmu_lock);
+
amd_xgbe_phy_serdes_start_ratechange(phydev);

- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_DATA_RATE, RXTX_FIFTH_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_WORD_MODE, RXTX_10BIT_WORD);

- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL, RXTX_1G_TX_AMP);

- amd_xgbe_phy_serdes_complete_ratechange(phydev);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_DATA_RATE, RXTX_FIFTH_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_WORD_MODE, RXTX_10BIT_WORD);

- return 0;
-}
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, 1);

-static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
- enum amd_xgbe_phy_mode *mode)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, RX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, TX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, PI_SPD_SEL_CDR, RXTX_1G_CDR);

- if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
- *mode = AMD_XGBE_MODE_KR;
- else
- *mode = AMD_XGBE_MODE_KX;
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1G_PQ);

- return 0;
-}
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);

-static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
-{
- enum amd_xgbe_phy_mode mode;
+ spin_unlock(&cmu_lock);

- if (amd_xgbe_phy_cur_mode(phydev, &mode))
- return false;
+ priv->mode = AMD_XGBE_MODE_KX;

- return (mode == AMD_XGBE_MODE_KR);
+ return 0;
}

static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
@@ -607,7 +575,7 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
int ret;

/* If we are in KR switch to KX, and vice-versa */
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
+ if (priv->mode == AMD_XGBE_MODE_KR) {
if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
ret = amd_xgbe_phy_gmii_mode(phydev);
else
@@ -619,20 +587,15 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
return ret;
}

-static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
- enum amd_xgbe_phy_mode mode)
+static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
{
- enum amd_xgbe_phy_mode cur_mode;
int ret;

- ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
- if (ret)
- return ret;
-
- if (mode != cur_mode)
- ret = amd_xgbe_phy_switch_mode(phydev);
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;

- return ret;
+ return AMD_XGBE_AN_START;
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
@@ -643,8 +606,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,

*state = AMD_XGBE_RX_COMPLETE;

- /* If we're not in KR mode then we're done */
- if (!amd_xgbe_phy_in_kr_mode(phydev))
+ /* If we're in KX mode then we're done */
+ if (priv->mode == AMD_XGBE_MODE_KX)
return AMD_XGBE_AN_EVENT;

/* Enable/Disable FEC */
@@ -672,13 +635,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
if (ret < 0)
return AMD_XGBE_AN_ERROR;

- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
-
ret |= 0x01;
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
-
return AMD_XGBE_AN_EVENT;
}

@@ -702,6 +661,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
enum amd_xgbe_phy_rx *state)
{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
unsigned int link_support;
int ret, ad_reg, lp_reg;

@@ -711,9 +671,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
return AMD_XGBE_AN_ERROR;

/* Check for a supported mode, otherwise restart in a different one */
- link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
+ link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
if (!(ret & link_support))
- return AMD_XGBE_AN_INCOMPAT_LINK;
+ return amd_xgbe_an_switch_mode(phydev);

/* Check Extended Next Page support */
ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
@@ -754,7 +714,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
int ret;

/* Be sure we aren't looping trying to negotiate */
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
+ if (priv->mode == AMD_XGBE_MODE_KR) {
if (priv->kr_state != AMD_XGBE_RX_READY)
return AMD_XGBE_AN_NO_LINK;
priv->kr_state = AMD_XGBE_RX_BPA;
@@ -817,13 +777,6 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
/* Enable and start auto-negotiation */
phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- ret |= MDIO_KR_CTRL_PDETECT;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
-
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
if (ret < 0)
return AMD_XGBE_AN_ERROR;
@@ -864,8 +817,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
enum amd_xgbe_phy_rx *state;
int ret;

- state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
- : &priv->kx_state;
+ state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
+ : &priv->kx_state;

switch (*state) {
case AMD_XGBE_RX_BPA:
@@ -885,13 +838,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)

static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
- int ret;
-
- ret = amd_xgbe_phy_switch_mode(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- return AMD_XGBE_AN_START;
+ return amd_xgbe_an_switch_mode(phydev);
}

static void amd_xgbe_an_state_machine(struct work_struct *work)
@@ -904,10 +851,6 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
int sleep;
unsigned int an_supported = 0;

- /* Start in KX mode */
- if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX))
- priv->an_state = AMD_XGBE_AN_ERROR;
-
while (1) {
mutex_lock(&priv->an_mutex);

@@ -915,9 +858,8 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)

switch (priv->an_state) {
case AMD_XGBE_AN_START:
- an_supported = 0;
- priv->parallel_detect = 0;
priv->an_state = amd_xgbe_an_start(phydev);
+ an_supported = 0;
break;

case AMD_XGBE_AN_EVENT:
@@ -934,7 +876,6 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
break;

case AMD_XGBE_AN_COMPLETE:
- priv->parallel_detect = an_supported ? 0 : 1;
netdev_info(phydev->attached_dev, "%s successful\n",
an_supported ? "Auto negotiation"
: "Parallel detection");
@@ -1070,6 +1011,7 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret;

if (phydev->autoneg != AUTONEG_ENABLE)
return amd_xgbe_phy_setup_forced(phydev);
@@ -1078,6 +1020,11 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
if (!(mmd_mask & MDIO_DEVS_AN))
return -EINVAL;

+ /* Get the current speed mode */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
/* Start/Restart the auto-negotiation state machine */
mutex_lock(&priv->an_mutex);
priv->an_result = AMD_XGBE_AN_READY;
@@ -1167,14 +1114,18 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv = phydev->priv;
u32 mmd_mask = phydev->c45_ids.devices_in_package;
- int ret, ad_ret, lp_ret;
+ int ret, mode, ad_ret, lp_ret;

ret = amd_xgbe_phy_update_link(phydev);
if (ret)
return ret;

- if ((phydev->autoneg == AUTONEG_ENABLE) &&
- !priv->parallel_detect) {
+ mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (mode < 0)
+ return mode;
+ mode &= MDIO_PCS_CTRL2_TYPE;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
if (!(mmd_mask & MDIO_DEVS_AN))
return -EINVAL;

@@ -1205,39 +1156,40 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
ad_ret &= lp_ret;
if (ad_ret & 0x80) {
phydev->speed = SPEED_10000;
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- if (ret)
- return ret;
+ if (mode != MDIO_PCS_CTRL2_10GBR) {
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ if (ret < 0)
+ return ret;
+ }
} else {
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->speed = SPEED_1000;
- break;
+ int (*mode_fcn)(struct phy_device *);

- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ if (priv->speed_set ==
+ AMD_XGBE_PHY_SPEEDSET_1000_10000) {
+ phydev->speed = SPEED_1000;
+ mode_fcn = amd_xgbe_phy_gmii_mode;
+ } else {
phydev->speed = SPEED_2500;
- break;
+ mode_fcn = amd_xgbe_phy_gmii_2500_mode;
}

- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- if (ret)
- return ret;
+ if (mode == MDIO_PCS_CTRL2_10GBR) {
+ ret = mode_fcn(phydev);
+ if (ret < 0)
+ return ret;
+ }
}

phydev->duplex = DUPLEX_FULL;
} else {
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
+ if (mode == MDIO_PCS_CTRL2_10GBR) {
phydev->speed = SPEED_10000;
} else {
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ if (priv->speed_set ==
+ AMD_XGBE_PHY_SPEEDSET_1000_10000)
phydev->speed = SPEED_1000;
- break;
-
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ else
phydev->speed = SPEED_2500;
- break;
- }
}
phydev->duplex = DUPLEX_FULL;
phydev->pause = 0;
@@ -1289,29 +1241,188 @@ unlock:
return ret;
}

+static int amd_xgbe_phy_map_resources(struct amd_xgbe_phy_priv *priv,
+ struct platform_device *phy_pdev,
+ unsigned int phy_resnum)
+{
+ struct device *dev = priv->dev;
+ int ret;
+
+ /* Get the device mmio areas */
+ priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
+ priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
+ if (IS_ERR(priv->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ return PTR_ERR(priv->rxtx_regs);
+ }
+
+ /* All xgbe phy devices share the CMU registers so retrieve
+ * the resource and do the ioremap directly rather than
+ * the devm_ioremap_resource call
+ */
+ priv->cmu_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
+ if (!priv->cmu_res) {
+ dev_err(dev, "cmu invalid resource\n");
+ ret = -EINVAL;
+ goto err_rxtx;
+ }
+ priv->cmu_regs = devm_ioremap_nocache(dev, priv->cmu_res->start,
+ resource_size(priv->cmu_res));
+ if (!priv->cmu_regs) {
+ dev_err(dev, "cmu ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_rxtx;
+ }
+
+ return 0;
+
+err_rxtx:
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+ return ret;
+}
+
+static void amd_xgbe_phy_unmap_resources(struct amd_xgbe_phy_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ devm_iounmap(dev, priv->cmu_regs);
+
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+}
+
+#ifdef CONFIG_ACPI
+static int amd_xgbe_phy_acpi_support(struct amd_xgbe_phy_priv *priv)
+{
+ struct platform_device *phy_pdev = priv->pdev;
+ struct acpi_device *adev = priv->adev;
+ struct device *dev = priv->dev;
+ const union acpi_object *property;
+ int ret;
+
+ /* Map the memory resources */
+ ret = amd_xgbe_phy_map_resources(priv, phy_pdev, 2);
+ if (ret)
+ return ret;
+
+ /* Get the device serdes channel property */
+ ret = acpi_dev_get_property(adev, XGBE_PHY_CHANNEL_PROPERTY,
+ ACPI_TYPE_INTEGER, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s acpi property\n",
+ XGBE_PHY_CHANNEL_PROPERTY);
+ goto err_resources;
+ }
+ priv->serdes_channel = property->integer.value;
+
+ /* Get the device speed set property */
+ ret = acpi_dev_get_property(adev, XGBE_PHY_SPEEDSET_PROPERTY,
+ ACPI_TYPE_INTEGER, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s acpi property\n",
+ XGBE_PHY_SPEEDSET_PROPERTY);
+ goto err_resources;
+ }
+ priv->speed_set = property->integer.value;
+
+ return 0;
+
+err_resources:
+ amd_xgbe_phy_unmap_resources(priv);
+
+ return ret;
+}
+#else /* CONFIG_ACPI */
+static int amd_xgbe_phy_acpi_support(struct amd_xgbe_phy_priv *priv)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int amd_xgbe_phy_of_support(struct amd_xgbe_phy_priv *priv)
+{
+ struct platform_device *phy_pdev;
+ struct device_node *bus_node;
+ struct device_node *phy_node;
+ struct device *dev = priv->dev;
+ const __be32 *property;
+ int ret;
+
+ bus_node = priv->dev->of_node;
+ phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev, "unable to parse phy-handle\n");
+ return -EINVAL;
+ }
+
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ /* Map the memory resources */
+ ret = amd_xgbe_phy_map_resources(priv, phy_pdev, 0);
+ if (ret)
+ goto err_put;
+
+ /* Get the device serdes channel property */
+ property = of_get_property(phy_node, XGBE_PHY_CHANNEL_PROPERTY, NULL);
+ if (!property) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_PHY_CHANNEL_PROPERTY);
+ ret = -EINVAL;
+ goto err_resources;
+ }
+ priv->serdes_channel = be32_to_cpu(*property);
+
+ /* Get the device speed set property */
+ property = of_get_property(phy_node, XGBE_PHY_SPEEDSET_PROPERTY, NULL);
+ if (property)
+ priv->speed_set = be32_to_cpu(*property);
+
+ of_node_put(phy_node);
+
+ return 0;
+
+err_resources:
+ amd_xgbe_phy_unmap_resources(priv);
+
+err_put:
+ of_node_put(phy_node);
+
+ return ret;
+}
+#else /* CONFIG_OF */
+static int amd_xgbe_phy_of_support(struct amd_xgbe_phy_priv *priv)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
struct amd_xgbe_phy_priv *priv;
- struct platform_device *pdev;
|
|
struct device *dev;
|
|
char *wq_name;
|
|
- const __be32 *property;
|
|
- unsigned int speed_set;
|
|
int ret;
|
|
|
|
- if (!phydev->dev.of_node)
|
|
+ if (!phydev->bus || !phydev->bus->parent)
|
|
return -EINVAL;
|
|
|
|
- pdev = of_find_device_by_node(phydev->dev.of_node);
|
|
- if (!pdev)
|
|
- return -EINVAL;
|
|
- dev = &pdev->dev;
|
|
+ dev = phydev->bus->parent;
|
|
|
|
wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
|
|
- if (!wq_name) {
|
|
- ret = -ENOMEM;
|
|
- goto err_pdev;
|
|
- }
|
|
+ if (!wq_name)
|
|
+ return -ENOMEM;
|
|
|
|
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
|
if (!priv) {
|
|
@@ -1319,86 +1430,54 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
|
|
goto err_name;
|
|
}
|
|
|
|
- priv->pdev = pdev;
|
|
+ priv->pdev = to_platform_device(dev);
|
|
+ priv->adev = ACPI_COMPANION(dev);
|
|
priv->dev = dev;
|
|
priv->phydev = phydev;
|
|
|
|
- /* Get the device mmio areas */
|
|
- priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
- priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
|
|
- if (IS_ERR(priv->rxtx_regs)) {
|
|
- dev_err(dev, "rxtx ioremap failed\n");
|
|
- ret = PTR_ERR(priv->rxtx_regs);
|
|
+ if (priv->adev && !acpi_disabled)
|
|
+ ret = amd_xgbe_phy_acpi_support(priv);
|
|
+ else
|
|
+ ret = amd_xgbe_phy_of_support(priv);
|
|
+ if (ret)
|
|
goto err_priv;
|
|
- }
|
|
-
|
|
- priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
|
- priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
|
|
- if (IS_ERR(priv->sir0_regs)) {
|
|
- dev_err(dev, "sir0 ioremap failed\n");
|
|
- ret = PTR_ERR(priv->sir0_regs);
|
|
- goto err_rxtx;
|
|
- }
|
|
-
|
|
- priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
|
|
- priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
|
|
- if (IS_ERR(priv->sir1_regs)) {
|
|
- dev_err(dev, "sir1 ioremap failed\n");
|
|
- ret = PTR_ERR(priv->sir1_regs);
|
|
- goto err_sir0;
|
|
- }
|
|
|
|
- /* Get the device speed set property */
|
|
- speed_set = 0;
|
|
- property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
|
|
- NULL);
|
|
- if (property)
|
|
- speed_set = be32_to_cpu(*property);
|
|
-
|
|
- switch (speed_set) {
|
|
- case 0:
|
|
- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
|
|
- break;
|
|
- case 1:
|
|
- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
|
|
+ switch (priv->speed_set) {
|
|
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
|
|
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
|
|
break;
|
|
default:
|
|
dev_err(dev, "invalid amd,speed-set property\n");
|
|
ret = -EINVAL;
|
|
- goto err_sir1;
|
|
+ goto err_resources;
|
|
}
|
|
|
|
priv->link = 1;
|
|
|
|
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
|
|
+ if (ret < 0)
|
|
+ goto err_resources;
|
|
+ if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
|
|
+ priv->mode = AMD_XGBE_MODE_KR;
|
|
+ else
|
|
+ priv->mode = AMD_XGBE_MODE_KX;
|
|
+
|
|
mutex_init(&priv->an_mutex);
|
|
INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
|
|
priv->an_workqueue = create_singlethread_workqueue(wq_name);
|
|
if (!priv->an_workqueue) {
|
|
ret = -ENOMEM;
|
|
- goto err_sir1;
|
|
+ goto err_resources;
|
|
}
|
|
|
|
phydev->priv = priv;
|
|
|
|
kfree(wq_name);
|
|
- of_dev_put(pdev);
|
|
|
|
return 0;
|
|
|
|
-err_sir1:
|
|
- devm_iounmap(dev, priv->sir1_regs);
|
|
- devm_release_mem_region(dev, priv->sir1_res->start,
|
|
- resource_size(priv->sir1_res));
|
|
-
|
|
-err_sir0:
|
|
- devm_iounmap(dev, priv->sir0_regs);
|
|
- devm_release_mem_region(dev, priv->sir0_res->start,
|
|
- resource_size(priv->sir0_res));
|
|
-
|
|
-err_rxtx:
|
|
- devm_iounmap(dev, priv->rxtx_regs);
|
|
- devm_release_mem_region(dev, priv->rxtx_res->start,
|
|
- resource_size(priv->rxtx_res));
|
|
+err_resources:
|
|
+ amd_xgbe_phy_unmap_resources(priv);
|
|
|
|
err_priv:
|
|
devm_kfree(dev, priv);
|
|
@@ -1406,9 +1485,6 @@ err_priv:
|
|
err_name:
|
|
kfree(wq_name);
|
|
|
|
-err_pdev:
|
|
- of_dev_put(pdev);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
@@ -1425,18 +1501,7 @@ static void amd_xgbe_phy_remove(struct phy_device *phydev)
|
|
flush_workqueue(priv->an_workqueue);
|
|
destroy_workqueue(priv->an_workqueue);
|
|
|
|
- /* Release resources */
|
|
- devm_iounmap(dev, priv->sir1_regs);
|
|
- devm_release_mem_region(dev, priv->sir1_res->start,
|
|
- resource_size(priv->sir1_res));
|
|
-
|
|
- devm_iounmap(dev, priv->sir0_regs);
|
|
- devm_release_mem_region(dev, priv->sir0_res->start,
|
|
- resource_size(priv->sir0_res));
|
|
-
|
|
- devm_iounmap(dev, priv->rxtx_regs);
|
|
- devm_release_mem_region(dev, priv->rxtx_res->start,
|
|
- resource_size(priv->rxtx_res));
|
|
+ amd_xgbe_phy_unmap_resources(priv);
|
|
|
|
devm_kfree(dev, priv);
|
|
}
|
|
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
|
|
index b1d0596..06b8f97 100644
|
|
--- a/drivers/pci/host/pci-xgene.c
|
|
+++ b/drivers/pci/host/pci-xgene.c
|
|
@@ -29,6 +29,7 @@
|
|
#include <linux/pci.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/acpi.h>
|
|
|
|
#define PCIECORE_CTLANDSTATUS 0x50
|
|
#define PIM1_1L 0x80
|
|
@@ -235,6 +236,13 @@ static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
|
|
break;
|
|
case 2:
|
|
xgene_pcie_cfg_in16(addr, offset, val);
|
|
+ /* FIXME.
|
|
+ * Something wrong with Configuration Request Retry Status
|
|
+ * on this hw. Pretend it isn't supported until the problem
|
|
+ * gets sorted out properly.
|
|
+ */
|
|
+ if (pci_is_root_bus(bus) && offset == (0x40 + PCI_EXP_RTCAP))
|
|
+ *val &= ~PCI_EXP_RTCAP_CRSVIS;
|
|
break;
|
|
default:
|
|
xgene_pcie_cfg_in32(addr, offset, val);
|
|
@@ -600,6 +608,165 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
|
|
return 0;
|
|
}
|
|
|
|
+#ifdef CONFIG_ACPI
|
|
+struct xgene_mcfg_info {
|
|
+ void __iomem *csr_base;
|
|
+};
|
|
+
|
|
+/*
|
|
+ * When the address bit [17:16] is 2'b01, the Configuration access will be
|
|
+ * treated as Type 1 and it will be forwarded to external PCIe device.
|
|
+ */
|
|
+static void __iomem *__get_cfg_base(struct pci_mmcfg_region *cfg,
|
|
+ unsigned int bus)
|
|
+{
|
|
+ if (bus > cfg->start_bus)
|
|
+ return cfg->virt + AXI_EP_CFG_ACCESS;
|
|
+
|
|
+ return cfg->virt;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * For Configuration request, RTDID register is used as Bus Number,
|
|
+ * Device Number and Function number of the header fields.
|
|
+ */
|
|
+static void __set_rtdid_reg(struct pci_mmcfg_region *cfg,
|
|
+ unsigned int bus, unsigned int devfn)
|
|
+{
|
|
+ struct xgene_mcfg_info *info = cfg->data;
|
|
+ unsigned int b, d, f;
|
|
+ u32 rtdid_val = 0;
|
|
+
|
|
+ b = bus;
|
|
+ d = PCI_SLOT(devfn);
|
|
+ f = PCI_FUNC(devfn);
|
|
+
|
|
+ if (bus != cfg->start_bus)
|
|
+ rtdid_val = (b << 8) | (d << 3) | f;
|
|
+
|
|
+ writel(rtdid_val, info->csr_base + RTDID);
|
|
+ /* read the register back to ensure flush */
|
|
+ readl(info->csr_base + RTDID);
|
|
+}
|
|
+
|
|
+static int xgene_raw_pci_read(struct pci_mmcfg_region *cfg, unsigned int bus,
|
|
+ unsigned int devfn, int offset, int len, u32 *val)
|
|
+{
|
|
+ void __iomem *addr;
|
|
+
|
|
+ if (bus == cfg->start_bus) {
|
|
+ if (devfn != 0) {
|
|
+ *val = 0xffffffff;
|
|
+ return PCIBIOS_DEVICE_NOT_FOUND;
|
|
+ }
|
|
+
|
|
+ /* see xgene_pcie_hide_rc_bars() above */
|
|
+ if (offset == PCI_BASE_ADDRESS_0 ||
|
|
+ offset == PCI_BASE_ADDRESS_1) {
|
|
+ *val = 0;
|
|
+ return PCIBIOS_SUCCESSFUL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ __set_rtdid_reg(cfg, bus, devfn);
|
|
+ addr = __get_cfg_base(cfg, bus);
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ xgene_pcie_cfg_in8(addr, offset, val);
|
|
+ break;
|
|
+ case 2:
|
|
+ xgene_pcie_cfg_in16(addr, offset, val);
|
|
+ /* FIXME.
|
|
+ * Something wrong with Configuration Request Retry Status
|
|
+ * on this hw. Pretend it isn't supported until the problem
|
|
+ * gets sorted out properly.
|
|
+ */
|
|
+ if (bus == cfg->start_bus && offset == (0x40 + PCI_EXP_RTCAP))
|
|
+ *val &= ~PCI_EXP_RTCAP_CRSVIS;
|
|
+ break;
|
|
+ default:
|
|
+ xgene_pcie_cfg_in32(addr, offset, val);
|
|
+ break;
|
|
+ }
|
|
+ return PCIBIOS_SUCCESSFUL;
|
|
+}
|
|
+
|
|
+static int xgene_raw_pci_write(struct pci_mmcfg_region *cfg, unsigned int bus,
|
|
+ unsigned int devfn, int offset, int len, u32 val)
|
|
+{
|
|
+ void __iomem *addr;
|
|
+
|
|
+ if (bus == cfg->start_bus && devfn != 0)
|
|
+ return PCIBIOS_DEVICE_NOT_FOUND;
|
|
+
|
|
+ __set_rtdid_reg(cfg, bus, devfn);
|
|
+ addr = __get_cfg_base(cfg, bus);
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ xgene_pcie_cfg_out8(addr, offset, (u8)val);
|
|
+ break;
|
|
+ case 2:
|
|
+ xgene_pcie_cfg_out16(addr, offset, (u16)val);
|
|
+ break;
|
|
+ default:
|
|
+ xgene_pcie_cfg_out32(addr, offset, val);
|
|
+ break;
|
|
+ }
|
|
+ return PCIBIOS_SUCCESSFUL;
|
|
+}
|
|
+
|
|
+static acpi_status find_csr_base(struct acpi_resource *acpi_res, void *data)
|
|
+{
|
|
+ struct pci_mmcfg_region *cfg = data;
|
|
+ struct xgene_mcfg_info *info = cfg->data;
|
|
+ struct acpi_resource_fixed_memory32 *fixed32;
|
|
+
|
|
+ if (acpi_res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
|
|
+ fixed32 = &acpi_res->data.fixed_memory32;
|
|
+ info->csr_base = ioremap(fixed32->address,
|
|
+ fixed32->address_length);
|
|
+ return AE_CTRL_TERMINATE;
|
|
+ }
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static int xgene_mcfg_fixup(struct acpi_pci_root *root,
|
|
+ struct pci_mmcfg_region *cfg)
|
|
+{
|
|
+ struct acpi_device *device = root->device;
|
|
+ struct xgene_mcfg_info *info;
|
|
+
|
|
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
|
|
+ if (info == NULL)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ cfg->data = info;
|
|
+
|
|
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS,
|
|
+ find_csr_base, cfg);
|
|
+
|
|
+ if (!info->csr_base) {
|
|
+ kfree(info);
|
|
+ cfg->data = NULL;
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ cfg->read = xgene_raw_pci_read;
|
|
+ cfg->write = xgene_raw_pci_write;
|
|
+
|
|
+ /* actual last bus reachable through this mmconfig */
|
|
+ cfg->end_bus = root->secondary.end;
|
|
+
|
|
+ /* firmware should have done this */
|
|
+ xgene_raw_pci_write(cfg, cfg->start_bus, 0, PCI_PRIMARY_BUS, 4,
|
|
+ cfg->start_bus | ((cfg->start_bus + 1) << 8) |
|
|
+ (cfg->end_bus << 16));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+DECLARE_ACPI_MCFG_FIXUP("APM ", "XGENE ", xgene_mcfg_fixup);
|
|
+#endif /* CONFIG_ACPI */
|
|
+
|
|
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
|
|
{
|
|
struct device_node *dn = pdev->dev.of_node;
|
|
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 782e822..d952462 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -313,6 +313,7 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
progif = class & 0xff;
class >>= 8;

+#ifdef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
if (class == PCI_CLASS_STORAGE_IDE) {
/*
* Unless both channels are native-PCI mode only,
@@ -326,6 +327,7 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
return 1;
}
}
+#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */

return 0;
}
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b24aa01..50fe279 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -419,4 +419,10 @@ config DA_CONSOLE
help
This enables a console on a Dash channel.

+config SBSAUART_TTY
+ tristate "SBSA UART TTY Driver"
+ help
+ Console and system TTY driver for the SBSA UART which is defined
+ in the Server Base System Architecture document for ARM64 servers.
+
endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 58ad1c0..c3211c0 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
obj-$(CONFIG_DA_TTY) += metag_da.o
+obj-$(CONFIG_SBSAUART_TTY) += sbsauart.o

obj-y += ipwireless/
diff --git a/drivers/tty/sbsauart.c b/drivers/tty/sbsauart.c
|
|
new file mode 100644
|
|
index 0000000..0f44624
|
|
--- /dev/null
|
|
+++ b/drivers/tty/sbsauart.c
|
|
@@ -0,0 +1,358 @@
|
|
+/*
|
|
+ * SBSA (Server Base System Architecture) Compatible UART driver
|
|
+ *
|
|
+ * Copyright (C) 2014 Linaro Ltd
|
|
+ *
|
|
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
|
|
+ *
|
|
+ * This software is licensed under the terms of the GNU General Public
|
|
+ * License version 2, as published by the Free Software Foundation, and
|
|
+ * may be copied, distributed, and modified under those terms.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/amba/serial.h>
|
|
+#include <linux/console.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/io.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/serial_core.h>
|
|
+#include <linux/tty.h>
|
|
+#include <linux/tty_flip.h>
|
|
+
|
|
+struct sbsa_tty {
|
|
+ struct tty_port port;
|
|
+ spinlock_t lock;
|
|
+ void __iomem *base;
|
|
+ u32 irq;
|
|
+ int opencount;
|
|
+ struct console console;
|
|
+};
|
|
+
|
|
+static struct tty_driver *sbsa_tty_driver;
|
|
+static struct sbsa_tty *sbsa_tty;
|
|
+
|
|
+#define SBSAUART_CHAR_MASK 0xFF
|
|
+
|
|
+static void sbsa_raw_putc(struct uart_port *port, int c)
|
|
+{
|
|
+ while (readw(port->membase + UART01x_FR) & UART01x_FR_TXFF)
|
|
+ ;
|
|
+ writew(c & 0xFF, port->membase + UART01x_DR);
|
|
+}
|
|
+
|
|
+static void sbsa_uart_early_write(struct console *con, const char *buf,
|
|
+ unsigned count)
|
|
+{
|
|
+ struct earlycon_device *dev = con->data;
|
|
+
|
|
+ uart_console_write(&dev->port, buf, count, sbsa_raw_putc);
|
|
+}
|
|
+
|
|
+static int __init sbsa_uart_early_console_setup(struct earlycon_device *device,
|
|
+ const char *opt)
|
|
+{
|
|
+ if (!device->port.membase)
|
|
+ return -ENODEV;
|
|
+
|
|
+ device->con->write = sbsa_uart_early_write;
|
|
+ return 0;
|
|
+}
|
|
+EARLYCON_DECLARE(sbsauart, sbsa_uart_early_console_setup);
|
|
+
|
|
+static void sbsa_tty_do_write(const char *buf, unsigned count)
|
|
+{
|
|
+ unsigned long irq_flags;
|
|
+ struct sbsa_tty *qtty = sbsa_tty;
|
|
+ void __iomem *base = qtty->base;
|
|
+ unsigned n;
|
|
+
|
|
+ spin_lock_irqsave(&qtty->lock, irq_flags);
|
|
+ for (n = 0; n < count; n++) {
|
|
+ while (readw(base + UART01x_FR) & UART01x_FR_TXFF)
|
|
+ ;
|
|
+ writew(buf[n], base + UART01x_DR);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
|
|
+}
|
|
+
|
|
+static void sbsauart_fifo_to_tty(struct sbsa_tty *qtty)
|
|
+{
|
|
+ void __iomem *base = qtty->base;
|
|
+ unsigned int flag, max_count = 32;
|
|
+ u16 status, ch;
|
|
+
|
|
+ while (max_count--) {
|
|
+ status = readw(base + UART01x_FR);
|
|
+ if (status & UART01x_FR_RXFE)
|
|
+ break;
|
|
+
|
|
+ /* Take chars from the FIFO and update status */
|
|
+ ch = readw(base + UART01x_DR);
|
|
+ flag = TTY_NORMAL;
|
|
+
|
|
+ if (ch & UART011_DR_BE)
|
|
+ flag = TTY_BREAK;
|
|
+ else if (ch & UART011_DR_PE)
|
|
+ flag = TTY_PARITY;
|
|
+ else if (ch & UART011_DR_FE)
|
|
+ flag = TTY_FRAME;
|
|
+ else if (ch & UART011_DR_OE)
|
|
+ flag = TTY_OVERRUN;
|
|
+
|
|
+ ch &= SBSAUART_CHAR_MASK;
|
|
+
|
|
+ tty_insert_flip_char(&qtty->port, ch, flag);
|
|
+ }
|
|
+
|
|
+ tty_schedule_flip(&qtty->port);
|
|
+
|
|
+ /* Clear the RX and receive-timeout IRQs */
+ writew(UART011_RXIC | UART011_RTIC, base + UART011_ICR);
|
|
+}
|
|
+
|
|
+static irqreturn_t sbsa_tty_interrupt(int irq, void *dev_id)
|
|
+{
|
|
+ struct sbsa_tty *qtty = sbsa_tty;
|
|
+ unsigned long irq_flags;
|
|
+
|
|
+ spin_lock_irqsave(&qtty->lock, irq_flags);
|
|
+ sbsauart_fifo_to_tty(qtty);
|
|
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static int sbsa_tty_open(struct tty_struct *tty, struct file *filp)
|
|
+{
|
|
+ struct sbsa_tty *qtty = sbsa_tty;
|
|
+
|
|
+ return tty_port_open(&qtty->port, tty, filp);
|
|
+}
|
|
+
|
|
+static void sbsa_tty_close(struct tty_struct *tty, struct file *filp)
|
|
+{
|
|
+ tty_port_close(tty->port, tty, filp);
|
|
+}
|
|
+
|
|
+static void sbsa_tty_hangup(struct tty_struct *tty)
|
|
+{
|
|
+ tty_port_hangup(tty->port);
|
|
+}
|
|
+
|
|
+static int sbsa_tty_write(struct tty_struct *tty, const unsigned char *buf,
|
|
+ int count)
|
|
+{
|
|
+ sbsa_tty_do_write(buf, count);
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static int sbsa_tty_write_room(struct tty_struct *tty)
|
|
+{
|
|
+ return 32;
|
|
+}
|
|
+
|
|
+static void sbsa_tty_console_write(struct console *co, const char *b,
|
|
+ unsigned count)
|
|
+{
|
|
+ sbsa_tty_do_write(b, count);
|
|
+
|
|
+ if (b[count - 1] == '\n')
|
|
+ sbsa_tty_do_write("\r", 1);
|
|
+}
|
|
+
|
|
+static struct tty_driver *sbsa_tty_console_device(struct console *c,
|
|
+ int *index)
|
|
+{
|
|
+ *index = c->index;
|
|
+ return sbsa_tty_driver;
|
|
+}
|
|
+
|
|
+static int sbsa_tty_console_setup(struct console *co, char *options)
|
|
+{
|
|
+ if ((unsigned)co->index > 0)
|
|
+ return -ENODEV;
|
|
+ if (sbsa_tty->base == NULL)
|
|
+ return -ENODEV;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct tty_port_operations sbsa_port_ops = {
|
|
+};
|
|
+
|
|
+static const struct tty_operations sbsa_tty_ops = {
|
|
+ .open = sbsa_tty_open,
|
|
+ .close = sbsa_tty_close,
|
|
+ .hangup = sbsa_tty_hangup,
|
|
+ .write = sbsa_tty_write,
|
|
+ .write_room = sbsa_tty_write_room,
|
|
+};
|
|
+
|
|
+static int sbsa_tty_create_driver(void)
|
|
+{
|
|
+ int ret;
|
|
+ struct tty_driver *tty;
|
|
+
|
|
+ sbsa_tty = kzalloc(sizeof(*sbsa_tty), GFP_KERNEL);
|
|
+ if (sbsa_tty == NULL) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_alloc_sbsa_tty_failed;
|
|
+ }
|
|
+ tty = alloc_tty_driver(1);
|
|
+ if (tty == NULL) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_alloc_tty_driver_failed;
|
|
+ }
|
|
+ tty->driver_name = "sbsauart";
|
|
+ tty->name = "ttySBSA";
|
|
+ tty->type = TTY_DRIVER_TYPE_SERIAL;
|
|
+ tty->subtype = SERIAL_TYPE_NORMAL;
|
|
+ tty->init_termios = tty_std_termios;
|
|
+ tty->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
|
|
+ TTY_DRIVER_DYNAMIC_DEV;
|
|
+ tty_set_operations(tty, &sbsa_tty_ops);
|
|
+ ret = tty_register_driver(tty);
|
|
+ if (ret)
|
|
+ goto err_tty_register_driver_failed;
|
|
+
|
|
+ sbsa_tty_driver = tty;
|
|
+ return 0;
|
|
+
|
|
+err_tty_register_driver_failed:
|
|
+ put_tty_driver(tty);
|
|
+err_alloc_tty_driver_failed:
|
|
+ kfree(sbsa_tty);
|
|
+ sbsa_tty = NULL;
|
|
+err_alloc_sbsa_tty_failed:
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void sbsa_tty_delete_driver(void)
|
|
+{
|
|
+ tty_unregister_driver(sbsa_tty_driver);
|
|
+ put_tty_driver(sbsa_tty_driver);
|
|
+ sbsa_tty_driver = NULL;
|
|
+ kfree(sbsa_tty);
|
|
+ sbsa_tty = NULL;
|
|
+}
|
|
+
|
|
+static int sbsa_tty_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct sbsa_tty *qtty;
|
|
+ int ret = -EINVAL;
|
|
+ int i;
|
|
+ struct resource *r;
|
|
+ struct device *ttydev;
|
|
+ void __iomem *base;
|
|
+ u32 irq;
|
|
+
|
|
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ if (r == NULL)
|
|
+ return -EINVAL;
|
|
+
|
|
+ base = ioremap(r->start, r->end - r->start);
|
|
+ if (base == NULL)
|
|
+ pr_err("sbsa_tty: unable to remap base\n");
|
|
+
|
|
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
|
+ if (r == NULL)
|
|
+ goto err_unmap;
|
|
+
|
|
+ irq = r->start;
|
|
+
|
|
+ if (pdev->id > 0)
|
|
+ goto err_unmap;
|
|
+
|
|
+ ret = sbsa_tty_create_driver();
|
|
+ if (ret)
|
|
+ goto err_unmap;
|
|
+
|
|
+ qtty = sbsa_tty;
|
|
+ spin_lock_init(&qtty->lock);
|
|
+ tty_port_init(&qtty->port);
|
|
+ qtty->port.ops = &sbsa_port_ops;
|
|
+ qtty->base = base;
|
|
+ qtty->irq = irq;
|
|
+
|
|
+ /* Clear and Mask all IRQs */
|
|
+ writew(0, base + UART011_IMSC);
|
|
+ writew(0xFFFF, base + UART011_ICR);
|
|
+
|
|
+ ret = request_irq(irq, sbsa_tty_interrupt, IRQF_SHARED,
|
|
+ "sbsa_tty", pdev);
|
|
+ if (ret)
|
|
+ goto err_request_irq_failed;
|
|
+
|
|
+ /* Unmask the RX IRQ */
|
|
+ writew(UART011_RXIM | UART011_RTIM, base + UART011_IMSC);
|
|
+
|
|
+ ttydev = tty_port_register_device(&qtty->port, sbsa_tty_driver,
|
|
+ 0, &pdev->dev);
|
|
+ if (IS_ERR(ttydev)) {
|
|
+ ret = PTR_ERR(ttydev);
|
|
+ goto err_tty_register_device_failed;
|
|
+ }
|
|
+
|
|
+ strcpy(qtty->console.name, "ttySBSA");
|
|
+ qtty->console.write = sbsa_tty_console_write;
|
|
+ qtty->console.device = sbsa_tty_console_device;
|
|
+ qtty->console.setup = sbsa_tty_console_setup;
|
|
+ qtty->console.flags = CON_PRINTBUFFER;
|
|
+ /* if no console= on cmdline, make this the console device */
|
|
+ if (!console_set_on_cmdline)
|
|
+ qtty->console.flags |= CON_CONSDEV;
|
|
+ qtty->console.index = pdev->id;
|
|
+ register_console(&qtty->console);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+ tty_unregister_device(sbsa_tty_driver, i);
|
|
+err_tty_register_device_failed:
|
|
+ free_irq(irq, pdev);
|
|
+err_request_irq_failed:
|
|
+ sbsa_tty_delete_driver();
|
|
+err_unmap:
|
|
+ iounmap(base);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int sbsa_tty_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct sbsa_tty *qtty;
|
|
+
|
|
+ qtty = sbsa_tty;
|
|
+ unregister_console(&qtty->console);
|
|
+ tty_unregister_device(sbsa_tty_driver, pdev->id);
|
|
+ iounmap(qtty->base);
|
|
+ qtty->base = 0;
|
|
+ free_irq(qtty->irq, pdev);
|
|
+ sbsa_tty_delete_driver();
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct acpi_device_id sbsa_acpi_match[] = {
|
|
+ { "ARMH0011", 0 },
|
|
+ { }
|
|
+};
|
|
+
|
|
+static struct platform_driver sbsa_tty_platform_driver = {
|
|
+ .probe = sbsa_tty_probe,
|
|
+ .remove = sbsa_tty_remove,
|
|
+ .driver = {
|
|
+ .name = "sbsa_tty",
|
|
+ .acpi_match_table = ACPI_PTR(sbsa_acpi_match),
|
|
+ }
|
|
+};
|
|
+
|
|
+module_platform_driver(sbsa_tty_platform_driver);
|
|
+
|
|
+MODULE_LICENSE("GPL v2");
|
|
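Editor's note, not part of the patch: because the new file uses EARLYCON_DECLARE(sbsauart, ...), the driver can also provide early boot output before the TTY layer is up, selected by name on the kernel command line. Something like the following should work, where the MMIO address is only an example and must match the platform's SBSA UART base:

    earlycon=sbsauart,0x7e201000

Once sbsa_tty_probe() runs, the same port is exposed as /dev/ttySBSA0 and, unless a console= option was given, is also taken over as the system console (see the CON_CONSDEV handling in the probe routine above).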
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 555de07..3991aa0 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -351,10 +351,18 @@ static int dw8250_probe_of(struct uart_port *p,
static int dw8250_probe_acpi(struct uart_8250_port *up,
struct dw8250_data *data)
{
+ const struct acpi_device_id *id;
struct uart_port *p = &up->port;

dw8250_setup_port(up);

+ id = acpi_match_device(p->dev->driver->acpi_match_table, p->dev);
+ if (!id)
+ return -ENODEV;
+
+ if (!p->uartclk)
+ p->uartclk = (unsigned int)id->driver_data;
+
p->iotype = UPIO_MEM32;
p->serial_in = dw8250_serial_in32;
p->serial_out = dw8250_serial_out32;
@@ -577,6 +585,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT3435", 0 },
{ "80860F0A", 0 },
{ "8086228A", 0 },
+ { "APMC0D08", 50000000},
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
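The dw8250_probe_acpi() change above relies on a small convention worth calling out: a per-device default (here a 50 MHz UART input clock for the X-Gene "APMC0D08" device) is stashed in the driver_data field of the acpi_device_id entry and read back with acpi_match_device() when firmware does not supply the value. A minimal, self-contained sketch of that pattern follows; the "EXAM0001" _HID, the driver name and the 100 MHz figure are made-up placeholders, not part of this patch.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct acpi_device_id example_acpi_ids[] = {
	{ "EXAM0001", 100000000 },	/* default input clock in Hz */
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_ids);

static int example_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	unsigned long clk_hz;

	/* Find the table entry that matched this ACPI-enumerated device */
	id = acpi_match_device(example_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Fall back to the per-ID default carried in driver_data */
	clk_hz = id->driver_data;
	dev_info(&pdev->dev, "using default clock %lu Hz\n", clk_hz);

	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name			= "example-acpi-clk",
		.acpi_match_table	= ACPI_PTR(example_acpi_ids),
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL v2");

Since driver_data is a kernel_ulong_t, anything that fits in an unsigned long (a rate, a flag word, or a pointer to a larger descriptor) can be carried this way.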
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 00d115b..cd9b974 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -100,8 +100,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_mmio.h>
#include <linux/virtio_ring.h>
-
-
+#include <linux/acpi.h>

/* The alignment to use between consumer and producer parts of vring.
* Currently hardcoded to the page size. */
@@ -635,12 +634,21 @@ static struct of_device_id virtio_mmio_match[] = {
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

+#ifdef CONFIG_ACPI
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+ { "LNRO0005", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+#endif
+
static struct platform_driver virtio_mmio_driver = {
.probe = virtio_mmio_probe,
.remove = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
+ .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
},
};

diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 61e32ec..1fec6f5 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -69,6 +69,8 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
int rev, int func, union acpi_object *argv4);

+acpi_status acpi_check_coherency(acpi_handle handle, int *val);
+
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
union acpi_object *argv4, acpi_object_type type)
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 444671e..9d573db 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -1,11 +1,17 @@
#ifndef _ACPI_IO_H_
#define _ACPI_IO_H_

+#include <linux/mm.h>
#include <linux/io.h>

static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
+#ifdef CONFIG_ARM64
+ if (!page_is_ram(phys >> PAGE_SHIFT))
+ return ioremap(phys, size);
+#endif
+
return ioremap_cache(phys, size);
}

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bee5d68..140d514 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -276,6 +276,13 @@
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
+ /* ACPI quirks */ \
+ .acpi_fixup : AT(ADDR(.acpi_fixup) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_acpi_mcfg_fixups) = .; \
+ *(.acpi_fixup_mcfg) \
+ VMLINUX_SYMBOL(__end_acpi_mcfg_fixups) = .; \
+ } \
+ \
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
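The .acpi_fixup_mcfg section added here is what makes the DECLARE_ACPI_MCFG_FIXUP("APM ", "XGENE ", xgene_mcfg_fixup) line in pci-xgene.c above work: each use of the macro drops a record into that section, and the MCFG parsing code walks the records between the two boundary symbols, comparing OEM identifiers from the MCFG table header and calling the matching hook. The actual macro and walker live in the arch/arm64/pci/mmconfig.c part of this series, which is not quoted in this excerpt; the sketch below only shows the usual shape of such a linker-section mechanism, with assumed structure, field and function names.

/* Illustrative sketch only -- names below are assumptions, not the patch's. */
struct acpi_mcfg_fixup {
	char oem_id[7];		/* OEM ID from the MCFG header, NUL-terminated */
	char oem_table_id[9];	/* OEM table ID, NUL-terminated */
	int (*hook)(struct acpi_pci_root *root, struct pci_mmcfg_region *cfg);
};

#define DECLARE_ACPI_MCFG_FIXUP(oem, table, fn)				\
	static struct acpi_mcfg_fixup __mcfg_fixup_##fn			\
	__attribute__((__section__(".acpi_fixup_mcfg"), __used__)) =	\
		{ (oem), (table), (fn) }

/* Boundary symbols emitted by the linker script hunk above. */
extern struct acpi_mcfg_fixup __start_acpi_mcfg_fixups[];
extern struct acpi_mcfg_fixup __end_acpi_mcfg_fixups[];

static void apply_mcfg_fixups(struct acpi_table_header *mcfg_hdr,
			      struct acpi_pci_root *root,
			      struct pci_mmcfg_region *cfg)
{
	struct acpi_mcfg_fixup *f;

	/* Call every registered hook whose OEM IDs match this MCFG table */
	for (f = __start_acpi_mcfg_fixups; f < __end_acpi_mcfg_fixups; f++)
		if (!strncmp(f->oem_id, mcfg_hdr->oem_id, 6) &&
		    !strncmp(f->oem_table_id, mcfg_hdr->oem_table_id, 8))
			f->hook(root, cfg);
}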
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ac4888d..d68268d 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -290,17 +290,19 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k) ((k)->arch.vgic.ready)

-int vgic_v2_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params);
+int vgic_v2_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params);
+int vgic_v2_acpi_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
-int vgic_v3_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params);
+int vgic_v3_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params);
#else
-static inline int vgic_v3_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
+static inline int vgic_v3_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params)
{
return -ENODEV;
}
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381..13e6200 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -72,6 +72,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOAPIC,
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
+ ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_COUNT
};

diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index abcafaa..4f5caa1 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -346,4 +346,10 @@ extern void clocksource_of_init(void);
static inline void clocksource_of_init(void) {}
#endif

+#ifdef CONFIG_ACPI
+void acpi_generic_timer_init(void);
+#else
+static inline void acpi_generic_timer_init(void) {}
+#endif
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h
new file mode 100644
index 0000000..ad5b577
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-acpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARM_GIC_ACPI_H_
+#define ARM_GIC_ACPI_H_
+
+#ifdef CONFIG_ACPI
+
+/*
+ * These sizes are hard-coded because, unlike FDT, the MADT provides no
+ * memory size for the GIC regions; that is not a problem, since the
+ * sizes can be inferred from the GIC specification.
+ */
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+
+struct acpi_table_header;
+
+void acpi_gic_init(void);
+int gic_v2_acpi_init(struct acpi_table_header *table);
+#else
+static inline void acpi_gic_init(void) { }
+#endif
+
+#endif /* ARM_GIC_ACPI_H_ */
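The acpi_gic_init()/gic_v2_acpi_init() pair declared above is implemented in the irqchip changes of this series (drivers/irqchip/irq-gic.c and irqchip.c in the diffstat), which are not quoted in this excerpt. The declarations line up with the generic ACPI table API, so the wiring is roughly as follows; treat this as an illustrative sketch rather than the exact code from the patch.

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-acpi.h>

void __init acpi_gic_init(void)
{
	if (acpi_disabled)
		return;

	/* Locate the MADT by signature and let the GICv2 code parse
	 * its distributor / CPU-interface subtables. */
	acpi_table_parse(ACPI_SIG_MADT, gic_v2_acpi_init);
}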
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 71d706d..5c55f37 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -55,6 +55,8 @@
(GICD_INT_DEF_PRI << 8) |\
GICD_INT_DEF_PRI)

+#define GIC_DIST_SOFTINT_NSATT 0x8000
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
#define GICH_VMCR 0x8
diff --git a/include/linux/pci.h b/include/linux/pci.h
|
|
index 360a966..1476a66 100644
|
|
--- a/include/linux/pci.h
|
|
+++ b/include/linux/pci.h
|
|
@@ -564,15 +564,6 @@ struct pci_ops {
|
|
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
|
|
};
|
|
|
|
-/*
|
|
- * ACPI needs to be able to access PCI config space before we've done a
|
|
- * PCI bus scan and created pci_bus structures.
|
|
- */
|
|
-int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
|
|
- int reg, int len, u32 *val);
|
|
-int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
|
|
- int reg, int len, u32 val);
|
|
-
|
|
struct pci_bus_region {
|
|
dma_addr_t start;
|
|
dma_addr_t end;
|
|
@@ -1329,6 +1320,16 @@ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
|
|
unsigned int command_bits, u32 flags);
|
|
void pci_register_set_vga_state(arch_set_vga_state_t func);
|
|
|
|
+/*
|
|
+ * ACPI needs to be able to access PCI config space before we've done a
|
|
+ * PCI bus scan and created pci_bus structures.
|
|
+ */
|
|
+int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
|
|
+ int reg, int len, u32 *val);
|
|
+int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
|
|
+ int reg, int len, u32 val);
|
|
+void pcibios_penalize_isa_irq(int irq, int active);
|
|
+
|
|
#else /* CONFIG_PCI is not enabled */
|
|
|
|
/*
|
|
@@ -1430,6 +1431,23 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
|
|
unsigned int devfn)
|
|
{ return NULL; }
|
|
|
|
+static inline struct pci_bus *pci_find_bus(int domain, int busnr)
|
|
+{ return NULL; }
|
|
+
|
|
+static inline int pci_bus_write_config_byte(struct pci_bus *bus,
|
|
+ unsigned int devfn, int where, u8 val)
|
|
+{ return -ENOSYS; }
|
|
+
|
|
+static inline int raw_pci_read(unsigned int domain, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 *val)
|
|
+{ return -ENOSYS; }
|
|
+
|
|
+static inline int raw_pci_write(unsigned int domain, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 val)
|
|
+{ return -ENOSYS; }
|
|
+
|
|
+static inline void pcibios_penalize_isa_irq(int irq, int active) { }
|
|
+
|
|
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
|
|
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
|
|
static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
|
|
@@ -1639,7 +1657,6 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
|
|
enum pcie_reset_state state);
|
|
int pcibios_add_device(struct pci_dev *dev);
|
|
void pcibios_release_device(struct pci_dev *dev);
|
|
-void pcibios_penalize_isa_irq(int irq, int active);
|
|
|
|
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
|
extern struct dev_pm_ops pcibios_pm_ops;
|
|
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
|
|
index 1c0772b..b9d11aa 100644
|
|
--- a/virt/kvm/arm/arch_timer.c
|
|
+++ b/virt/kvm/arm/arch_timer.c
|
|
@@ -21,9 +21,11 @@
|
|
#include <linux/kvm.h>
|
|
#include <linux/kvm_host.h>
|
|
#include <linux/interrupt.h>
|
|
+#include <linux/acpi.h>
|
|
|
|
#include <clocksource/arm_arch_timer.h>
|
|
#include <asm/arch_timer.h>
|
|
+#include <asm/acpi.h>
|
|
|
|
#include <kvm/arm_vgic.h>
|
|
#include <kvm/arm_arch_timer.h>
|
|
@@ -246,60 +248,91 @@ static const struct of_device_id arch_timer_of_match[] = {
|
|
{},
|
|
};
|
|
|
|
-int kvm_timer_hyp_init(void)
|
|
+static int kvm_timer_ppi_parse_dt(unsigned int *ppi)
|
|
{
|
|
struct device_node *np;
|
|
- unsigned int ppi;
|
|
- int err;
|
|
-
|
|
- timecounter = arch_timer_get_timecounter();
|
|
- if (!timecounter)
|
|
- return -ENODEV;
|
|
|
|
np = of_find_matching_node(NULL, arch_timer_of_match);
|
|
if (!np) {
|
|
- kvm_err("kvm_arch_timer: can't find DT node\n");
|
|
return -ENODEV;
|
|
}
|
|
|
|
- ppi = irq_of_parse_and_map(np, 2);
|
|
- if (!ppi) {
|
|
- kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
|
|
- err = -EINVAL;
|
|
- goto out;
|
|
+ *ppi = irq_of_parse_and_map(np, 2);
|
|
+ if (*ppi == 0) {
|
|
+ of_node_put(np);
|
|
+ return -EINVAL;
|
|
}
|
|
|
|
- err = request_percpu_irq(ppi, kvm_arch_timer_handler,
|
|
- "kvm guest timer", kvm_get_running_vcpus());
|
|
- if (err) {
|
|
- kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
|
|
- ppi, err);
|
|
- goto out;
|
|
- }
|
|
+ return 0;
|
|
+}
|
|
|
|
- host_vtimer_irq = ppi;
|
|
+extern int arch_timer_ppi[];
|
|
|
|
- err = __register_cpu_notifier(&kvm_timer_cpu_nb);
|
|
- if (err) {
|
|
- kvm_err("Cannot register timer CPU notifier\n");
|
|
- goto out_free;
|
|
- }
|
|
+static int kvm_timer_ppi_parse_acpi(unsigned int *ppi)
|
|
|
|
- wqueue = create_singlethread_workqueue("kvm_arch_timer");
|
|
- if (!wqueue) {
|
|
- err = -ENOMEM;
|
|
- goto out_free;
|
|
- }
|
|
+{
|
|
+ /* retrieve VIRT_PPI info */
|
|
+ *ppi = arch_timer_ppi[2];
|
|
|
|
- kvm_info("%s IRQ%d\n", np->name, ppi);
|
|
- on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
|
|
+ if (*ppi == 0)
|
|
+ return -EINVAL;
|
|
+ else
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_timer_hyp_init(void)
|
|
+{
|
|
+ unsigned int ppi;
|
|
+ int err;
|
|
+
|
|
+ timecounter = arch_timer_get_timecounter();
|
|
+ if (!timecounter)
|
|
+ return -ENODEV;
|
|
+
|
|
+ /* PPI DT parsing */
|
|
+ err = kvm_timer_ppi_parse_dt(&ppi);
|
|
|
|
- goto out;
|
|
+ /* if DT parsing fails, try ACPI next */
|
|
+ if (err && !acpi_disabled)
|
|
+ err = kvm_timer_ppi_parse_acpi(&ppi);
|
|
+
|
|
+ if (err) {
|
|
+ kvm_err("kvm_timer_hyp_init: can't find virtual timer info or "
|
|
+ "config virtual timer interrupt\n");
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ /* configure IRQ handler */
|
|
+ err = request_percpu_irq(ppi, kvm_arch_timer_handler,
|
|
+ "kvm guest timer", kvm_get_running_vcpus());
|
|
+ if (err) {
|
|
+ kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
|
|
+ ppi, err);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ host_vtimer_irq = ppi;
|
|
+
|
|
+ err = __register_cpu_notifier(&kvm_timer_cpu_nb);
|
|
+ if (err) {
|
|
+ kvm_err("Cannot register timer CPU notifier\n");
|
|
+ goto out_free;
|
|
+ }
|
|
+
|
|
+ wqueue = create_singlethread_workqueue("kvm_arch_timer");
|
|
+ if (!wqueue) {
|
|
+ err = -ENOMEM;
|
|
+ goto out_free;
|
|
+ }
|
|
+
|
|
+ kvm_info("timer IRQ%d\n", ppi);
|
|
+ on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
|
|
+
|
|
+ goto out;
|
|
out_free:
|
|
- free_percpu_irq(ppi, kvm_get_running_vcpus());
|
|
+ free_percpu_irq(ppi, kvm_get_running_vcpus());
|
|
out:
|
|
- of_node_put(np);
|
|
- return err;
|
|
+ return err;
|
|
}
|
|
|
|
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
|
|
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
|
|
index 2935405..510049c 100644
|
|
--- a/virt/kvm/arm/vgic-v2.c
|
|
+++ b/virt/kvm/arm/vgic-v2.c
|
|
@@ -19,6 +19,7 @@
|
|
#include <linux/kvm.h>
|
|
#include <linux/kvm_host.h>
|
|
#include <linux/interrupt.h>
|
|
+#include <linux/acpi.h>
|
|
#include <linux/io.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_address.h>
|
|
@@ -26,6 +27,7 @@
|
|
|
|
#include <linux/irqchip/arm-gic.h>
|
|
|
|
+#include <asm/acpi.h>
|
|
#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_arm.h>
|
|
#include <asm/kvm_mmu.h>
|
|
@@ -159,7 +161,7 @@ static const struct vgic_ops vgic_v2_ops = {
|
|
static struct vgic_params vgic_v2_params;
|
|
|
|
/**
|
|
- * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
|
|
+ * vgic_v2_dt_probe - probe for a GICv2 compatible interrupt controller in DT
|
|
* @node: pointer to the DT node
|
|
* @ops: address of a pointer to the GICv2 operations
|
|
* @params: address of a pointer to HW-specific parameters
|
|
@@ -168,7 +170,7 @@ static struct vgic_params vgic_v2_params;
|
|
* in *ops and the HW parameters in *params. Returns an error code
|
|
* otherwise.
|
|
*/
|
|
-int vgic_v2_probe(struct device_node *vgic_node,
|
|
+int vgic_v2_dt_probe(struct device_node *vgic_node,
|
|
const struct vgic_ops **ops,
|
|
const struct vgic_params **params)
|
|
{
|
|
@@ -222,11 +224,22 @@ int vgic_v2_probe(struct device_node *vgic_node,
|
|
}
|
|
|
|
if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
|
|
+#if 0
|
|
kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
|
|
(unsigned long long)resource_size(&vcpu_res),
|
|
PAGE_SIZE);
|
|
ret = -ENXIO;
|
|
goto out_unmap;
|
|
+#else
|
|
+ /*
|
|
+ * The check fails for arm64 with 64K pagesize and certain firmware.
|
|
+ * Ignore for now until firmware takes care of the problem.
|
|
+ */
|
|
+ kvm_info("GICV size 0x%llx not a multiple of page size 0x%lx\n",
|
|
+ (unsigned long long)resource_size(&vcpu_res),
|
|
+ PAGE_SIZE);
|
|
+ kvm_info("Update DT to assign GICV a multiple of kernel page size \n");
|
|
+#endif
|
|
}
|
|
|
|
vgic->vcpu_base = vcpu_res.start;
|
|
@@ -245,3 +258,72 @@ out:
|
|
of_node_put(vgic_node);
|
|
return ret;
|
|
}
|
|
+
|
|
+struct acpi_madt_generic_interrupt *vgic_acpi;
|
|
+static void gic_get_acpi_header(struct acpi_subtable_header *header)
|
|
+{
|
|
+ vgic_acpi = (struct acpi_madt_generic_interrupt *)header;
|
|
+}
|
|
+
|
|
+int vgic_v2_acpi_probe(const struct vgic_ops **ops,
|
|
+ const struct vgic_params **params)
|
|
+{
|
|
+ struct vgic_params *vgic = &vgic_v2_params;
|
|
+ int irq_mode, ret;
|
|
+
|
|
+ /* MADT table */
|
|
+ ret = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
|
|
+ (acpi_tbl_entry_handler)gic_get_acpi_header, 0);
|
|
+ if (!ret) {
|
|
+ pr_err("Failed to get MADT VGIC CPU entry\n");
|
|
+ ret = -ENODEV;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* IRQ trigger mode */
|
|
+ irq_mode = (vgic_acpi->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
|
|
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
|
|
+ /* According to the GIC-400 manual, all PPIs are active-low and
+ * level-sensitive, so register the IRQ as active-low.
+ */
|
|
+ vgic->maint_irq = acpi_register_gsi(NULL, vgic_acpi->vgic_interrupt,
|
|
+ irq_mode, ACPI_ACTIVE_LOW);
|
|
+ if (!vgic->maint_irq) {
|
|
+ pr_err("Cannot register VGIC ACPI maintenance irq\n");
|
|
+ ret = -ENXIO;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* GICH resource */
|
|
+ vgic->vctrl_base = ioremap(vgic_acpi->gich_base_address, SZ_8K);
|
|
+ if (!vgic->vctrl_base) {
|
|
+ pr_err("cannot ioremap GICH memory\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
|
|
+ vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
|
|
+
|
|
+ ret = create_hyp_io_mappings(vgic->vctrl_base,
|
|
+ vgic->vctrl_base + SZ_8K,
|
|
+ vgic_acpi->gich_base_address);
|
|
+ if (ret) {
|
|
+ kvm_err("Cannot map GICH into hyp\n");
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ vgic->vcpu_base = vgic_acpi->gicv_base_address;
|
|
+
|
|
+ kvm_info("GICH base=0x%llx, GICV base=0x%llx, IRQ=%d\n",
|
|
+ (unsigned long long)vgic_acpi->gich_base_address,
|
|
+ (unsigned long long)vgic_acpi->gicv_base_address,
|
|
+ vgic->maint_irq);
|
|
+
|
|
+ vgic->type = VGIC_V2;
|
|
+ *ops = &vgic_v2_ops;
|
|
+ *params = vgic;
|
|
+
|
|
+out:
|
|
+ return ret;
|
|
+}
|
|
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 1c2c8ee..8b56920 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -173,7 +173,7 @@ static const struct vgic_ops vgic_v3_ops = {
static struct vgic_params vgic_v3_params;

/**
- * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
+ * vgic_v3_dt_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
* @ops: address of a pointer to the GICv3 operations
* @params: address of a pointer to HW-specific parameters
@@ -182,9 +182,9 @@ static struct vgic_params vgic_v3_params;
* in *ops and the HW parameters in *params. Returns an error code
* otherwise.
*/
-int vgic_v3_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
+int vgic_v3_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params)
{
int ret = 0;
u32 gicv_idx;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
|
|
index 03affc7..cdd4c64 100644
|
|
--- a/virt/kvm/arm/vgic.c
|
|
+++ b/virt/kvm/arm/vgic.c
|
|
@@ -25,9 +25,11 @@
|
|
#include <linux/of_address.h>
|
|
#include <linux/of_irq.h>
|
|
#include <linux/uaccess.h>
|
|
+#include <linux/acpi.h>
|
|
|
|
#include <linux/irqchip/arm-gic.h>
|
|
|
|
+#include <asm/acpi.h>
|
|
#include <asm/kvm_emulate.h>
|
|
#include <asm/kvm_arm.h>
|
|
#include <asm/kvm_mmu.h>
|
|
@@ -2431,8 +2433,8 @@ static struct notifier_block vgic_cpu_nb = {
|
|
};
|
|
|
|
static const struct of_device_id vgic_ids[] = {
|
|
- { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
|
|
- { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
|
|
+ { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_dt_probe, },
|
|
+ { .compatible = "arm,gic-v3", .data = vgic_v3_dt_probe, },
|
|
{},
|
|
};
|
|
|
|
@@ -2442,20 +2444,26 @@ int kvm_vgic_hyp_init(void)
|
|
const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
|
|
const struct vgic_params **);
|
|
struct device_node *vgic_node;
|
|
- int ret;
|
|
+ int ret = -ENODEV;
|
|
|
|
- vgic_node = of_find_matching_node_and_match(NULL,
|
|
- vgic_ids, &matched_id);
|
|
- if (!vgic_node) {
|
|
- kvm_err("error: no compatible GIC node found\n");
|
|
- return -ENODEV;
|
|
+ /* probe VGIC */
|
|
+ if ((vgic_node = of_find_matching_node_and_match(NULL,
|
|
+ vgic_ids, &matched_id))) {
|
|
+ /* probe VGIC in DT */
|
|
+ vgic_probe = matched_id->data;
|
|
+ ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
|
|
+ }
|
|
+ else if (!acpi_disabled) {
|
|
+ /* probe VGIC in ACPI */
|
|
+ ret = vgic_v2_acpi_probe(&vgic_ops, &vgic);
|
|
}
|
|
|
|
- vgic_probe = matched_id->data;
|
|
- ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ kvm_err("error: no compatible GIC info found\n");
|
|
return ret;
|
|
+ }
|
|
|
|
+ /* configuration */
|
|
ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
|
|
"vgic", kvm_get_running_vcpus());
|
|
if (ret) {
|