kernel/kernel-arm64.patch


Documentation/ABI/testing/sysfs-firmware-dmi | 10 +
Documentation/arm64/acpi_object_usage.txt | 593 ++++
Documentation/arm64/arm-acpi.txt | 505 ++++
.../devicetree/bindings/edac/apm-xgene-edac.txt | 83 +
Documentation/kernel-parameters.txt | 3 +-
MAINTAINERS | 8 +
arch/arm64/Kconfig | 13 +
arch/arm64/boot/dts/apm/apm-storm.dtsi | 98 +
arch/arm64/include/asm/acenv.h | 18 +
arch/arm64/include/asm/acpi.h | 99 +
arch/arm64/include/asm/cpu_ops.h | 1 +
arch/arm64/include/asm/edac.h | 38 +
arch/arm64/include/asm/efi.h | 28 +-
arch/arm64/include/asm/elf.h | 3 +-
arch/arm64/include/asm/fixmap.h | 3 +
arch/arm64/include/asm/irq.h | 13 +
arch/arm64/include/asm/mmu.h | 2 +
arch/arm64/include/asm/pci.h | 66 +
arch/arm64/include/asm/psci.h | 3 +-
arch/arm64/include/asm/smp.h | 10 +-
arch/arm64/kernel/Makefile | 4 +-
arch/arm64/kernel/acpi.c | 432 +++
arch/arm64/kernel/cpu_ops.c | 6 +-
arch/arm64/kernel/efi.c | 223 +-
arch/arm64/kernel/pci.c | 424 ++-
arch/arm64/kernel/perf_event.c | 102 +
arch/arm64/kernel/psci.c | 78 +-
arch/arm64/kernel/setup.c | 44 +-
arch/arm64/kernel/smp.c | 2 +-
arch/arm64/kernel/smp_parking_protocol.c | 110 +
arch/arm64/kernel/time.c | 7 +
arch/arm64/mm/dma-mapping.c | 98 +
arch/arm64/mm/mmu.c | 14 +-
arch/ia64/Kconfig | 1 +
arch/ia64/kernel/acpi.c | 2 +-
arch/x86/Kconfig | 4 +
arch/x86/include/asm/pci_x86.h | 34 +-
arch/x86/kernel/acpi/boot.c | 2 +-
arch/x86/pci/Makefile | 5 +-
arch/x86/pci/acpi.c | 3 +-
arch/x86/pci/mmconfig-shared.c | 346 +--
arch/x86/pci/mmconfig_32.c | 47 +-
arch/x86/pci/mmconfig_64.c | 153 -
arch/x86/pci/numachip.c | 27 +-
drivers/acpi/Kconfig | 7 +-
drivers/acpi/Makefile | 3 +-
drivers/acpi/acpi_processor.c | 7 +-
drivers/acpi/acpica/acutils.h | 3 +
drivers/acpi/acpica/nsxfname.c | 21 +-
drivers/acpi/acpica/utids.c | 71 +
drivers/acpi/bus.c | 3 +
drivers/acpi/internal.h | 4 +
drivers/acpi/mcfg.c | 140 +
drivers/acpi/osl.c | 6 +-
drivers/acpi/processor_core.c | 60 +-
drivers/acpi/scan.c | 17 +-
drivers/acpi/tables.c | 52 +-
drivers/acpi/utils.c | 26 +
drivers/ata/Kconfig | 2 +-
drivers/ata/ahci_platform.c | 9 +
drivers/ata/ahci_xgene.c | 27 +-
drivers/clocksource/arm_arch_timer.c | 135 +-
drivers/edac/Kconfig | 9 +-
drivers/edac/Makefile | 2 +
drivers/edac/xgene_edac.c | 2132 ++++++++++++++
drivers/firmware/dmi-sysfs.c | 42 +
drivers/firmware/dmi_scan.c | 26 +
drivers/firmware/efi/libstub/arm-stub.c | 59 -
drivers/firmware/efi/libstub/efistub.h | 4 -
drivers/firmware/efi/libstub/fdt.c | 62 +-
drivers/iommu/arm-smmu.c | 8 +-
drivers/irqchip/irq-gic-v2m.c | 148 +-
drivers/irqchip/irq-gic-v3-its.c | 35 +-
drivers/irqchip/irq-gic-v3.c | 10 +
drivers/irqchip/irq-gic.c | 125 +-
drivers/irqchip/irqchip.c | 3 +
drivers/net/ethernet/amd/Makefile | 1 +
drivers/net/ethernet/amd/xgbe-a0/Makefile | 8 +
drivers/net/ethernet/amd/xgbe-a0/xgbe-common.h | 1142 ++++++++
drivers/net/ethernet/amd/xgbe-a0/xgbe-dcb.c | 269 ++
drivers/net/ethernet/amd/xgbe-a0/xgbe-debugfs.c | 373 +++
drivers/net/ethernet/amd/xgbe-a0/xgbe-desc.c | 636 +++++
drivers/net/ethernet/amd/xgbe-a0/xgbe-dev.c | 2964 ++++++++++++++++++++
drivers/net/ethernet/amd/xgbe-a0/xgbe-drv.c | 2204 +++++++++++++++
drivers/net/ethernet/amd/xgbe-a0/xgbe-ethtool.c | 616 ++++
drivers/net/ethernet/amd/xgbe-a0/xgbe-main.c | 643 +++++
drivers/net/ethernet/amd/xgbe-a0/xgbe-mdio.c | 312 +++
drivers/net/ethernet/amd/xgbe-a0/xgbe-ptp.c | 284 ++
drivers/net/ethernet/amd/xgbe-a0/xgbe.h | 868 ++++++
drivers/net/ethernet/smsc/smc91x.c | 10 +
drivers/net/phy/Makefile | 1 +
drivers/net/phy/amd-xgbe-phy-a0.c | 1829 ++++++++++++
drivers/pci/Kconfig | 7 +
drivers/pci/Makefile | 5 +
drivers/pci/ecam.c | 361 +++
drivers/pci/host/pci-xgene.c | 156 ++
drivers/pci/msi.c | 3 +-
drivers/pci/of.c | 20 +
drivers/pci/pci-acpi.c | 36 +
drivers/pci/pci.c | 26 +-
drivers/pci/probe.c | 33 +
drivers/tty/Kconfig | 6 +
drivers/tty/Makefile | 1 +
drivers/tty/sbsauart.c | 358 +++
drivers/tty/serial/8250/8250_dw.c | 14 +-
drivers/tty/serial/amba-pl011.c | 8 +
drivers/usb/host/xhci-plat.c | 15 +-
drivers/virtio/virtio_mmio.c | 12 +-
drivers/xen/pci.c | 6 +-
include/acpi/acnames.h | 1 +
include/acpi/acpi_bus.h | 2 +
include/acpi/acpi_io.h | 4 +
include/acpi/actypes.h | 4 +-
include/acpi/processor.h | 6 +-
include/asm-generic/vmlinux.lds.h | 7 +
include/kvm/arm_vgic.h | 20 +-
include/linux/acpi.h | 8 +-
include/linux/acpi_irq.h | 10 +
include/linux/clocksource.h | 6 +
include/linux/device.h | 20 +
include/linux/dmi.h | 3 +
include/linux/ecam.h | 81 +
include/linux/irqchip/arm-gic-acpi.h | 32 +
include/linux/irqchip/arm-gic.h | 7 +
include/linux/mod_devicetable.h | 1 +
include/linux/msi.h | 4 +-
include/linux/pci-acpi.h | 3 +
include/linux/pci.h | 3 +
kernel/irq/msi.c | 24 +
scripts/mod/devicetable-offsets.c | 1 +
scripts/mod/file2alias.c | 13 +-
virt/kvm/arm/arch_timer.c | 107 +-
virt/kvm/arm/vgic-v2.c | 86 +-
virt/kvm/arm/vgic-v3.c | 8 +-
virt/kvm/arm/vgic.c | 32 +-
135 files changed, 19707 insertions(+), 1036 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-firmware-dmi b/Documentation/ABI/testing/sysfs-firmware-dmi
index c78f9ab..3a9ffe8 100644
--- a/Documentation/ABI/testing/sysfs-firmware-dmi
+++ b/Documentation/ABI/testing/sysfs-firmware-dmi
@@ -12,6 +12,16 @@ Description:
cannot ensure that the data as exported to userland is
without error either.
+ The firmware provides DMI structures as a packed list of
+ data referenced by an SMBIOS table entry point. The SMBIOS
+ entry point contains general information, such as the SMBIOS
+ version and the DMI table size. The structure, content and
+ size of the entry point depend on the SMBIOS version, which
+ is why the entry point is represented in dmi sysfs as a raw
+ attribute, accessible via
+ /sys/firmware/dmi/smbios_raw_header. The format of the
+ entry point header is described in the SMBIOS specification.
+
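+ A minimal user-space sketch for reading this attribute; the
+ anchor check below is an assumption taken from the SMBIOS
+ specification, not part of this interface:
+
+   #include <fcntl.h>
+   #include <stdio.h>
+   #include <unistd.h>
+
+   int main(void)
+   {
+           unsigned char buf[64];  /* fits any known entry point */
+           ssize_t len;
+           int fd = open("/sys/firmware/dmi/smbios_raw_header", O_RDONLY);
+
+           if (fd < 0)
+                   return 1;
+           len = read(fd, buf, sizeof(buf));
+           close(fd);
+           if (len < 4)
+                   return 1;
+           /* anchor is "_SM_" (32-bit) or "_SM3_" (64-bit) */
+           printf("anchor: %.4s, %zd bytes\n", (char *)buf, len);
+           return 0;
+   }
+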
DMI is structured as a large table of entries, where
each entry has a common header indicating the type and
length of the entry, as well as a firmware-provided
diff --git a/Documentation/arm64/acpi_object_usage.txt b/Documentation/arm64/acpi_object_usage.txt
new file mode 100644
index 0000000..96e2273
--- /dev/null
+++ b/Documentation/arm64/acpi_object_usage.txt
@@ -0,0 +1,593 @@
+ACPI Tables
+-----------
+The expectations of individual ACPI tables are discussed in the list that
+follows.
+
+If a section number is used, it refers to a section number in the ACPI
+specification where the object is defined. If "Signature Reserved" is used,
+the table signature (the first four bytes of the table) is the only portion
+of the table recognized by the specification, and the actual table is defined
+outside of the UEFI Forum (see Section 5.2.6 of the specification).
+
+For ACPI on arm64, tables also fall into the following categories:
+
+ -- Required: DSDT, FADT, GTDT, MADT, MCFG, RSDP, SPCR, XSDT
+
+ -- Recommended: BERT, EINJ, ERST, HEST, SSDT
+
+ -- Optional: BGRT, CPEP, CSRT, DRTM, ECDT, FACS, FPDT, MCHI, MPST,
+ MSCT, RASF, SBST, SLIT, SPMI, SRAT, TCPA, TPM2, UEFI
+
+ -- Not supported: BOOT, DBG2, DBGP, DMAR, ETDT, HPET, IBFT, IVRS,
+ LPIT, MSDM, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+
+
+Table Usage for ARMv8 Linux
+----- ----------------------------------------------------------------
+BERT Section 18.3 (signature == "BERT")
+ == Boot Error Record Table ==
+ Must be supplied if RAS support is provided by the platform. It
+ is recommended this table be supplied.
+
+BOOT Signature Reserved (signature == "BOOT")
+ == simple BOOT flag table ==
+ Microsoft only table, will not be supported.
+
+BGRT Section 5.2.22 (signature == "BGRT")
+ == Boot Graphics Resource Table ==
+ Optional, not currently supported, with no real use-case for an
+ ARM server.
+
+CPEP Section 5.2.18 (signature == "CPEP")
+ == Corrected Platform Error Polling table ==
+ Optional, not currently supported, and not recommended until such
+ time as ARM-compatible hardware is available, and the specification
+ suitably modified.
+
+CSRT Signature Reserved (signature == "CSRT")
+ == Core System Resources Table ==
+ Optional, not currently supported.
+
+DBG2 Signature Reserved (signature == "DBG2")
+ == DeBuG port table 2 ==
+ Microsoft only table, will not be supported.
+
+DBGP Signature Reserved (signature == "DBGP")
+ == DeBuG Port table ==
+ Microsoft only table, will not be supported.
+
+DSDT Section 5.2.11.1 (signature == "DSDT")
+ == Differentiated System Description Table ==
+ A DSDT is required; see also SSDT.
+
+ ACPI tables contain only one DSDT but can contain one or more SSDTs,
+ which are optional. Each SSDT can only add to the ACPI namespace,
+ but cannot modify or replace anything in the DSDT.
+
+DMAR Signature Reserved (signature == "DMAR")
+ == DMA Remapping table ==
+ x86 only table, will not be supported.
+
+DRTM Signature Reserved (signature == "DRTM")
+ == Dynamic Root of Trust for Measurement table ==
+ Optional, not currently supported.
+
+ECDT Section 5.2.16 (signature == "ECDT")
+ == Embedded Controller Description Table ==
+ Optional, not currently supported, but could be used on ARM if and
+ only if one uses the GPE_BIT field to represent an IRQ number, since
+ there are no GPE blocks defined in hardware reduced mode. This would
+ need to be modified in the ACPI specification.
+
+EINJ Section 18.6 (signature == "EINJ")
+ == Error Injection table ==
+ This table is very useful for testing platform response to error
+ conditions; it allows one to inject an error into the system as
+ if it had actually occurred. However, this table should not be
+ shipped with a production system; it should be dynamically loaded
+ and executed with the ACPICA tools only during testing.
+
+ERST Section 18.5 (signature == "ERST")
+ == Error Record Serialization Table ==
+ On a platform that supports RAS, this table must be supplied if the
+ platform is not UEFI-based; if it is UEFI-based, this table may be supplied. When this
+ table is not present, UEFI run time service will be utilized to save
+ and retrieve hardware error information to and from a persistent store.
+
+ETDT Signature Reserved (signature == "ETDT")
+ == Event Timer Description Table ==
+ Obsolete table, will not be supported.
+
+FACS Section 5.2.10 (signature == "FACS")
+ == Firmware ACPI Control Structure ==
+ It is unlikely that this table will be terribly useful. If it is
+ provided, the Global Lock will NOT be used since it is not part of
+ the hardware reduced profile, and only 64-bit address fields will
+ be considered valid.
+
+FADT Section 5.2.9 (signature == "FACP")
+ == Fixed ACPI Description Table ==
+ Required for arm64.
+
+ The HW_REDUCED_ACPI flag must be set. All of the fields that are
+ to be ignored when HW_REDUCED_ACPI is set are expected to be set to
+ zero.
+
+ If an FACS table is provided, the X_FIRMWARE_CTRL field is to be
+ used, not FIRMWARE_CTRL.
+
+ If PSCI is used (as is recommended), make sure that ARM_BOOT_ARCH is
+ filled in properly -- that the PSCI_COMPLIANT flag is set and that
+ PSCI_USE_HVC is set or unset as needed (see table 5-37).
+
+ For the DSDT that is also required, the X_DSDT field is to be used,
+ not the DSDT field.
+
+FPDT Section 5.2.23 (signature == "FPDT")
+ == Firmware Performance Data Table ==
+ Optional, not currently supported.
+
+GTDT Section 5.2.24 (signature == "GTDT")
+ == Generic Timer Description Table ==
+ Required for arm64.
+
+HEST Section 18.3.2 (signature == "HEST")
+ == Hardware Error Source Table ==
+ Until further error source types are defined, use only types 6 (AER
+ Root Port), 7 (AER Endpoint), 8 (AER Bridge), or 9 (Generic Hardware
+ Error Source). Firmware first error handling is possible if and only
+ if Trusted Firmware is being used on arm64.
+
+ Must be supplied if RAS support is provided by the platform. It
+ is recommended this table be supplied.
+
+HPET Signature Reserved (signature == "HPET")
+ == High Precision Event timer Table ==
+ x86 only table, will not be supported.
+
+IBFT Signature Reserved (signature == "IBFT")
+ == iSCSI Boot Firmware Table ==
+ Microsoft defined table, support TBD.
+
+IVRS Signature Reserved (signature == "IVRS")
+ == I/O Virtualization Reporting Structure ==
+ x86_64 (AMD) only table, will not be supported.
+
+LPIT Signature Reserved (signature == "LPIT")
+ == Low Power Idle Table ==
+ x86 only table as of ACPI 5.1; future versions have been adapted for
+ use with ARM and will be recommended in order to support ACPI power
+ management.
+
+MADT Section 5.2.12 (signature == "APIC")
+ == Multiple APIC Description Table ==
+ Required for arm64. Only the GIC interrupt controller structures
+ should be used (types 0xA - 0xE).
+
+MCFG Signature Reserved (signature == "MCFG")
+ == Memory-mapped ConFiGuration space ==
+ If the platform supports PCI/PCIe, an MCFG table is required.
+
+MCHI Signature Reserved (signature == "MCHI")
+ == Management Controller Host Interface table ==
+ Optional, not currently supported.
+
+MPST Section 5.2.21 (signature == "MPST")
+ == Memory Power State Table ==
+ Optional, not currently supported.
+
+MSDM Signature Reserved (signature == "MSDM")
+ == Microsoft Data Management table ==
+ Microsoft only table, will not be supported.
+
+MSCT Section 5.2.19 (signature == "MSCT")
+ == Maximum System Characteristic Table ==
+ Optional, not currently supported.
+
+RASF Section 5.2.20 (signature == "RASF")
+ == RAS Feature table ==
+ Optional, not currently supported.
+
+RSDP Section 5.2.5 (signature == "RSD PTR")
+ == Root System Description PoinTeR ==
+ Required for arm64.
+
+RSDT Section 5.2.7 (signature == "RSDT")
+ == Root System Description Table ==
+ Since this table can only provide 32-bit addresses, it is deprecated
+ on arm64, and will not be used.
+
+SBST Section 5.2.14 (signature == "SBST")
+ == Smart Battery Subsystem Table ==
+ Optional, not currently supported.
+
+SLIC Signature Reserved (signature == "SLIC")
+ == Software LIcensing table ==
+ Microsoft only table, will not be supported.
+
+SLIT Section 5.2.17 (signature == "SLIT")
+ == System Locality distance Information Table ==
+ Optional in general, but required for NUMA systems.
+
+SPCR Signature Reserved (signature == "SPCR")
+ == Serial Port Console Redirection table ==
+ Required for arm64.
+
+SPMI Signature Reserved (signature == "SPMI")
+ == Server Platform Management Interface table ==
+ Optional, not currently supported.
+
+SRAT Section 5.2.16 (signature == "SRAT")
+ == System Resource Affinity Table ==
+ Optional, but if used, only the GICC Affinity structures are read.
+ To support NUMA, this table is required.
+
+SSDT Section 5.2.11.2 (signature == "SSDT")
+ == Secondary System Description Table ==
+ These tables are a continuation of the DSDT; these are recommended
+ for use with devices that can be added to a running system, but can
+ also serve the purpose of dividing up device descriptions into more
+ manageable pieces.
+
+ An SSDT can only ADD to the ACPI namespace. It cannot modify or
+ replace existing device descriptions already in the namespace.
+
+ These tables are optional, however. ACPI tables should contain only
+ one DSDT but can contain many SSDTs.
+
+TCPA Signature Reserved (signature == "TCPA")
+ == Trusted Computing Platform Alliance table ==
+ Optional, not currently supported, and may need changes to fully
+ interoperate with arm64.
+
+TPM2 Signature Reserved (signature == "TPM2")
+ == Trusted Platform Module 2 table ==
+ Optional, not currently supported, and may need changes to fully
+ interoperate with arm64.
+
+UEFI Signature Reserved (signature == "UEFI")
+ == UEFI ACPI data table ==
+ Optional, not currently supported. No known use case for arm64,
+ at present.
+
+WAET Signature Reserved (signature == "WAET")
+ == Windows ACPI Emulated devices Table ==
+ Microsoft only table, will not be supported.
+
+WDAT Signature Reserved (signature == "WDAT")
+ == Watch Dog Action Table ==
+ Microsoft only table, will not be supported.
+
+WDRT Signature Reserved (signature == "WDRT")
+ == Watch Dog Resource Table ==
+ Microsoft only table, will not be supported.
+
+WPBT Signature Reserved (signature == "WPBT")
+ == Windows Platform Binary Table ==
+ Microsoft only table, will not be supported.
+
+XSDT Section 5.2.8 (signature == "XSDT")
+ == eXtended System Description Table ==
+ Required for arm64.
+
+
+ACPI Objects
+------------
+The expectations on individual ACPI objects are discussed in the list that
+follows:
+
+Name Section Usage for ARMv8 Linux
+---- ------------ -------------------------------------------------
+_ADR 6.1.1 Use as needed.
+
+_BBN 6.5.5 Use as needed; PCI-specific.
+
+_BDN 6.5.3 Optional; not likely to be used on arm64.
+
+_CCA 6.2.17 This method should be defined for all bus masters
+ on arm64. While cache coherency is assumed, making
+ it explicit ensures the kernel will set up DMA as
+ it should.
+
+_CDM 6.2.1 Optional, to be used only for processor devices.
+
+_CID 6.1.2 Use as needed.
+
+_CLS 6.1.3 Use as needed.
+
+_CRS 6.2.2 Required on arm64.
+
+_DCK 6.5.2 Optional; not likely to be used on arm64.
+
+_DDN 6.1.4 This field can be used for a device name. However,
+ it is meant for DOS device names (e.g., COM1), so be
+ careful of its use across OSes.
+
+_DEP 6.5.8 Use as needed.
+
+_DIS 6.2.3 Optional, for power management use.
+
+_DLM 5.7.5 Optional.
+
+_DMA 6.2.4 Optional.
+
+_DSD 6.2.5 To be used with caution. If this object is used, try
+ to use it within the constraints already defined by the
+ Device Properties UUID. Only in rare circumstances
+ should it be necessary to create a new _DSD UUID.
+
+ In either case, submit the _DSD definition along with
+ any driver patches for discussion, especially when
+ device properties are used. A driver will not be
+ considered complete without a corresponding _DSD
+ description. Once approved by kernel maintainers,
+ the UUID or device properties must then be registered
+ with the UEFI Forum; this may cause some iteration as
+ more than one OS will be registering entries.
+
+_DSM Do not use this method. It is not standardized, the
+ return values are not well documented, and it is
+ currently a frequent source of error.
+
+_DSW 7.2.1 Use as needed; power management specific.
+
+_EDL 6.3.1 Optional.
+
+_EJD 6.3.2 Optional.
+
+_EJx 6.3.3 Optional.
+
+_FIX 6.2.7 x86 specific, not used on arm64.
+
+\_GL 5.7.1 This object is not to be used in hardware reduced
+ mode, and therefore should not be used on arm64.
+
+_GLK 6.5.7 This object requires a global lock be defined; there
+ is no global lock on arm64 since it runs in hardware
+ reduced mode. Hence, do not use this object on arm64.
+
+\_GPE 5.3.1 This namespace is for x86 use only. Do not use it
+ on arm64.
+
+_GSB 6.2.7 Optional.
+
+_HID 6.1.5 Use as needed. This is the primary object to use in
+ device probing, though _CID and _CLS may also be used.
+
+_HPP 6.2.8 Optional, PCI specific.
+
+_HPX 6.2.9 Optional, PCI specific.
+
+_HRV 6.1.6 Optional, use as needed to clarify device behavior; in
+ some cases, this may be easier to use than _DSD.
+
+_INI 6.5.1 Not required, but can be useful in setting up devices
+ when UEFI leaves them in a state that may not be what
+ the driver expects before it starts probing.
+
+_IRC 7.2.15 Use as needed; power management specific.
+
+_LCK 6.3.4 Optional.
+
+_MAT 6.2.10 Optional; see also the MADT.
+
+_MLS 6.1.7 Optional, but highly recommended for use in
+ internationalization.
+
+_OFF 7.1.2 It is recommended to define this method for any device
+ that can be turned on or off.
+
+_ON 7.1.3 It is recommended to define this method for any device
+ that can be turned on or off.
+
+\_OS 5.7.3 This method will return "Linux" by default (this is
+ the value of the macro ACPI_OS_NAME on Linux). The
+ command line parameter acpi_os=<string> can be used
+ to set it to some other value.
+
+_OSC 6.2.11 This method can be a global method in ACPI (i.e.,
+ \_SB._OSC), or it may be associated with a specific
+ device (e.g., \_SB.DEV0._OSC), or both. When used
+ as a global method, only capabilities published in
+ the ACPI specification are allowed. When used as
+ a device-specific method, the process described for
+ using _DSD MUST be used to create an _OSC definition;
+ out-of-process use of _OSC is not allowed. That is,
+ submit the device-specific _OSC usage description as
+ part of the kernel driver submission, get it approved
+ by the kernel community, then register it with the
+ UEFI Forum.
+
+\_OSI 5.7.2 Deprecated on ARM64. Any invocation of this method
+ will print a warning on the console and return false.
+ That is, as far as ACPI firmware is concerned, _OSI
+ cannot be used to determine what sort of system is
+ being used or what functionality is provided. The
+ _OSC method is to be used instead.
+
+_OST 6.3.5 Optional.
+
+_PDC 8.4.1 Deprecated, do not use on arm64.
+
+\_PIC 5.8.1 The method should not be used. On arm64, the only
+ interrupt model available is GIC.
+
+_PLD 6.1.8 Optional.
+
+\_PR 5.3.1 This namespace is for x86 use only on legacy systems.
+ Do not use it on arm64.
+
+_PRS 6.2.12 Optional.
+
+_PRT 6.2.13 Required as part of the definition of all PCI root
+ devices.
+
+_PRW 7.2.13 Use as needed; power management specific.
+
+_PRx 7.2.8-11 Use as needed; power management specific. If _PR0 is
+ defined, _PR3 must also be defined.
+
+_PSC 7.2.6 Use as needed; power management specific.
+
+_PSE 7.2.7 Use as needed; power management specific.
+
+_PSW 7.2.14 Use as needed; power management specific.
+
+_PSx 7.2.2-5 Use as needed; power management specific. If _PS0 is
+ defined, _PS3 must also be defined. If clocks or
+ regulators need adjusting to be consistent with power
+ usage, change them in these methods.
+
+\_PTS 7.3.1 Use as needed; power management specific.
+
+_PXM 6.2.14 Optional.
+
+_REG 6.5.4 Use as needed.
+
+\_REV 5.7.4 Always returns the latest version of ACPI supported.
+
+_RMV 6.3.6 Optional.
+
+\_SB 5.3.1 Required on arm64; all devices must be defined in this
+ namespace.
+
+_SEG 6.5.6 Use as needed; PCI-specific.
+
+\_SI 5.3.1, Optional.
+ 9.1
+
+_SLI 6.2.15 Optional; recommended when SLIT table is in use.
+
+_STA 6.3.7, It is recommended to define this method for any device
+ 7.1.4 that can be turned on or off.
+
+_SRS 6.2.16 Optional; see also _PRS.
+
+_STR 6.1.10 Recommended for conveying device names to end users;
+ this is preferred over using _DDN.
+
+_SUB 6.1.9 Use as needed; _HID or _CID are preferred.
+
+_SUN 6.1.11 Optional.
+
+\_Sx 7.3.2 Use as needed; power management specific.
+
+_SxD 7.2.16-19 Use as needed; power management specific.
+
+_SxW 7.2.20-24 Use as needed; power management specific.
+
+_SWS 7.3.3 Use as needed; power management specific; this may
+ require specification changes for use on arm64.
+
+\_TTS 7.3.4 Use as needed; power management specific.
+
+\_TZ 5.3.1 Optional.
+
+_UID 6.1.12 Recommended for distinguishing devices of the same
+ class; define it if at all possible.
+
+\_WAK 7.3.5 Use as needed; power management specific.
+
+
+ACPI Event Model
+----------------
+Do not use GPE block devices; these are not supported in the hardware reduced
+profile used by arm64. Since there are no GPE blocks defined for use on ARM
+platforms, GPIO-signaled interrupts should be used for creating system events.
+
+
+ACPI Processor Control
+----------------------
+Section 8 of the ACPI specification is currently undergoing change that
+should be completed in the 6.0 version of the specification. Processor
+performance control will be handled differently for arm64 at that point
+in time. For example, processor aggregator devices (section 8.5) will
+not be used; another, similar mechanism will be used instead.
+
+While UEFI constrains what we can say until the release of 6.0, it is
+recommended that CPPC (8.4.5) be used as the primary model. This will
+still be useful into the future. C-states and P-states will still be
+provided, but most of the current design work appears to favor CPPC.
+
+Further, it is essential that the ARMv8 SoC provide a fully functional
+implementation of PSCI; this will be the only mechanism supported by ACPI
+to control CPU power state (including secondary CPU booting).
+
+More details will be provided on the release of the ACPI 6.0 specification.
+
+
+ACPI System Address Map Interfaces
+----------------------------------
+In Section 15 of the ACPI specification, several methods are mentioned as
+possible mechanisms for conveying memory resource information to the kernel.
+For arm64, we will only support UEFI for booting with ACPI, hence the UEFI
+GetMemoryMap() boot service is the only mechanism that will be used.
+
+
+ACPI Platform Error Interfaces (APEI)
+-------------------------------------
+The APEI tables supported are described above.
+
+APEI requires the equivalent of an SCI and an NMI on ARMv8. The SCI is used
+to notify the OSPM of errors that have occurred but can be corrected and the
+system can continue correct operation, even if possibly degraded. The NMI is
+used to indicate fatal errors that cannot be corrected, and require immediate
+attention.
+
+Since there is no direct equivalent of the x86 SCI or NMI, arm64 handles
+these slightly differently. The SCI is handled as a normal GPIO-signaled
+interrupt; given that these are corrected (or correctable) errors being
+reported, this is sufficient. The NMI is emulated as the highest priority
+GPIO-signaled interrupt possible. This implies some caution must be used
+since there could be interrupts at higher privilege levels or even interrupts
+at the same priority as the emulated NMI. In Linux, this should not be the
+case but one should be aware it could happen.
+
+
+ACPI Objects Not Supported on ARM64
+-----------------------------------
+While this may change in the future, there are several classes of objects
+that can be defined, but are not currently of general interest to ARM servers.
+
+These are not supported:
+
+ -- Section 9.2: ambient light sensor devices
+
+ -- Section 9.3: battery devices
+
+ -- Section 9.4: lids (e.g., laptop lids)
+
+ -- Section 9.8.2: IDE controllers
+
+ -- Section 9.9: floppy controllers
+
+ -- Section 9.10: GPE block devices
+
+ -- Section 9.15: PC/AT RTC/CMOS devices
+
+ -- Section 9.16: user presence detection devices
+
+ -- Section 9.17: I/O APIC devices; all GICs must be enumerable via MADT
+
+ -- Section 9.18: time and alarm devices (see 9.15)
+
+
+ACPI Objects Not Yet Implemented
+--------------------------------
+While these objects have x86 equivalents, and they do make some sense in ARM
+servers, there is either no hardware available at present, or in some cases
+there may not yet be an ARM implementation. Hence, they are currently not
+implemented though that may change in the future.
+
+Not yet implemented are:
+
+ -- Section 10: power source and power meter devices
+
+ -- Section 11: thermal management
+
+ -- Section 12: embedded controllers interface
+
+ -- Section 13: SMBus interfaces
+
+ -- Section 17: NUMA support (prototypes have been submitted for
+ review)
diff --git a/Documentation/arm64/arm-acpi.txt b/Documentation/arm64/arm-acpi.txt
new file mode 100644
index 0000000..7d6e636
--- /dev/null
+++ b/Documentation/arm64/arm-acpi.txt
@@ -0,0 +1,505 @@
+ACPI on ARMv8 Servers
+---------------------
+ACPI can be used for ARMv8 general purpose servers designed to follow
+the ARM SBSA (Server Base System Architecture) [0] and SBBR (Server
+Base Boot Requirements) [1] specifications. Please note that the SBBR
+can be retrieved simply by visiting [1], but the SBSA is currently only
+available to those with an ARM login due to ARM IP licensing concerns.
+
+The ARMv8 kernel implements the reduced hardware model of ACPI version
+5.1 or later. Links to the specification and all external documents
+it refers to are managed by the UEFI Forum. The specification is
+available at http://www.uefi.org/specifications and documents referenced
+by the specification can be found via http://www.uefi.org/acpi.
+
+If an ARMv8 system does not meet the requirements of the SBSA and SBBR,
+or cannot be described using the mechanisms defined in the required ACPI
+specifications, then ACPI may not be a good fit for the hardware.
+
+While the documents mentioned above set out the requirements for building
+industry-standard ARMv8 servers, they also apply to more than one operating
+system. The purpose of this document is to describe the interaction between
+ACPI and Linux only, on an ARMv8 system -- that is, what Linux expects of
+ACPI and what ACPI can expect of Linux.
+
+
+Why ACPI on ARM?
+----------------
+Before examining the details of the interface between ACPI and Linux, it is
+useful to understand why ACPI is being used. Several technologies already
+exist in Linux for describing non-enumerable hardware, after all. In this
+section we summarize a blog post [2] from Grant Likely that outlines the
+reasoning behind ACPI on ARMv8 servers. Actually, we snitch a good portion
+of the summary text almost directly, to be honest.
+
+The short form of the rationale for ACPI on ARM is:
+
+-- ACPI's bytecode (AML) allows the platform to encode hardware behavior,
+ while DT explicitly does not support this. For hardware vendors, being
+ able to encode behavior is a key tool used in supporting operating
+ system releases on new hardware.
+
+-- ACPI's OSPM defines a power management model that constrains what the
+ platform is allowed to do into a specific model, while still providing
+ flexibility in hardware design.
+
+-- In the enterprise server environment, ACPI has established bindings (such
+ as for RAS) which are currently used in production systems. DT does not.
+ Such bindings could be defined in DT at some point, but doing so means ARM
+ and x86 would end up using completely different code paths in both firmware
+ and the kernel.
+
+-- Choosing a single interface to describe the abstraction between a platform
+ and an OS is important. Hardware vendors would not be required to implement
+ both DT and ACPI if they want to support multiple operating systems. And,
+ agreeing on a single interface instead of being fragmented into per OS
+ interfaces makes for better interoperability overall.
+
+-- The new ACPI governance process works well and Linux is now at the same
+ table as hardware vendors and other OS vendors. In fact, there is no
+ longer any reason to feel that ACPI belongs only to Windows or that
+ Linux is in any way secondary to Microsoft in this arena. The move of
+ ACPI governance into the UEFI forum has significantly opened up the
+ specification development process, and currently, a large portion of the
+ changes being made to ACPI are being driven by Linux.
+
+Key to the use of ACPI is the support model. For servers in general, the
+responsibility for hardware behaviour cannot solely be the domain of the
+kernel, but rather must be split between the platform and the kernel, in
+order to allow for orderly change over time. ACPI frees the OS from needing
+to understand all the minute details of the hardware so that the OS doesn't
+need to be ported to each and every device individually. It allows the
+hardware vendors to take responsibility for power management behaviour without
+depending on an OS release cycle which is not under their control.
+
+ACPI is also important because hardware and OS vendors have already worked
+out the mechanisms for supporting a general purpose computing ecosystem. The
+infrastructure is in place, the bindings are in place, and the processes are
+in place. DT does exactly what Linux needs it to when working with vertically
+integrated devices, but there are no good processes for supporting what the
+server vendors need. Linux could potentially get there with DT, but doing so
+really just duplicates something that already works. ACPI already does what
+the hardware vendors need, Microsoft won't collaborate on DT, and hardware
+vendors would still end up providing two completely separate firmware
+interfaces -- one for Linux and one for Windows.
+
+
+Kernel Compatibility
+--------------------
+One of the primary motivations for ACPI is standardization, and using that
+to provide backward compatibility for Linux kernels. In the server market,
+software and hardware are often used for long periods. ACPI allows the
+kernel and firmware to agree on a consistent abstraction that can be
+maintained over time, even as hardware or software change. As long as the
+abstraction is supported, systems can be updated without necessarily having
+to replace the kernel.
+
+When a Linux driver or subsystem is first implemented using ACPI, it by
+definition ends up requiring a specific version of the ACPI specification
+-- its baseline. ACPI firmware must continue to work, even though it may
+not be optimal, with the earliest kernel version that first provides support
+for that baseline version of ACPI. There may be a need for additional drivers,
+but adding new functionality (e.g., CPU power management) should not break
+older kernel versions. Further, ACPI firmware must also work with the most
+recent version of the kernel.
+
+
+Relationship with Device Tree
+-----------------------------
+ACPI support in drivers and subsystems for ARMv8 should never be mutually
+exclusive with DT support at compile time.
+
+At boot time the kernel will only use one description method depending on
+parameters passed from the bootloader (including kernel bootargs).
+
+Regardless of whether DT or ACPI is used, the kernel must always be capable
+of booting with either scheme (in kernels with both schemes enabled at compile
+time).
+
+
+Booting using ACPI tables
+-------------------------
+The only defined method for passing ACPI tables to the kernel on ARMv8
+is via the UEFI system configuration table. Just so it is explicit, this
+means that ACPI is only supported on platforms that boot via UEFI.
+
+When an ARMv8 system boots, it can either have DT information, ACPI tables,
+or in some very unusual cases, both. If no command line parameters are used,
+the kernel will try to use DT for device enumeration; if there is no DT
+present, the kernel will try to use ACPI tables, but only if they are present.
+If neither is available, the kernel will not boot. If acpi=force is used
+on the command line, the kernel will attempt to use ACPI tables first, but
+fall back to DT if there are no ACPI tables present. The basic idea is that
+the kernel will not fail to boot unless it absolutely has no other choice.
+
+Processing of ACPI tables may be disabled by passing acpi=off on the kernel
+command line; this is the default behavior.
+
+In order for the kernel to load and use ACPI tables, the UEFI implementation
+MUST set the ACPI_20_TABLE_GUID to point to the RSDP table (the table with
+the ACPI signature "RSD PTR "). If this pointer is incorrect and acpi=force
+is used, the kernel will disable ACPI and try to use DT to boot instead; the
+kernel has, in effect, determined that ACPI tables are not present at that
+point.
+
+If the pointer to the RSDP table is correct, the table will be mapped into
+the kernel by the ACPI core, using the address provided by UEFI.
+
+The ACPI core will then locate and map in all other ACPI tables provided by
+using the addresses in the RSDP table to find the XSDT (eXtended System
+Description Table). The XSDT in turn provides the addresses to all other
+ACPI tables provided by the system firmware; the ACPI core will then traverse
+this table and map in the tables listed.
+
+The ACPI core will ignore any provided RSDT (Root System Description Table).
+RSDTs have been deprecated and are ignored on arm64 since they only allow
+for 32-bit addresses.
+
+Further, the ACPI core will only use the 64-bit address fields in the FADT
+(Fixed ACPI Description Table). Any 32-bit address fields in the FADT will
+be ignored on arm64.
+
+Hardware reduced mode (see Section 4.1 of the ACPI 5.1 specification) will
+be enforced by the ACPI core on arm64. Doing so allows the ACPI core to
+run less complex code since it no longer has to provide support for legacy
+hardware from other architectures. Any fields that are not to be used for
+hardware reduced mode must be set to zero.
+
+For the ACPI core to operate properly, and in turn provide the information
+the kernel needs to configure devices, it expects to find the following
+tables (all section numbers refer to the ACPI 5.1 specification):
+
+ -- RSDP (Root System Description Pointer), section 5.2.5
+
+ -- XSDT (eXtended System Description Table), section 5.2.8
+
+ -- FADT (Fixed ACPI Description Table), section 5.2.9
+
+ -- DSDT (Differentiated System Description Table), section
+ 5.2.11.1
+
+ -- MADT (Multiple APIC Description Table), section 5.2.12
+
+ -- GTDT (Generic Timer Description Table), section 5.2.24
+
+ -- If PCI is supported, the MCFG (Memory mapped ConFiGuration
+ Table), section 5.2.6, specifically Table 5-31.
+
+If the above tables are not all present, the kernel may or may not be
+able to boot properly since it may not be able to configure all of the
+devices available.
+
+
+ACPI Detection
+--------------
+Drivers should determine their probe() type by checking for a null
+value for ACPI_HANDLE, or checking .of_node, or other information in
+the device structure. This is detailed further in the "Driver
+Recommendations" section.
+
+In non-driver code, if the presence of ACPI needs to be detected at
+runtime, then check the value of acpi_disabled. If CONFIG_ACPI is not
+set, acpi_disabled will always be 1.
+
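+A minimal sketch of both checks (mydev_probe is a hypothetical driver
+function, not part of any existing driver):
+
+  #include <linux/acpi.h>
+  #include <linux/of.h>
+  #include <linux/platform_device.h>
+
+  static int mydev_probe(struct platform_device *pdev)
+  {
+          if (ACPI_HANDLE(&pdev->dev)) {
+                  /* non-NULL handle: enumerated via ACPI */
+          } else if (pdev->dev.of_node) {
+                  /* enumerated via DT */
+          }
+          return 0;
+  }
+
+  void some_non_driver_code(void)
+  {
+          if (acpi_disabled) {
+                  /* ACPI unused; always true when CONFIG_ACPI=n */
+          }
+  }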
+
+Device Enumeration
+------------------
+Device descriptions in ACPI should use standard recognized ACPI interfaces.
+These may contain less information than is typically provided via a Device
+Tree description for the same device. This is also one of the reasons that
+ACPI can be useful -- the driver takes into account that it may have less
+detailed information about the device and uses sensible defaults instead.
+If done properly in the driver, the hardware can change and improve over
+time without the driver having to change at all.
+
+Clocks provide an excellent example. In DT, clocks need to be specified
+and the drivers need to take them into account. In ACPI, the assumption
+is that UEFI will leave the device in a reasonable default state, including
+any clock settings. If for some reason the driver needs to change a clock
+value, this can be done in an ACPI method; all the driver needs to do is
+invoke the method and not concern itself with what the method needs to do
+to change the clock. Changing the hardware can then take place over time
+by changing what the ACPI method does, and not the driver.
+
+In DT, the parameters needed by the driver to set up clocks as in the example
+above are known as "bindings"; in ACPI, these are known as "Device Properties"
+and provided to a driver via the _DSD object.
+
+ACPI tables are described with a formal language called ASL, the ACPI
+Source Language (section 19 of the specification). This means that there
+are always multiple ways to describe the same thing -- including device
+properties. For example, device properties could use an ASL construct
+that looks like this: Name(KEY0, "value0"). An ACPI device driver would
+then retrieve the value of the property by evaluating the KEY0 object.
+However, using Name() this way has multiple problems: (1) ACPI limits
+names ("KEY0") to four characters unlike DT; (2) there is no industry
+wide registry that maintains a list of names, minimizing re-use; (3)
+there is also no registry for the definition of property values ("value0"),
+again making re-use difficult; and (4) how does one maintain backward
+compatibility as new hardware comes out? The _DSD method was created
+to solve precisely these sorts of problems; Linux drivers should ALWAYS
+use the _DSD method for device properties and nothing else.
+
+The _DSM object (ACPI Section 9.14.1) could also be used for conveying
+device properties to a driver. Linux drivers should only expect it to
+be used if _DSD cannot represent the data required, and there is no way
+to create a new UUID for the _DSD object. Note that there is even less
+regulation of the use of _DSM than there is of _DSD. Drivers that depend
+on the contents of _DSM objects will be more difficult to maintain over
+time because of this; as of this writing, the use of _DSM is the cause
+of quite a few firmware problems and is not recommended.
+
+Drivers should look for device properties in the _DSD object ONLY; the _DSD
+object is described in the ACPI specification section 6.2.5, but this only
+describes how to define the structure of an object returned via _DSD, and
+how specific data structures are defined by specific UUIDs. Linux should
+only use the _DSD Device Properties UUID [5]:
+
+ -- UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301
+
+ -- http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+
+The UEFI Forum provides a mechanism for registering device properties [4]
+so that they may be used across all operating systems supporting ACPI.
+Device properties that have not been registered with the UEFI Forum should
+not be used.
+
+Before creating new device properties, check to be sure that they have not
+been defined before and either registered in the Linux kernel documentation
+as DT bindings, or the UEFI Forum as device properties. While we do not want
+to simply move all DT bindings into ACPI device properties, we can learn from
+what has been previously defined.
+
+If it is necessary to define a new device property, or if it makes sense to
+synthesize the definition of a binding so it can be used in any firmware,
+both DT bindings and ACPI device properties for device drivers have review
+processes. Use them both. When the driver itself is submitted for review
+to the Linux mailing lists, the device property definitions needed must be
+submitted at the same time. A driver that supports ACPI and uses device
+properties will not be considered complete without their definitions. Once
+the device property has been accepted by the Linux community, it must be
+registered with the UEFI Forum [4], which will review it again for consistency
+within the registry. This may require iteration. The UEFI Forum, though,
+will always be the canonical site for device property definitions.
+
+It may make sense to provide notice to the UEFI Forum that there is the
+intent to register a previously unused device property name as a means of
+reserving the name for later use. Other operating system vendors will
+also be submitting registration requests and this may help smooth the
+process.
+
+Once registration and review have been completed, the kernel provides an
+interface for looking up device properties in a manner independent of
+whether DT or ACPI is being used. This API should be used [6]; it can
+eliminate some duplication of code paths in driver probing functions and
+discourage divergence between DT bindings and ACPI device properties.
+
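+As an example of that interface (a sketch only; "vendor,line-count"
+stands in for a property that has gone through the registration
+process described above):
+
+  #include <linux/errno.h>
+  #include <linux/property.h>
+
+  static int mydev_get_line_count(struct device *dev, u32 *lines)
+  {
+          if (!device_property_present(dev, "vendor,line-count"))
+                  return -ENODEV;
+
+          /* same call whether the value came from a DT binding
+             or from the _DSD Device Properties UUID */
+          return device_property_read_u32(dev, "vendor,line-count",
+                                          lines);
+  }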
+
+Programmable Power Control Resources
+------------------------------------
+Programmable power control resources include such resources as voltage/current
+providers (regulators) and clock sources.
+
+With ACPI, the kernel clock and regulator framework is not expected to be used
+at all.
+
+The kernel assumes that power control of these resources is represented with
+Power Resource Objects (ACPI section 7.1). The ACPI core will then handle
+correctly enabling and disabling resources as they are needed. In order to
+get that to work, ACPI assumes each device has defined D-states and that these
+can be controlled through the optional ACPI methods _PS0, _PS1, _PS2, and _PS3;
+in ACPI, _PS0 is the method to invoke to turn a device full on, and _PS3 is for
+turning a device full off.
+
+There are two options for using those Power Resources. They can:
+
+ -- be managed in a _PSx method which gets called on entry to power
+ state Dx.
+
+ -- be declared separately as power resources with their own _ON and _OFF
+ methods. They are then tied back to D-states for a particular device
+ via _PRx which specifies which power resources a device needs to be on
+ while in Dx. The kernel then tracks the number of devices using a
+ power resource and calls _ON/_OFF as needed.
+
+The kernel ACPI code will also assume that the _PSx methods follow the normal
+ACPI rules for such methods:
+
+ -- If either _PS0 or _PS3 is implemented, then the other method must also
+ be implemented.
+
+ -- If a device requires usage or setup of a power resource when on, the ASL
+ should arrange for it to be allocated/enabled in the _PS0 method.
+
+ -- Resources allocated or enabled in the _PS0 method should be disabled
+ or de-allocated in the _PS3 method.
+
+ -- Firmware will leave the resources in a reasonable state before handing
+ over control to the kernel.
+
+Such code in _PSx methods will of course be very platform specific. But,
+this allows the driver to abstract out the interface for operating the device
+and avoid having to read special non-standard values from ACPI tables. Further,
+abstracting the use of these resources allows the hardware to change over time
+without requiring updates to the driver.
+
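+As an illustration, a driver bound to a device in the ACPI PM domain
+can stay ignorant of the underlying resources and rely on runtime PM;
+the sketch below assumes that wiring, and mydev_do_work is hypothetical:
+
+  #include <linux/pm_runtime.h>
+
+  static int mydev_do_work(struct device *dev)
+  {
+          int ret;
+
+          ret = pm_runtime_get_sync(dev);  /* device raised to D0 */
+          if (ret < 0) {
+                  pm_runtime_put_noidle(dev);
+                  return ret;
+          }
+
+          /* ... operate the device ... */
+
+          pm_runtime_put(dev);             /* may drop back to D3 */
+          return 0;
+  }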
+
+Clocks
+------
+ACPI makes the assumption that clocks are initialized by the firmware --
+UEFI, in this case -- to some working value before control is handed over
+to the kernel. This has implications for devices such as UARTs, or SoC-driven
+LCD displays, for example.
+
+When the kernel boots, the clocks are assumed to be set to reasonable
+working values. If for some reason the frequency needs to change -- e.g.,
+throttling for power management -- the device driver should expect that
+process to be abstracted out into some ACPI method that can be invoked
+(please see the ACPI specification for further recommendations on standard
+methods to be expected). The only exceptions to this are CPU clocks where
+CPPC provides a much richer interface than ACPI methods. If the clocks
+are not set, there is no direct way for Linux to control them.
+
+If an SoC vendor wants to provide fine-grained control of the system clocks,
+they could do so by providing ACPI methods that could be invoked by Linux
+drivers. However, this is NOT recommended and Linux drivers should NOT use
+such methods, even if they are provided. Such methods are not currently
+standardized in the ACPI specification, and using them could tie a kernel
+to a very specific SoC, or tie an SoC to a very specific version of the
+kernel, both of which we are trying to avoid.
+
+
+Driver Recommendations
+----------------------
+DO NOT remove any DT handling when adding ACPI support for a driver. The
+same device may be used on many different systems.
+
+DO try to structure the driver so that it is data-driven. That is, set up
+a struct containing internal per-device state based on defaults and whatever
+else must be discovered by the driver probe function. Then, have the rest
+of the driver operate off of the contents of that struct. Doing so should
+allow most divergence between ACPI and DT functionality to be kept local to
+the probe function instead of being scattered throughout the driver. For
+example:
+
+static int device_probe_dt(struct platform_device *pdev)
+{
+ /* DT specific functionality */
+ ...
+}
+
+static int device_probe_acpi(struct platform_device *pdev)
+{
+ /* ACPI specific functionality */
+ ...
+}
+
+static int device_probe(struct platform_device *pdev)
+{
+ ...
+ struct device_node *node = pdev->dev.of_node;
+ ...
+
+ if (node)
+ ret = device_probe_dt(pdev);
+ else if (ACPI_HANDLE(&pdev->dev))
+ ret = device_probe_acpi(pdev);
+ else
+ /* other initialization */
+ ...
+ /* Continue with any generic probe operations */
+ ...
+}
+
+DO keep the MODULE_DEVICE_TABLE entries together in the driver to make it
+clear the different names the driver is probed for, both from DT and from
+ACPI:
+
+static const struct of_device_id virtio_mmio_match[] = {
+ { .compatible = "virtio,mmio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+ { "LNRO0005", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+
+
+ASWG
+----
+The ACPI specification changes regularly. During the year 2014, for instance,
+version 5.1 was released and version 6.0 substantially completed, with most of
+the changes being driven by ARM-specific requirements. Proposed changes are
+presented and discussed in the ASWG (ACPI Specification Working Group) which
+is a part of the UEFI Forum.
+
+Participation in this group is open to all UEFI members. Please see
+http://www.uefi.org/workinggroup for details on group membership.
+
+It is the intent of the ARMv8 ACPI kernel code to follow the ACPI specification
+as closely as possible, and to only implement functionality that complies with
+the released standards from UEFI ASWG. As a practical matter, there will be
+vendors that provide bad ACPI tables or violate the standards in some way.
+If this is because of errors, quirks and fixups may be necessary, but will
+be avoided if possible. If there are features missing from ACPI that preclude
+it from being used on a platform, ECRs (Engineering Change Requests) should be
+submitted to ASWG and go through the normal approval process; for those that
+are not UEFI members, many other members of the Linux community are and would
+likely be willing to assist in submitting ECRs.
+
+
+Linux Code
+----------
+Individual items specific to Linux on ARM, contained in the Linux
+source code, are in the list that follows:
+
+ACPI_OS_NAME This macro defines the string to be returned when
+ an ACPI method invokes the _OS method. On ARM64
+ systems, this macro will be "Linux" by default.
+ The command line parameter acpi_os=<string>
+ can be used to set it to some other value. The
+ default value for other architectures is "Microsoft
+ Windows NT", for example.
+
+ACPI Objects
+------------
+Detailed expectations for ACPI tables and objects are listed in the file
+Documentation/arm64/acpi_object_usage.txt.
+
+
+References
+----------
+[0] http://silver.arm.com -- document ARM-DEN-0029, or newer
+ "Server Base System Architecture", version 2.3, dated 27 Mar 2014
+
+[1] http://infocenter.arm.com/help/topic/com.arm.doc.den0044a/Server_Base_Boot_Requirements.pdf
+ Document ARM-DEN-0044A, or newer: "Server Base Boot Requirements, System
+ Software on ARM Platforms", dated 16 Aug 2014
+
+[2] http://www.secretlab.ca/archives/151, 10 Jan 2015, Copyright (c) 2015,
+ Linaro Ltd., written by Grant Likely. A copy of the verbatim text (apart
+ from formatting) is also in Documentation/arm64/why_use_acpi.txt.
+
+[3] AMD ACPI for Seattle platform documentation:
+ http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf
+
+[4] http://www.uefi.org/acpi -- please see the link for the "ACPI _DSD Device
+ Property Registry Instructions"
+
+[5] http://www.uefi.org/acpi -- please see the link for the "_DSD (Device
+ Specific Data) Implementation Guide"
+
+[6] Kernel code for the unified device property interface can be found in
+ include/linux/property.h and drivers/base/property.c.
+
+
+Authors
+-------
+Al Stone <al.stone@linaro.org>
+Graeme Gregory <graeme.gregory@linaro.org>
+Hanjun Guo <hanjun.guo@linaro.org>
+
+Grant Likely <grant.likely@linaro.org>, for the "Why ACPI on ARM?" section
diff --git a/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
new file mode 100644
index 0000000..ce8c30e
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
@@ -0,0 +1,83 @@
+* APM X-Gene SoC EDAC nodes
+
+EDAC nodes are defined to describe on-chip error detection and correction.
+There are four types of EDAC:
+
+ memory controller - Memory controller
+ PMD (L1/L2) - Processor module unit (PMD) L1/L2 cache
+ L3 - CPU L3 cache
+ SoC - SoC IP such as SATA, Ethernet, etc.
+
+The following section describes the memory controller DT node binding.
+
+Required properties:
+- compatible : Shall be "apm,xgene-edac-mc".
+- reg : First resource shall be the PCP resource.
+ Second resource shall be the CSW resource.
+ Third resource shall be the MCB-A resource.
+ Fourth resource shall be the MCB-B resource.
+ Fifth resource shall be the MCU resource.
+- interrupts : Interrupt-specifier for MCU error IRQ(s).
+
+The following section describes the L1/L2 DT node binding.
+
+- compatible : Shall be "apm,xgene-edac-pmd".
+- reg : First resource shall be the PCP resource.
+ Second resource shall be the PMD resource.
+ Third resource shall be the PMD efuse resource.
+- interrupts : Interrupt-specifier for PMD error IRQ(s).
+
+The following section describes the L3 DT node binding.
+
+- compatible : Shall be "apm,xgene-edac-l3".
+- reg : First resource shall be the PCP resource.
+ Second resource shall be the L3 resource.
+- interrupts : Interrupt-specifier for L3 error IRQ(s).
+
+The following section describes the SoC DT node binding.
+
+- compatible : Shall be "apm,xgene-edac-soc"".
+- reg : First resource shall be the PCP resource.
+ Second resource shall be the SoC resource.
+ Third resource shall be the register bus resource.
+- interrupts : Interrupt-specifier for SoC error IRQ(s).
+
+Example:
+ edacmc0: edacmc0@7e800000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e200000 0x0 0x1000>,
+ <0x0 0x7e700000 0x0 0x1000>,
+ <0x0 0x7e720000 0x0 0x1000>,
+ <0x0 0x7e800000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacl3: edacl3@7e600000 {
+ compatible = "apm,xgene-edac-l3";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e600000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacpmd0: edacpmd0@7c000000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7c000000 0x0 0x200000>,
+ <0x0 0x1054a000 0x0 0x10>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacsoc: edacsoc@7e930000 {
+ compatible = "apm,xgene-edac-soc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e930000 0x0 0x1000>,
+ <0x0 0x7e000000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>,
+ <0x0 0x27 0x4>;
+ };
+
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bfcb1a6..d6c35a7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -165,7 +165,7 @@ multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
bytes respectively. Such letter suffixes can also be entirely omitted.
- acpi= [HW,ACPI,X86]
+ acpi= [HW,ACPI,X86,ARM64]
Advanced Configuration and Power Interface
Format: { force | off | strict | noirq | rsdt }
force -- enable ACPI if default was off
@@ -175,6 +175,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
strictly ACPI specification compliant.
rsdt -- prefer RSDT over (default) XSDT
copy_dsdt -- copy DSDT to memory
+ For ARM64, ONLY "acpi=off" or "acpi=force" are available
See also Documentation/power/runtime_pm.txt, pci=noacpi
diff --git a/MAINTAINERS b/MAINTAINERS
index 0e1abe8..fb54e14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3714,6 +3714,14 @@ W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/sb_edac.c
+EDAC-XGENE
+APPLIED MICRO (APM) X-GENE SOC EDAC
+M: Loc Ho <lho@apm.com>
+M: Feng Kan <fkan@apm.com>
+S: Supported
+F: drivers/edac/xgene_edac.c
+F: Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
+
EDIROL UA-101/UA-1000 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b8e973..3bdd120 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,5 +1,6 @@
config ARM64
def_bool y
+ select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -22,6 +23,7 @@ config ARM64
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
+ select EDAC_SUPPORT
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -248,6 +250,12 @@ config PCI_DOMAINS_GENERIC
config PCI_SYSCALL
def_bool PCI
+config PCI_MMCONFIG
+ def_bool y
+ select PCI_ECAM
+ select PCI_ECAM_GENERIC
+ depends on PCI && ACPI
+
source "drivers/pci/Kconfig"
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/hotplug/Kconfig"
@@ -438,6 +446,9 @@ config SMP
If you don't know what to do here, say N.
+config ARM_PARKING_PROTOCOL
+ def_bool y if SMP
+
config SCHED_MC
bool "Multi-core scheduler support"
depends on SMP
@@ -712,6 +723,8 @@ source "drivers/Kconfig"
source "drivers/firmware/Kconfig"
+source "drivers/acpi/Kconfig"
+
source "fs/Kconfig"
source "arch/arm64/kvm/Kconfig"
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index a857794..fc1c545 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -513,6 +513,104 @@
interrupts = <0x0 0x4f 0x4>;
};
+ edacmc0: edacmc0@7e800000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e200000 0x0 0x1000>,
+ <0x0 0x7e700000 0x0 0x1000>,
+ <0x0 0x7e720000 0x0 0x1000>,
+ <0x0 0x7e800000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacmc1: edacmc1@7e840000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e200000 0x0 0x1000>,
+ <0x0 0x7e700000 0x0 0x1000>,
+ <0x0 0x7e720000 0x0 0x1000>,
+ <0x0 0x7e840000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacmc2: edacmc2@7e880000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e200000 0x0 0x1000>,
+ <0x0 0x7e700000 0x0 0x1000>,
+ <0x0 0x7e720000 0x0 0x1000>,
+ <0x0 0x7e880000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacmc3: edacmc3@7e8c0000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e200000 0x0 0x1000>,
+ <0x0 0x7e700000 0x0 0x1000>,
+ <0x0 0x7e720000 0x0 0x1000>,
+ <0x0 0x7e8c0000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacpmd0: edacpmd0@7c000000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7c000000 0x0 0x200000>,
+ <0x0 0x1054a000 0x0 0x10>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacpmd1: edacpmd1@7c200000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7c200000 0x0 0x200000>,
+ <0x0 0x1054a000 0x0 0x10>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacpmd2: edacpmd2@7c400000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7c400000 0x0 0x200000>,
+ <0x0 0x1054a000 0x0 0x10>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacpmd3: edacpmd3@7c600000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7c600000 0x0 0x200000>,
+ <0x0 0x1054a000 0x0 0x10>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacl3: edacl3@7e600000 {
+ compatible = "apm,xgene-edac-l3";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e600000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>;
+ };
+
+ edacsoc: edacsoc@7e930000 {
+ compatible = "apm,xgene-edac-soc";
+ reg = <0x0 0x78800000 0x0 0x1000>,
+ <0x0 0x7e930000 0x0 0x1000>,
+ <0x0 0x7e000000 0x0 0x1000>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>,
+ <0x0 0x27 0x4>;
+ };
+
phy1: phy@1f21a000 {
compatible = "apm,xgene-phy";
reg = <0x0 0x1f21a000 0x0 0x100>;
diff --git a/arch/arm64/include/asm/acenv.h b/arch/arm64/include/asm/acenv.h
new file mode 100644
index 0000000..b49166f
--- /dev/null
+++ b/arch/arm64/include/asm/acenv.h
@@ -0,0 +1,18 @@
+/*
+ * ARM64 specific ACPICA environments and implementation
+ *
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ACENV_H
+#define _ASM_ACENV_H
+
+/* It is required unconditionally by ACPI core, update it when needed. */
+
+#endif /* _ASM_ACENV_H */
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
new file mode 100644
index 0000000..0f7e976
--- /dev/null
+++ b/arch/arm64/include/asm/acpi.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#include <linux/mm.h>
+#include <linux/irqchip/arm-gic-acpi.h>
+
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+/* Basic configuration for ACPI */
+#ifdef CONFIG_ACPI
+/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
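+/*
+ * Tables that live in RAM are mapped cacheable so the kernel sees
+ * exactly what the firmware wrote; anything outside RAM falls back
+ * to a device mapping via plain ioremap().
+ */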
+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
+ acpi_size size)
+{
+ if (!page_is_ram(phys >> PAGE_SHIFT))
+ return ioremap(phys, size);
+
+ return ioremap_cache(phys, size);
+}
+#define acpi_os_ioremap acpi_os_ioremap
+
+typedef u64 phys_cpuid_t;
+#define PHYS_CPUID_INVALID INVALID_HWID
+
+#define acpi_strict 1 /* No out-of-spec workarounds on ARM64 */
+extern int acpi_disabled;
+extern int acpi_noirq;
+extern int acpi_pci_disabled;
+
+/* 1 to indicate PSCI 0.2+ is implemented */
+static inline bool acpi_psci_present(void)
+{
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
+}
+
+/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
+static inline bool acpi_psci_use_hvc(void)
+{
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
+}
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+static inline void enable_acpi(void)
+{
+ acpi_disabled = 0;
+ acpi_pci_disabled = 0;
+ acpi_noirq = 0;
+}
+
+/*
+ * The ACPI processor driver in the ACPI core code needs this macro
+ * to find out whether a given CPU has already been mapped (from its
+ * CPU hardware ID to its CPU logical ID).
+ */
+#define cpu_physical_id(cpu) cpu_logical_map(cpu)
+
+/*
+ * This is used by the ACPI core in kdump to boot a UP system with an
+ * SMP kernel; with this check the ACPI core will neither override the
+ * CPU index obtained from the GICC with 0 nor print a spurious error
+ * message. Since the MADT must provide at least one GICC structure for
+ * GIC initialization, a CPU will always be available in the MADT on ARM64.
+ */
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return true;
+}
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+void __init acpi_init_cpus(void);
+
+extern int acpi_get_cpu_parked_address(int cpu, u64 *addr);
+
+#else
+static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) { return false; }
+static inline void acpi_init_cpus(void) { }
+static inline int acpi_get_cpu_parked_address(int cpu, u64 *addr) { return -EOPNOTSUPP; }
+#endif /* CONFIG_ACPI */
+
+#endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index da301ee..5a31d67 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -66,5 +66,6 @@ struct cpu_operations {
extern const struct cpu_operations *cpu_ops[NR_CPUS];
int __init cpu_read_ops(struct device_node *dn, int cpu);
void __init cpu_read_bootcpu_ops(void);
+const struct cpu_operations *cpu_get_ops(const char *name);
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/edac.h b/arch/arm64/include/asm/edac.h
new file mode 100644
index 0000000..87469eb
--- /dev/null
+++ b/arch/arm64/include/asm/edac.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2013 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+ unsigned int *virt_addr = va;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ long result;
+ unsigned long tmp;
+
+ asm volatile("/* atomic_scrub */\n"
+ "1: ldxr %w0, %2\n"
+ " stxr %w1, %w0, %2\n"
+ " cbnz %w1, 1b"
+			: "=&r" (result), "=&r" (tmp), "+Q" (*virt_addr) : : );
+ }
+}
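+
+/*
+ * Hypothetical usage sketch (not part of this header): the EDAC core
+ * would scrub a page that reported a corrected error roughly as
+ *
+ *	void *va = kmap_atomic(page);
+ *
+ *	atomic_scrub(va, PAGE_SIZE);
+ *	kunmap_atomic(va);
+ *
+ * relying on the exclusives above to make each 32-bit read/write-back
+ * atomic against concurrent writers.
+ */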
+#endif
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index ef57220..7129125 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,33 +6,29 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
+extern void efi_idmap_init(void);
#else
#define efi_init()
+#define efi_idmap_init()
#endif
#define efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f; \
+ efi_##f##_t *__f = efi.systab->runtime->f; \
efi_status_t __s; \
\
kernel_neon_begin(); \
- efi_virtmap_load(); \
- __f = efi.systab->runtime->f; \
__s = __f(__VA_ARGS__); \
- efi_virtmap_unload(); \
kernel_neon_end(); \
__s; \
})
#define __efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f; \
+ efi_##f##_t *__f = efi.systab->runtime->f; \
\
kernel_neon_begin(); \
- efi_virtmap_load(); \
- __f = efi.systab->runtime->f; \
__f(__VA_ARGS__); \
- efi_virtmap_unload(); \
kernel_neon_end(); \
})
@@ -50,20 +46,4 @@ extern void efi_init(void);
#define EFI_ALLOC_ALIGN SZ_64K
-/*
- * On ARM systems, virtually remapped UEFI runtime services are set up in two
- * distinct stages:
- * - The stub retrieves the final version of the memory map from UEFI, populates
- * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
- * service to communicate the new mapping to the firmware (Note that the new
- * mapping is not live at this time)
- * - During an early initcall(), the EFI system table is permanently remapped
- * and the virtual remapping of the UEFI Runtime Services regions is loaded
- * into a private set of page tables. If this all succeeds, the Runtime
- * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
- */
-
-void efi_virtmap_load(void);
-void efi_virtmap_unload(void);
-
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 1f65be3..c0f89a0 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,7 +114,8 @@ typedef struct user_fpsimd_state elf_fpregset_t;
*/
#define elf_check_arch(x) ((x)->e_machine == EM_AARCH64)
-#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X)
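+/*
+ * READ_IMPLIES_EXEC is now granted only to 32-bit compat tasks whose
+ * PT_GNU_STACK explicitly requests an executable stack; native 64-bit
+ * tasks never gain exec permissions implicitly.
+ */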
+#define elf_read_implies_exec(ex,stk) (test_thread_flag(TIF_32BIT) \
+ ? (stk == EXSTACK_ENABLE_X) : 0)
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index defa0ff9..f196e40 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -62,6 +62,9 @@ void __init early_fixmap_init(void);
#define __early_set_fixmap __set_fixmap
+#define __late_set_fixmap __set_fixmap
+#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR)
+
extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 94c5367..bbb251b 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,6 +1,8 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
+#include <linux/irqchip/arm-gic-acpi.h>
+
#include <asm-generic/irq.h>
struct pt_regs;
@@ -8,4 +10,15 @@ struct pt_regs;
extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+static inline void acpi_irq_init(void)
+{
+ /*
+ * Hardcode ACPI IRQ chip initialization to GICv2 for now.
+ * Proper irqchip infrastructure will be implemented along with
+ * incoming GICv2m|GICv3|ITS bits.
+ */
+ acpi_gic_init();
+}
+#define acpi_irq_init acpi_irq_init
+
#endif
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3d31176..5fd40c4 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -31,6 +31,8 @@ extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
+/* create an identity mapping for memory (or io if map_io is true) */
+extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot);
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 872ba93..bbcf88d 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -27,11 +27,77 @@
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQ on arm64 */
+ return -ENODEV;
+}
+
static inline int pci_proc_domain(struct pci_bus *bus)
{
return 1;
}
#endif /* CONFIG_PCI */
+struct acpi_device;
+
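+/*
+ * Modelled on the x86 pci_sysdata layout so that the shared ACPI PCI
+ * code can use the same sysdata fields on both architectures.
+ */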
+struct pci_sysdata {
+ int domain; /* PCI domain */
+ int node; /* NUMA node */
+ struct acpi_device *companion; /* ACPI companion device */
+ void *iommu; /* IOMMU private data */
+};
+
+static inline unsigned char mmio_config_readb(void __iomem *pos)
+{
+ int offset = (__force unsigned long)pos & 3;
+ int shift = offset * 8;
+
+ return readl(pos - offset) >> shift;
+}
+
+static inline unsigned short mmio_config_readw(void __iomem *pos)
+{
+ int offset = (__force unsigned long)pos & 3;
+ int shift = offset * 8;
+
+ return readl(pos - offset) >> shift;
+}
+
+static inline unsigned int mmio_config_readl(void __iomem *pos)
+{
+ return readl(pos);
+}
+
+static inline void mmio_config_writeb(void __iomem *pos, u8 val)
+{
+ int offset = (__force unsigned long)pos & 3;
+ int shift = offset * 8;
+ int mask = ~(0xff << shift);
+ u32 v;
+
+ pos -= offset;
+ v = readl(pos) & mask;
+ writel(v | (val << shift), pos);
+}
+
+static inline void mmio_config_writew(void __iomem *pos, u16 val)
+{
+ int offset = (__force unsigned long)pos & 3;
+ int shift = offset * 8;
+ int mask = ~(0xffff << shift);
+ u32 v;
+
+ pos -= offset;
+ v = readl(pos) & mask;
+ writel(v | (val << shift), pos);
+}
+
+static inline void mmio_config_writel(void __iomem *pos, u32 val)
+{
+ writel(val, pos);
+}
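+
+/*
+ * Worked example (hypothetical values): mmio_config_writeb(pos, 0xAB)
+ * with pos at config offset 0x06 computes offset = 0x06 & 3 = 2,
+ * shift = 16 and mask = ~(0xff << 16) = 0xff00ffff, so the containing
+ * dword at offset 0x04 is read, byte lane 2 is replaced with 0xAB,
+ * and the dword is written back. This synthesizes sub-word config
+ * accesses on ECAM implementations that only support aligned 32-bit
+ * transactions.
+ */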
+
#endif /* __KERNEL__ */
#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index e5312ea..2454bc5 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,6 +14,7 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-int psci_init(void);
+int psci_dt_init(void);
+int psci_acpi_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 780f82c..3411561 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -39,9 +39,10 @@ extern void show_ipi_list(struct seq_file *p, int prec);
extern void handle_IPI(int ipinr, struct pt_regs *regs);
/*
- * Setup the set of possible CPUs (via set_cpu_possible)
+ * Discover the set of possible CPUs and determine their
+ * SMP operations.
*/
-extern void smp_init_cpus(void);
+extern void of_smp_init_cpus(void);
/*
* Provide a function to raise an IPI cross call on CPUs in callmap.
@@ -51,6 +52,11 @@ extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
/*
+ * Provide a function to signal a parked secondary CPU.
+ */
+extern void set_smp_boot_wakeup_call(void (*)(int cpu));
+
+/*
* Called from the secondary holding pen, this is the secondary CPU entry point.
*/
asmlinkage void secondary_start_kernel(void);
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ee07ee..b3ac38a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -24,7 +24,8 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o \
+ smp_parking_protocol.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
@@ -35,6 +36,7 @@ arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
+arm64-obj-$(CONFIG_ACPI) += acpi.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
new file mode 100644
index 0000000..0bb0f1f
--- /dev/null
+++ b/arch/arm64/kernel/acpi.c
@@ -0,0 +1,432 @@
+/*
+ * ARM64 Specific Low-Level ACPI Boot Support
+ *
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ * Author: Naresh Bhat <naresh.bhat@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+
+int acpi_noirq = 1; /* skip ACPI IRQ initialization */
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+/* Processors with enabled flag and sane MPIDR */
+static int enabled_cpus;
+
+/* Boot CPU is valid or not in MADT */
+static bool bootcpu_valid __initdata;
+
+static bool param_acpi_off __initdata;
+static bool param_acpi_force __initdata;
+
+static int __init parse_acpi(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
+ if (strcmp(arg, "off") == 0)
+ param_acpi_off = true;
+ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
+ param_acpi_force = true;
+ else
+ return -EINVAL; /* Core will print when we return error */
+
+ return 0;
+}
+early_param("acpi", parse_acpi);
+
+static int __init dt_scan_depth1_nodes(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ /*
+ * Return 1 as soon as we encounter a node at depth 1 that is
+ * not the /chosen node.
+ */
+ if (depth == 1 && (strcmp(uname, "chosen") != 0))
+ return 1;
+ return 0;
+}
+
+static char *boot_method;
+static u64 parked_address[NR_CPUS];
+
+/*
+ * Since we're on ARM, the default interrupt routing model
+ * clearly has to be GIC.
+ */
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_GIC;
+
+/*
+ * __acpi_map_table() will be called before paging_init(), so early_ioremap()
+ * or early_memremap() should be used here for ACPI table mapping.
+ */
+char *__init __acpi_map_table(unsigned long phys, unsigned long size)
+{
+ if (!size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+/**
+ * acpi_map_gic_cpu_interface - generate a logical cpu number
+ * and map it to the MPIDR represented by the GICC structure
+ * @mpidr: CPU's hardware id to register, MPIDR represented in MADT
+ * @parked_addr: this CPU's mailbox address for the parking protocol
+ * @enabled: whether this cpu is enabled or not
+ *
+ * Returns the logical cpu number which maps to MPIDR
+ */
+static int __init acpi_map_gic_cpu_interface(u64 mpidr, u64 parked_addr, u8 enabled)
+{
+ int i;
+
+ if (mpidr == INVALID_HWID) {
+ pr_info("Skip MADT cpu entry with invalid MPIDR\n");
+ return -EINVAL;
+ }
+
+ total_cpus++;
+ if (!enabled)
+ return -EINVAL;
+
+ if (enabled_cpus >= NR_CPUS) {
+ pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
+ NR_CPUS, total_cpus, mpidr);
+ return -EINVAL;
+ }
+
+ /* Check if GICC structure of boot CPU is available in the MADT */
+ if (cpu_logical_map(0) == mpidr) {
+ if (bootcpu_valid) {
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
+ mpidr);
+ return -EINVAL;
+ }
+
+ bootcpu_valid = true;
+ }
+
+ /*
+ * Duplicate MPIDRs are a recipe for disaster. Scan
+ * all initialized entries and check for
+ * duplicates. If any is found just ignore the CPU.
+ */
+ for (i = 1; i < enabled_cpus; i++) {
+ if (cpu_logical_map(i) == mpidr) {
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
+ mpidr);
+ return -EINVAL;
+ }
+ }
+
+ if (!boot_method)
+ return -EOPNOTSUPP;
+
+ parked_address[enabled_cpus] = parked_addr;
+ cpu_ops[enabled_cpus] = cpu_get_ops(boot_method);
+ /* CPU 0 was already initialized */
+ if (enabled_cpus) {
+ if (!cpu_ops[enabled_cpus])
+ return -EINVAL;
+
+ if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
+ return -EOPNOTSUPP;
+
+ /* map the logical cpu id to cpu MPIDR */
+ cpu_logical_map(enabled_cpus) = mpidr;
+ }
+
+ enabled_cpus++;
+ return enabled_cpus;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ acpi_map_gic_cpu_interface(processor->arm_mpidr & MPIDR_HWID_BITMASK,
+ processor->parked_address, processor->flags & ACPI_MADT_ENABLED);
+
+ return 0;
+}
+
+/* Parse GIC cpu interface entries in MADT for SMP init */
+void __init acpi_init_cpus(void)
+{
+ int count, i;
+
+ /*
+ * do a partial walk of MADT to determine how many CPUs
+ * we have including disabled CPUs, and get information
+ * we need for SMP init
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ if (!count) {
+ pr_err("No GIC CPU interface entries present\n");
+ return;
+ } else if (count < 0) {
+ pr_err("Error parsing GIC CPU interface entry\n");
+ return;
+ }
+
+ if (!bootcpu_valid) {
+ pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
+ return;
+ }
+
+ for (i = 0; i < enabled_cpus; i++)
+ set_cpu_possible(i, true);
+
+ /* Make boot-up look pretty */
+ pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
+}
+
+static struct irq_domain *acpi_irq_domain;
+
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+ *irq = irq_find_mapping(acpi_irq_domain, gsi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+
+/*
+ * success: return IRQ number (>0)
+ * failure: return <= 0
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+	int irq;
+ unsigned int irq_type;
+ struct of_phandle_args args;
+ struct irq_data *d;
+
+ /*
+	 * ACPI has no bindings to indicate SPI or PPI, so we
+	 * use a different mapping under ACPI than under DT.
+ *
+ * For FDT
+ * PPI interrupt: in the range [0, 15];
+ * SPI interrupt: in the range [0, 987];
+ *
+ * For ACPI, GSI should be unique so using
+ * the hwirq directly for the mapping:
+ * PPI interrupt: in the range [16, 31];
+ * SPI interrupt: in the range [32, 1019];
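+	 *
+	 * Worked example (hypothetical GSIs): GSI 23 is a PPI and maps
+	 * to hwirq 7 (23 - 16), i.e. args = {1, 7, type}; GSI 64 is an
+	 * SPI and maps to hwirq 32 (64 - 32), i.e. args = {0, 32, type}.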
+ */
+
+ if (trigger == ACPI_EDGE_SENSITIVE &&
+ polarity == ACPI_ACTIVE_LOW)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
+ else if (trigger == ACPI_EDGE_SENSITIVE &&
+ polarity == ACPI_ACTIVE_HIGH)
+ irq_type = IRQ_TYPE_EDGE_RISING;
+ else if (trigger == ACPI_LEVEL_SENSITIVE &&
+ polarity == ACPI_ACTIVE_LOW)
+ irq_type = IRQ_TYPE_LEVEL_LOW;
+ else if (trigger == ACPI_LEVEL_SENSITIVE &&
+ polarity == ACPI_ACTIVE_HIGH)
+ irq_type = IRQ_TYPE_LEVEL_HIGH;
+ else
+ irq_type = IRQ_TYPE_NONE;
+
+	BUG_ON(!acpi_irq_domain);
+
+ args.np = acpi_irq_domain->of_node;
+ args.args_count = 3;
+ if (gsi < 32) {
+ args.args[0] = 1;
+ args.args[1] = gsi - 16;
+ } else {
+ args.args[0] = 0;
+ args.args[1] = gsi - 32;
+ }
+ args.args[2] = irq_type;
+
+ irq = __irq_domain_alloc_irqs(acpi_irq_domain, -1, 1,
+ dev_to_node(dev), &args, false);
+ if (irq < 0)
+ return -ENOSPC;
+
+ d = irq_domain_get_irq_data(acpi_irq_domain, irq);
+ if (!d)
+ return -EFAULT;
+
+ d->chip->irq_set_type(d, irq_type);
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+void acpi_unregister_gsi(u32 gsi)
+{
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+static int __init acpi_parse_fadt(struct acpi_table_header *table)
+{
+ struct acpi_table_fadt *fadt = (struct acpi_table_fadt *)table;
+
+	 * The revision in the table header is the FADT major revision,
+	 * and there is a minor revision of the FADT introduced by ACPI
+	 * 5.1. We only deal with FADT revision 5.1 or newer to get the
+	 * GIC and SMP boot protocol configuration data; otherwise we
+	 * disable ACPI.
+ * boot protocol configuration data, or we will disable ACPI.
+ */
+ if (table->revision > 5 ||
+ (table->revision == 5 && fadt->minor_revision >= 1)) {
+ if (!acpi_gbl_reduced_hardware) {
+ pr_err("Not hardware reduced ACPI mode, will not be supported\n");
+ goto disable_acpi;
+ }
+
+ /*
+		 * ACPI 5.1 has only two explicit methods to boot up SMP,
+		 * PSCI and the parking protocol, but the parking protocol
+		 * is only specified for ARMv7 at present. Prefer PSCI, and
+		 * fall back to the parking protocol only where it is
+		 * enabled, pending updates to the parking protocol spec.
+ */
+ if (acpi_psci_present())
+ boot_method = "psci";
+ else if (IS_ENABLED(CONFIG_ARM_PARKING_PROTOCOL))
+ boot_method = "parking-protocol";
+
+		if (!boot_method) {
+			pr_warn("No PSCI support, will not bring up secondary CPUs\n");
+			return -EOPNOTSUPP;
+		}
+		return 0;
+ }
+
+ pr_warn("Unsupported FADT revision %d.%d, should be 5.1+, will disable ACPI\n",
+ table->revision, fadt->minor_revision);
+
+disable_acpi:
+ disable_acpi();
+ return -EINVAL;
+}
+
+/*
+ * acpi_boot_table_init() is called from setup_arch(), always. It:
+ * 1. finds the RSDP, gets its address, and then finds the XSDT
+ * 2. extracts all tables and verifies their checksums
+ * 3. checks the ACPI FADT revision
+ *
+ * We can parse ACPI boot-time tables such as MADT after
+ * this function is called.
+ */
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * Enable ACPI instead of device tree unless
+ * - ACPI has been disabled explicitly (acpi=off), or
+ * - the device tree is not empty (it has more than just a /chosen node)
+ * and ACPI has not been force enabled (acpi=force)
+ */
+ if (param_acpi_off ||
+ (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
+ return;
+
+ enable_acpi();
+
+ /* Initialize the ACPI boot-time table parser. */
+ if (acpi_table_init()) {
+ disable_acpi();
+ return;
+ }
+
+ if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) {
+ /* disable ACPI if no FADT is found */
+ disable_acpi();
+ pr_err("Can't find FADT\n");
+ }
+}
+
+void __init acpi_gic_init(void)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ acpi_size tbl_size;
+ int err;
+
+ if (acpi_disabled)
+ return;
+
+ status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get MADT table, %s\n", msg);
+ return;
+ }
+
+ err = gic_v2_acpi_init(table, &acpi_irq_domain);
+ if (err || !acpi_irq_domain)
+		pr_err("Failed to initialize GIC IRQ controller\n");
+
+ early_acpi_os_unmap_memory((char *)table, tbl_size);
+}
+
+/*
+ * The parked address in the ACPI GICC structure will be used as the
+ * CPU release address
+ */
+int acpi_get_cpu_parked_address(int cpu, u64 *addr)
+{
+ if (!addr || !parked_address[cpu])
+ return -EINVAL;
+
+ *addr = parked_address[cpu];
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index cce9524..c50ca8f 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -23,6 +23,7 @@
#include <linux/string.h>
extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations smp_parking_protocol_ops;
extern const struct cpu_operations cpu_psci_ops;
const struct cpu_operations *cpu_ops[NR_CPUS];
@@ -30,12 +31,15 @@ const struct cpu_operations *cpu_ops[NR_CPUS];
static const struct cpu_operations *supported_cpu_ops[] __initconst = {
#ifdef CONFIG_SMP
&smp_spin_table_ops,
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
+ &smp_parking_protocol_ops,
+#endif
#endif
&cpu_psci_ops,
NULL,
};
-static const struct cpu_operations * __init cpu_get_ops(const char *name)
+const struct cpu_operations * __init cpu_get_ops(const char *name)
{
const struct cpu_operations **ops = supported_cpu_ops;
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b8d701..0ea8829 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,45 +11,26 @@
*
*/
-#include <linux/atomic.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
-#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/preempt.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
struct efi_memory_map memmap;
-static u64 efi_system_table;
-
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+static efi_runtime_services_t *runtime;
-static struct mm_struct efi_mm = {
- .mm_rb = RB_ROOT,
- .pgd = efi_pgd,
- .mm_users = ATOMIC_INIT(2),
- .mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
- .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
- .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
- INIT_MM_CONTEXT(efi_mm)
-};
+static u64 efi_system_table;
static int uefi_debug __initdata;
static int __init uefi_debug_setup(char *str)
@@ -67,33 +48,30 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
return 0;
}
-/*
- * Translate a EFI virtual address into a physical address: this is necessary,
- * as some data members of the EFI system table are virtually remapped after
- * SetVirtualAddressMap() has been called.
- */
-static phys_addr_t efi_to_phys(unsigned long addr)
+static void __init efi_setup_idmap(void)
{
+ struct memblock_region *r;
efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+ for_each_memblock(memory, r)
+ create_id_mapping(r->base, r->size, 0);
+
+ /* map runtime io spaces */
for_each_efi_memory_desc(&memmap, md) {
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
continue;
- if (md->virt_addr == 0)
- /* no virtual mapping has been installed by the stub */
- break;
- if (md->virt_addr <= addr &&
- (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
- return md->phys_addr + addr - md->virt_addr;
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+ create_id_mapping(paddr, size, 1);
}
- return addr;
}
static int __init uefi_init(void)
{
efi_char16_t *c16;
- void *config_tables;
- u64 table_size;
char vendor[100] = "unknown";
int i, retval;
@@ -121,7 +99,7 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
+ c16 = early_memremap(efi.systab->fw_vendor,
sizeof(vendor));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -134,14 +112,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
- config_tables = early_memremap(efi_to_phys(efi.systab->tables),
- table_size);
+ retval = efi_config_init(NULL);
- retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
- sizeof(efi_config_table_64_t), NULL);
-
- early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
return retval;
@@ -226,55 +198,63 @@ void __init efi_init(void)
return;
reserve_regions();
- early_memunmap(memmap.map, params.mmap_size);
}
-static bool __init efi_virtmap_init(void)
+void __init efi_idmap_init(void)
{
- efi_memory_desc_t *md;
+ if (!efi_enabled(EFI_BOOT))
+ return;
- for_each_efi_memory_desc(&memmap, md) {
- u64 paddr, npages, size;
- pgprot_t prot;
+ /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
+ efi_setup_idmap();
+ early_memunmap(memmap.map, memmap.map_end - memmap.map);
+}
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
- continue;
- if (md->virt_addr == 0)
- return false;
+static int __init remap_region(efi_memory_desc_t *md, void **new)
+{
+ u64 paddr, vaddr, npages, size;
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
- pr_info(" EFI remap 0x%016llx => %p\n",
- md->phys_addr, (void *)md->virt_addr);
+ if (is_normal_ram(md))
+ vaddr = (__force u64)ioremap_cache(paddr, size);
+ else
+ vaddr = (__force u64)ioremap(paddr, size);
- /*
- * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
- * executable, everything else can be mapped with the XN bits
- * set.
- */
- if (!is_normal_ram(md))
- prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
- prot = PAGE_KERNEL_EXEC;
- else
- prot = PAGE_KERNEL;
-
- create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+ if (!vaddr) {
+ pr_err("Unable to remap 0x%llx pages @ %p\n",
+ npages, (void *)paddr);
+ return 0;
}
- return true;
+
+	/* adjust for any rounding when the EFI and system page sizes differ */
+ md->virt_addr = vaddr + (md->phys_addr - paddr);
+
+ if (uefi_debug)
+ pr_info(" EFI remap 0x%012llx => %p\n",
+ md->phys_addr, (void *)md->virt_addr);
+
+ memcpy(*new, md, memmap.desc_size);
+ *new += memmap.desc_size;
+
+ return 1;
}
/*
- * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
- * non-early mapping of the UEFI system table and virtual mappings for all
- * EFI_MEMORY_RUNTIME regions.
+ * Switch UEFI from an identity map to a kernel virtual map
*/
-static int __init arm64_enable_runtime_services(void)
+static int __init arm64_enter_virtual_mode(void)
{
+ efi_memory_desc_t *md;
+ phys_addr_t virtmap_phys;
+ void *virtmap, *virt_md;
+ efi_status_t status;
u64 mapsize;
+ int count = 0;
+ unsigned long flags;
if (!efi_enabled(EFI_BOOT)) {
pr_info("EFI services will not be available.\n");
@@ -298,28 +278,79 @@ static int __init arm64_enable_runtime_services(void)
memmap.map_end = memmap.map + mapsize;
efi.memmap = &memmap;
- efi.systab = (__force void *)ioremap_cache(efi_system_table,
- sizeof(efi_system_table_t));
- if (!efi.systab) {
- pr_err("Failed to remap EFI System Table\n");
+ /* Map the runtime regions */
+ virtmap = kmalloc(mapsize, GFP_KERNEL);
+ if (!virtmap) {
+ pr_err("Failed to allocate EFI virtual memmap\n");
return -1;
}
+ virtmap_phys = virt_to_phys(virtmap);
+ virt_md = virtmap;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (!remap_region(md, &virt_md))
+ goto err_unmap;
+ ++count;
+ }
+
+ efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+ if (!efi.systab) {
+ /*
+ * If we have no virtual mapping for the System Table at this
+ * point, the memory map doesn't cover the physical offset where
+ * it resides. This means the System Table will be inaccessible
+ * to Runtime Services themselves once the virtual mapping is
+ * installed.
+ */
+ pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
+ goto err_unmap;
+ }
set_bit(EFI_SYSTEM_TABLES, &efi.flags);
- if (!efi_virtmap_init()) {
- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
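+	/*
+	 * SetVirtualAddressMap() must run while the firmware is still
+	 * reachable at its physical addresses, so switch to the identity
+	 * map (with interrupts off) for the duration of the call.
+	 */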
+ local_irq_save(flags);
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
+
+ /* Call SetVirtualAddressMap with the physical address of the map */
+ runtime = efi.systab->runtime;
+ efi.set_virtual_address_map = runtime->set_virtual_address_map;
+
+ status = efi.set_virtual_address_map(count * memmap.desc_size,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)virtmap_phys);
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+ local_irq_restore(flags);
+
+ kfree(virtmap);
+
+ if (status != EFI_SUCCESS) {
+ pr_err("Failed to set EFI virtual address map! [%lx]\n",
+ status);
return -1;
}
/* Set up runtime services function pointers */
+ runtime = efi.systab->runtime;
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
efi.runtime_version = efi.systab->hdr.revision;
return 0;
+
+err_unmap:
+ /* unmap all mappings that succeeded: there are 'count' of those */
+ for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
+ md = virt_md;
+ iounmap((__force void __iomem *)md->virt_addr);
+ }
+ kfree(virtmap);
+ return -1;
}
-early_initcall(arm64_enable_runtime_services);
+early_initcall(arm64_enter_virtual_mode);
static int __init arm64_dmi_init(void)
{
@@ -335,26 +366,6 @@ static int __init arm64_dmi_init(void)
}
core_initcall(arm64_dmi_init);
-static void efi_set_pgd(struct mm_struct *mm)
-{
- cpu_switch_mm(mm->pgd, mm);
- flush_tlb_all();
- if (icache_is_aivivt())
- __flush_icache_all();
-}
-
-void efi_virtmap_load(void)
-{
- preempt_disable();
- efi_set_pgd(&efi_mm);
-}
-
-void efi_virtmap_unload(void)
-{
- efi_set_pgd(current->active_mm);
- preempt_enable();
-}
-
/*
* UpdateCapsule() depends on the system being shutdown via
* ResetSystem().
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 6f93c24..8456e72 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -10,14 +10,17 @@
*
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
-
+#include <linux/pci-acpi.h>
+#include <linux/ecam.h>
#include <asm/pci-bridge.h>
/*
@@ -42,7 +45,424 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
*/
int pcibios_add_device(struct pci_dev *dev)
{
- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+ if (acpi_disabled)
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_sysdata *sd;
+
+ if (!acpi_disabled) {
+ sd = bridge->bus->sysdata;
+ ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+ }
+ return 0;
+}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ if (!acpi_disabled)
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ if (!acpi_disabled)
+ acpi_pci_remove_bus(bus);
+}
+
+int pcibios_enable_irq(struct pci_dev *dev)
+{
+ if (!acpi_disabled && !pci_dev_msi_enabled(dev))
+ acpi_pci_irq_enable(dev);
+ return 0;
+}
+
+int pcibios_disable_irq(struct pci_dev *dev)
+{
+ if (!acpi_disabled && !pci_dev_msi_enabled(dev))
+ acpi_pci_irq_disable(dev);
+ return 0;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int bars)
+{
+ int err;
+
+ err = pci_enable_resources(dev, bars);
+ if (err < 0)
+ return err;
+
+ if (!pci_dev_msi_enabled(dev))
+ return pcibios_enable_irq(dev);
+ return 0;
+}
+
+static int __init pcibios_assign_resources(void)
+{
+	struct pci_bus *root_bus;
+
+ if (acpi_disabled)
+ return 0;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node) {
+ pcibios_resource_survey_bus(root_bus);
+ pci_assign_unassigned_root_bus_resources(root_bus);
+ }
return 0;
}
+/*
+ * fs_initcall comes after subsys_initcall, so we know acpi scan
+ * has run.
+ */
+fs_initcall(pcibios_assign_resources);
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ return raw_pci_read(pci_domain_nr(bus), bus->number,
+ devfn, where, size, value);
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ return raw_pci_write(pci_domain_nr(bus), bus->number,
+ devfn, where, size, value);
+}
+
+struct pci_ops pci_root_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+struct pci_root_info {
+ struct acpi_device *bridge;
+ char name[16];
+ unsigned int res_num;
+ struct resource *res;
+ resource_size_t *res_offset;
+ struct pci_sysdata sd;
+ u16 segment;
+ u8 start_bus;
+ u8 end_bus;
+};
+
+static acpi_status resource_to_addr(struct acpi_resource *resource,
+ struct acpi_resource_address64 *addr)
+{
+ acpi_status status;
+
+ memset(addr, 0, sizeof(*addr));
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ status = acpi_resource_to_address64(resource, addr);
+ if (ACPI_SUCCESS(status) &&
+ (addr->resource_type == ACPI_MEMORY_RANGE ||
+ addr->resource_type == ACPI_IO_RANGE) &&
+ addr->producer_consumer == ACPI_PRODUCER &&
+ addr->address.address_length > 0) {
+ return AE_OK;
+ }
+ break;
+ }
+ return AE_ERROR;
+}
+
+static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
+{
+ struct pci_root_info *info = data;
+ struct acpi_resource_address64 addr;
+ acpi_status status;
+
+ status = resource_to_addr(acpi_res, &addr);
+ if (ACPI_SUCCESS(status))
+ info->res_num++;
+ return AE_OK;
+}
+
+static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
+{
+ struct pci_root_info *info = data;
+ struct resource *res;
+ struct acpi_resource_address64 addr;
+ acpi_status status;
+ unsigned long flags;
+ u64 start, end;
+
+ status = resource_to_addr(acpi_res, &addr);
+ if (!ACPI_SUCCESS(status))
+ return AE_OK;
+
+ if (addr.resource_type == ACPI_MEMORY_RANGE) {
+ flags = IORESOURCE_MEM;
+ if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+ flags |= IORESOURCE_PREFETCH;
+ } else if (addr.resource_type == ACPI_IO_RANGE) {
+ flags = IORESOURCE_IO;
+ } else
+ return AE_OK;
+
+ start = addr.address.minimum + addr.address.translation_offset;
+ end = addr.address.maximum + addr.address.translation_offset;
+
+ res = &info->res[info->res_num];
+ res->name = info->name;
+ res->flags = flags;
+ res->start = start;
+ res->end = end;
+
+ if (flags & IORESOURCE_IO) {
+ unsigned long port;
+ int err;
+
+ err = pci_register_io_range(start, addr.address.address_length);
+ if (err)
+ return AE_OK;
+
+ port = pci_address_to_pio(start);
+ if (port == (unsigned long)-1) {
+ res->start = -1;
+ res->end = -1;
+ return AE_OK;
+ }
+
+ res->start = port;
+ res->end = res->start + addr.address.address_length - 1;
+
+ if (pci_remap_iospace(res, start) < 0)
+ return AE_OK;
+
+ info->res_offset[info->res_num] = port - addr.address.minimum;
+ } else
+ info->res_offset[info->res_num] = addr.address.translation_offset;
+
+ info->res_num++;
+
+ return AE_OK;
+}
+
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
+{
+ int i, j;
+ struct resource *res1, *res2;
+
+ for (i = 0; i < info->res_num; i++) {
+ res1 = &info->res[i];
+ if (!(res1->flags & type))
+ continue;
+
+ for (j = i + 1; j < info->res_num; j++) {
+ res2 = &info->res[j];
+ if (!(res2->flags & type))
+ continue;
+
+ /*
+ * I don't like throwing away windows because then
+ * our resources no longer match the ACPI _CRS, but
+ * the kernel resource tree doesn't allow overlaps.
+ */
+ if (resource_overlaps(res1, res2)) {
+ res2->start = min(res1->start, res2->start);
+ res2->end = max(res1->end, res2->end);
+ dev_info(&info->bridge->dev,
+ "host bridge window expanded to %pR; %pR ignored\n",
+ res2, res1);
+ res1->flags = 0;
+ }
+ }
+ }
+}
+
+static void add_resources(struct pci_root_info *info,
+ struct list_head *resources)
+{
+ int i;
+ struct resource *res, *root, *conflict;
+
+ coalesce_windows(info, IORESOURCE_MEM);
+ coalesce_windows(info, IORESOURCE_IO);
+
+ for (i = 0; i < info->res_num; i++) {
+ res = &info->res[i];
+
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+ else if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ else
+ continue;
+
+ conflict = insert_resource_conflict(root, res);
+ if (conflict)
+ dev_info(&info->bridge->dev,
+ "ignoring host bridge window %pR (conflicts with %s %pR)\n",
+ res, conflict->name, conflict);
+ else
+ pci_add_resource_offset(resources, res,
+ info->res_offset[i]);
+ }
+}
+
+static void free_pci_root_info_res(struct pci_root_info *info)
+{
+ kfree(info->res);
+ info->res = NULL;
+ kfree(info->res_offset);
+ info->res_offset = NULL;
+ info->res_num = 0;
+}
+
+static void __release_pci_root_info(struct pci_root_info *info)
+{
+ int i;
+ struct resource *res;
+
+ for (i = 0; i < info->res_num; i++) {
+ res = &info->res[i];
+
+ if (!res->parent)
+ continue;
+
+ if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+ continue;
+
+ release_resource(res);
+ }
+
+ free_pci_root_info_res(info);
+
+ kfree(info);
+}
+
+static void release_pci_root_info(struct pci_host_bridge *bridge)
+{
+ struct pci_root_info *info = bridge->release_data;
+
+ __release_pci_root_info(info);
+}
+
+static void probe_pci_root_info(struct pci_root_info *info,
+ struct acpi_device *device,
+ int busnum, int domain)
+{
+ size_t size;
+
+ sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
+ info->bridge = device;
+
+ info->res_num = 0;
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
+ info);
+ if (!info->res_num)
+ return;
+
+ size = sizeof(*info->res) * info->res_num;
+ info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node);
+ if (!info->res) {
+ info->res_num = 0;
+ return;
+ }
+
+ size = sizeof(*info->res_offset) * info->res_num;
+ info->res_num = 0;
+ info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
+ if (!info->res_offset) {
+ kfree(info->res);
+ info->res = NULL;
+ return;
+ }
+
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
+ info);
+}
+
+/* Root bridge scanning */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ struct acpi_device *device = root->device;
+ struct pci_ecam_region *mcfg;
+ struct pci_root_info *info;
+ int domain = root->segment;
+ int busnum = root->secondary.start;
+ LIST_HEAD(resources);
+ struct pci_bus *bus;
+ struct pci_sysdata *sd;
+ int node;
+
+	/* config space access for this bus requires an ECAM (MCFG) region */
+ mcfg = pci_ecam_lookup(domain, busnum);
+ if (!mcfg) {
+ pr_err("pci_bus %04x:%02x has no MCFG table\n",
+ domain, busnum);
+ return NULL;
+ }
+
+ /* temporary hack */
+ if (mcfg->fixup)
+ (*mcfg->fixup)(root, mcfg);
+
+ if (domain && !pci_domains_supported) {
+ pr_warn("PCI %04x:%02x: multiple domains not supported.\n",
+ domain, busnum);
+ return NULL;
+ }
+
+ node = NUMA_NO_NODE;
+
+ info = kzalloc_node(sizeof(*info), GFP_KERNEL, node);
+ if (!info) {
+ pr_warn("PCI %04x:%02x: ignored (out of memory)\n",
+ domain, busnum);
+ return NULL;
+ }
+ info->segment = domain;
+ info->start_bus = busnum;
+ info->end_bus = root->secondary.end;
+
+ sd = &info->sd;
+ sd->domain = domain;
+ sd->node = node;
+ sd->companion = device;
+
+ probe_pci_root_info(info, device, busnum, domain);
+
+ /* insert busn res at first */
+ pci_add_resource(&resources, &root->secondary);
+
+ /* then _CRS resources */
+ add_resources(info, &resources);
+
+ bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd, &resources);
+ if (bus) {
+ pci_scan_child_bus(bus);
+ pci_set_host_bridge_release(to_pci_host_bridge(bus->bridge),
+ release_pci_root_info, info);
+ } else {
+ pci_free_resource_list(&resources);
+ __release_pci_root_info(info);
+ }
+
+	/*
+	 * After the PCIe bus has been walked and all devices discovered,
+	 * configure any settings of the fabric that might be necessary.
+	 */
+ if (bus) {
+ struct pci_bus *child;
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ if (bus && node != NUMA_NO_NODE)
+ dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
+
+ return bus;
+}
+#endif
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 25a5308..1e4fd17 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <linux/acpi.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -1310,6 +1311,107 @@ static int __init register_pmu_driver(void)
}
device_initcall(register_pmu_driver);
+#ifdef CONFIG_ACPI
+struct acpi_pmu_irq {
+ int gsi;
+ int trigger;
+};
+
+static struct acpi_pmu_irq acpi_pmu_irqs[NR_CPUS] __initdata;
+
+static int __init
+acpi_parse_pmu_irqs(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gic;
+ int cpu;
+ u64 mpidr;
+
+ gic = (struct acpi_madt_generic_interrupt *)header;
+ if (BAD_MADT_ENTRY(gic, end))
+ return -EINVAL;
+
+ mpidr = gic->arm_mpidr & MPIDR_HWID_BITMASK;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_logical_map(cpu) != mpidr)
+ continue;
+
+ acpi_pmu_irqs[cpu].gsi = gic->performance_interrupt;
+ if (gic->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
+ acpi_pmu_irqs[cpu].trigger = ACPI_EDGE_SENSITIVE;
+ else
+ acpi_pmu_irqs[cpu].trigger = ACPI_LEVEL_SENSITIVE;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int __init pmu_acpi_init(void)
+{
+ struct platform_device *pdev;
+ struct acpi_pmu_irq *pirq = acpi_pmu_irqs;
+ struct resource *res, *r;
+ int err = -ENOMEM;
+ int i, count, irq;
+
+ if (acpi_disabled)
+ return 0;
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_pmu_irqs, num_possible_cpus());
+	/* Must have an irq for the boot cpu, at least */
+ if (count <= 0 || pirq->gsi == 0)
+ return -EINVAL;
+
+ irq = acpi_register_gsi(NULL, pirq->gsi, pirq->trigger,
+ ACPI_ACTIVE_HIGH);
+
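+	/*
+	 * A per-cpu interrupt (PPI) is delivered to all CPUs through a
+	 * single IRQ, so one resource suffices; otherwise each CPU has
+	 * its own SPI and needs its own resource.
+	 */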
+ if (irq_is_percpu(irq))
+ count = 1;
+
+ pdev = platform_device_alloc("arm-pmu", -1);
+ if (!pdev)
+ return err;
+
+ res = kcalloc(count, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ goto err_free_device;
+
+ for (i = 0, r = res; i < count; i++, pirq++, r++) {
+ if (i)
+ irq = acpi_register_gsi(NULL, pirq->gsi, pirq->trigger,
+ ACPI_ACTIVE_HIGH);
+ r->start = r->end = irq;
+ r->flags = IORESOURCE_IRQ;
+ if (pirq->trigger == ACPI_EDGE_SENSITIVE)
+ r->flags |= IORESOURCE_IRQ_HIGHEDGE;
+ else
+ r->flags |= IORESOURCE_IRQ_HIGHLEVEL;
+ }
+
+ err = platform_device_add_resources(pdev, res, count);
+ if (err)
+ goto err_free_res;
+
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_free_res;
+
+ return 0;
+
+err_free_res:
+ kfree(res);
+
+err_free_device:
+ platform_device_put(pdev);
+ return err;
+}
+arch_initcall(pmu_acpi_init);
+
+#endif /* CONFIG_ACPI */
+
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
return this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 9b8a70a..d3c52ce 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
@@ -24,6 +25,7 @@
#include <linux/slab.h>
#include <uapi/linux/psci.h>
+#include <asm/acpi.h>
#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
@@ -273,6 +275,33 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
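+/*
+ * Set up the standard PSCI v0.2 function IDs and psci_ops; shared by
+ * the DT (psci_0_2_init) and ACPI (psci_acpi_init) probe paths.
+ */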
+static void __init psci_0_2_set_functions(void)
+{
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+ psci_ops.cpu_off = psci_cpu_off;
+
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
+ psci_ops.cpu_on = psci_cpu_on;
+
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
+ psci_ops.migrate = psci_migrate;
+
+ psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
+ psci_ops.affinity_info = psci_affinity_info;
+
+ psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
+ PSCI_0_2_FN_MIGRATE_INFO_TYPE;
+ psci_ops.migrate_info_type = psci_migrate_info_type;
+
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
+}
+
/*
* PSCI Function IDs for v0.2+ are well defined so use
* standard values.
@@ -306,29 +335,7 @@ static int __init psci_0_2_init(struct device_node *np)
}
}
- pr_info("Using standard PSCI v0.2 function IDs\n");
- psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
- psci_ops.cpu_suspend = psci_cpu_suspend;
-
- psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
- psci_ops.cpu_off = psci_cpu_off;
-
- psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
- psci_ops.cpu_on = psci_cpu_on;
-
- psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
- psci_ops.migrate = psci_migrate;
-
- psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
- psci_ops.affinity_info = psci_affinity_info;
-
- psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
- PSCI_0_2_FN_MIGRATE_INFO_TYPE;
- psci_ops.migrate_info_type = psci_migrate_info_type;
-
- arm_pm_restart = psci_sys_reset;
-
- pm_power_off = psci_sys_poweroff;
+ psci_0_2_set_functions();
out_put_node:
of_node_put(np);
@@ -381,7 +388,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-int __init psci_init(void)
+int __init psci_dt_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
@@ -396,6 +403,29 @@ int __init psci_init(void)
return init_fn(np);
}
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as explicitly
+ * required by the SBBR
+ */
+int __init psci_acpi_init(void)
+{
+ if (!acpi_psci_present()) {
+ pr_info("is not implemented in ACPI.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ else
+ invoke_psci_fn = __invoke_psci_fn_smc;
+
+ psci_0_2_set_functions();
+
+ return 0;
+}
+
#ifdef CONFIG_SMP
static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index e8420f6..5c3e289 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -46,6 +47,7 @@
#include <linux/efi.h>
#include <linux/personality.h>
+#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -380,18 +382,28 @@ void __init setup_arch(char **cmdline_p)
efi_init();
arm64_memblock_init();
+ /* Parse the ACPI tables for possible boot-time configuration */
+ acpi_boot_table_init();
+
paging_init();
request_standard_resources();
+ efi_idmap_init();
early_ioremap_reset();
- unflatten_device_tree();
-
- psci_init();
+ if (acpi_disabled) {
+ unflatten_device_tree();
+ psci_dt_init();
+ cpu_read_bootcpu_ops();
+#ifdef CONFIG_SMP
+ of_smp_init_cpus();
+#endif
+ } else {
+ psci_acpi_init();
+ acpi_init_cpus();
+ }
- cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
- smp_init_cpus();
smp_build_mpidr_hash();
#endif
@@ -547,3 +559,25 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = c_show
};
+
+/*
+ * Temporary hack to avoid need for console= on command line
+ */
+static int __init arm64_console_setup(void)
+{
+ /* Allow cmdline to override our assumed preferences */
+ if (console_set_on_cmdline)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_SBSAUART_TTY))
+ add_preferred_console("ttySBSA", 0, "115200");
+
+ if (IS_ENABLED(CONFIG_SERIAL_AMBA_PL011))
+ add_preferred_console("ttyAMA", 0, "115200");
+
+ if (IS_ENABLED(CONFIG_SERIAL_8250))
+ add_preferred_console("ttyS", 0, "115200");
+
+ return 0;
+}
+early_initcall(arm64_console_setup);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 328b8ce..52998b7 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -322,7 +322,7 @@ void __init smp_prepare_boot_cpu(void)
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
-void __init smp_init_cpus(void)
+void __init of_smp_init_cpus(void)
{
struct device_node *dn = NULL;
unsigned int i, cpu = 1;
diff --git a/arch/arm64/kernel/smp_parking_protocol.c b/arch/arm64/kernel/smp_parking_protocol.c
new file mode 100644
index 0000000..e1153ce
--- /dev/null
+++ b/arch/arm64/kernel/smp_parking_protocol.c
@@ -0,0 +1,110 @@
+/*
+ * Parking Protocol SMP initialisation
+ *
+ * Based largely on spin-table method.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+static phys_addr_t cpu_mailbox_addr[NR_CPUS];
+
+static void (*__smp_boot_wakeup)(int cpu);
+
+void set_smp_boot_wakeup_call(void (*fn)(int cpu))
+{
+ __smp_boot_wakeup = fn;
+}
+
+static int smp_parking_protocol_cpu_init(struct device_node *dn,
+ unsigned int cpu)
+{
+ /*
+ * Determine the mailbox address.
+ */
+ if (!acpi_get_cpu_parked_address(cpu, &cpu_mailbox_addr[cpu])) {
+ pr_info("%s: ACPI parked addr=%llx\n",
+ __func__, cpu_mailbox_addr[cpu]);
+ return 0;
+ }
+
+ pr_err("CPU %d: missing or invalid parking protocol mailbox\n", cpu);
+
+ return -1;
+}
+
+static int smp_parking_protocol_cpu_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
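+/*
+ * Per-cpu mailbox as used by the parking protocol: the OS writes the
+ * waking CPU's id and entry point, then sends a wakeup event; the
+ * parked CPU compares cpu_id against its own id and, on a match,
+ * jumps to entry_point (see smp_parking_protocol_cpu_boot() below).
+ */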
+struct parking_protocol_mailbox {
+ __le32 cpu_id;
+ __le32 reserved;
+ __le64 entry_point;
+};
+
+static int smp_parking_protocol_cpu_boot(unsigned int cpu)
+{
+ struct parking_protocol_mailbox __iomem *mailbox;
+
+ if (!cpu_mailbox_addr[cpu] || !__smp_boot_wakeup)
+ return -ENODEV;
+
+ /*
+ * The mailbox may or may not be inside the linear mapping.
+ * As ioremap_cache will either give us a new mapping or reuse the
+ * existing linear mapping, we can use it to cover both cases. In
+ * either case the memory will be MT_NORMAL.
+ */
+ mailbox = ioremap_cache(cpu_mailbox_addr[cpu], sizeof(*mailbox));
+ if (!mailbox)
+ return -ENOMEM;
+
+ /*
+ * We write the entry point and cpu id as LE regardless of the
+ * native endianness of the kernel. Therefore, any boot loader
+ * that reads these fields must convert them to its own
+ * endianness before jumping to the entry point.
+ */
+ writeq(__pa(secondary_entry), &mailbox->entry_point);
+ writel(cpu, &mailbox->cpu_id);
+ __flush_dcache_area(mailbox, sizeof(*mailbox));
+ __smp_boot_wakeup(cpu);
+
+ /* temp hack for broken firmware */
+ sev();
+
+ iounmap(mailbox);
+
+ return 0;
+}
+
+const struct cpu_operations smp_parking_protocol_ops = {
+ .name = "parking-protocol",
+ .cpu_init = smp_parking_protocol_cpu_init,
+ .cpu_prepare = smp_parking_protocol_cpu_prepare,
+ .cpu_boot = smp_parking_protocol_cpu_boot,
+};
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 1a7125c..42f9195 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clk-provider.h>
+#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>
@@ -72,6 +73,12 @@ void __init time_init(void)
tick_setup_hrtimer_broadcast();
+ /*
+ * Since only one of ACPI or FDT will be available in the system,
+ * we can safely call acpi_generic_timer_init() here.
+ */
+ acpi_generic_timer_init();
+
arch_timer_rate = arch_timer_get_rate();
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2b..360edc6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -23,8 +23,14 @@
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
#include <asm/cacheflush.h>
@@ -409,10 +415,102 @@ out:
return -ENOMEM;
}
+#ifdef CONFIG_PCI
+static void arm64_of_set_dma_ops(void *_dev)
+{
+ struct device *dev = _dev;
+
+ /*
+ * PCI devices won't have an of_node but the bridge will.
+ * Search up the device chain until we find an of_node
+ * to check.
+ */
+ while (dev) {
+ if (dev->of_node) {
+ if (of_dma_is_coherent(dev->of_node))
+ ((struct device *)_dev)->archdata.dma_coherent = true;
+ break;
+ }
+ dev = dev->parent;
+ }
+}
+#else
+static inline void arm64_of_set_dma_ops(void *_dev) {}
+#endif
+
+
+#ifdef CONFIG_ACPI
+static void arm64_acpi_set_dma_ops(void *_dev)
+{
+ struct device *dev = _dev;
+
+ /*
+ * The kernel defaults to noncoherent ops, but the ACPI 5.1 spec says
+ * arm64 defaults to coherent. Set coherent ops if _CCA is not found,
+ * or if it is found and non-zero.
+ *
+ * PCI devices won't have an of_node but the bridge will.
+ * Search up the device chain until we find an ACPI handle
+ * to check.
+ */
+ while (dev) {
+ if (ACPI_HANDLE(dev)) {
+ acpi_status status;
+ int coherent;
+ status = acpi_check_coherency(ACPI_HANDLE(dev),
+ &coherent);
+ if (ACPI_FAILURE(status) || coherent)
+ ((struct device *)_dev)->archdata.dma_coherent = true;
+ break;
+ }
+ dev = dev->parent;
+ }
+}
+#else
+static inline void arm64_acpi_set_dma_ops(void *_dev) {}
+#endif
+
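+/*
+ * Devices pick up their coherency setting as they are added to a bus,
+ * so hook the bus notifier chain and consult either the DT or the
+ * ACPI tables depending on which one booted the system.
+ */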
+static int dma_bus_notifier(struct notifier_block *nb,
+ unsigned long event, void *_dev)
+{
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ if (acpi_disabled)
+ arm64_of_set_dma_ops(_dev);
+ else
+ arm64_acpi_set_dma_ops(_dev);
+
+ return NOTIFY_OK;
+}
+
+#ifdef CONFIG_ACPI
+static struct notifier_block platform_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+#endif
+
+#ifdef CONFIG_PCI
+static struct notifier_block pci_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+#endif
+
static int __init arm64_dma_init(void)
{
int ret;
+#ifdef CONFIG_ACPI
+	/* The notifier blocks above are only compiled in under the same
+	 * guards, so IS_ENABLED() would not build; use the preprocessor. */
+	bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+	bus_register_notifier(&amba_bustype, &amba_bus_nb);
+#endif
+#ifdef CONFIG_PCI
+	bus_register_notifier(&pci_bus_type, &pci_bus_nb);
+#endif
+
dma_ops = &swiotlb_dma_ops;
ret = atomic_pool_init();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c6daaf6..c3c3134 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -276,12 +276,24 @@ static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
size, prot, early_alloc);
}
+void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+{
+ if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
+ pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+ return;
+ }
+ __create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)],
+ addr, addr, size,
+ map_io ? __pgprot(PROT_DEVICE_nGnRE)
+ : PAGE_KERNEL_EXEC, early_alloc);
+}
+
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot)
{
__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
- late_alloc);
+ early_alloc);
}
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 074e52b..e8728d7 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -10,6 +10,7 @@ config IA64
select ARCH_MIGHT_HAVE_PC_SERIO
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select ACPI_GENERIC_SLEEP if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 2c44989..067ef44 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -887,7 +887,7 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b7d31ca..b77071a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86_64
### Arch settings
config X86
def_bool y
+ select ACPI_GENERIC_SLEEP if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_FAST_MULTIPLIER
@@ -141,6 +142,7 @@ config X86
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select X86_FEATURE_NAMES if PROC_FS
select SRCU
+ select PCI_ECAM_GENERIC if X86_64
config INSTRUCTION_DECODER
def_bool y
@@ -2277,6 +2279,7 @@ config PCI_DIRECT
config PCI_MMCONFIG
def_bool y
+ select PCI_ECAM
depends on X86_32 && PCI && (ACPI || SFI) && (PCI_GOMMCONFIG || PCI_GOANY)
config PCI_OLPC
@@ -2294,6 +2297,7 @@ config PCI_DOMAINS
config PCI_MMCONFIG
bool "Support mmconfig PCI config space access"
+ select PCI_ECAM
depends on X86_64 && PCI && ACPI
config PCI_CNB20LE_QUIRK
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195d..e8a237f 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -122,40 +122,18 @@ extern int pci_legacy_init(void);
extern void pcibios_fixup_irqs(void);
/* pci-mmconfig.c */
-
-/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
-#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
-
-struct pci_mmcfg_region {
- struct list_head list;
- struct resource res;
- u64 address;
- char __iomem *virt;
- u16 segment;
- u8 start_bus;
- u8 end_bus;
- char name[PCI_MMCFG_RESOURCE_NAME_LEN];
-};
-
-extern int __init pci_mmcfg_arch_init(void);
-extern void __init pci_mmcfg_arch_free(void);
-extern int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
-extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
phys_addr_t addr);
-extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
-extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
-
-extern struct list_head pci_mmcfg_list;
-
-#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20)
/*
* AMD Fam10h CPUs are buggy, and cannot access MMIO config space
- * on their northbrige except through the * %eax register. As such, you MUST
- * NOT use normal IOMEM accesses, you need to only use the magic mmio-config
+ * on their northbridge except through the %eax register. As such, you MUST
+ * NOT use normal IOMEM accesses; use only the magic mmio_config_*
* accessor functions.
- * In fact just use pci_config_*, nothing else please.
+ *
+ * Please refer to the following doc:
+ * "BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h Processors",
+ * rev. 3.48, sec 2.11.1, "MMIO Configuration Coding Requirements".
*/
static inline unsigned char mmio_config_readb(void __iomem *pos)
{
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3d525c6..e4f8582 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -757,7 +757,7 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 5c6fc35..35c765b 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -1,7 +1,10 @@
obj-y := i386.o init.o
obj-$(CONFIG_PCI_BIOS) += pcbios.o
-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o
+obj-$(CONFIG_PCI_MMCONFIG) += direct.o mmconfig-shared.o
+ifeq ($(BITS),32)
+obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_32.o
+endif
obj-$(CONFIG_PCI_DIRECT) += direct.o
obj-$(CONFIG_PCI_OLPC) += olpc.o
obj-$(CONFIG_PCI_XEN) += xen.o
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index e469598..fc9eb43 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -4,6 +4,7 @@
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <linux/ecam.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>
@@ -198,7 +199,7 @@ static int setup_mcfg_map(struct pci_root_info *info, u16 seg, u8 start,
static void teardown_mcfg_map(struct pci_root_info *info)
{
if (info->mcfg_added) {
- pci_mmconfig_delete(info->segment, info->start_bus,
+ pci_ecam_delete(info->segment, info->start_bus,
info->end_bus);
info->mcfg_added = false;
}
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index dd30b7e..8f78671 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
+#include <linux/ecam.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>
@@ -27,103 +28,52 @@
/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
static bool pci_mmcfg_arch_init_failed;
-static DEFINE_MUTEX(pci_mmcfg_lock);
-LIST_HEAD(pci_mmcfg_list);
-
-static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
-{
- if (cfg->res.parent)
- release_resource(&cfg->res);
- list_del(&cfg->list);
- kfree(cfg);
-}
+const struct pci_raw_ops pci_mmcfg = {
+ .read = pci_ecam_read,
+ .write = pci_ecam_write,
+};
-static void __init free_all_mmcfg(void)
+static u32
+pci_mmconfig_amd_read(int len, void __iomem *addr)
{
- struct pci_mmcfg_region *cfg, *tmp;
-
- pci_mmcfg_arch_free();
- list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
- pci_mmconfig_remove(cfg);
-}
+ u32 data = 0;
-static void list_add_sorted(struct pci_mmcfg_region *new)
-{
- struct pci_mmcfg_region *cfg;
-
- /* keep list sorted by segment and starting bus number */
- list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
- if (cfg->segment > new->segment ||
- (cfg->segment == new->segment &&
- cfg->start_bus >= new->start_bus)) {
- list_add_tail_rcu(&new->list, &cfg->list);
- return;
- }
+ switch (len) {
+ case 1:
+ data = mmio_config_readb(addr);
+ break;
+ case 2:
+ data = mmio_config_readw(addr);
+ break;
+ case 4:
+ data = mmio_config_readl(addr);
+ break;
}
- list_add_tail_rcu(&new->list, &pci_mmcfg_list);
-}
-
-static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
- int end, u64 addr)
-{
- struct pci_mmcfg_region *new;
- struct resource *res;
-
- if (addr == 0)
- return NULL;
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
-
- new->address = addr;
- new->segment = segment;
- new->start_bus = start;
- new->end_bus = end;
-
- res = &new->res;
- res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
- res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
- "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
- res->name = new->name;
-
- return new;
+ return data;
}
-static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
- int end, u64 addr)
+static void
+pci_mmconfig_amd_write(int len, void __iomem *addr, u32 value)
{
- struct pci_mmcfg_region *new;
-
- new = pci_mmconfig_alloc(segment, start, end, addr);
- if (new) {
- mutex_lock(&pci_mmcfg_lock);
- list_add_sorted(new);
- mutex_unlock(&pci_mmcfg_lock);
-
- pr_info(PREFIX
- "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
- "(base %#lx)\n",
- segment, start, end, &new->res, (unsigned long)addr);
+ switch (len) {
+ case 1:
+ mmio_config_writeb(addr, value);
+ break;
+ case 2:
+ mmio_config_writew(addr, value);
+ break;
+ case 4:
+ mmio_config_writel(addr, value);
+ break;
}
-
- return new;
}
-struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
-{
- struct pci_mmcfg_region *cfg;
-
- list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
- if (cfg->segment == segment &&
- cfg->start_bus <= bus && bus <= cfg->end_bus)
- return cfg;
-
- return NULL;
-}
+static struct pci_ecam_mmio_ops pci_mmcfg_mmio_amd_fam10h = {
+ .read = pci_mmconfig_amd_read,
+ .write = pci_mmconfig_amd_write,
+};
static const char *__init pci_mmcfg_e7520(void)
{
@@ -134,7 +84,7 @@ static const char *__init pci_mmcfg_e7520(void)
if (win == 0x0000 || win == 0xf000)
return NULL;
- if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
+ if (pci_ecam_add(0, 0, 255, win << 16) == NULL)
return NULL;
return "Intel Corporation E7520 Memory Controller Hub";
@@ -178,7 +128,7 @@ static const char *__init pci_mmcfg_intel_945(void)
if ((pciexbar & mask) >= 0xf0000000U)
return NULL;
- if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
+ if (pci_ecam_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
return NULL;
return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
@@ -225,12 +175,14 @@ static const char *__init pci_mmcfg_amd_fam10h(void)
end_bus = (1 << busnbits) - 1;
for (i = 0; i < (1 << segnbits); i++)
- if (pci_mmconfig_add(i, 0, end_bus,
+ if (pci_ecam_add(i, 0, end_bus,
base + (1<<28) * i) == NULL) {
- free_all_mmcfg();
+ pci_ecam_free_all();
return NULL;
}
+ pci_ecam_register_mmio(&pci_mmcfg_mmio_amd_fam10h);
+
return "AMD Family 10h NB";
}
@@ -258,7 +210,7 @@ static const char *__init pci_mmcfg_nvidia_mcp55(void)
/*
* do check if amd fam10h already took over
*/
- if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
+ if (!acpi_disabled || !list_empty(&pci_ecam_list) || mcp55_checked)
return NULL;
mcp55_checked = true;
@@ -287,7 +239,7 @@ static const char *__init pci_mmcfg_nvidia_mcp55(void)
base <<= extcfg_base_lshift;
start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
end = start + extcfg_sizebus[size_index] - 1;
- if (pci_mmconfig_add(0, start, end, base) == NULL)
+ if (pci_ecam_add(0, start, end, base) == NULL)
continue;
mcp55_mmconf_found++;
}
@@ -321,15 +273,15 @@ static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst =
static void __init pci_mmcfg_check_end_bus_number(void)
{
- struct pci_mmcfg_region *cfg, *cfgx;
+ struct pci_ecam_region *cfg, *cfgx;
/* Fixup overlaps */
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ list_for_each_entry(cfg, &pci_ecam_list, list) {
if (cfg->end_bus < cfg->start_bus)
cfg->end_bus = 255;
/* Don't access the list head ! */
- if (cfg->list.next == &pci_mmcfg_list)
+ if (cfg->list.next == &pci_ecam_list)
break;
cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
@@ -349,7 +301,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
if (!raw_pci_ops)
return 0;
- free_all_mmcfg();
+ pci_ecam_free_all();
for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
bus = pci_mmcfg_probes[i].bus;
@@ -370,7 +322,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
/* some end_bus_number is crazy, fix it */
pci_mmcfg_check_end_bus_number();
- return !list_empty(&pci_mmcfg_list);
+ return !list_empty(&pci_ecam_list);
}
static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
@@ -443,7 +395,7 @@ static int is_acpi_reserved(u64 start, u64 end, unsigned not_used)
typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
- struct pci_mmcfg_region *cfg,
+ struct pci_ecam_region *cfg,
struct device *dev, int with_e820)
{
u64 addr = cfg->res.start;
@@ -473,8 +425,8 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
num_buses = cfg->end_bus - cfg->start_bus + 1;
cfg->res.end = cfg->res.start +
- PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
- snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+ PCI_ECAM_BUS_OFFSET(num_buses) - 1;
+ snprintf(cfg->name, PCI_ECAM_RESOURCE_NAME_LEN,
"PCI MMCONFIG %04x [bus %02x-%02x]",
cfg->segment, cfg->start_bus, cfg->end_bus);
@@ -495,7 +447,7 @@ static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
}
static int __ref pci_mmcfg_check_reserved(struct device *dev,
- struct pci_mmcfg_region *cfg, int early)
+ struct pci_ecam_region *cfg, int early)
{
if (!early && !acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
@@ -532,84 +484,17 @@ static int __ref pci_mmcfg_check_reserved(struct device *dev,
static void __init pci_mmcfg_reject_broken(int early)
{
- struct pci_mmcfg_region *cfg;
+ struct pci_ecam_region *cfg;
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ list_for_each_entry(cfg, &pci_ecam_list, list) {
if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
pr_info(PREFIX "not using MMCONFIG\n");
- free_all_mmcfg();
+ pci_ecam_free_all();
return;
}
}
}
-static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
- struct acpi_mcfg_allocation *cfg)
-{
- int year;
-
- if (cfg->address < 0xFFFFFFFF)
- return 0;
-
- if (!strncmp(mcfg->header.oem_id, "SGI", 3))
- return 0;
-
- if (mcfg->header.revision >= 1) {
- if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
- year >= 2010)
- return 0;
- }
-
- pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
- "is above 4GB, ignored\n", cfg->pci_segment,
- cfg->start_bus_number, cfg->end_bus_number, cfg->address);
- return -EINVAL;
-}
-
-static int __init pci_parse_mcfg(struct acpi_table_header *header)
-{
- struct acpi_table_mcfg *mcfg;
- struct acpi_mcfg_allocation *cfg_table, *cfg;
- unsigned long i;
- int entries;
-
- if (!header)
- return -EINVAL;
-
- mcfg = (struct acpi_table_mcfg *)header;
-
- /* how many config structures do we have */
- free_all_mmcfg();
- entries = 0;
- i = header->length - sizeof(struct acpi_table_mcfg);
- while (i >= sizeof(struct acpi_mcfg_allocation)) {
- entries++;
- i -= sizeof(struct acpi_mcfg_allocation);
- }
- if (entries == 0) {
- pr_err(PREFIX "MMCONFIG has no entries\n");
- return -ENODEV;
- }
-
- cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
- for (i = 0; i < entries; i++) {
- cfg = &cfg_table[i];
- if (acpi_mcfg_check_entry(mcfg, cfg)) {
- free_all_mmcfg();
- return -ENODEV;
- }
-
- if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
- cfg->end_bus_number, cfg->address) == NULL) {
- pr_warn(PREFIX "no memory for MCFG entries\n");
- free_all_mmcfg();
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
#ifdef CONFIG_ACPI_APEI
extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
void *data), void *data);
@@ -617,13 +502,13 @@ extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
void *data), void *data)
{
- struct pci_mmcfg_region *cfg;
+ struct pci_ecam_region *cfg;
int rc;
- if (list_empty(&pci_mmcfg_list))
+ if (list_empty(&pci_ecam_list))
return 0;
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ list_for_each_entry(cfg, &pci_ecam_list, list) {
rc = func(cfg->res.start, resource_size(&cfg->res), data);
if (rc)
return rc;
@@ -639,23 +524,24 @@ static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
static void __init __pci_mmcfg_init(int early)
{
pci_mmcfg_reject_broken(early);
- if (list_empty(&pci_mmcfg_list))
+ if (list_empty(&pci_ecam_list))
return;
if (pcibios_last_bus < 0) {
- const struct pci_mmcfg_region *cfg;
+ const struct pci_ecam_region *cfg;
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ list_for_each_entry(cfg, &pci_ecam_list, list) {
if (cfg->segment)
break;
pcibios_last_bus = cfg->end_bus;
}
}
- if (pci_mmcfg_arch_init())
+ if (pci_ecam_arch_init()) {
+ raw_pci_ext_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
- else {
- free_all_mmcfg();
+ } else {
+ pci_ecam_free_all();
pci_mmcfg_arch_init_failed = true;
}
}
@@ -668,7 +554,7 @@ void __init pci_mmcfg_early_init(void)
if (pci_mmcfg_check_hostbridge())
known_bridge = 1;
else
- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
__pci_mmcfg_init(1);
set_apei_filter();
@@ -686,14 +572,14 @@ void __init pci_mmcfg_late_init(void)
/* MMCONFIG hasn't been enabled yet, try again */
if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
__pci_mmcfg_init(0);
}
}
static int __init pci_mmcfg_late_insert_resources(void)
{
- struct pci_mmcfg_region *cfg;
+ struct pci_ecam_region *cfg;
pci_mmcfg_running_state = true;
@@ -706,7 +592,7 @@ static int __init pci_mmcfg_late_insert_resources(void)
* marked so it won't cause request errors when __request_region is
* called.
*/
- list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ list_for_each_entry(cfg, &pci_ecam_list, list)
if (!cfg->res.parent)
insert_resource(&iomem_resource, &cfg->res);
@@ -726,93 +612,45 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
{
int rc;
struct resource *tmp = NULL;
- struct pci_mmcfg_region *cfg;
+ struct pci_ecam_region *cfg;
if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
return -ENODEV;
- if (start > end)
+ if (start > end || !addr)
return -EINVAL;
- mutex_lock(&pci_mmcfg_lock);
- cfg = pci_mmconfig_lookup(seg, start);
- if (cfg) {
- if (cfg->end_bus < end)
- dev_info(dev, FW_INFO
- "MMCONFIG for "
- "domain %04x [bus %02x-%02x] "
- "only partially covers this bridge\n",
- cfg->segment, cfg->start_bus, cfg->end_bus);
- mutex_unlock(&pci_mmcfg_lock);
- return -EEXIST;
- }
-
- if (!addr) {
- mutex_unlock(&pci_mmcfg_lock);
- return -EINVAL;
- }
-
rc = -EBUSY;
- cfg = pci_mmconfig_alloc(seg, start, end, addr);
+ cfg = pci_ecam_alloc(seg, start, end, addr);
if (cfg == NULL) {
dev_warn(dev, "fail to add MMCONFIG (out of memory)\n");
- rc = -ENOMEM;
+ return -ENOMEM;
} else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
&cfg->res);
- } else {
- /* Insert resource if it's not in boot stage */
- if (pci_mmcfg_running_state)
- tmp = insert_resource_conflict(&iomem_resource,
- &cfg->res);
-
- if (tmp) {
- dev_warn(dev,
- "MMCONFIG %pR conflicts with "
- "%s %pR\n",
- &cfg->res, tmp->name, tmp);
- } else if (pci_mmcfg_arch_map(cfg)) {
- dev_warn(dev, "fail to map MMCONFIG %pR.\n",
- &cfg->res);
- } else {
- list_add_sorted(cfg);
- dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
- &cfg->res, (unsigned long)addr);
- cfg = NULL;
- rc = 0;
- }
+ goto error;
}
- if (cfg) {
- if (cfg->res.parent)
- release_resource(&cfg->res);
- kfree(cfg);
- }
+ /* Insert resource if it's not in boot stage */
+ if (pci_mmcfg_running_state)
+ tmp = insert_resource_conflict(&iomem_resource, &cfg->res);
- mutex_unlock(&pci_mmcfg_lock);
+ if (tmp) {
+ dev_warn(dev,
+ "MMCONFIG %pR conflicts with %s %pR\n",
+ &cfg->res, tmp->name, tmp);
+ goto error;
+ }
- return rc;
-}
+ rc = pci_ecam_inject(cfg);
+ if (rc)
+ goto error;
-/* Delete MMCFG information for host bridges */
-int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
-{
- struct pci_mmcfg_region *cfg;
-
- mutex_lock(&pci_mmcfg_lock);
- list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
- if (cfg->segment == seg && cfg->start_bus == start &&
- cfg->end_bus == end) {
- list_del_rcu(&cfg->list);
- synchronize_rcu();
- pci_mmcfg_arch_unmap(cfg);
- if (cfg->res.parent)
- release_resource(&cfg->res);
- mutex_unlock(&pci_mmcfg_lock);
- kfree(cfg);
- return 0;
- }
- mutex_unlock(&pci_mmcfg_lock);
+ return 0;
- return -ENOENT;
+error:
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ kfree(cfg);
+ return rc;
}
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 43984bc..27e707d 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
+#include <linux/ecam.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
@@ -27,7 +28,7 @@ static int mmcfg_last_accessed_cpu;
*/
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
- struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
+ struct pci_ecam_region *cfg = pci_ecam_lookup(seg, bus);
if (cfg)
return cfg->address;
@@ -39,7 +40,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
*/
static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
{
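+	/*
+	 * ECAM encodes the target in the address bits: bus << 20,
+	 * device << 15, function << 12. Since devfn is (dev << 3) | fn,
+	 * a single devfn << 12 shift yields both the dev and fn fields.
+	 */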
- u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
+ u32 dev_base = base | PCI_ECAM_BUS_OFFSET(bus) | (devfn << 12);
int cpu = smp_processor_id();
if (dev_base != mmcfg_last_accessed_device ||
cpu != mmcfg_last_accessed_cpu) {
@@ -49,7 +50,7 @@ static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
}
}
-static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
+int pci_ecam_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
unsigned long flags;
@@ -71,24 +72,14 @@ err: *value = -1;
pci_exp_set_dev_base(base, bus, devfn);
- switch (len) {
- case 1:
- *value = mmio_config_readb(mmcfg_virt_addr + reg);
- break;
- case 2:
- *value = mmio_config_readw(mmcfg_virt_addr + reg);
- break;
- case 4:
- *value = mmio_config_readl(mmcfg_virt_addr + reg);
- break;
- }
+ *value = pci_mmio_read(len, mmcfg_virt_addr + reg);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
rcu_read_unlock();
return 0;
}
-static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
+int pci_ecam_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
unsigned long flags;
@@ -108,45 +99,29 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
pci_exp_set_dev_base(base, bus, devfn);
- switch (len) {
- case 1:
- mmio_config_writeb(mmcfg_virt_addr + reg, value);
- break;
- case 2:
- mmio_config_writew(mmcfg_virt_addr + reg, value);
- break;
- case 4:
- mmio_config_writel(mmcfg_virt_addr + reg, value);
- break;
- }
+ pci_mmio_write(len, mmcfg_virt_addr + reg, value);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
rcu_read_unlock();
return 0;
}
-const struct pci_raw_ops pci_mmcfg = {
- .read = pci_mmcfg_read,
- .write = pci_mmcfg_write,
-};
-
-int __init pci_mmcfg_arch_init(void)
+int __init pci_ecam_arch_init(void)
{
printk(KERN_INFO "PCI: Using MMCONFIG for extended config space\n");
- raw_pci_ext_ops = &pci_mmcfg;
return 1;
}
-void __init pci_mmcfg_arch_free(void)
+void __init pci_ecam_arch_free(void)
{
}
-int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
+int pci_ecam_arch_map(struct pci_ecam_region *cfg)
{
return 0;
}
-void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
+void pci_ecam_arch_unmap(struct pci_ecam_region *cfg)
{
unsigned long flags;
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
deleted file mode 100644
index bea5249..0000000
--- a/arch/x86/pci/mmconfig_64.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
- *
- * This is an 64bit optimized version that always keeps the full mmconfig
- * space mapped. This allows lockless config space operation.
- */
-
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/bitmap.h>
-#include <linux/rcupdate.h>
-#include <asm/e820.h>
-#include <asm/pci_x86.h>
-
-#define PREFIX "PCI: "
-
-static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
-{
- struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
-
- if (cfg && cfg->virt)
- return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
- return NULL;
-}
-
-static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
- unsigned int devfn, int reg, int len, u32 *value)
-{
- char __iomem *addr;
-
- /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
- if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
-err: *value = -1;
- return -EINVAL;
- }
-
- rcu_read_lock();
- addr = pci_dev_base(seg, bus, devfn);
- if (!addr) {
- rcu_read_unlock();
- goto err;
- }
-
- switch (len) {
- case 1:
- *value = mmio_config_readb(addr + reg);
- break;
- case 2:
- *value = mmio_config_readw(addr + reg);
- break;
- case 4:
- *value = mmio_config_readl(addr + reg);
- break;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
- unsigned int devfn, int reg, int len, u32 value)
-{
- char __iomem *addr;
-
- /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
- if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
- return -EINVAL;
-
- rcu_read_lock();
- addr = pci_dev_base(seg, bus, devfn);
- if (!addr) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- switch (len) {
- case 1:
- mmio_config_writeb(addr + reg, value);
- break;
- case 2:
- mmio_config_writew(addr + reg, value);
- break;
- case 4:
- mmio_config_writel(addr + reg, value);
- break;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-const struct pci_raw_ops pci_mmcfg = {
- .read = pci_mmcfg_read,
- .write = pci_mmcfg_write,
-};
-
-static void __iomem *mcfg_ioremap(struct pci_mmcfg_region *cfg)
-{
- void __iomem *addr;
- u64 start, size;
- int num_buses;
-
- start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
- num_buses = cfg->end_bus - cfg->start_bus + 1;
- size = PCI_MMCFG_BUS_OFFSET(num_buses);
- addr = ioremap_nocache(start, size);
- if (addr)
- addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
- return addr;
-}
-
-int __init pci_mmcfg_arch_init(void)
-{
- struct pci_mmcfg_region *cfg;
-
- list_for_each_entry(cfg, &pci_mmcfg_list, list)
- if (pci_mmcfg_arch_map(cfg)) {
- pci_mmcfg_arch_free();
- return 0;
- }
-
- raw_pci_ext_ops = &pci_mmcfg;
-
- return 1;
-}
-
-void __init pci_mmcfg_arch_free(void)
-{
- struct pci_mmcfg_region *cfg;
-
- list_for_each_entry(cfg, &pci_mmcfg_list, list)
- pci_mmcfg_arch_unmap(cfg);
-}
-
-int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
-{
- cfg->virt = mcfg_ioremap(cfg);
- if (!cfg->virt) {
- pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
-{
- if (cfg && cfg->virt) {
- iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
- cfg->virt = NULL;
- }
-}
diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c
index 2e565e6..f60d403 100644
--- a/arch/x86/pci/numachip.c
+++ b/arch/x86/pci/numachip.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/ecam.h>
#include <linux/pci.h>
#include <asm/pci_x86.h>
@@ -20,7 +21,7 @@ static u8 limit __read_mostly;
static inline char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
- struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
+ struct pci_ecam_region *cfg = pci_ecam_lookup(seg, bus);
if (cfg && cfg->virt)
return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
@@ -51,17 +52,7 @@ err: *value = -1;
goto err;
}
- switch (len) {
- case 1:
- *value = mmio_config_readb(addr + reg);
- break;
- case 2:
- *value = mmio_config_readw(addr + reg);
- break;
- case 4:
- *value = mmio_config_readl(addr + reg);
- break;
- }
+ *value = pci_mmio_read(len, addr + reg);
rcu_read_unlock();
return 0;
@@ -87,17 +78,7 @@ static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus,
return -EINVAL;
}
- switch (len) {
- case 1:
- mmio_config_writeb(addr + reg, value);
- break;
- case 2:
- mmio_config_writew(addr + reg, value);
- break;
- case 4:
- mmio_config_writel(addr + reg, value);
- break;
- }
+ pci_mmio_write(len, addr + reg, value);
rcu_read_unlock();
return 0;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e6c3ddd..25226c9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,7 +5,7 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on !IA64_HP_SIM
- depends on IA64 || X86
+ depends on IA64 || X86 || ARM64
depends on PCI
select PNP
default y
@@ -48,9 +48,13 @@ config ACPI_LEGACY_TABLES_LOOKUP
config ARCH_MIGHT_HAVE_ACPI_PDC
bool
+config ACPI_GENERIC_SLEEP
+ bool
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
+ depends on ACPI_GENERIC_SLEEP
default y
config ACPI_PROCFS_POWER
@@ -163,6 +167,7 @@ config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
select CPU_IDLE
+ depends on X86 || IA64
default y
help
This driver installs ACPI as the idle handler for Linux and uses
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 623b117..9595d13 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -23,7 +23,7 @@ acpi-y += nvs.o
# Power management related files
acpi-y += wakeup.o
-acpi-y += sleep.o
+acpi-$(CONFIG_ACPI_GENERIC_SLEEP) += sleep.o
acpi-y += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
@@ -67,6 +67,7 @@ obj-$(CONFIG_ACPI_BUTTON) += button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
+obj-$(CONFIG_PCI_MMCONFIG) += mcfg.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-y += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1020b1b..58f335c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->phys_id == -1)
+ if (pr->phys_id == PHYS_CPUID_INVALID)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -215,7 +215,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int phys_id, cpu_index, device_declaration = 0;
+ phys_cpuid_t phys_id;
+ int cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -263,7 +264,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
- if (phys_id < 0)
+ if (phys_id == PHYS_CPUID_INVALID)
acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
pr->phys_id = phys_id;
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index c2f03e8..2aef850 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -430,6 +430,9 @@ acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
struct acpi_pnp_device_id_list ** return_cid_list);
+acpi_status
+acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id);
/*
* utlock - reader/writer locks
*/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index d66c326..590ef06 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -276,11 +276,12 @@ acpi_get_object_info(acpi_handle handle,
struct acpi_pnp_device_id *hid = NULL;
struct acpi_pnp_device_id *uid = NULL;
struct acpi_pnp_device_id *sub = NULL;
+ struct acpi_pnp_device_id *cls = NULL;
char *next_id_string;
acpi_object_type type;
acpi_name name;
u8 param_count = 0;
- u8 valid = 0;
+ u16 valid = 0;
u32 info_size;
u32 i;
acpi_status status;
@@ -320,7 +321,7 @@ acpi_get_object_info(acpi_handle handle,
if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
/*
* Get extra info for ACPI Device/Processor objects only:
- * Run the Device _HID, _UID, _SUB, and _CID methods.
+ * Run the Device _HID, _UID, _SUB, _CID and _CLS methods.
*
* Note: none of these methods are required, so they may or may
* not be present for this device. The Info->Valid bitfield is used
@@ -351,6 +352,14 @@ acpi_get_object_info(acpi_handle handle,
valid |= ACPI_VALID_SUB;
}
+ /* Execute the Device._CLS method */
+
+ status = acpi_ut_execute_CLS(node, &cls);
+ if (ACPI_SUCCESS(status)) {
+ info_size += cls->length;
+ valid |= ACPI_VALID_CLS;
+ }
+
/* Execute the Device._CID method */
status = acpi_ut_execute_CID(node, &cid_list);
@@ -468,6 +477,11 @@ acpi_get_object_info(acpi_handle handle,
sub, next_id_string);
}
+ if (cls) {
+ next_id_string = acpi_ns_copy_device_id(&info->cls,
+ cls, next_id_string);
+ }
+
if (cid_list) {
info->compatible_id_list.count = cid_list->count;
info->compatible_id_list.list_size = cid_list->list_size;
@@ -507,6 +521,9 @@ cleanup:
if (sub) {
ACPI_FREE(sub);
}
+ if (cls) {
+ ACPI_FREE(cls);
+ }
if (cid_list) {
ACPI_FREE(cid_list);
}
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 27431cf..a64b5d1 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -416,3 +416,74 @@ cleanup:
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_CLS
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_id - Where the class code string is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _CLS control method that returns the
+ * PCI-defined class code of the device. The ACPI spec defines
+ * _CLS as a package of three integers. The returned string has
+ * the format:
+ *
+ * "bbsspp"
+ * where:
+ * bb = Base-class code
+ * ss = Sub-class code
+ * pp = Programming Interface code
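+ *
+ * For example, an AHCI SATA controller (base class 0x01, sub-class
+ * 0x06, programming interface 0x01) yields the string "010601".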
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id)
+{
+ struct acpi_pnp_device_id *cls;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object **cls_objects;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_CLS);
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CLS,
+ ACPI_BTYPE_PACKAGE, &obj_desc);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ cls_objects = obj_desc->package.elements;
+
+ if (obj_desc->common.type == ACPI_TYPE_PACKAGE &&
+ obj_desc->package.count == 3 &&
+ cls_objects[0]->common.type == ACPI_TYPE_INTEGER &&
+ cls_objects[1]->common.type == ACPI_TYPE_INTEGER &&
+ cls_objects[2]->common.type == ACPI_TYPE_INTEGER) {
+
+ /* Allocate a buffer for the CLS */
+ cls = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
+ (acpi_size) 7);
+ if (!cls) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ cls->string =
+ ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id));
+
+ sprintf(cls->string, "%02x%02x%02x",
+ (u8)ACPI_TO_INTEGER(cls_objects[0]->integer.value),
+ (u8)ACPI_TO_INTEGER(cls_objects[1]->integer.value),
+ (u8)ACPI_TO_INTEGER(cls_objects[2]->integer.value));
+ cls->length = 7;
+ *return_id = cls;
+ }
+
+cleanup:
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 8b67bd0..c412fdb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -448,6 +448,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_IOSAPIC:
message = "IOSAPIC";
break;
+ case ACPI_IRQ_MODEL_GIC:
+ message = "GIC";
+ break;
case ACPI_IRQ_MODEL_PLATFORM:
message = "platform specific model";
break;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 56b321a..b5eef4c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -161,7 +161,11 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
/*--------------------------------------------------------------------------
Suspend/Resume
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_GENERIC_SLEEP
extern int acpi_sleep_init(void);
+#else
+static inline int acpi_sleep_init(void) { return -ENOSYS; }
+#endif
#ifdef CONFIG_ACPI_SLEEP
int acpi_sleep_proc_init(void);
diff --git a/drivers/acpi/mcfg.c b/drivers/acpi/mcfg.c
new file mode 100644
index 0000000..ed4b85b
--- /dev/null
+++ b/drivers/acpi/mcfg.c
@@ -0,0 +1,140 @@
+/*
+ * MCFG ACPI table parser.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/ecam.h>
+
+#define PREFIX "MCFG: "
+
+extern struct acpi_mcfg_fixup __start_acpi_mcfg_fixups[];
+extern struct acpi_mcfg_fixup __end_acpi_mcfg_fixups[];
+
+/*
+ * raw_pci_read/write - ACPI PCI config space accessors.
+ *
+ * The ACPI spec defines the MCFG table as the way to describe access to
+ * PCI config space, so let MCFG-based access be the default (__weak).
+ *
+ * If a platform needs something fancier, it should provide its own
+ * implementation.
+ */
+int __weak raw_pci_read(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *val)
+{
+ return pci_ecam_read(domain, bus, devfn, reg, len, val);
+}
+
+int __weak raw_pci_write(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 val)
+{
+ return pci_ecam_write(domain, bus, devfn, reg, len, val);
+}
+
+static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
+ struct acpi_mcfg_allocation *cfg)
+{
+ int year;
+
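+	/*
+	 * ARM64 platforms routinely place ECAM regions above 4GB, so the
+	 * legacy x86 sanity check below does not apply there.
+	 */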
+ if (IS_ENABLED(CONFIG_ARM64))
+ return 0;
+
+ if (cfg->address < 0xFFFFFFFF)
+ return 0;
+
+ if (!strncmp(mcfg->header.oem_id, "SGI", 3))
+ return 0;
+
+ if (mcfg->header.revision >= 1) {
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+ year >= 2010)
+ return 0;
+ }
+
+ pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
+ "is above 4GB, ignored\n", cfg->pci_segment,
+ cfg->start_bus_number, cfg->end_bus_number, cfg->address);
+ return -EINVAL;
+}
+
+int __init acpi_parse_mcfg(struct acpi_table_header *header)
+{
+ struct acpi_table_mcfg *mcfg;
+ struct acpi_mcfg_allocation *cfg_table, *cfg;
+ struct acpi_mcfg_fixup *fixup;
+ struct pci_ecam_region *new;
+ unsigned long i;
+ int entries;
+
+ if (!header)
+ return -EINVAL;
+
+ mcfg = (struct acpi_table_mcfg *)header;
+
+ /* how many config structures do we have */
+ pci_ecam_free_all();
+ entries = 0;
+ i = header->length - sizeof(struct acpi_table_mcfg);
+ while (i >= sizeof(struct acpi_mcfg_allocation)) {
+ entries++;
+ i -= sizeof(struct acpi_mcfg_allocation);
+ }
+ if (entries == 0) {
+ pr_err(PREFIX "MCFG table has no entries\n");
+ return -ENODEV;
+ }
+
+ fixup = __start_acpi_mcfg_fixups;
+ while (fixup < __end_acpi_mcfg_fixups) {
+ if (!strncmp(fixup->oem_id, header->oem_id, 6) &&
+ !strncmp(fixup->oem_table_id, header->oem_table_id, 8))
+ break;
+ ++fixup;
+ }
+
+ cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
+ for (i = 0; i < entries; i++) {
+ cfg = &cfg_table[i];
+ if (acpi_mcfg_check_entry(mcfg, cfg)) {
+ pci_ecam_free_all();
+ return -ENODEV;
+ }
+
+ new = pci_ecam_add(cfg->pci_segment, cfg->start_bus_number,
+ cfg->end_bus_number, cfg->address);
+ if (!new) {
+ pr_warn(PREFIX "no memory for MCFG entries\n");
+ pci_ecam_free_all();
+ return -ENOMEM;
+ }
+ if (fixup < __end_acpi_mcfg_fixups)
+ new->fixup = fixup->hook;
+ }
+
+ return 0;
+}
+
+void __init __weak pci_mmcfg_early_init(void)
+{
+
+}
+
+void __init __weak pci_mmcfg_late_init(void)
+{
+ struct pci_ecam_region *cfg;
+
+ acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
+
+ if (list_empty(&pci_ecam_list))
+ return;
+ if (!pci_ecam_arch_init())
+ pci_ecam_free_all();
+
+ list_for_each_entry(cfg, &pci_ecam_list, list)
+ insert_resource(&iomem_resource, &cfg->res);
+}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f9eeae8..39748bb 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -336,11 +336,11 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
return NULL;
}
-#ifndef CONFIG_IA64
-#define should_use_kmap(pfn) page_is_ram(pfn)
-#else
+#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn) 0
+#else
+#define should_use_kmap(pfn) page_is_ram(pfn)
#endif
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 7962651..b1ec78b 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,7 +32,7 @@ static struct acpi_table_madt *get_madt_table(void)
}
static int map_lapic_id(struct acpi_subtable_header *entry,
- u32 acpi_id, int *apic_id)
+ u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
@@ -48,7 +48,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, int *apic_id)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
@@ -65,7 +65,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, int *apic_id)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
@@ -83,10 +83,35 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
return 0;
}
-static int map_madt_entry(int type, u32 acpi_id)
+/*
+ * Retrieve the ARM CPU physical identifier (MPIDR)
+ */
+static int map_gicc_mpidr(struct acpi_subtable_header *entry,
+ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ container_of(entry, struct acpi_madt_generic_interrupt, header);
+
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return -ENODEV;
+
+ /* device_declaration means a Device object in the DSDT. In the
+ * GIC interrupt model, logical processors are required to
+ * have a Processor Device object in the DSDT, so we should
+ * check device_declaration here.
+ */
+ if (device_declaration && (gicc->uid == acpi_id)) {
+ *mpidr = gicc->arm_mpidr;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
{
unsigned long madt_end, entry;
- int phys_id = -1; /* CPU hardware ID */
+ phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
struct acpi_table_madt *madt;
madt = get_madt_table();
@@ -111,18 +136,21 @@ static int map_madt_entry(int type, u32 acpi_id)
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
+ } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
+ break;
}
entry += header->length;
}
return phys_id;
}
-static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int phys_id = -1;
+ phys_cpuid_t phys_id = PHYS_CPUID_INVALID;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -143,33 +171,35 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &phys_id);
+ else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
-int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int phys_id;
+ phys_cpuid_t phys_id;
phys_id = map_mat_entry(handle, type, acpi_id);
- if (phys_id == -1)
+ if (phys_id == PHYS_CPUID_INVALID)
phys_id = map_madt_entry(type, acpi_id);
return phys_id;
}
-int acpi_map_cpuid(int phys_id, u32 acpi_id)
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (phys_id == -1) {
+ if (phys_id == PHYS_CPUID_INVALID) {
/*
* On UP processor, there is no _MAT or MADT table.
- * So above phys_id is always set to -1.
+ * So the phys_id above is always set to PHYS_CPUID_INVALID.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@@ -190,7 +220,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return phys_id;
+ return -1;
}
#ifdef CONFIG_SMP
@@ -208,7 +238,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id)
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int phys_id;
+ phys_cpuid_t phys_id;
phys_id = acpi_get_phys_id(handle, type, acpi_id);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index bbca783..f6ecbd1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -907,10 +907,19 @@ static const struct acpi_device_id *__acpi_match_device(
if (!device->status.present)
return NULL;
- for (id = ids; id->id[0]; id++)
- list_for_each_entry(hwid, &device->pnp.ids, list)
- if (!strcmp((char *) id->id, hwid->id))
+ for (id = ids; id->id[0] || id->cls; id++) {
+ list_for_each_entry(hwid, &device->pnp.ids, list) {
+ if (id->id[0] && !strcmp((char *) id->id, hwid->id)) {
return id;
+ } else if (id->cls) {
+ char buf[7];
+
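+			/* _CLS ids are compared as six hex digits
+			 * ("bbsspp"), e.g. 0x010601 -> "010601" */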
+ sprintf(buf, "%06x", id->cls);
+ if (!strcmp(buf, hwid->id))
+ return id;
+ }
+ }
+ }
return NULL;
}
@@ -1974,6 +1983,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
if (info->valid & ACPI_VALID_UID)
pnp->unique_id = kstrdup(info->unique_id.string,
GFP_KERNEL);
+ if (info->valid & ACPI_VALID_CLS)
+ acpi_add_id(pnp, info->cls.string);
kfree(info);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 93b8152..2e19189 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -23,6 +23,8 @@
*
*/
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
@@ -61,9 +63,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic *p =
(struct acpi_madt_local_apic *)header;
- pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
- p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->processor_id, p->id,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -71,9 +73,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_x2apic *p =
(struct acpi_madt_local_x2apic *)header;
- pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
- p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+ p->local_apic_id, p->uid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -81,8 +83,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_apic *p =
(struct acpi_madt_io_apic *)header;
- pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
- p->id, p->address, p->global_irq_base);
+ pr_debug("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
}
break;
@@ -155,9 +157,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_sapic *p =
(struct acpi_madt_io_sapic *)header;
- pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
- p->id, (void *)(unsigned long)p->address,
- p->global_irq_base);
+ pr_debug("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *)(unsigned long)p->address,
+ p->global_irq_base);
}
break;
@@ -165,9 +167,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_sapic *p =
(struct acpi_madt_local_sapic *)header;
- pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
- p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->processor_id, p->id, p->eid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -183,6 +185,28 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
break;
+ case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
+ {
+ struct acpi_madt_generic_interrupt *p =
+ (struct acpi_madt_generic_interrupt *)header;
+ pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
+ p->uid, p->base_address,
+ p->arm_mpidr,
+ (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+
+ }
+ break;
+
+ case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR:
+ {
+ struct acpi_madt_generic_distributor *p =
+ (struct acpi_madt_generic_distributor *)header;
+ pr_debug("GIC Distributor (gic_id[0x%04x] address[%llx] gsi_base[%d])\n",
+ p->gic_id, p->base_address,
+ p->global_irq_base);
+ }
+ break;
+
default:
pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
header->type);
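
These print helpers run while the MADT is parsed at boot; with the hunk
above, the per-entry lines are emitted at pr_debug() level, so they only
appear when DEBUG is defined (see the top of the file) or when enabled via
dynamic debug. A minimal sketch of how such entries are walked -- the
handler name here is hypothetical, but acpi_table_parse_madt() and
acpi_table_print_madt_entry() are the real interfaces:

	static int __init gicc_print_handler(struct acpi_subtable_header *header,
					     const unsigned long end)
	{
		/* Emits the GICC pr_debug() line added above */
		acpi_table_print_madt_entry(header);
		return 0;
	}

	/* Walk every GICC entry in the MADT (0 = no entry limit) */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      gicc_print_handler, 0);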
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index cd49a39..7f68f96 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -712,3 +712,29 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
return false;
}
EXPORT_SYMBOL(acpi_check_dsm);
+
+/**
+ * acpi_check_coherency - check for memory coherency of a device
+ * @handle: ACPI device handle
+ * @val: Pointer to returned value
+ *
+ * Search a device and its parents for a _CCA method and return
+ * its value.
+ */
+acpi_status acpi_check_coherency(acpi_handle handle, int *val)
+{
+ unsigned long long data;
+ acpi_status status;
+
+ do {
+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+		if (ACPI_SUCCESS(status)) {
+ *val = data;
+ break;
+ }
+ status = acpi_get_parent(handle, &handle);
+	} while (ACPI_SUCCESS(status));
+
+ return status;
+}
+EXPORT_SYMBOL(acpi_check_coherency);
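
A minimal sketch of consuming this helper from a platform driver's probe
path; the driver and its probe function are hypothetical, while
ACPI_HANDLE() and ACPI_SUCCESS() are the standard ACPI interfaces:

	static int foo_probe(struct platform_device *pdev)
	{
		acpi_handle handle = ACPI_HANDLE(&pdev->dev);
		int coherent = 0;

		/* Walks up from the device until a _CCA method is found */
		if (handle && ACPI_SUCCESS(acpi_check_coherency(handle, &coherent)))
			dev_info(&pdev->dev, "DMA is %scoherent\n",
				 coherent ? "" : "non-");
		return 0;
	}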
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5f60155..50305e3 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
config ATA_ACPI
bool "ATA ACPI Support"
- depends on ACPI && PCI
+ depends on ACPI
default y
help
This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 78d6ae0..842cd13 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
#include "ahci.h"
#define DRV_NAME "ahci"
@@ -78,12 +80,19 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
+static const struct acpi_device_id ahci_acpi_match[] = {
+ { "", 0, PCI_CLASS_STORAGE_SATA_AHCI },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
+ .acpi_match_table = ahci_acpi_match,
.pm = &ahci_pm_ops,
},
};
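
The empty _HID string with a PCI class code in the third initializer
relies on the _CLS matching this series wires up (the ACPI_VALID_CLS
handling in the scan.c hunk above, plus the acpica utids.c changes in the
diffstat), which assumes the extended struct acpi_device_id from this
series. A hypothetical driver for another storage class would follow the
same pattern; PCI_CLASS_STORAGE_EXPRESS is a real pci_ids.h constant, the
table name is made up:

	static const struct acpi_device_id foo_acpi_match[] = {
		{ "", 0, PCI_CLASS_STORAGE_EXPRESS },	/* match on _CLS only */
		{ },
	};
	MODULE_DEVICE_TABLE(acpi, foo_acpi_match);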
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 2e8bb60..33d7784 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -28,6 +28,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
+#include <linux/acpi.h>
#include "ahci.h"
#define DRV_NAME "xgene-ahci"
@@ -225,14 +226,6 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
return rc;
}
-static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
-{
- void __iomem *diagcsr = ctx->csr_diag;
-
- return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
- readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
-}
-
/**
* xgene_ahci_read_id - Read ID data from the specified device
* @dev: device
@@ -685,11 +678,6 @@ static int xgene_ahci_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (xgene_ahci_is_memram_inited(ctx)) {
- dev_info(dev, "skip clock and PHY initialization\n");
- goto skip_clk_phy;
- }
-
/* Due to errata, HW requires full toggle transition */
rc = ahci_platform_enable_clks(hpriv);
if (rc)
@@ -702,7 +690,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
/* Configure the host controller */
xgene_ahci_hw_init(hpriv);
-skip_clk_phy:
+
hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info,
@@ -718,6 +706,16 @@ disable_resources:
return rc;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_ahci_acpi_match[] = {
+ { "APMC0D00", },
+ { "APMC0D0D", },
+ { "APMC0D09", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
+#endif
+
static const struct of_device_id xgene_ahci_of_match[] = {
{.compatible = "apm,xgene-ahci"},
{},
@@ -730,6 +728,7 @@ static struct platform_driver xgene_ahci_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
},
};
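
ACPI_PTR() evaluates to NULL when CONFIG_ACPI is off, which is why the
match table above can live under #ifdef CONFIG_ACPI without breaking the
build; of_match_ptr() plays the same role for CONFIG_OF. A sketch of the
resulting pattern, with hypothetical names:

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name			= "foo",
			.of_match_table		= of_match_ptr(foo_of_match),
			.acpi_match_table	= ACPI_PTR(foo_acpi_match),
		},
	};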
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a3025e7..3b2e2d0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/acpi.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -62,7 +63,8 @@ enum ppi_nr {
MAX_TIMER_PPI
};
-static int arch_timer_ppi[MAX_TIMER_PPI];
+int arch_timer_ppi[MAX_TIMER_PPI];
+EXPORT_SYMBOL(arch_timer_ppi);
static struct clock_event_device __percpu *arch_timer_evt;
@@ -371,8 +373,12 @@ arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
if (arch_timer_rate)
return;
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
+ /*
+	 * Try to determine the frequency from the device tree or CNTFRQ.
+	 * If ACPI is enabled, get the frequency from CNTFRQ only.
+ */
+ if (!acpi_disabled ||
+ of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
if (cntbase)
arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
else
@@ -691,28 +697,8 @@ static void __init arch_timer_common_init(void)
arch_timer_arch_init();
}
-static void __init arch_timer_init(struct device_node *np)
+static void __init arch_timer_init(void)
{
- int i;
-
- if (arch_timers_present & ARCH_CP15_TIMER) {
- pr_warn("arch_timer: multiple nodes in dt, skipping\n");
- return;
- }
-
- arch_timers_present |= ARCH_CP15_TIMER;
- for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
- arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
- arch_timer_detect_rate(NULL, np);
-
- /*
- * If we cannot rely on firmware initializing the timer registers then
- * we should use the physical timers instead.
- */
- if (IS_ENABLED(CONFIG_ARM) &&
- of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
- arch_timer_use_virtual = false;
-
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
@@ -731,13 +717,39 @@ static void __init arch_timer_init(struct device_node *np)
}
}
- arch_timer_c3stop = !of_property_read_bool(np, "always-on");
-
arch_timer_register();
arch_timer_common_init();
}
-CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
-CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
+
+static void __init arch_timer_of_init(struct device_node *np)
+{
+ int i;
+
+ if (arch_timers_present & ARCH_CP15_TIMER) {
+ pr_warn("arch_timer: multiple nodes in dt, skipping\n");
+ return;
+ }
+
+ arch_timers_present |= ARCH_CP15_TIMER;
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+ arch_timer_detect_rate(NULL, np);
+
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
+ /*
+ * If we cannot rely on firmware initializing the timer registers then
+ * we should use the physical timers instead.
+ */
+ if (IS_ENABLED(CONFIG_ARM) &&
+ of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
+ arch_timer_use_virtual = false;
+
+ arch_timer_init();
+}
+CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
+CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static void __init arch_timer_mem_init(struct device_node *np)
{
@@ -804,3 +816,70 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
+
+#ifdef CONFIG_ACPI
+static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
+{
+ int trigger, polarity;
+
+ if (!interrupt)
+ return 0;
+
+ trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
+ : ACPI_LEVEL_SENSITIVE;
+
+ polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
+ : ACPI_ACTIVE_HIGH;
+
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
+}
+
+/* Initialize per-processor generic timer */
+static int __init arch_timer_acpi_init(struct acpi_table_header *table)
+{
+ struct acpi_table_gtdt *gtdt;
+
+ if (arch_timers_present & ARCH_CP15_TIMER) {
+ pr_warn("arch_timer: already initialized, skipping\n");
+ return -EINVAL;
+ }
+
+ gtdt = container_of(table, struct acpi_table_gtdt, header);
+
+ arch_timers_present |= ARCH_CP15_TIMER;
+
+ arch_timer_ppi[PHYS_SECURE_PPI] =
+ map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
+ gtdt->secure_el1_flags);
+
+ arch_timer_ppi[PHYS_NONSECURE_PPI] =
+ map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
+ gtdt->non_secure_el1_flags);
+
+ arch_timer_ppi[VIRT_PPI] =
+ map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
+ gtdt->virtual_timer_flags);
+
+ arch_timer_ppi[HYP_PPI] =
+ map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
+ gtdt->non_secure_el2_flags);
+
+ /* Get the frequency from CNTFRQ */
+ arch_timer_detect_rate(NULL, NULL);
+
+ /* Always-on capability */
+ arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
+
+ arch_timer_init();
+ return 0;
+}
+
+/* Initialize all the generic timers presented in GTDT */
+void __init acpi_generic_timer_init(void)
+{
+ if (acpi_disabled)
+ return;
+
+ acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init);
+}
+#endif
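
A worked example of the GTDT flag decoding above, using hypothetical
firmware values: a virtual timer entry with interrupt 27 and
flags = ACPI_GTDT_INTERRUPT_MODE decodes to edge-triggered, active-high,
so arch_timer_acpi_init() effectively does

	arch_timer_ppi[VIRT_PPI] = acpi_register_gsi(NULL, 27,
						     ACPI_EDGE_SENSITIVE,
						     ACPI_ACTIVE_HIGH);

where acpi_register_gsi() returns the Linux IRQ number mapped to GSI 27
(or a negative errno, which map_generic_timer_interrupt() passes straight
back to its caller).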
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index cb59619..6289dd9 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -10,7 +10,7 @@ config EDAC_SUPPORT
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT
+ depends on X86 || PPC || TILE || ARM || ARM64 || EDAC_SUPPORT
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -392,4 +392,11 @@ config EDAC_SYNOPSYS
Support for error detection and correction on the Synopsys DDR
memory controller.
+config EDAC_XGENE
+ tristate "APM X-Gene SoC"
+ depends on EDAC_MM_EDAC && ARM64
+ help
+ Support for error detection and correction on the
+ APM X-Gene family of SOCs.
+
endif # EDAC
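
Building the new driver needs the usual EDAC chain enabled; a minimal
config fragment using the option names from the entries above:

	CONFIG_EDAC=y
	CONFIG_EDAC_MM_EDAC=y
	CONFIG_EDAC_XGENE=y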
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index b255f36..4c2b0bd 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -68,3 +68,5 @@ obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
+
+obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
new file mode 100644
index 0000000..027e5d9
--- /dev/null
+++ b/drivers/edac/xgene_edac.c
@@ -0,0 +1,2132 @@
+/*
+ * APM X-Gene SoC EDAC (error detection and correction) Module
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ * Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/edac.h>
+#include <linux/of.h>
+#include "edac_core.h"
+
+#define EDAC_MOD_STR "xgene_edac"
+
+static int edac_mc_idx;
+static int edac_mc_active_mask;
+static int edac_mc_registered_mask;
+static DEFINE_MUTEX(xgene_edac_lock);
+
+/* Global error configuration status registers (CSR) */
+#define PCPHPERRINTSTS 0x0000
+#define PCPHPERRINTMSK 0x0004
+#define MCU_CTL_ERR_MASK BIT(12)
+#define IOB_PA_ERR_MASK BIT(11)
+#define IOB_BA_ERR_MASK BIT(10)
+#define IOB_XGIC_ERR_MASK BIT(9)
+#define IOB_RB_ERR_MASK BIT(8)
+#define L3C_UNCORR_ERR_MASK BIT(5)
+#define MCU_UNCORR_ERR_MASK BIT(4)
+#define PMD3_MERR_MASK BIT(3)
+#define PMD2_MERR_MASK BIT(2)
+#define PMD1_MERR_MASK BIT(1)
+#define PMD0_MERR_MASK BIT(0)
+#define PCPLPERRINTSTS 0x0008
+#define PCPLPERRINTMSK 0x000C
+#define CSW_SWITCH_TRACE_ERR_MASK BIT(2)
+#define L3C_CORR_ERR_MASK BIT(1)
+#define MCU_CORR_ERR_MASK BIT(0)
+#define MEMERRINTSTS 0x0010
+#define MEMERRINTMSK 0x0014
+
+/* Memory controller error CSR */
+#define MCU_MAX_RANK 8
+#define MCU_RANK_STRIDE 0x40
+
+#define MCUGECR 0x0110
+#define MCU_GECR_DEMANDUCINTREN_MASK BIT(0)
+#define MCU_GECR_BACKUCINTREN_MASK BIT(1)
+#define MCU_GECR_CINTREN_MASK BIT(2)
+#define MUC_GECR_MCUADDRERREN_MASK BIT(9)
+#define MCUGESR 0x0114
+#define MCU_GESR_ADDRNOMATCH_ERR_MASK BIT(7)
+#define MCU_GESR_ADDRMULTIMATCH_ERR_MASK BIT(6)
+#define MCU_GESR_PHYP_ERR_MASK BIT(3)
+#define MCUESRR0 0x0314
+#define MCU_ESRR_MULTUCERR_MASK BIT(3)
+#define MCU_ESRR_BACKUCERR_MASK BIT(2)
+#define MCU_ESRR_DEMANDUCERR_MASK BIT(1)
+#define MCU_ESRR_CERR_MASK BIT(0)
+#define MCUESRRA0 0x0318
+#define MCUEBLRR0 0x031c
+#define MCU_EBLRR_ERRBANK_RD(src) (((src) & 0x00000007) >> 0)
+#define MCUERCRR0 0x0320
+#define MCU_ERCRR_ERRROW_RD(src) (((src) & 0xFFFF0000) >> 16)
+#define MCU_ERCRR_ERRCOL_RD(src) ((src) & 0x00000FFF)
+#define MCUSBECNT0 0x0324
+#define MCU_SBECNT_COUNT(src) ((src) & 0xFFFF)
+
+#define CSW_CSWCR 0x0000
+#define CSW_CSWCR_DUALMCB_MASK BIT(0)
+
+#define MCBADDRMR 0x0000
+#define MCBADDRMR_MCU_INTLV_MODE_MASK BIT(3)
+#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
+#define MCBADDRMR_MCB_INTLV_MODE_MASK BIT(1)
+#define MCBADDRMR_ADDRESS_MODE_MASK BIT(0)
+
+struct xgene_edac_mc_ctx {
+ char *name;
+ void __iomem *pcp_csr;
+ void __iomem *csw_csr;
+ void __iomem *mcba_csr;
+ void __iomem *mcbb_csr;
+ void __iomem *mcu_csr;
+ int mcu_id;
+};
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+#ifdef CONFIG_EDAC_DEBUG
+static ssize_t xgene_edac_mc_err_inject_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct mem_ctl_info *mci = file->private_data;
+ struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
+ int i;
+
+ for (i = 0; i < MCU_MAX_RANK; i++) {
+ writel(MCU_ESRR_MULTUCERR_MASK | MCU_ESRR_BACKUCERR_MASK |
+ MCU_ESRR_DEMANDUCERR_MASK | MCU_ESRR_CERR_MASK,
+ ctx->mcu_csr + MCUESRRA0 + i * MCU_RANK_STRIDE);
+ }
+ return count;
+}
+
+static const struct file_operations xgene_edac_mc_debug_inject_fops = {
+ .open = simple_open,
+ .write = xgene_edac_mc_err_inject_write,
+ .llseek = generic_file_llseek,
+};
+
+static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci)
+{
+ if (!mci->debugfs)
+ return;
+
+ debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
+ &xgene_edac_mc_debug_inject_fops);
+}
+#else
+static void xgene_edac_mc_create_debugfs_node(struct mem_ctl_info *mci)
+{
+}
+#endif
+
+static void xgene_edac_mc_check(struct mem_ctl_info *mci)
+{
+ struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
+ u32 pcp_hp_stat;
+ u32 pcp_lp_stat;
+ u32 reg;
+ u32 rank;
+ u32 bank;
+ u32 count;
+ u32 col_row;
+
+ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS);
+ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS);
+ if (!((MCU_UNCORR_ERR_MASK & pcp_hp_stat) ||
+ (MCU_CTL_ERR_MASK & pcp_hp_stat) ||
+ (MCU_CORR_ERR_MASK & pcp_lp_stat)))
+ return;
+
+ for (rank = 0; rank < MCU_MAX_RANK; rank++) {
+ reg = readl(ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE);
+
+ /* Detect uncorrectable memory error */
+ if (reg & (MCU_ESRR_DEMANDUCERR_MASK |
+ MCU_ESRR_BACKUCERR_MASK)) {
+ /* Detected uncorrectable memory error */
+ edac_mc_chipset_printk(mci, KERN_ERR, "X-Gene",
+ "MCU uncorrectable error at rank %d\n", rank);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, "");
+ }
+
+ /* Detect correctable memory error */
+ if (reg & MCU_ESRR_CERR_MASK) {
+ bank = readl(ctx->mcu_csr + MCUEBLRR0 +
+ rank * MCU_RANK_STRIDE);
+ col_row = readl(ctx->mcu_csr + MCUERCRR0 +
+ rank * MCU_RANK_STRIDE);
+ count = readl(ctx->mcu_csr + MCUSBECNT0 +
+ rank * MCU_RANK_STRIDE);
+ edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
+ "MCU correctable error at rank %d bank %d column %d row %d count %d\n",
+ rank, MCU_EBLRR_ERRBANK_RD(bank),
+ MCU_ERCRR_ERRCOL_RD(col_row),
+ MCU_ERCRR_ERRROW_RD(col_row),
+ MCU_SBECNT_COUNT(count));
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, "");
+ }
+
+ /* Clear all error registers */
+ writel(0x0, ctx->mcu_csr + MCUEBLRR0 + rank * MCU_RANK_STRIDE);
+ writel(0x0, ctx->mcu_csr + MCUERCRR0 + rank * MCU_RANK_STRIDE);
+ writel(0x0, ctx->mcu_csr + MCUSBECNT0 +
+ rank * MCU_RANK_STRIDE);
+ writel(reg, ctx->mcu_csr + MCUESRR0 + rank * MCU_RANK_STRIDE);
+ }
+
+ /* Detect memory controller error */
+ reg = readl(ctx->mcu_csr + MCUGESR);
+ if (reg) {
+ if (reg & MCU_GESR_ADDRNOMATCH_ERR_MASK)
+ edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
+				"MCU address mismatch error\n");
+ if (reg & MCU_GESR_ADDRMULTIMATCH_ERR_MASK)
+ edac_mc_chipset_printk(mci, KERN_WARNING, "X-Gene",
+ "MCU address multi-match error\n");
+
+ writel(reg, ctx->mcu_csr + MCUGESR);
+ }
+}
+
+static irqreturn_t xgene_edac_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
+ u32 pcp_hp_stat;
+ u32 pcp_lp_stat;
+
+ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS);
+ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS);
+ if (!((MCU_UNCORR_ERR_MASK & pcp_hp_stat) ||
+ (MCU_CTL_ERR_MASK & pcp_hp_stat) ||
+ (MCU_CORR_ERR_MASK & pcp_lp_stat)))
+ return IRQ_NONE;
+
+ xgene_edac_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void xgene_edac_mc_irq_ctl(struct mem_ctl_info *mci, bool enable)
+{
+ struct xgene_edac_mc_ctx *ctx = mci->pvt_info;
+ u32 val;
+
+ if (edac_op_state != EDAC_OPSTATE_INT)
+ return;
+
+ mutex_lock(&xgene_edac_lock);
+
+ /*
+	 * As there is only a single bit to enable errors and mask the
+	 * interrupt, we must only enable the top-level interrupt after all
+	 * MCUs have registered. Otherwise, if an error occurs and the
+	 * corresponding MCU has not registered, the interrupt will never get
+	 * cleared. To determine when all MCUs have registered, we keep track
+	 * of both active and registered MCUs.
+ */
+ if (enable) {
+ /* Set registered MCU bit */
+ edac_mc_registered_mask |= 1 << ctx->mcu_id;
+
+ /* Enable interrupt after all active MCU registered */
+ if (edac_mc_registered_mask == edac_mc_active_mask) {
+ /* Enable memory controller top level interrupt */
+ val = readl(ctx->pcp_csr + PCPHPERRINTMSK);
+ val &= ~(MCU_UNCORR_ERR_MASK | MCU_CTL_ERR_MASK);
+ writel(val, ctx->pcp_csr + PCPHPERRINTMSK);
+ val = readl(ctx->pcp_csr + PCPLPERRINTMSK);
+ val &= ~MCU_CORR_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPLPERRINTMSK);
+ }
+
+ /* Enable MCU interrupt and error reporting */
+ val = readl(ctx->mcu_csr + MCUGECR);
+ val |= MCU_GECR_DEMANDUCINTREN_MASK |
+ MCU_GECR_BACKUCINTREN_MASK |
+ MCU_GECR_CINTREN_MASK |
+ MUC_GECR_MCUADDRERREN_MASK;
+ writel(val, ctx->mcu_csr + MCUGECR);
+ } else {
+ /* Disable MCU interrupt */
+ val = readl(ctx->mcu_csr + MCUGECR);
+ val &= ~(MCU_GECR_DEMANDUCINTREN_MASK |
+ MCU_GECR_BACKUCINTREN_MASK |
+ MCU_GECR_CINTREN_MASK |
+ MUC_GECR_MCUADDRERREN_MASK);
+ writel(val, ctx->mcu_csr + MCUGECR);
+
+ /* Disable memory controller top level interrupt */
+ val = readl(ctx->pcp_csr + PCPHPERRINTMSK);
+ val |= MCU_UNCORR_ERR_MASK | MCU_CTL_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPHPERRINTMSK);
+ val = readl(ctx->pcp_csr + PCPLPERRINTMSK);
+ val |= MCU_CORR_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPLPERRINTMSK);
+
+ /* Clear registered MCU bit */
+ edac_mc_registered_mask &= ~(1 << ctx->mcu_id);
+ }
+
+ mutex_unlock(&xgene_edac_lock);
+}
+
+static int xgene_edac_mc_is_active(struct xgene_edac_mc_ctx *ctx, int mc_idx)
+{
+ u32 reg;
+ u32 mcu_mask;
+
+ reg = readl(ctx->csw_csr + CSW_CSWCR);
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /*
+ * Dual MCB active - Determine if all 4 active or just MCU0
+ * and MCU2 active
+ */
+ reg = readl(ctx->mcbb_csr + MCBADDRMR);
+ mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /*
+ * Single MCB active - Determine if MCU0/MCU1 or just MCU0
+ * active
+ */
+ reg = readl(ctx->mcba_csr + MCBADDRMR);
+ mcu_mask = (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+	/* Save the active MCU mask if it has not been set already */
+ if (!edac_mc_active_mask)
+ edac_mc_active_mask = mcu_mask;
+
+ return (mcu_mask & (1 << mc_idx)) ? 1 : 0;
+}
+
+static int xgene_edac_mc_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
+ struct xgene_edac_mc_ctx tmp_ctx;
+ struct xgene_edac_mc_ctx *ctx;
+ struct resource *res;
+ int rc = 0;
+
+ if (!devres_open_group(&pdev->dev, xgene_edac_mc_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* Retrieve resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no PCP resource address\n");
+ rc = -EINVAL;
+ goto err_group;
+ }
+ tmp_ctx.pcp_csr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+	if (!tmp_ctx.pcp_csr) {
+		dev_err(&pdev->dev, "unable to map PCP resource\n");
+		rc = -ENOMEM;
+		goto err_group;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no CSW resource address\n");
+ rc = -EINVAL;
+ goto err_group;
+ }
+ tmp_ctx.csw_csr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+	if (!tmp_ctx.csw_csr) {
+		dev_err(&pdev->dev, "unable to map CSW resource\n");
+		rc = -ENOMEM;
+		goto err_group;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(&pdev->dev, "no MCBA resource address\n");
+ rc = -EINVAL;
+ goto err_group;
+ }
+ tmp_ctx.mcba_csr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+	if (!tmp_ctx.mcba_csr) {
+		dev_err(&pdev->dev, "unable to map MCBA resource\n");
+		rc = -ENOMEM;
+		goto err_group;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ dev_err(&pdev->dev, "no MCBB resource address\n");
+ rc = -EINVAL;
+ goto err_group;
+ }
+ tmp_ctx.mcbb_csr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+	if (!tmp_ctx.mcbb_csr) {
+		dev_err(&pdev->dev, "unable to map MCBB resource\n");
+		rc = -ENOMEM;
+		goto err_group;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ tmp_ctx.mcu_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tmp_ctx.mcu_csr)) {
+ dev_err(&pdev->dev, "no MCU resource address\n");
+ rc = PTR_ERR(tmp_ctx.mcu_csr);
+ goto err_group;
+ }
+ /* Ignore non-active MCU */
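+	/* MCU CSR instances are 0x40000 apart, so CSR address bits [19:16]
+	 * divided by four yield the MCU index. */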
+ tmp_ctx.mcu_id = ((res->start >> 16) & 0xF) / 4;
+ if (!xgene_edac_mc_is_active(&tmp_ctx, tmp_ctx.mcu_id)) {
+ rc = -ENODEV;
+ goto err_group;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 4;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 2;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx++, ARRAY_SIZE(layers), layers,
+ sizeof(*ctx));
+ if (!mci) {
+ rc = -ENOMEM;
+ goto err_group;
+ }
+
+ ctx = mci->pvt_info;
+ *ctx = tmp_ctx; /* Copy over resource value */
+ ctx->name = "xgene_edac_mc_err";
+ mci->pdev = &pdev->dev;
+ dev_set_drvdata(mci->pdev, mci);
+ mci->ctl_name = ctx->name;
+ mci->dev_name = ctx->name;
+
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | MEM_FLAG_RDDR3 |
+ MEM_FLAG_DDR | MEM_FLAG_DDR2 | MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = "0.1";
+ mci->ctl_page_to_phys = NULL;
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode = SCRUB_HW_SRC;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = xgene_edac_mc_check;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc failed\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+
+ xgene_edac_mc_create_debugfs_node(mci);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ int irq;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err_del;
+ }
+ rc = devm_request_irq(&pdev->dev, irq,
+ xgene_edac_mc_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), mci);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Could not request IRQ %d\n", irq);
+ goto err_del;
+ }
+ }
+ }
+
+ xgene_edac_mc_irq_ctl(mci, true);
+
+ devres_remove_group(&pdev->dev, xgene_edac_mc_probe);
+
+ dev_info(&pdev->dev, "X-Gene EDAC MC registered\n");
+ return 0;
+
+err_del:
+ edac_mc_del_mc(&pdev->dev);
+err_free:
+ edac_mc_free(mci);
+err_group:
+ devres_release_group(&pdev->dev, xgene_edac_mc_probe);
+ return rc;
+}
+
+static int xgene_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(&pdev->dev);
+
+ xgene_edac_mc_irq_ctl(mci, false);
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id xgene_edac_mc_of_match[] = {
+ { .compatible = "apm,xgene-edac-mc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_edac_mc_of_match);
+#endif
+
+static struct platform_driver xgene_edac_mc_driver = {
+ .probe = xgene_edac_mc_probe,
+ .remove = xgene_edac_mc_remove,
+ .driver = {
+ .name = "xgene-edac-mc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xgene_edac_mc_of_match),
+ },
+};
+
+/* CPU L1/L2 error device */
+#define MAX_CPU_PER_PMD 2
+#define CPU_CSR_STRIDE 0x00100000
+#define CPU_L2C_PAGE 0x000D0000
+#define CPU_MEMERR_L2C_PAGE 0x000E0000
+#define CPU_MEMERR_CPU_PAGE 0x000F0000
+
+#define MEMERR_CPU_ICFECR_PAGE_OFFSET 0x0000
+#define MEMERR_CPU_ICFESR_PAGE_OFFSET 0x0004
+#define MEMERR_CPU_ICFESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
+#define MEMERR_CPU_ICFESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16)
+#define MEMERR_CPU_ICFESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
+#define MEMERR_CPU_ICFESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
+#define MEMERR_CPU_ICFESR_MULTCERR_MASK BIT(2)
+#define MEMERR_CPU_ICFESR_CERR_MASK BIT(0)
+#define MEMERR_CPU_LSUESR_PAGE_OFFSET 0x000c
+#define MEMERR_CPU_LSUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
+#define MEMERR_CPU_LSUESR_ERRINDEX_RD(src) (((src) & 0x003F0000) >> 16)
+#define MEMERR_CPU_LSUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
+#define MEMERR_CPU_LSUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
+#define MEMERR_CPU_LSUESR_MULTCERR_MASK BIT(2)
+#define MEMERR_CPU_LSUESR_CERR_MASK BIT(0)
+#define MEMERR_CPU_LSUECR_PAGE_OFFSET 0x0008
+#define MEMERR_CPU_MMUECR_PAGE_OFFSET 0x0010
+#define MEMERR_CPU_MMUESR_PAGE_OFFSET 0x0014
+#define MEMERR_CPU_MMUESR_ERRWAY_RD(src) (((src) & 0xFF000000) >> 24)
+#define MEMERR_CPU_MMUESR_ERRINDEX_RD(src) (((src) & 0x007F0000) >> 16)
+#define MEMERR_CPU_MMUESR_ERRINFO_RD(src) (((src) & 0x0000FF00) >> 8)
+#define MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK BIT(7)
+#define MEMERR_CPU_MMUESR_ERRTYPE_RD(src) (((src) & 0x00000070) >> 4)
+#define MEMERR_CPU_MMUESR_MULTCERR_MASK BIT(2)
+#define MEMERR_CPU_MMUESR_CERR_MASK BIT(0)
+#define MEMERR_CPU_ICFESRA_PAGE_OFFSET 0x0804
+#define MEMERR_CPU_LSUESRA_PAGE_OFFSET 0x080c
+#define MEMERR_CPU_MMUESRA_PAGE_OFFSET 0x0814
+
+#define MEMERR_L2C_L2ECR_PAGE_OFFSET 0x0000
+#define MEMERR_L2C_L2ESR_PAGE_OFFSET 0x0004
+#define MEMERR_L2C_L2ESR_ERRSYN_RD(src) (((src) & 0xFF000000) >> 24)
+#define MEMERR_L2C_L2ESR_ERRWAY_RD(src) (((src) & 0x00FC0000) >> 18)
+#define MEMERR_L2C_L2ESR_ERRCPU_RD(src) (((src) & 0x00020000) >> 17)
+#define MEMERR_L2C_L2ESR_ERRGROUP_RD(src) (((src) & 0x0000E000) >> 13)
+#define MEMERR_L2C_L2ESR_ERRACTION_RD(src) (((src) & 0x00001C00) >> 10)
+#define MEMERR_L2C_L2ESR_ERRTYPE_RD(src) (((src) & 0x00000300) >> 8)
+#define MEMERR_L2C_L2ESR_MULTUCERR_MASK BIT(3)
+#define MEMERR_L2C_L2ESR_MULTICERR_MASK BIT(2)
+#define MEMERR_L2C_L2ESR_UCERR_MASK BIT(1)
+#define MEMERR_L2C_L2ESR_ERR_MASK BIT(0)
+#define MEMERR_L2C_L2EALR_PAGE_OFFSET 0x0008
+#define CPUX_L2C_L2RTOCR_PAGE_OFFSET 0x0010
+#define MEMERR_L2C_L2EAHR_PAGE_OFFSET 0x000c
+#define CPUX_L2C_L2RTOSR_PAGE_OFFSET 0x0014
+#define CPUX_L2C_L2RTOALR_PAGE_OFFSET 0x0018
+#define CPUX_L2C_L2RTOAHR_PAGE_OFFSET 0x001c
+#define MEMERR_L2C_L2ESRA_PAGE_OFFSET 0x0804
+
+/*
+ * Processor Module Domain (PMD) context - context for a pair of processors.
+ * Each PMD consists of two CPUs and a shared L2 cache, and each CPU has
+ * its own L1 cache.
+ */
+struct xgene_edac_pmd_ctx {
+ char *name;
+ void __iomem *pcp_csr; /* PCP CSR for reading error interrupt reg */
+ void __iomem *pmd_csr; /* PMD CSR for reading L1/L2 error reg */
+ int pmd; /* Identify the register in pcp_csr */
+};
+
+static void xgene_edac_pmd_l1_check(struct edac_device_ctl_info *edac_dev,
+ int cpu_idx)
+{
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ void __iomem *pg_f;
+ u32 val;
+
+ pg_f = ctx->pmd_csr + cpu_idx * CPU_CSR_STRIDE + CPU_MEMERR_CPU_PAGE;
+
+ val = readl(pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
+ if (val) {
+ dev_err(edac_dev->dev,
+ "CPU%d L1 memory error ICF 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
+ ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
+ MEMERR_CPU_ICFESR_ERRWAY_RD(val),
+ MEMERR_CPU_ICFESR_ERRINDEX_RD(val),
+ MEMERR_CPU_ICFESR_ERRINFO_RD(val));
+		if (val & MEMERR_CPU_ICFESR_CERR_MASK)
+			dev_err(edac_dev->dev,
+				"One or more correctable errors\n");
+		if (val & MEMERR_CPU_ICFESR_MULTCERR_MASK)
+			dev_err(edac_dev->dev, "Multiple correctable errors\n");
+ switch (MEMERR_CPU_ICFESR_ERRTYPE_RD(val)) {
+ case 1:
+ dev_err(edac_dev->dev, "L1 TLB multiple hit\n");
+ break;
+ case 2:
+ dev_err(edac_dev->dev, "Way select multiple hit\n");
+ break;
+ case 3:
+ dev_err(edac_dev->dev, "Physical tag parity error\n");
+ break;
+ case 4:
+ case 5:
+ dev_err(edac_dev->dev, "L1 data parity error\n");
+ break;
+ case 6:
+ dev_err(edac_dev->dev, "L1 pre-decode parity error\n");
+ break;
+ }
+
+ /* Clear SW generated and HW errors */
+ writel(0x0, pg_f + MEMERR_CPU_ICFESRA_PAGE_OFFSET);
+ writel(val, pg_f + MEMERR_CPU_ICFESR_PAGE_OFFSET);
+
+ if (val & (MEMERR_CPU_ICFESR_CERR_MASK |
+ MEMERR_CPU_ICFESR_MULTCERR_MASK))
+ edac_device_handle_ce(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ }
+
+ val = readl(pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
+ if (val) {
+ dev_err(edac_dev->dev,
+ "CPU%d memory error LSU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X\n",
+ ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
+ MEMERR_CPU_LSUESR_ERRWAY_RD(val),
+ MEMERR_CPU_LSUESR_ERRINDEX_RD(val),
+ MEMERR_CPU_LSUESR_ERRINFO_RD(val));
+		if (val & MEMERR_CPU_LSUESR_CERR_MASK)
+			dev_err(edac_dev->dev,
+				"One or more correctable errors\n");
+		if (val & MEMERR_CPU_LSUESR_MULTCERR_MASK)
+			dev_err(edac_dev->dev, "Multiple correctable errors\n");
+ switch (MEMERR_CPU_LSUESR_ERRTYPE_RD(val)) {
+ case 0:
+ dev_err(edac_dev->dev, "Load tag error\n");
+ break;
+ case 1:
+ dev_err(edac_dev->dev, "Load data error\n");
+ break;
+ case 2:
+ dev_err(edac_dev->dev, "WSL multihit error\n");
+ break;
+ case 3:
+ dev_err(edac_dev->dev, "Store tag error\n");
+ break;
+ case 4:
+ dev_err(edac_dev->dev,
+ "DTB multihit from load pipeline error\n");
+ break;
+ case 5:
+ dev_err(edac_dev->dev,
+ "DTB multihit from store pipeline error\n");
+ break;
+ }
+
+ /* Clear SW generated and HW errors */
+ writel(0x0, pg_f + MEMERR_CPU_LSUESRA_PAGE_OFFSET);
+ writel(val, pg_f + MEMERR_CPU_LSUESR_PAGE_OFFSET);
+
+ if (val & (MEMERR_CPU_LSUESR_CERR_MASK |
+ MEMERR_CPU_LSUESR_MULTCERR_MASK))
+ edac_device_handle_ce(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ else
+ edac_device_handle_ue(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ }
+
+ val = readl(pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
+ if (val) {
+ dev_err(edac_dev->dev,
+ "CPU%d memory error MMU 0x%08X Way 0x%02X Index 0x%02X Info 0x%02X %s\n",
+ ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val,
+ MEMERR_CPU_MMUESR_ERRWAY_RD(val),
+ MEMERR_CPU_MMUESR_ERRINDEX_RD(val),
+ MEMERR_CPU_MMUESR_ERRINFO_RD(val),
+ val & MEMERR_CPU_MMUESR_ERRREQSTR_LSU_MASK ? "LSU" :
+ "ICF");
+		if (val & MEMERR_CPU_MMUESR_CERR_MASK)
+			dev_err(edac_dev->dev,
+				"One or more correctable errors\n");
+		if (val & MEMERR_CPU_MMUESR_MULTCERR_MASK)
+			dev_err(edac_dev->dev, "Multiple correctable errors\n");
+ switch (MEMERR_CPU_MMUESR_ERRTYPE_RD(val)) {
+ case 0:
+ dev_err(edac_dev->dev, "Stage 1 UTB hit error\n");
+ break;
+ case 1:
+ dev_err(edac_dev->dev, "Stage 1 UTB miss error\n");
+ break;
+ case 2:
+ dev_err(edac_dev->dev, "Stage 1 UTB allocate error\n");
+ break;
+ case 3:
+ dev_err(edac_dev->dev,
+ "TMO operation single bank error\n");
+ break;
+ case 4:
+ dev_err(edac_dev->dev, "Stage 2 UTB error\n");
+ break;
+ case 5:
+ dev_err(edac_dev->dev, "Stage 2 UTB miss error\n");
+ break;
+ case 6:
+ dev_err(edac_dev->dev, "Stage 2 UTB allocate error\n");
+ break;
+ case 7:
+ dev_err(edac_dev->dev,
+ "TMO operation multiple bank error\n");
+ break;
+ }
+
+ /* Clear SW generated and HW errors */
+ writel(0x0, pg_f + MEMERR_CPU_MMUESRA_PAGE_OFFSET);
+ writel(val, pg_f + MEMERR_CPU_MMUESR_PAGE_OFFSET);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+ }
+}
+
+static void xgene_edac_pmd_l2_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ void __iomem *pg_d;
+ void __iomem *pg_e;
+ u32 val_hi;
+ u32 val_lo;
+ u32 val;
+
+ /* Check L2 */
+ pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
+ val = readl(pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
+ if (val) {
+ val_lo = readl(pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
+ val_hi = readl(pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
+ dev_err(edac_dev->dev,
+ "PMD%d memory error L2C L2ESR 0x%08X @ 0x%08X.%08X\n",
+ ctx->pmd, val, val_hi, val_lo);
+ dev_err(edac_dev->dev,
+ "ErrSyndrome 0x%02X ErrWay 0x%02X ErrCpu %d ErrGroup 0x%02X ErrAction 0x%02X\n",
+ MEMERR_L2C_L2ESR_ERRSYN_RD(val),
+ MEMERR_L2C_L2ESR_ERRWAY_RD(val),
+ MEMERR_L2C_L2ESR_ERRCPU_RD(val),
+ MEMERR_L2C_L2ESR_ERRGROUP_RD(val),
+ MEMERR_L2C_L2ESR_ERRACTION_RD(val));
+
+		if (val & MEMERR_L2C_L2ESR_ERR_MASK)
+			dev_err(edac_dev->dev,
+				"One or more correctable errors\n");
+		if (val & MEMERR_L2C_L2ESR_MULTICERR_MASK)
+			dev_err(edac_dev->dev, "Multiple correctable errors\n");
+		if (val & MEMERR_L2C_L2ESR_UCERR_MASK)
+			dev_err(edac_dev->dev,
+				"One or more uncorrectable errors\n");
+		if (val & MEMERR_L2C_L2ESR_MULTUCERR_MASK)
+			dev_err(edac_dev->dev,
+				"Multiple uncorrectable errors\n");
+
+ switch (MEMERR_L2C_L2ESR_ERRTYPE_RD(val)) {
+ case 0:
+ dev_err(edac_dev->dev, "Outbound SDB parity error\n");
+ break;
+ case 1:
+ dev_err(edac_dev->dev, "Inbound SDB parity error\n");
+ break;
+ case 2:
+ dev_err(edac_dev->dev, "Tag ECC error\n");
+ break;
+ case 3:
+ dev_err(edac_dev->dev, "Data ECC error\n");
+ break;
+ }
+
+ writel(0x0, pg_e + MEMERR_L2C_L2EALR_PAGE_OFFSET);
+ writel(0x0, pg_e + MEMERR_L2C_L2EAHR_PAGE_OFFSET);
+
+ /* Clear SW generated and HW errors */
+ writel(0x0, pg_e + MEMERR_L2C_L2ESRA_PAGE_OFFSET);
+ writel(val, pg_e + MEMERR_L2C_L2ESR_PAGE_OFFSET);
+
+ if (val & (MEMERR_L2C_L2ESR_ERR_MASK |
+ MEMERR_L2C_L2ESR_MULTICERR_MASK))
+ edac_device_handle_ce(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ if (val & (MEMERR_L2C_L2ESR_UCERR_MASK |
+ MEMERR_L2C_L2ESR_MULTUCERR_MASK))
+ edac_device_handle_ue(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ }
+
+ /* Check if any memory request timed out on L2 cache */
+ pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
+ val = readl(pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
+ if (val) {
+ val_lo = readl(pg_d + CPUX_L2C_L2RTOALR_PAGE_OFFSET);
+ val_hi = readl(pg_d + CPUX_L2C_L2RTOAHR_PAGE_OFFSET);
+ dev_err(edac_dev->dev,
+ "PMD%d L2C error L2C RTOSR 0x%08X @ 0x%08X.%08X\n",
+ ctx->pmd, val, val_hi, val_lo);
+ writel(0x0, pg_d + CPUX_L2C_L2RTOALR_PAGE_OFFSET);
+ writel(0x0, pg_d + CPUX_L2C_L2RTOAHR_PAGE_OFFSET);
+ writel(0x0, pg_d + CPUX_L2C_L2RTOSR_PAGE_OFFSET);
+ }
+}
+
+static void xgene_edac_pmd_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ u32 pcp_hp_stat;
+ int i;
+
+ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS);
+ if (!((PMD0_MERR_MASK << ctx->pmd) & pcp_hp_stat))
+ return;
+
+ /* Check CPU L1 error */
+ for (i = 0; i < MAX_CPU_PER_PMD; i++)
+ xgene_edac_pmd_l1_check(edac_dev, i);
+
+ /* Check CPU L2 error */
+ xgene_edac_pmd_l2_check(edac_dev);
+}
+
+static irqreturn_t xgene_edac_pmd_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ u32 pcp_hp_stat;
+
+ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS);
+ if (!(pcp_hp_stat & (PMD0_MERR_MASK << ctx->pmd)))
+ return IRQ_NONE;
+
+ xgene_edac_pmd_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static void xgene_edac_pmd_cpu_hw_cfg(struct edac_device_ctl_info *edac_dev,
+ int cpu)
+{
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ void __iomem *pg_f = ctx->pmd_csr + cpu * CPU_CSR_STRIDE +
+ CPU_MEMERR_CPU_PAGE;
+
+ /*
+	 * Enable CPU memory error reporting:
+ * MEMERR_CPU_ICFESRA, MEMERR_CPU_LSUESRA, and MEMERR_CPU_MMUESRA
+ */
+ writel(0x00000301, pg_f + MEMERR_CPU_ICFECR_PAGE_OFFSET);
+ writel(0x00000301, pg_f + MEMERR_CPU_LSUECR_PAGE_OFFSET);
+ writel(0x00000101, pg_f + MEMERR_CPU_MMUECR_PAGE_OFFSET);
+}
+
+static void xgene_edac_pmd_hw_cfg(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ void __iomem *pg_d = ctx->pmd_csr + CPU_L2C_PAGE;
+ void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
+
+	/* Enable PMD memory error reporting - MEMERR_L2C_L2ECR and L2C_L2RTOCR */
+ writel(0x00000703, pg_e + MEMERR_L2C_L2ECR_PAGE_OFFSET);
+ writel(0x00000119, pg_d + CPUX_L2C_L2RTOCR_PAGE_OFFSET);
+}
+
+static void xgene_edac_pmd_hw_ctl(struct edac_device_ctl_info *edac_dev,
+ bool enable)
+{
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ u32 val;
+ int i;
+
+ /* Enable PMD error interrupt */
+ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+ mutex_lock(&xgene_edac_lock);
+
+ val = readl(ctx->pcp_csr + PCPHPERRINTMSK);
+ if (enable)
+ val &= ~(PMD0_MERR_MASK << ctx->pmd);
+ else
+ val |= PMD0_MERR_MASK << ctx->pmd;
+ writel(val, ctx->pcp_csr + PCPHPERRINTMSK);
+
+ mutex_unlock(&xgene_edac_lock);
+ }
+
+ if (enable) {
+ xgene_edac_pmd_hw_cfg(edac_dev);
+
+		/* Two CPUs per PMD */
+ for (i = 0; i < MAX_CPU_PER_PMD; i++)
+ xgene_edac_pmd_cpu_hw_cfg(edac_dev, i);
+ }
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+static ssize_t xgene_edac_pmd_l1_inject_ctrl_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ void __iomem *cpux_pg_f;
+ int i;
+
+ for (i = 0; i < MAX_CPU_PER_PMD; i++) {
+ cpux_pg_f = ctx->pmd_csr + i * CPU_CSR_STRIDE +
+ CPU_MEMERR_CPU_PAGE;
+
+ writel(MEMERR_CPU_ICFESR_MULTCERR_MASK |
+ MEMERR_CPU_ICFESR_CERR_MASK,
+ cpux_pg_f + MEMERR_CPU_ICFESRA_PAGE_OFFSET);
+ writel(MEMERR_CPU_LSUESR_MULTCERR_MASK |
+ MEMERR_CPU_LSUESR_CERR_MASK,
+ cpux_pg_f + MEMERR_CPU_LSUESRA_PAGE_OFFSET);
+ writel(MEMERR_CPU_MMUESR_MULTCERR_MASK |
+ MEMERR_CPU_MMUESR_CERR_MASK,
+ cpux_pg_f + MEMERR_CPU_MMUESRA_PAGE_OFFSET);
+ }
+ return count;
+}
+
+static ssize_t xgene_edac_pmd_l2_inject_ctrl_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct xgene_edac_pmd_ctx *ctx = edac_dev->pvt_info;
+ void __iomem *pg_e = ctx->pmd_csr + CPU_MEMERR_L2C_PAGE;
+
+ writel(MEMERR_L2C_L2ESR_MULTUCERR_MASK |
+ MEMERR_L2C_L2ESR_MULTICERR_MASK |
+ MEMERR_L2C_L2ESR_UCERR_MASK |
+ MEMERR_L2C_L2ESR_ERR_MASK,
+ pg_e + MEMERR_L2C_L2ESRA_PAGE_OFFSET);
+ return count;
+}
+
+static const struct file_operations xgene_edac_pmd_debug_inject_fops[] = {
+ {
+ .open = simple_open,
+ .write = xgene_edac_pmd_l1_inject_ctrl_write,
+ .llseek = generic_file_llseek, },
+ {
+ .open = simple_open,
+ .write = xgene_edac_pmd_l2_inject_ctrl_write,
+ .llseek = generic_file_llseek, },
+ { }
+};
+
+static void xgene_edac_pmd_create_debugfs_nodes(
+ struct edac_device_ctl_info *edac_dev)
+{
+ struct dentry *edac_debugfs;
+
+ /*
+ * Todo: Switch to common EDAC debug file system for edac device
+ * when available.
+ */
+ edac_debugfs = debugfs_create_dir(edac_dev->dev->kobj.name, NULL);
+ if (!edac_debugfs)
+ return;
+
+ debugfs_create_file("l1_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev,
+ &xgene_edac_pmd_debug_inject_fops[0]);
+ debugfs_create_file("l2_inject_ctrl", S_IWUSR, edac_debugfs, edac_dev,
+ &xgene_edac_pmd_debug_inject_fops[1]);
+}
+#else
+static void xgene_edac_pmd_create_debugfs_nodes(
+ struct edac_device_ctl_info *edac_dev)
+{
+}
+#endif
+
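+/* A set efuse bit means the corresponding PMD is fused off (unavailable). */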
+static int xgene_edac_pmd_available(u32 efuse, int pmd)
+{
+ return (efuse & (1 << pmd)) ? 0 : 1;
+}
+
+static int xgene_edac_pmd_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct xgene_edac_pmd_ctx *ctx;
+ char edac_name[10];
+ struct resource *res;
+ void __iomem *pmd_efuse;
+ int pmd;
+ int rc = 0;
+
+ if (!devres_open_group(&pdev->dev, xgene_edac_pmd_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* Find the PMD number from its address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res || resource_size(res) <= 0) {
+ rc = -ENODEV;
+ goto err_group;
+ }
+ pmd = ((res->start >> 20) & 0x1E) >> 1;
+
+ /* Determine if this PMD is disabled */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res || resource_size(res) <= 0) {
+ rc = -ENODEV;
+ goto err_group;
+ }
+ pmd_efuse = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!pmd_efuse) {
+		dev_err(&pdev->dev, "unable to map PMD efuse resource\n");
+		rc = -ENOMEM;
+		goto err_group;
+ }
+ if (!xgene_edac_pmd_available(readl(pmd_efuse), pmd)) {
+ rc = -ENODEV;
+ goto err_group;
+ }
+ devm_iounmap(&pdev->dev, pmd_efuse);
+
+ sprintf(edac_name, "l2c%d", pmd);
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
+ edac_name, 1, "l2c", 1, 2, NULL,
+ 0, edac_device_alloc_index());
+ if (!edac_dev) {
+ rc = -ENOMEM;
+ goto err_group;
+ }
+
+ ctx = edac_dev->pvt_info;
+ ctx->name = "xgene_pmd_err";
+ ctx->pmd = pmd;
+ edac_dev->dev = &pdev->dev;
+ dev_set_drvdata(edac_dev->dev, edac_dev);
+ edac_dev->ctl_name = ctx->name;
+ edac_dev->dev_name = ctx->name;
+ edac_dev->mod_name = EDAC_MOD_STR;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no PCP resource address\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ ctx->pcp_csr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!ctx->pcp_csr) {
+		dev_err(&pdev->dev, "unable to map PCP resource\n");
+		rc = -ENOMEM;
+		goto err_free;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no PMD resource address\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ ctx->pmd_csr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!ctx->pmd_csr) {
+		dev_err(&pdev->dev,
+			"devm_ioremap failed for PMD resource address\n");
+		rc = -ENOMEM;
+ goto err_free;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = xgene_edac_pmd_check;
+
+ xgene_edac_pmd_create_debugfs_nodes(edac_dev);
+
+ rc = edac_device_add_device(edac_dev);
+ if (rc > 0) {
+ dev_err(&pdev->dev, "edac_device_add_device failed\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err_del;
+ }
+ rc = devm_request_irq(&pdev->dev, irq,
+ xgene_edac_pmd_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), edac_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
+ goto err_del;
+ }
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ xgene_edac_pmd_hw_ctl(edac_dev, 1);
+
+ devres_remove_group(&pdev->dev, xgene_edac_pmd_probe);
+
+ dev_info(&pdev->dev, "X-Gene EDAC PMD registered\n");
+ return 0;
+
+err_del:
+ edac_device_del_device(&pdev->dev);
+err_free:
+ edac_device_free_ctl_info(edac_dev);
+err_group:
+ devres_release_group(&pdev->dev, xgene_edac_pmd_probe);
+ return rc;
+}
+
+static int xgene_edac_pmd_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&pdev->dev);
+
+ xgene_edac_pmd_hw_ctl(edac_dev, 0);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id xgene_edac_pmd_of_match[] = {
+ { .compatible = "apm,xgene-edac-pmd" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_edac_pmd_of_match);
+#endif
+
+static struct platform_driver xgene_edac_pmd_driver = {
+ .probe = xgene_edac_pmd_probe,
+ .remove = xgene_edac_pmd_remove,
+ .driver = {
+ .name = "xgene-edac-pmd",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xgene_edac_pmd_of_match),
+ },
+};
+
+/* L3 Error device */
+#define L3C_ESR (0x0A * 4)
+#define L3C_ESR_DATATAG_MASK BIT(9)
+#define L3C_ESR_MULTIHIT_MASK BIT(8)
+#define L3C_ESR_UCEVICT_MASK BIT(6)
+#define L3C_ESR_MULTIUCERR_MASK BIT(5)
+#define L3C_ESR_MULTICERR_MASK BIT(4)
+#define L3C_ESR_UCERR_MASK BIT(3)
+#define L3C_ESR_CERR_MASK BIT(2)
+#define L3C_ESR_UCERRINTR_MASK BIT(1)
+#define L3C_ESR_CERRINTR_MASK BIT(0)
+#define L3C_ECR (0x0B * 4)
+#define L3C_ECR_UCINTREN BIT(3)
+#define L3C_ECR_CINTREN BIT(2)
+#define L3C_UCERREN BIT(1)
+#define L3C_CERREN BIT(0)
+#define L3C_ELR (0x0C * 4)
+#define L3C_ELR_ERRSYN(src) ((src & 0xFF800000) >> 23)
+#define L3C_ELR_ERRWAY(src) ((src & 0x007E0000) >> 17)
+#define L3C_ELR_AGENTID(src) ((src & 0x0001E000) >> 13)
+#define L3C_ELR_ERRGRP(src) ((src & 0x00000F00) >> 8)
+#define L3C_ELR_OPTYPE(src) ((src & 0x000000F0) >> 4)
+#define L3C_ELR_PADDRHIGH(src) (src & 0x0000000F)
+#define L3C_AELR (0x0D * 4)
+#define L3C_BELR (0x0E * 4)
+#define L3C_BELR_BANK(src) (src & 0x0000000F)
+
+struct xgene_edac_dev_ctx {
+ char *name;
+ int edac_idx;
+ void __iomem *pcp_csr;
+ void __iomem *dev_csr;
+ void __iomem *bus_csr;
+};
+
+static void xgene_edac_l3_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 l3cesr;
+ u32 l3celr;
+ u32 l3caelr;
+ u32 l3cbelr;
+
+ l3cesr = readl(ctx->dev_csr + L3C_ESR);
+ if (!(l3cesr & (L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK)))
+ return;
+
+ if (l3cesr & L3C_ESR_UCERR_MASK)
+ dev_err(edac_dev->dev, "L3C uncorrectable error\n");
+ if (l3cesr & L3C_ESR_CERR_MASK)
+ dev_warn(edac_dev->dev, "L3C correctable error\n");
+
+ l3celr = readl(ctx->dev_csr + L3C_ELR);
+ l3caelr = readl(ctx->dev_csr + L3C_AELR);
+ l3cbelr = readl(ctx->dev_csr + L3C_BELR);
+ if (l3cesr & L3C_ESR_MULTIHIT_MASK)
+ dev_err(edac_dev->dev, "L3C multiple hit error\n");
+ if (l3cesr & L3C_ESR_UCEVICT_MASK)
+ dev_err(edac_dev->dev,
+ "L3C dropped eviction of line with error\n");
+ if (l3cesr & L3C_ESR_MULTIUCERR_MASK)
+ dev_err(edac_dev->dev, "L3C multiple uncorrectable error\n");
+ if (l3cesr & L3C_ESR_DATATAG_MASK)
+ dev_err(edac_dev->dev,
+ "L3C data error syndrome 0x%X group 0x%X\n",
+ L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRGRP(l3celr));
+ else
+ dev_err(edac_dev->dev,
+ "L3C tag error syndrome 0x%X Way of Tag 0x%X Agent ID 0x%X Operation type 0x%X\n",
+ L3C_ELR_ERRSYN(l3celr), L3C_ELR_ERRWAY(l3celr),
+ L3C_ELR_AGENTID(l3celr), L3C_ELR_OPTYPE(l3celr));
+ /*
+ * NOTE: Address [41:38] in L3C_ELR_PADDRHIGH(l3celr).
+ * Address [37:6] in l3caelr. Lower 6 bits are zero.
+ */
+ dev_err(edac_dev->dev, "L3C error address 0x%08X.%08X bank %d\n",
+ L3C_ELR_PADDRHIGH(l3celr) << 6 | (l3caelr >> 26),
+ (l3caelr & 0x3FFFFFFF) << 6, L3C_BELR_BANK(l3cbelr));
+ dev_err(edac_dev->dev,
+ "L3C error status register value 0x%X\n", l3cesr);
+
+ /* Clear L3C error interrupt */
+ writel(0, ctx->dev_csr + L3C_ESR);
+
+ if (l3cesr & L3C_ESR_CERR_MASK)
+ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+ if (l3cesr & L3C_ESR_UCERR_MASK)
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t xgene_edac_l3_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 l3cesr;
+
+ l3cesr = readl(ctx->dev_csr + L3C_ESR);
+ if (!(l3cesr & (L3C_ESR_UCERRINTR_MASK | L3C_ESR_CERRINTR_MASK)))
+ return IRQ_NONE;
+
+ xgene_edac_l3_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static void xgene_edac_l3_hw_ctl(struct edac_device_ctl_info *edac_dev,
+ bool enable)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 val;
+
+ val = readl(ctx->dev_csr + L3C_ECR);
+ val |= L3C_UCERREN | L3C_CERREN;
+	/* On disable, just disable the interrupt but keep error detection enabled */
+ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+ if (enable)
+ val |= L3C_ECR_UCINTREN | L3C_ECR_CINTREN;
+ else
+ val &= ~(L3C_ECR_UCINTREN | L3C_ECR_CINTREN);
+ }
+ writel(val, ctx->dev_csr + L3C_ECR);
+
+ mutex_lock(&xgene_edac_lock);
+
+ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+ /* Enable L3C error top level interrupt */
+ val = readl(ctx->pcp_csr + PCPHPERRINTMSK);
+ if (enable)
+ val &= ~L3C_UNCORR_ERR_MASK;
+ else
+ val |= L3C_UNCORR_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPHPERRINTMSK);
+ val = readl(ctx->pcp_csr + PCPLPERRINTMSK);
+ if (enable)
+ val &= ~L3C_CORR_ERR_MASK;
+ else
+ val |= L3C_CORR_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPLPERRINTMSK);
+ }
+
+ mutex_unlock(&xgene_edac_lock);
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+static ssize_t xgene_edac_l3_inject_ctrl_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+
+ writel(L3C_ESR_UCERR_MASK | L3C_ESR_CERR_MASK |
+ L3C_ESR_MULTIUCERR_MASK | L3C_ESR_MULTICERR_MASK,
+ ctx->dev_csr + L3C_ESR);
+ return count;
+}
+
+static const struct file_operations xgene_edac_l3_debug_inject_fops = {
+ .open = simple_open,
+ .write = xgene_edac_l3_inject_ctrl_write,
+ .llseek = generic_file_llseek,
+};
+
+static void xgene_edac_l3_create_debugfs_node(
+ struct edac_device_ctl_info *edac_dev)
+{
+ struct dentry *edac_debugfs;
+
+ /*
+ * Todo: Switch to common EDAC debug file system for edac device
+ * when available.
+ */
+ edac_debugfs = debugfs_create_dir(edac_dev->dev->kobj.name, NULL);
+ if (!edac_debugfs)
+ return;
+
+ debugfs_create_file("inject_ctrl", S_IWUSR, edac_debugfs, edac_dev,
+ &xgene_edac_l3_debug_inject_fops);
+}
+#else
+static void xgene_edac_l3_create_debugfs_node(
+ struct edac_device_ctl_info *edac_dev)
+{
+}
+#endif
+
+static int xgene_edac_l3_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct xgene_edac_dev_ctx *ctx;
+ struct resource *res;
+ int rc = 0;
+
+ if (!devres_open_group(&pdev->dev, xgene_edac_l3_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
+ "l3c", 1, "l3c", 1, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!edac_dev) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ctx = edac_dev->pvt_info;
+ ctx->name = "xgene_l3_err";
+ edac_dev->dev = &pdev->dev;
+ dev_set_drvdata(edac_dev->dev, edac_dev);
+ edac_dev->ctl_name = ctx->name;
+ edac_dev->dev_name = ctx->name;
+ edac_dev->mod_name = EDAC_MOD_STR;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no PCP resource address\n");
+ rc = -EINVAL;
+ goto err1;
+ }
+ ctx->pcp_csr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!ctx->pcp_csr) {
+		dev_err(&pdev->dev,
+			"devm_ioremap failed for PCP resource address\n");
+		rc = -ENOMEM;
+ goto err1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ctx->dev_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->dev_csr)) {
+ dev_err(&pdev->dev, "no L3 resource address\n");
+ rc = PTR_ERR(ctx->dev_csr);
+ goto err1;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = xgene_edac_l3_check;
+
+ xgene_edac_l3_create_debugfs_node(edac_dev);
+
+ rc = edac_device_add_device(edac_dev);
+ if (rc > 0) {
+ dev_err(&pdev->dev, "edac_device_add_device failed\n");
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ int irq;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err2;
+ }
+ rc = devm_request_irq(&pdev->dev, irq,
+ xgene_edac_l3_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), edac_dev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Could not request IRQ %d\n", irq);
+ goto err2;
+ }
+ }
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ xgene_edac_l3_hw_ctl(edac_dev, true);
+
+ devres_remove_group(&pdev->dev, xgene_edac_l3_probe);
+
+ dev_info(&pdev->dev, "X-Gene EDAC L3 registered\n");
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err1:
+ edac_device_free_ctl_info(edac_dev);
+err:
+ devres_release_group(&pdev->dev, xgene_edac_l3_probe);
+ return rc;
+}
+
+static int xgene_edac_l3_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&pdev->dev);
+
+ xgene_edac_l3_hw_ctl(edac_dev, false);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id xgene_edac_l3_of_match[] = {
+ { .compatible = "apm,xgene-edac-l3" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_edac_l3_of_match);
+#endif
+
+static struct platform_driver xgene_edac_l3_driver = {
+ .probe = xgene_edac_l3_probe,
+ .remove = xgene_edac_l3_remove,
+ .driver = {
+ .name = "xgene-edac-l3",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xgene_edac_l3_of_match),
+ },
+};
+
+/* SoC Error device */
+#define IOBAXIS0TRANSERRINTSTS 0x0000
+#define IOBAXIS0_M_ILLEGAL_ACCESS_MASK BIT(1)
+#define IOBAXIS0_ILLEGAL_ACCESS_MASK BIT(0)
+#define IOBAXIS0TRANSERRINTMSK 0x0004
+#define IOBAXIS0TRANSERRREQINFOL 0x0008
+#define IOBAXIS0TRANSERRREQINFOH 0x000c
+#define REQTYPE_RD(src) (((src) & BIT(0)))
+#define ERRADDRH_RD(src) (((src) & 0xffc00000) >> 22)
+#define IOBAXIS1TRANSERRINTSTS 0x0010
+#define IOBAXIS1TRANSERRINTMSK 0x0014
+#define IOBAXIS1TRANSERRREQINFOL 0x0018
+#define IOBAXIS1TRANSERRREQINFOH 0x001c
+#define IOBPATRANSERRINTSTS 0x0020
+#define IOBPA_M_REQIDRAM_CORRUPT_MASK BIT(7)
+#define IOBPA_REQIDRAM_CORRUPT_MASK BIT(6)
+#define IOBPA_M_TRANS_CORRUPT_MASK BIT(5)
+#define IOBPA_TRANS_CORRUPT_MASK BIT(4)
+#define IOBPA_M_WDATA_CORRUPT_MASK BIT(3)
+#define IOBPA_WDATA_CORRUPT_MASK BIT(2)
+#define IOBPA_M_RDATA_CORRUPT_MASK BIT(1)
+#define IOBPA_RDATA_CORRUPT_MASK BIT(0)
+#define IOBBATRANSERRINTSTS 0x0030
+#define M_ILLEGAL_ACCESS_MASK 0x00008000
+#define ILLEGAL_ACCESS_MASK 0x00004000
+#define M_WIDRAM_CORRUPT_MASK 0x00002000
+#define WIDRAM_CORRUPT_MASK BIT(12)
+#define M_RIDRAM_CORRUPT_MASK BIT(11)
+#define RIDRAM_CORRUPT_MASK BIT(10)
+#define M_TRANS_CORRUPT_MASK BIT(9)
+#define TRANS_CORRUPT_MASK BIT(8)
+#define M_WDATA_CORRUPT_MASK BIT(7)
+#define WDATA_CORRUPT_MASK BIT(6)
+#define M_RBM_POISONED_REQ_MASK BIT(5)
+#define RBM_POISONED_REQ_MASK BIT(4)
+#define M_XGIC_POISONED_REQ_MASK BIT(3)
+#define XGIC_POISONED_REQ_MASK BIT(2)
+#define M_WRERR_RESP_MASK BIT(1)
+#define WRERR_RESP_MASK BIT(0)
+#define IOBBATRANSERRREQINFOL 0x0038
+#define IOBBATRANSERRREQINFOH 0x003c
+#define REQTYPE_F2_RD(src) (((src) & BIT(0)))
+#define ERRADDRH_F2_RD(src) (((src) & 0xffc00000) >> 22)
+#define IOBBATRANSERRCSWREQID 0x0040
+#define XGICTRANSERRINTSTS 0x0050
+#define M_WR_ACCESS_ERR_MASK BIT(3)
+#define WR_ACCESS_ERR_MASK BIT(2)
+#define M_RD_ACCESS_ERR_MASK BIT(1)
+#define RD_ACCESS_ERR_MASK BIT(0)
+#define XGICTRANSERRINTMSK 0x0054
+#define XGICTRANSERRREQINFO 0x0058
+#define REQTYPE_MASK 0x04000000
+#define ERRADDR_RD(src) ((src) & 0x03ffffff)
+#define GLBL_ERR_STS 0x0800
+#define MDED_ERR_MASK BIT(3)
+#define DED_ERR_MASK BIT(2)
+#define MSEC_ERR_MASK BIT(1)
+#define SEC_ERR_MASK BIT(0)
+#define GLBL_SEC_ERRL 0x0810
+#define GLBL_SEC_ERRH 0x0818
+#define GLBL_MSEC_ERRL 0x0820
+#define GLBL_MSEC_ERRH 0x0828
+#define GLBL_DED_ERRL 0x0830
+#define GLBL_DED_ERRLMASK 0x0834
+#define GLBL_DED_ERRH 0x0838
+#define GLBL_DED_ERRHMASK 0x083c
+#define GLBL_MDED_ERRL 0x0840
+#define GLBL_MDED_ERRLMASK 0x0844
+#define GLBL_MDED_ERRH 0x0848
+#define GLBL_MDED_ERRHMASK 0x084c
+
+/* IO Bus Registers */
+#define RBCSR 0x0000
+#define STICKYERR_MASK BIT(0)
+#define RBEIR 0x0008
+#define AGENT_OFFLINE_ERR_MASK BIT(30)
+#define UNIMPL_RBPAGE_ERR_MASK BIT(29)
+#define WORD_ALIGNED_ERR_MASK BIT(28)
+#define PAGE_ACCESS_ERR_MASK BIT(27)
+#define WRITE_ACCESS_MASK BIT(26)
+#define RBERRADDR_RD(src) ((src) & 0x03FFFFFF)
+
+static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 err_addr_lo;
+ u32 err_addr_hi;
+ u32 reg;
+ u32 info;
+
+ /* GIC transaction error interrupt */
+ reg = readl(ctx->dev_csr + XGICTRANSERRINTSTS);
+ if (reg) {
+ dev_err(edac_dev->dev, "XGIC transaction error\n");
+ if (reg & RD_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev, "XGIC read size error\n");
+ if (reg & M_RD_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple XGIC read size error\n");
+ if (reg & WR_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev, "XGIC write size error\n");
+ if (reg & M_WR_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple XGIC write size error\n");
+ info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
+ dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
+ info & REQTYPE_MASK ? "read" : "write",
+ ERRADDR_RD(info), info);
+ writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
+ }
+
+ /* IOB memory error */
+ reg = readl(ctx->dev_csr + GLBL_ERR_STS);
+ if (reg) {
+ if (reg & SEC_ERR_MASK) {
+ err_addr_lo = readl(ctx->dev_csr + GLBL_SEC_ERRL);
+ err_addr_hi = readl(ctx->dev_csr + GLBL_SEC_ERRH);
+ dev_err(edac_dev->dev,
+ "IOB single-bit correctable memory at 0x%08X.%08X error\n",
+ err_addr_lo, err_addr_hi);
+ writel(err_addr_lo, ctx->dev_csr + GLBL_SEC_ERRL);
+ writel(err_addr_hi, ctx->dev_csr + GLBL_SEC_ERRH);
+ }
+ if (reg & MSEC_ERR_MASK) {
+ err_addr_lo = readl(ctx->dev_csr + GLBL_MSEC_ERRL);
+ err_addr_hi = readl(ctx->dev_csr + GLBL_MSEC_ERRH);
+ dev_err(edac_dev->dev,
+ "IOB multiple single-bit correctable memory at 0x%08X.%08X error\n",
+ err_addr_lo, err_addr_hi);
+ writel(err_addr_lo, ctx->dev_csr + GLBL_MSEC_ERRL);
+ writel(err_addr_hi, ctx->dev_csr + GLBL_MSEC_ERRH);
+ }
+ if (reg & (SEC_ERR_MASK | MSEC_ERR_MASK))
+ edac_device_handle_ce(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+
+ if (reg & DED_ERR_MASK) {
+ err_addr_lo = readl(ctx->dev_csr + GLBL_DED_ERRL);
+ err_addr_hi = readl(ctx->dev_csr + GLBL_DED_ERRH);
+ dev_err(edac_dev->dev,
+ "IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
+ err_addr_lo, err_addr_hi);
+ writel(err_addr_lo, ctx->dev_csr + GLBL_DED_ERRL);
+ writel(err_addr_hi, ctx->dev_csr + GLBL_DED_ERRH);
+ }
+ if (reg & MDED_ERR_MASK) {
+ err_addr_lo = readl(ctx->dev_csr + GLBL_MDED_ERRL);
+ err_addr_hi = readl(ctx->dev_csr + GLBL_MDED_ERRH);
+ dev_err(edac_dev->dev,
+ "Multiple IOB double-bit uncorrectable memory at 0x%08X.%08X error\n",
+ err_addr_lo, err_addr_hi);
+ writel(err_addr_lo, ctx->dev_csr + GLBL_MDED_ERRL);
+ writel(err_addr_hi, ctx->dev_csr + GLBL_MDED_ERRH);
+ }
+ if (reg & (DED_ERR_MASK | MDED_ERR_MASK))
+ edac_device_handle_ue(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ }
+}
+
+static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 err_addr_lo;
+ u32 err_addr_hi;
+ u32 reg;
+
+ /*
+ * Check RB access errors
+ * 1. Out of range
+ * 2. Un-implemented page
+ * 3. Un-aligned access
+ * 4. Offline slave IP
+ */
+ reg = readl(ctx->bus_csr + RBCSR);
+ if (reg & STICKYERR_MASK) {
+ bool write;
+ u32 address;
+
+ dev_err(edac_dev->dev, "IOB bus access error(s)\n");
+ reg = readl(ctx->bus_csr + RBEIR);
+ write = reg & WRITE_ACCESS_MASK ? 1 : 0;
+ address = RBERRADDR_RD(reg);
+ if (reg & AGENT_OFFLINE_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s access to offline agent error\n",
+ write ? "write" : "read");
+ if (reg & UNIMPL_RBPAGE_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s access to unimplemented page error\n",
+ write ? "write" : "read");
+ if (reg & WORD_ALIGNED_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s word aligned access error\n",
+ write ? "write" : "read");
+ if (reg & PAGE_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s to page out of range access error\n",
+ write ? "write" : "read");
+ writel(0x0, ctx->bus_csr + RBEIR);
+ writel(0x0, ctx->bus_csr + RBCSR);
+ }
+
+ /* IOB Bridge agent transaction error interrupt */
+ reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
+ if (!reg)
+ return;
+
+ dev_err(edac_dev->dev, "IOB bridge agent (BA) transaction error\n");
+ if (reg & WRERR_RESP_MASK)
+ dev_err(edac_dev->dev, "IOB BA write response error\n");
+ if (reg & M_WRERR_RESP_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB BA write response error\n");
+ if (reg & XGIC_POISONED_REQ_MASK)
+ dev_err(edac_dev->dev, "IOB BA XGIC poisoned write error\n");
+ if (reg & M_XGIC_POISONED_REQ_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB BA XGIC poisoned write error\n");
+ if (reg & RBM_POISONED_REQ_MASK)
+ dev_err(edac_dev->dev, "IOB BA RBM poisoned write error\n");
+ if (reg & M_RBM_POISONED_REQ_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB BA RBM poisoned write error\n");
+ if (reg & WDATA_CORRUPT_MASK)
+ dev_err(edac_dev->dev, "IOB BA write error\n");
+ if (reg & M_WDATA_CORRUPT_MASK)
+ dev_err(edac_dev->dev, "Multiple IOB BA write error\n");
+ if (reg & TRANS_CORRUPT_MASK)
+ dev_err(edac_dev->dev, "IOB BA transaction error\n");
+ if (reg & M_TRANS_CORRUPT_MASK)
+ dev_err(edac_dev->dev, "Multiple IOB BA transaction error\n");
+ if (reg & RIDRAM_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "IOB BA RDIDRAM read transaction ID error\n");
+ if (reg & M_RIDRAM_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB BA RDIDRAM read transaction ID error\n");
+ if (reg & WIDRAM_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "IOB BA RDIDRAM write transaction ID error\n");
+ if (reg & M_WIDRAM_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB BA RDIDRAM write transaction ID error\n");
+ if (reg & ILLEGAL_ACCESS_MASK)
+ dev_err(edac_dev->dev,
+ "IOB BA XGIC/RB illegal access error\n");
+ if (reg & M_ILLEGAL_ACCESS_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB BA XGIC/RB illegal access error\n");
+
+ err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
+ err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
+ dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
+ REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+ ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
+ if (reg & WRERR_RESP_MASK)
+ dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
+ readl(ctx->dev_csr + IOBBATRANSERRCSWREQID));
+ writel(reg, ctx->dev_csr + IOBBATRANSERRINTSTS);
+}
+
+static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 err_addr_lo;
+ u32 err_addr_hi;
+ u32 reg;
+
+ /* IOB Processing agent transaction error interrupt */
+ reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
+ if (reg) {
+ dev_err(edac_dev->dev,
+ "IOB procesing agent (PA) transaction error\n");
+ if (reg & IOBPA_RDATA_CORRUPT_MASK)
+ dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
+ if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "Mutilple IOB PA read data RAM error\n");
+ if (reg & IOBPA_WDATA_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "IOB PA write data RAM error\n");
+ if (reg & IOBPA_M_WDATA_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "Mutilple IOB PA write data RAM error\n");
+ if (reg & IOBPA_TRANS_CORRUPT_MASK)
+ dev_err(edac_dev->dev, "IOB PA transaction error\n");
+ if (reg & IOBPA_M_TRANS_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "Mutilple IOB PA transaction error\n");
+ if (reg & IOBPA_REQIDRAM_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "IOB PA transaction ID RAM error\n");
+ if (reg & IOBPA_M_REQIDRAM_CORRUPT_MASK)
+ dev_err(edac_dev->dev,
+ "Multiple IOB PA transaction ID RAM error\n");
+ writel(reg, ctx->dev_csr + IOBPATRANSERRINTSTS);
+ }
+
+ /* IOB AXI0 Error */
+ reg = readl(ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
+ if (reg) {
+ err_addr_lo = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOL);
+ err_addr_hi = readl(ctx->dev_csr + IOBAXIS0TRANSERRREQINFOH);
+ dev_err(edac_dev->dev,
+ "%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
+ reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
+ REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
+ writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
+ }
+
+ /* IOB AXI1 Error */
+ reg = readl(ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
+ if (reg) {
+ err_addr_lo = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOL);
+ err_addr_hi = readl(ctx->dev_csr + IOBAXIS1TRANSERRREQINFOH);
+ dev_err(edac_dev->dev,
+ "%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
+ reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
+ REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
+ writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
+ }
+}
+
+static void xgene_edac_soc_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ static const char * const mem_err_ip[] = {
+ "10GbE0",
+ "10GbE1",
+ "Security",
+ "SATA45",
+ "SATA23/ETH23",
+ "SATA01/ETH01",
+ "USB1",
+ "USB0",
+ "QML",
+ "QM0",
+ "QM1 (XGbE01)",
+ "PCIE4",
+ "PCIE3",
+ "PCIE2",
+ "PCIE1",
+ "PCIE0",
+ "CTX Manager",
+ "OCM",
+ "1GbE",
+ "CLE",
+ "AHBC",
+ "PktDMA",
+ "GFC",
+ "MSLIM",
+ "10GbE2",
+ "10GbE3",
+ "QM2 (XGbE23)",
+ "IOB",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ };
+ u32 pcp_hp_stat;
+ u32 pcp_lp_stat;
+ u32 reg;
+ int i;
+
+ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS);
+ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS);
+ reg = readl(ctx->pcp_csr + MEMERRINTSTS);
+ if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
+ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) ||
+ (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg))
+ return;
+
+ if (pcp_hp_stat & IOB_XGIC_ERR_MASK)
+ xgene_edac_iob_gic_report(edac_dev);
+
+ if (pcp_hp_stat & (IOB_RB_ERR_MASK | IOB_BA_ERR_MASK))
+ xgene_edac_rb_report(edac_dev);
+
+ if (pcp_hp_stat & IOB_PA_ERR_MASK)
+ xgene_edac_pa_report(edac_dev);
+
+ if (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) {
+ dev_info(edac_dev->dev,
+ "CSW switch trace correctable memory parity error\n");
+ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+ }
+
+ for (i = 0; i < 31; i++) {
+ if (reg & (1 << i)) {
+ dev_err(edac_dev->dev, "%s memory parity error\n",
+ mem_err_ip[i]);
+ edac_device_handle_ue(edac_dev, 0, 0,
+ edac_dev->ctl_name);
+ }
+ }
+}
+
+static irqreturn_t xgene_edac_soc_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 pcp_hp_stat;
+ u32 pcp_lp_stat;
+ u32 reg;
+
+ pcp_hp_stat = readl(ctx->pcp_csr + PCPHPERRINTSTS);
+ pcp_lp_stat = readl(ctx->pcp_csr + PCPLPERRINTSTS);
+ reg = readl(ctx->pcp_csr + MEMERRINTSTS);
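+ /*
+ * The error interrupt lines are requested with IRQF_SHARED below,
+ * so claim the interrupt only when one of the status bits this
+ * handler owns is actually pending.
+ */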
+ if (!((pcp_hp_stat & (IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
+ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK)) ||
+ (pcp_lp_stat & CSW_SWITCH_TRACE_ERR_MASK) || reg))
+ return IRQ_NONE;
+
+ xgene_edac_soc_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static void xgene_edac_soc_hw_ctl(struct edac_device_ctl_info *edac_dev,
+ bool enable)
+{
+ struct xgene_edac_dev_ctx *ctx = edac_dev->pvt_info;
+ u32 val;
+
+ /* Enable SoC IP error interrupt */
+ if (edac_dev->op_state == OP_RUNNING_INTERRUPT) {
+ mutex_lock(&xgene_edac_lock);
+
+ val = readl(ctx->pcp_csr + PCPHPERRINTMSK);
+ if (enable)
+ val &= ~(IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
+ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK);
+ else
+ val |= IOB_PA_ERR_MASK | IOB_BA_ERR_MASK |
+ IOB_XGIC_ERR_MASK | IOB_RB_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPHPERRINTMSK);
+ val = readl(ctx->pcp_csr + PCPLPERRINTMSK);
+ if (enable)
+ val &= ~CSW_SWITCH_TRACE_ERR_MASK;
+ else
+ val |= CSW_SWITCH_TRACE_ERR_MASK;
+ writel(val, ctx->pcp_csr + PCPLPERRINTMSK);
+
+ mutex_unlock(&xgene_edac_lock);
+
+ writel(enable ? 0x0 : 0xFFFFFFFF,
+ ctx->dev_csr + IOBAXIS0TRANSERRINTMSK);
+ writel(enable ? 0x0 : 0xFFFFFFFF,
+ ctx->dev_csr + IOBAXIS1TRANSERRINTMSK);
+ writel(enable ? 0x0 : 0xFFFFFFFF,
+ ctx->dev_csr + XGICTRANSERRINTMSK);
+
+ writel(enable ? 0x0 : 0xFFFFFFFF, ctx->pcp_csr + MEMERRINTMSK);
+ }
+}
+
+static int xgene_edac_soc_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct xgene_edac_dev_ctx *ctx;
+ struct resource *res;
+ int rc = 0;
+
+ if (!devres_open_group(&pdev->dev, xgene_edac_soc_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
+ "SOC", 1, "SOC", 1, 2, NULL, 0,
+ edac_device_alloc_index());
+ if (!edac_dev) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ctx = edac_dev->pvt_info;
+ ctx->name = "xgene_soc_err";
+ edac_dev->dev = &pdev->dev;
+ dev_set_drvdata(edac_dev->dev, edac_dev);
+ edac_dev->ctl_name = ctx->name;
+ edac_dev->dev_name = ctx->name;
+ edac_dev->mod_name = EDAC_MOD_STR;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no PCP resource address\n");
+ rc = -EINVAL;
+ goto err1;
+ }
+ ctx->pcp_csr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ctx->pcp_csr) {
+ dev_err(&pdev->dev, "devm_ioremap of PCP resource failed\n");
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ctx->dev_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->dev_csr)) {
+ dev_err(&pdev->dev, "no SoC resource address\n");
+ rc = PTR_ERR(ctx->dev_csr);
+ goto err1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ ctx->bus_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->bus_csr)) {
+ dev_err(&pdev->dev, "no SoC bus resource address\n");
+ rc = PTR_ERR(ctx->bus_csr);
+ goto err1;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = xgene_edac_soc_check;
+
+ rc = edac_device_add_device(edac_dev);
+ if (rc > 0) {
+ dev_err(&pdev->dev, "edac_device_add_device failed\n");
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ int irq;
+ int i;
+
+ /*
+ * Register for SoC uncorrectable and correctable errors
+ */
+ for (i = 0; i < 3; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err2;
+ }
+ rc = devm_request_irq(&pdev->dev, irq,
+ xgene_edac_soc_isr, IRQF_SHARED,
+ dev_name(&pdev->dev), edac_dev);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Could not request IRQ %d\n", irq);
+ goto err2;
+ }
+ }
+
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ xgene_edac_soc_hw_ctl(edac_dev, true);
+
+ devres_remove_group(&pdev->dev, xgene_edac_soc_probe);
+
+ dev_info(&pdev->dev, "X-Gene EDAC SoC registered\n");
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err1:
+ edac_device_free_ctl_info(edac_dev);
+err:
+ devres_release_group(&pdev->dev, xgene_edac_soc_probe);
+ return rc;
+}
+
+static int xgene_edac_soc_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&pdev->dev);
+
+ xgene_edac_soc_hw_ctl(edac_dev, false);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id xgene_edac_soc_of_match[] = {
+ { .compatible = "apm,xgene-edac-soc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_edac_soc_of_match);
+#endif
+
+static struct platform_driver xgene_edac_soc_driver = {
+ .probe = xgene_edac_soc_probe,
+ .remove = xgene_edac_soc_remove,
+ .driver = {
+ .name = "xgene-edac-soc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(xgene_edac_soc_of_match),
+ },
+};
+
+static int __init xgene_edac_init(void)
+{
+ int rc;
+
+ /* Make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ rc = platform_driver_register(&xgene_edac_mc_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MOD_STR, "MCU fails to register\n");
+ goto reg_mc_failed;
+ }
+ rc = platform_driver_register(&xgene_edac_pmd_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MOD_STR, "PMD fails to register\n");
+ goto reg_pmd_failed;
+ }
+ rc = platform_driver_register(&xgene_edac_l3_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MOD_STR, "L3 fails to register\n");
+ goto reg_l3_failed;
+ }
+ rc = platform_driver_register(&xgene_edac_soc_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MOD_STR, "SoC fails to register\n");
+ goto reg_soc_failed;
+ }
+
+ return 0;
+
+reg_soc_failed:
+ platform_driver_unregister(&xgene_edac_l3_driver);
+
+reg_l3_failed:
+ platform_driver_unregister(&xgene_edac_pmd_driver);
+
+reg_pmd_failed:
+ platform_driver_unregister(&xgene_edac_mc_driver);
+
+reg_mc_failed:
+ return rc;
+}
+module_init(xgene_edac_init);
+
+static void __exit xgene_edac_exit(void)
+{
+ platform_driver_unregister(&xgene_edac_soc_driver);
+ platform_driver_unregister(&xgene_edac_l3_driver);
+ platform_driver_unregister(&xgene_edac_pmd_driver);
+ platform_driver_unregister(&xgene_edac_mc_driver);
+}
+module_exit(xgene_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Feng Kan <fkan@apm.com>");
+MODULE_DESCRIPTION("APM X-Gene EDAC driver");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index e0f1cb3..9b396d7 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -29,6 +29,8 @@
#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but we consider
the top entry type is only 8 bits */
+static const u8 *smbios_raw_header;
+
struct dmi_sysfs_entry {
struct dmi_header dh;
struct kobject kobj;
@@ -646,9 +648,37 @@ static void cleanup_entry_list(void)
}
}
+static ssize_t smbios_entry_area_raw_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ ssize_t size;
+
+ size = bin_attr->size;
+
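+ /*
+ * Clamp the read to the bytes remaining past @pos, then to the
+ * caller's buffer; a read at or beyond the end returns 0 (EOF).
+ */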
+ if (size > pos)
+ size -= pos;
+ else
+ return 0;
+
+ if (count < size)
+ size = count;
+
+ memcpy(buf, &smbios_raw_header[pos], size);
+
+ return size;
+}
+
+static struct bin_attribute smbios_raw_area_attr = {
+ .read = smbios_entry_area_raw_read,
+ .attr = {.name = "smbios_raw_header", .mode = 0400},
+};
+
static int __init dmi_sysfs_init(void)
{
int error = -ENOMEM;
+ int size;
int val;
/* Set up our directory */
@@ -669,6 +699,18 @@ static int __init dmi_sysfs_init(void)
goto err;
}
+ smbios_raw_header = dmi_get_smbios_entry_area(&size);
+ if (!smbios_raw_header) {
+ pr_debug("dmi-sysfs: SMBIOS raw data is not available.\n");
+ error = -EINVAL;
+ goto err;
+ }
+
+ /* Create the raw binary file to access the entry area */
+ smbios_raw_area_attr.size = size;
+ if (sysfs_create_bin_file(dmi_kobj, &smbios_raw_area_attr))
+ goto err;
+
pr_debug("dmi-sysfs: loaded.\n");
return 0;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 69fac06..07d2960 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -114,6 +114,8 @@ static void dmi_table(u8 *buf, u32 len, int num,
}
}
+static u8 smbios_header[32];
+static int smbios_header_size;
static phys_addr_t dmi_base;
static u32 dmi_len;
static u16 dmi_num;
@@ -475,6 +477,8 @@ static int __init dmi_present(const u8 *buf)
if (memcmp(buf, "_SM_", 4) == 0 &&
buf[5] < 32 && dmi_checksum(buf, buf[5])) {
smbios_ver = get_unaligned_be16(buf + 6);
+ smbios_header_size = buf[5];
+ memcpy(smbios_header, buf, smbios_header_size);
/* Some BIOS report weird SMBIOS version, fix that up */
switch (smbios_ver) {
@@ -506,6 +510,8 @@ static int __init dmi_present(const u8 *buf)
pr_info("SMBIOS %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);
} else {
+ smbios_header_size = 15;
+ memcpy(smbios_header, buf, smbios_header_size);
dmi_ver = (buf[14] & 0xF0) << 4 |
(buf[14] & 0x0F);
pr_info("Legacy DMI %d.%d present.\n",
@@ -531,6 +537,8 @@ static int __init dmi_smbios3_present(const u8 *buf)
dmi_ver = get_unaligned_be16(buf + 7);
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
+ smbios_header_size = buf[6];
+ memcpy(smbios_header, buf, smbios_header_size);
/*
* The 64-bit SMBIOS 3.0 entry point no longer has a field
@@ -942,3 +950,21 @@ void dmi_memdev_name(u16 handle, const char **bank, const char **device)
}
}
EXPORT_SYMBOL_GPL(dmi_memdev_name);
+
+/**
+ * dmi_get_smbios_entry_area - return the saved SMBIOS entry point area.
+ * @size: pointer through which the actual size of the SMBIOS entry
+ *        point area is returned.
+ *
+ * Returns NULL if the table is not available, otherwise returns a
+ * pointer to the SMBIOS entry point area array.
+ */
+const u8 *dmi_get_smbios_entry_area(int *size)
+{
+ if (!smbios_header_size || !dmi_available)
+ return NULL;
+
+ *size = smbios_header_size;
+
+ return smbios_header;
+}
+EXPORT_SYMBOL_GPL(dmi_get_smbios_entry_area);
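+
+/*
+ * Illustrative usage (a sketch, not a definitive API contract): the
+ * dmi-sysfs change earlier in this patch sizes its raw binary
+ * attribute roughly like this:
+ *
+ *	int size;
+ *	const u8 *header = dmi_get_smbios_entry_area(&size);
+ *
+ *	if (header)
+ *		smbios_raw_area_attr.size = size;
+ */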
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index dcae482..2b38147 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -295,62 +295,3 @@ fail_free_image:
fail:
return EFI_ERROR;
}
-
-/*
- * This is the base address at which to start allocating virtual memory ranges
- * for UEFI Runtime Services. This is in the low TTBR0 range so that we can use
- * any allocation we choose, and eliminate the risk of a conflict after kexec.
- * The value chosen is the largest non-zero power of 2 suitable for this purpose
- * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
- * be mapped efficiently.
- */
-#define EFI_RT_VIRTUAL_BASE 0x40000000
-
-/*
- * efi_get_virtmap() - create a virtual mapping for the EFI memory map
- *
- * This function populates the virt_addr fields of all memory region descriptors
- * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
- * are also copied to @runtime_map, and their total count is returned in @count.
- */
-void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
- unsigned long desc_size, efi_memory_desc_t *runtime_map,
- int *count)
-{
- u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
- efi_memory_desc_t *out = runtime_map;
- int l;
-
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *in = (void *)memory_map + l;
- u64 paddr, size;
-
- if (!(in->attribute & EFI_MEMORY_RUNTIME))
- continue;
-
- /*
- * Make the mapping compatible with 64k pages: this allows
- * a 4k page size kernel to kexec a 64k page size kernel and
- * vice versa.
- */
- paddr = round_down(in->phys_addr, SZ_64K);
- size = round_up(in->num_pages * EFI_PAGE_SIZE +
- in->phys_addr - paddr, SZ_64K);
-
- /*
- * Avoid wasting memory on PTEs by choosing a virtual base that
- * is compatible with section mappings if this region has the
- * appropriate size and physical alignment. (Sections are 2 MB
- * on 4k granule kernels)
- */
- if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
- efi_virt_base = round_up(efi_virt_base, SZ_2M);
-
- in->virt_addr = efi_virt_base + in->phys_addr - paddr;
- efi_virt_base += size;
-
- memcpy(out, in, desc_size);
- out = (void *)out + desc_size;
- ++*count;
- }
-}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 47437b1..d1ba39c 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -43,8 +43,4 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
void *get_fdt(efi_system_table_t *sys_table);
-void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
- unsigned long desc_size, efi_memory_desc_t *runtime_map,
- int *count);
-
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 91da56c..c846a96 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -14,8 +14,6 @@
#include <linux/libfdt.h>
#include <asm/efi.h>
-#include "efistub.h"
-
efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
@@ -195,26 +193,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long map_size, desc_size;
u32 desc_ver;
unsigned long mmap_key;
- efi_memory_desc_t *memory_map, *runtime_map;
+ efi_memory_desc_t *memory_map;
unsigned long new_fdt_size;
efi_status_t status;
- int runtime_entry_count = 0;
-
- /*
- * Get a copy of the current memory map that we will use to prepare
- * the input for SetVirtualAddressMap(). We don't have to worry about
- * subsequent allocations adding entries, since they could not affect
- * the number of EFI_MEMORY_RUNTIME regions.
- */
- status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
- return status;
- }
-
- pr_efi(sys_table,
- "Exiting boot services and installing virtual address map...\n");
/*
* Estimate size of new FDT, and allocate memory for it. We
@@ -267,48 +248,12 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
- /*
- * Update the memory map with virtual addresses. The function will also
- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
- * entries so that we can pass it straight into SetVirtualAddressMap()
- */
- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
- &runtime_entry_count);
-
/* Now we are ready to exit_boot_services.*/
status = sys_table->boottime->exit_boot_services(handle, mmap_key);
- if (status == EFI_SUCCESS) {
- efi_set_virtual_address_map_t *svam;
-
- /* Install the new virtual address map */
- svam = sys_table->runtime->set_virtual_address_map;
- status = svam(runtime_entry_count * desc_size, desc_size,
- desc_ver, runtime_map);
- /*
- * We are beyond the point of no return here, so if the call to
- * SetVirtualAddressMap() failed, we need to signal that to the
- * incoming kernel but proceed normally otherwise.
- */
- if (status != EFI_SUCCESS) {
- int l;
-
- /*
- * Set the virtual address field of all
- * EFI_MEMORY_RUNTIME entries to 0. This will signal
- * the incoming kernel that no virtual translation has
- * been installed.
- */
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *p = (void *)memory_map + l;
-
- if (p->attribute & EFI_MEMORY_RUNTIME)
- p->virt_addr = 0;
- }
- }
- return EFI_SUCCESS;
- }
+ if (status == EFI_SUCCESS)
+ return status;
pr_efi_err(sys_table, "Exit boot services failed.\n");
@@ -319,7 +264,6 @@ fail_free_new_fdt:
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
fail:
- sys_table->boottime->free_pool(runtime_map);
return EFI_LOAD_ERROR;
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fc13dd5..3143a6e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -381,7 +381,10 @@ static struct device_node *dev_get_dev_node(struct device *dev)
while (!pci_is_root_bus(bus))
bus = bus->parent;
- return bus->bridge->parent->of_node;
+ if (bus->bridge->parent)
+ return bus->bridge->parent->of_node;
+ else
+ return NULL;
}
return dev->of_node;
@@ -497,6 +500,9 @@ static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
struct arm_smmu_master *master = NULL;
struct device_node *dev_node = dev_get_dev_node(dev);
+ if (!dev_node)
+ return NULL;
+
spin_lock(&arm_smmu_devices_lock);
list_for_each_entry(smmu, &arm_smmu_devices, list) {
master = find_smmu_master(smmu, dev_node);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index fdf7065..235524d 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "GICv2m: " fmt
+#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
@@ -45,7 +46,6 @@
struct v2m_data {
spinlock_t msi_cnt_lock;
- struct msi_controller mchip;
struct resource res; /* GICv2m resource */
void __iomem *base; /* GICv2m virt address */
u32 spi_start; /* The SPI number that MSIs start */
@@ -192,7 +192,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops gicv2m_domain_ops = {
+static struct irq_domain_ops gicv2m_domain_ops = {
.alloc = gicv2m_irq_domain_alloc,
.free = gicv2m_irq_domain_free,
};
@@ -213,11 +213,17 @@ static bool is_msi_spi_valid(u32 base, u32 num)
return true;
}
-static int __init gicv2m_init_one(struct device_node *node,
- struct irq_domain *parent)
+static char gicv2m_msi_domain_name[] = "V2M-MSI";
+static char gicv2m_domain_name[] = "GICV2M";
+static int __init gicv2m_init_one(struct irq_domain *parent,
+ u32 spi_start, u32 nr_spis,
+ struct resource *res,
+ struct device_node *node,
+ u32 msi_frame_id)
{
int ret;
struct v2m_data *v2m;
+ struct irq_domain *inner_domain;
v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
if (!v2m) {
@@ -225,23 +231,17 @@ static int __init gicv2m_init_one(struct device_node *node,
return -ENOMEM;
}
- ret = of_address_to_resource(node, 0, &v2m->res);
- if (ret) {
- pr_err("Failed to allocate v2m resource.\n");
- goto err_free_v2m;
- }
-
- v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
+ v2m->base = ioremap(res->start, resource_size(res));
if (!v2m->base) {
pr_err("Failed to map GICv2m resource\n");
ret = -ENOMEM;
goto err_free_v2m;
}
+ memcpy(&v2m->res, res, sizeof(struct resource));
- if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
- !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
- pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
- v2m->spi_start, v2m->nr_spis);
+ if (spi_start && nr_spis) {
+ v2m->spi_start = spi_start;
+ v2m->nr_spis = nr_spis;
} else {
u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
@@ -261,43 +261,35 @@ static int __init gicv2m_init_one(struct device_node *node,
goto err_iounmap;
}
- v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
- if (!v2m->domain) {
+ inner_domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
+ if (!inner_domain) {
pr_err("Failed to create GICv2m domain\n");
ret = -ENOMEM;
goto err_free_bm;
}
- v2m->domain->parent = parent;
- v2m->mchip.of_node = node;
- v2m->mchip.domain = pci_msi_create_irq_domain(node,
- &gicv2m_msi_domain_info,
- v2m->domain);
- if (!v2m->mchip.domain) {
+ inner_domain->parent = parent;
+ inner_domain->name = gicv2m_domain_name;
+ gicv2m_msi_domain_info.acpi_msi_frame_id = msi_frame_id;
+ v2m->domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
+ inner_domain);
+ if (!v2m->domain) {
pr_err("Failed to create MSI domain\n");
ret = -ENOMEM;
goto err_free_domains;
}
- spin_lock_init(&v2m->msi_cnt_lock);
+ v2m->domain->name = gicv2m_msi_domain_name;
- ret = of_pci_msi_chip_add(&v2m->mchip);
- if (ret) {
- pr_err("Failed to add msi_chip.\n");
- goto err_free_domains;
- }
-
- pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
- (unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
- v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
+ spin_lock_init(&v2m->msi_cnt_lock);
return 0;
err_free_domains:
- if (v2m->mchip.domain)
- irq_domain_remove(v2m->mchip.domain);
if (v2m->domain)
irq_domain_remove(v2m->domain);
+ if (inner_domain)
+ irq_domain_remove(inner_domain);
err_free_bm:
kfree(v2m->bm);
err_iounmap:
@@ -319,15 +311,99 @@ int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
for (child = of_find_matching_node(node, gicv2m_device_id); child;
child = of_find_matching_node(child, gicv2m_device_id)) {
+ u32 spi_start = 0, nr_spis = 0;
+ struct resource res;
+
if (!of_find_property(child, "msi-controller", NULL))
continue;
- ret = gicv2m_init_one(child, parent);
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate v2m resource.\n");
+ break;
+ }
+
+ if (!of_property_read_u32(child, "arm,msi-base-spi", &spi_start) &&
+ !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
+ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+ spi_start, nr_spis);
+
+ ret = gicv2m_init_one(parent, spi_start, nr_spis, &res, child, 0);
if (ret) {
of_node_put(node);
break;
}
+
+ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", child->name,
+ (unsigned long)res.start, (unsigned long)res.end,
+ spi_start, (spi_start + nr_spis));
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_ACPI
+static struct acpi_madt_generic_msi_frame *msi_frame;
+
+static int __init
+gic_acpi_parse_madt_msi(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_msi_frame *frame;
+
+ frame = (struct acpi_madt_generic_msi_frame *)header;
+ if (BAD_MADT_ENTRY(frame, end))
+ return -EINVAL;
+
+ if (msi_frame)
+ pr_warn("Only one GIC MSI FRAME supported.\n");
+ else
+ msi_frame = frame;
+
+ return 0;
+}
+
+int __init gicv2m_acpi_init(struct acpi_table_header *table,
+ struct irq_domain *parent)
+{
+ int ret = 0;
+ int count, i;
+ struct acpi_madt_generic_msi_frame *cur;
+
+ count = acpi_parse_entries(ACPI_SIG_MADT, sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_msi, table,
+ ACPI_MADT_TYPE_GENERIC_MSI_FRAME, 0);
+ if ((count <= 0) || !msi_frame) {
+ pr_debug("No valid ACPI GIC MSI FRAME exist\n");
+ return 0;
}
+ for (i = 0, cur = msi_frame; i < count; i++, cur++) {
+ struct resource res;
+ u32 spi_start = 0, nr_spis = 0;
+
+ res.start = cur->base_address;
+ res.end = cur->base_address + 0x1000 - 1; /* resource end is inclusive */
+
+ if (cur->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
+ spi_start = cur->spi_base;
+ nr_spis = cur->spi_count;
+
+ pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
+ spi_start, nr_spis);
+ }
+
+ ret = gicv2m_init_one(parent, spi_start, nr_spis, &res, NULL,
+ cur->msi_frame_id);
+ if (ret)
+ break;
+
+ pr_info("MSI frame ID %u: range[%#lx:%#lx], SPI[%d:%d]\n",
+ cur->msi_frame_id,
+ (unsigned long)res.start, (unsigned long)res.end,
+ spi_start, (spi_start + nr_spis));
+ }
return ret;
}
+
+#endif /* CONFIG_ACPI */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 596b0a9..71c1ca4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -54,13 +54,12 @@ struct its_collection {
/*
* The ITS structure - contains most of the infrastructure, with the
- * msi_controller, the command queue, the collections, and the list of
- * devices writing to it.
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
*/
struct its_node {
raw_spinlock_t lock;
struct list_head entry;
- struct msi_controller msi_chip;
struct irq_domain *domain;
void __iomem *base;
unsigned long phys_base;
@@ -831,7 +830,7 @@ static int its_alloc_tables(struct its_node *its)
if (order >= MAX_ORDER) {
order = MAX_ORDER - 1;
pr_warn("%s: Device Table too large, reduce its page order to %u\n",
- its->msi_chip.of_node->full_name, order);
+ its->domain->of_node->full_name, order);
}
}
@@ -898,7 +897,7 @@ retry_baser:
if (val != tmp) {
pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
- its->msi_chip.of_node->full_name, i,
+ its->domain->of_node->full_name, i,
(unsigned long) val, (unsigned long) tmp);
err = -ENXIO;
goto out_free;
@@ -1353,6 +1352,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
struct resource res;
struct its_node *its;
void __iomem *its_base;
+ struct irq_domain *inner_domain = NULL;
u32 val;
u64 baser, tmp;
int err;
@@ -1396,7 +1396,6 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res.start;
- its->msi_chip.of_node = node;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1430,26 +1429,22 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
- if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
- its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
- if (!its->domain) {
+ if (of_property_read_bool(node, "msi-controller")) {
+ inner_domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
+ if (!inner_domain) {
err = -ENOMEM;
goto out_free_tables;
}
- its->domain->parent = parent;
+ inner_domain->parent = parent;
- its->msi_chip.domain = pci_msi_create_irq_domain(node,
- &its_pci_msi_domain_info,
- its->domain);
- if (!its->msi_chip.domain) {
+ its->domain = pci_msi_create_irq_domain(node,
+ &its_pci_msi_domain_info,
+ inner_domain);
+ if (!its->domain) {
err = -ENOMEM;
goto out_free_domains;
}
-
- err = of_pci_msi_chip_add(&its->msi_chip);
- if (err)
- goto out_free_domains;
}
spin_lock(&its_lock);
@@ -1459,10 +1454,10 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
return 0;
out_free_domains:
- if (its->msi_chip.domain)
- irq_domain_remove(its->msi_chip.domain);
if (its->domain)
irq_domain_remove(its->domain);
+ if (inner_domain)
+ irq_domain_remove(inner_domain);
out_free_tables:
its_free_tables(its);
out_free_cmd:
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fd8850d..507a34a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -524,9 +524,19 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
isb();
}
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
+static void gic_wakeup_parked_cpu(int cpu)
+{
+ gic_raise_softirq(cpumask_of(cpu), 0);
+}
+#endif
+
static void gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
+ set_smp_boot_wakeup_call(gic_wakeup_parked_cpu);
+#endif
register_cpu_notifier(&gic_cpu_notifier);
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 471e1cd..aec5f2f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -33,12 +33,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/irqchip/arm-gic-acpi.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -648,6 +650,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
+static void gic_wakeup_parked_cpu(int cpu)
+{
+ gic_raise_softirq(cpumask_of(cpu), GIC_DIST_SOFTINT_NSATT);
+}
+#endif
#endif
#ifdef CONFIG_BL_SWITCHER
@@ -988,7 +997,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
- if (node) { /* DT case */
+ if (!acpi_disabled) { /* ACPI case */
+ gic->domain = irq_domain_add_linear(node, gic_irqs,
+ &gic_irq_domain_hierarchy_ops, gic);
+ } else if (node) { /* DT case */
const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;
if (!of_property_read_u32(node, "arm,routable-irqs",
@@ -1032,6 +1044,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
register_cpu_notifier(&gic_cpu_notifier);
+#ifdef CONFIG_ARM_PARKING_PROTOCOL
+ set_smp_boot_wakeup_call(gic_wakeup_parked_cpu);
+#endif
#endif
set_handle_irq(gic_handle_irq);
}
@@ -1042,9 +1057,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_pm_init(gic);
}
-#ifdef CONFIG_OF
static int gic_cnt __initdata;
+#ifdef CONFIG_OF
static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
@@ -1090,3 +1105,109 @@ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif
+
+#ifdef CONFIG_ACPI
+static phys_addr_t dist_phy_base, cpu_phy_base __initdata;
+
+static int __init
+gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+ phys_addr_t gic_cpu_base;
+ static int cpu_base_assigned;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ /*
+ * There is no support for non-banked GICv1/2 registers in the ACPI
+ * spec. All CPU interface addresses have to be the same.
+ */
+ gic_cpu_base = processor->base_address;
+ if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
+ return -EINVAL;
+
+ cpu_phy_base = gic_cpu_base;
+ cpu_base_assigned = 1;
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+
+ if (BAD_MADT_ENTRY(dist, end))
+ return -EINVAL;
+
+ dist_phy_base = dist->base_address;
+ return 0;
+}
+
+int __init
+gic_v2_acpi_init(struct acpi_table_header *table, struct irq_domain **domain)
+{
+ void __iomem *cpu_base, *dist_base;
+ int count;
+
+ /* Collect CPU base addresses */
+ count = acpi_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_cpu, table,
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
+ if (count <= 0) {
+ pr_err("No valid GICC entries exist\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the distributor base address. We expect one distributor entry
+ * since the ACPI 5.1 spec supports neither multiple GIC instances
+ * nor GIC cascades.
+ */
+ count = acpi_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_distributor, table,
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
+ if (count <= 0) {
+ pr_err("No valid GICD entries exist\n");
+ return -EINVAL;
+ } else if (count > 1) {
+ pr_err("More than one GICD entry detected\n");
+ return -EINVAL;
+ }
+
+ cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+ if (!cpu_base) {
+ pr_err("Unable to map GICC registers\n");
+ return -ENOMEM;
+ }
+
+ dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
+ if (!dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ iounmap(cpu_base);
+ return -ENOMEM;
+ }
+
+ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, 0, NULL);
+ *domain = gic_data[gic_cnt].domain;
+
+ if (!*domain) {
+ pr_err("Unable to create domain\n");
+ return -EFAULT;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) {
+ gicv2m_acpi_init(table, gic_data[gic_cnt].domain);
+ }
+
+ gic_cnt++;
+ return 0;
+}
+#endif
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 0fe2f71..afd1af3 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi_irq.h>
#include <linux/init.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
@@ -26,4 +27,6 @@ extern struct of_device_id __irqchip_of_table[];
void __init irqchip_init(void)
{
of_irq_init(__irqchip_of_table);
+
+ acpi_irq_init();
}
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index a38a2dc..bf0cf2f 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
obj-$(CONFIG_AMD_XGBE) += xgbe/
+obj-$(CONFIG_AMD_XGBE) += xgbe-a0/
diff --git a/drivers/net/ethernet/amd/xgbe-a0/Makefile b/drivers/net/ethernet/amd/xgbe-a0/Makefile
new file mode 100644
index 0000000..561116f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe-a0.o
+
+amd-xgbe-a0-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+ xgbe-ptp.o
+
+amd-xgbe-a0-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
+amd-xgbe-a0-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-common.h b/drivers/net/ethernet/amd/xgbe-a0/xgbe-common.h
new file mode 100644
index 0000000..75b08c6
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-common.h
@@ -0,0 +1,1142 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_TDC_INDEX 24
+#define DMA_AXIAWCR_TDC_WIDTH 4
+#define DMA_AXIAWCR_TDD_INDEX 28
+#define DMA_AXIAWCR_TDD_WIDTH 2
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ * (An illustrative address computation follows the offset list below.)
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
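+
+/* Illustrative only (an assumed helper, not part of this driver):
+ * combining the offsets above, channel n's copy of a register lives at
+ *
+ *	DMA_CH_BASE + n * DMA_CH_INC + reg
+ *
+ * e.g. channel 2's DMA_CH_SR is 0x3100 + 2 * 0x80 + 0x60 = 0x3260.
+ */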
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x4c
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
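+/* Worked example (illustrative only): the address of a per-queue
+ * register is MTL_Q_BASE + (queue * MTL_Q_INC) + register offset, so
+ * queue 2's TQOMR sits at 0x1100 + (2 * 0x80) + 0x00 = 0x1200. The
+ * XGMAC_MTL_* accessor macros below perform the same calculation.
+ */
+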
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RFA_INDEX 8
+#define MTL_Q_RQOMR_RFA_WIDTH 3
+#define MTL_Q_RQOMR_RFD_INDEX 13
+#define MTL_Q_RQOMR_RFD_WIDTH 3
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first traffic class has
+ * registers that begin at 0x1100. Each subsequent traffic class has
+ * registers that are accessed using an offset of 0x80 from the
+ * previous traffic class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS MMD select register offset
+ * The MMD select register is used for accessing PCS registers
+ * when the underlying APB3 interface is using indirect addressing.
+ * Indirect addressing requires accessing registers in two phases,
+ * an address phase and a data phase. The address phase requires
+ * writing an address selection value to the MMD select register.
+ */
+#define PCS_MMD_SELECT 0xff
+
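+/* Illustrative sketch of the two-phase access (not the driver's
+ * accessors, which live in xgbe-dev.c; "mmd_address" is assumed to be
+ * the combined MMD/register selector built by the caller):
+ *
+ *	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ *	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ *
+ * The first write selects the register window (address phase); the
+ * read then returns the data for the low address bits (data phase).
+ */
+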
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
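+/* Illustrative decode (sketch): descriptors are little-endian, so the
+ * L34T field is extracted with the _LE accessors defined below, e.g.
+ *
+ *	l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ *
+ * and compared against the RX_DESC3_L34T_* values above to identify
+ * the layer 3/layer 4 protocol of the received packet.
+ */
+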
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
+ (_var) |= cpu_to_le32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
+
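+/* Example (illustrative only): for the Tx PBL field at index 16 with
+ * width 6 (DMA_CH_TCR_PBL above):
+ *
+ *	u32 val = 0;
+ *	SET_BITS(val, DMA_CH_TCR_PBL_INDEX, DMA_CH_TCR_PBL_WIDTH, 16);
+ *	pbl = GET_BITS(val, DMA_CH_TCR_PBL_INDEX, DMA_CH_TCR_PBL_WIDTH);
+ *
+ * SET_BITS leaves val == 0x00100000 (16 placed in bits 21:16) and
+ * GET_BITS reads the 16 back out.
+ */
+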
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define XGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
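+/* Example (illustrative only): the token pasting builds the _INDEX and
+ * _WIDTH names from the prefix and field, so
+ *
+ *	XGMAC_SET_BITS(reg_val, MAC_RCR, JE, 1)
+ *
+ * expands to SET_BITS(reg_val, MAC_RCR_JE_INDEX, MAC_RCR_JE_WIDTH, 1),
+ * setting the jumbo-enable bit (bit 8) in a cached MAC_RCR value.
+ */
+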
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XGMAC_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
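+/* Example (illustrative only): a read-modify-write of one register
+ * field, such as enabling the MAC receiver:
+ *
+ *	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+ *
+ * reads MAC_RCR (offset 0x0004), sets bit 0 and writes the value back.
+ */
+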
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register offset is calculated from the queue or traffic class
+ * number
+ */
+#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ ioread32((_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
+
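+/* Example (illustrative only): enabling Tx queue "i" by writing the
+ * TXQEN field of that queue's TQOMR:
+ *
+ *	XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ *			       MTL_Q_ENABLED);
+ */
+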
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define XGMAC_DMA_IOREAD(_channel, _reg) \
+ ioread32((_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ iowrite32((_val), (_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
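+/* Example (illustrative only): starting a channel's Tx DMA by setting
+ * the ST bit of its transmit control register:
+ *
+ *	XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ */
+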
+/* Macros for reading or writing full register values within the
+ * memory-mapped XPCS register space.
+ */
+#define XPCS_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ mmd_val &= ~_mask; \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
+} while (0)
+
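+/* Example (illustrative only): clause-45 access with the standard
+ * Linux MMD and register constants from <linux/mdio.h>:
+ *
+ *	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ *	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PCS, MDIO_CTRL1,
+ *			 MDIO_CTRL1_LPOWER, MDIO_CTRL1_LPOWER);
+ *
+ * The second call sets the low-power bit without shifting, since the
+ * _BITS variants operate on mask values.
+ */
+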
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-dcb.c
new file mode 100644
index 0000000..343301c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-dcb.c
@@ -0,0 +1,269 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_dcb_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported traffic classes */
+ ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->ets) {
+ ets->cbs = pdata->ets->cbs;
+ memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, pdata->ets->prio_tc,
+ sizeof(ets->prio_tc));
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i, tc_ets, tc_ets_weight;
+
+ tc_ets = 0;
+ tc_ets_weight = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
+ DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
+ (i >= pdata->hw_feat.tc_cnt))
+ return -EINVAL;
+
+ if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
+ return -EINVAL;
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_ets = 1;
+ tc_ets_weight += ets->tc_tx_bw[i];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Weights must add up to 100% */
+ if (tc_ets && (tc_ets_weight != 100))
+ return -EINVAL;
+
+ if (!pdata->ets) {
+ pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
+ GFP_KERNEL);
+ if (!pdata->ets)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+ pdata->hw_if.config_dcb_tc(pdata);
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported PFC traffic classes */
+ pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->pfc) {
+ pfc->pfc_en = pdata->pfc->pfc_en;
+ pfc->mbc = pdata->pfc->mbc;
+ pfc->delay = pdata->pfc->delay;
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+ if (!pdata->pfc) {
+ pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
+ GFP_KERNEL);
+ if (!pdata->pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+ pdata->hw_if.config_dcb_pfc(pdata);
+
+ return 0;
+}
+
+static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
+{
+ u8 support = xgbe_dcb_getdcbx(netdev);
+
+ DBGPR(" DCBX=%#hhx\n", dcbx);
+
+ if (dcbx & ~support)
+ return 1;
+
+ if ((dcbx & support) != support)
+ return 1;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = xgbe_dcb_ieee_getets,
+ .ieee_setets = xgbe_dcb_ieee_setets,
+ .ieee_getpfc = xgbe_dcb_ieee_getpfc,
+ .ieee_setpfc = xgbe_dcb_ieee_setpfc,
+
+ /* DCBX configuration */
+ .getdcbx = xgbe_dcb_getdcbx,
+ .setdcbx = xgbe_dcb_setdcbx,
+};
+
+const struct dcbnl_rtnl_ops *xgbe_a0_get_dcbnl_ops(void)
+{
+ return &xgbe_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-debugfs.c
new file mode 100644
index 0000000..ecfa6f9
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-debugfs.c
@@ -0,0 +1,373 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static ssize_t xgbe_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char workarea[32];
+ ssize_t len;
+ int ret;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ workarea[len] = '\0';
+ ret = kstrtouint(workarea, 16, value);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xgmac_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xgmac_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_addr_read,
+ .write = xgmac_reg_addr_write,
+};
+
+static const struct file_operations xgmac_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_value_read,
+ .write = xgmac_reg_value_write,
+};
+
+static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xpcs_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+ value);
+
+ return len;
+}
+
+static const struct file_operations xpcs_mmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_mmd_read,
+ .write = xpcs_mmd_write,
+};
+
+static const struct file_operations xpcs_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_addr_read,
+ .write = xpcs_reg_addr_write,
+};
+
+static const struct file_operations xpcs_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_value_read,
+ .write = xpcs_reg_value_write,
+};
+
+void xgbe_a0_debugfs_init(struct xgbe_prv_data *pdata)
+{
+ struct dentry *pfile;
+ char *buf;
+
+ /* Set defaults */
+ pdata->debugfs_xgmac_reg = 0;
+ pdata->debugfs_xpcs_mmd = 1;
+ pdata->debugfs_xpcs_reg = 0;
+
+ buf = kasprintf(GFP_KERNEL, "amd-xgbe-a0-%s", pdata->netdev->name);
+ if (!buf)
+ return;
+
+ pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+ if (!pdata->xgbe_debugfs) {
+ netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ kfree(buf);
+ return;
+ }
+
+ pfile = debugfs_create_file("xgmac_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xgmac_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_mmd", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ kfree(buf);
+}
+
+void xgbe_a0_debugfs_exit(struct xgbe_prv_data *pdata)
+{
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-desc.c
new file mode 100644
index 0000000..5dd5777
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-desc.c
@@ -0,0 +1,636 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
+
+static void xgbe_free_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_ring_data *rdata;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->rdata) {
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+ xgbe_unmap_rdata(pdata, rdata);
+ }
+
+ kfree(ring->rdata);
+ ring->rdata = NULL;
+ }
+
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
+ if (ring->rdesc) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ ring->rdesc_count),
+ ring->rdesc, ring->rdesc_dma);
+ ring->rdesc = NULL;
+ }
+}
+
+static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+
+ DBGPR("<--xgbe_free_ring_resources\n");
+}
+
+static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+{
+ DBGPR("-->xgbe_init_ring\n");
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->rdesc_count = rdesc_count;
+ ring->rdesc = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ rdesc_count), &ring->rdesc_dma,
+ GFP_KERNEL);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+ ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
+ GFP_KERNEL);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
+ ring->rdesc, ring->rdesc_dma, ring->rdata);
+
+ DBGPR("<--xgbe_init_ring\n");
+
+ return 0;
+}
+
+static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+ int ret;
+
+ DBGPR("-->xgbe_alloc_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ DBGPR(" %s - tx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring\n");
+ goto err_ring;
+ }
+
+ DBGPR(" %s - rx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+				     "error initializing Rx ring\n");
+ goto err_ring;
+ }
+ }
+
+ DBGPR("<--xgbe_alloc_ring_resources\n");
+
+ return 0;
+
+err_ring:
+ xgbe_free_ring_resources(pdata);
+
+ return ret;
+}
+
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
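+
+/*
+ * Illustrative note on the carving above (sizes assumed, not taken from
+ * this driver): with pages_len = 16384 and len = 2048, eight Rx buffers
+ * share a single DMA mapping; the eighth consumer inherits pa_unmap, and
+ * the next call into xgbe_map_rx_buffer allocates a fresh page set.
+ */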
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+ order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
+static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
+
+ hw_if->tx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
+}
+
+static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+
+ hw_if->rx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
+}
+
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
+{
+ if (rdata->skb_dma) {
+ if (rdata->mapped_as_page) {
+ dma_unmap_page(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ }
+ rdata->skb_dma = 0;
+ rdata->skb_dma_len = 0;
+ }
+
+ if (rdata->skb) {
+ dev_kfree_skb_any(rdata->skb);
+ rdata->skb = NULL;
+ }
+
+ if (rdata->rx.hdr.pa.pages)
+ put_page(rdata->rx.hdr.pa.pages);
+
+ if (rdata->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
+ rdata->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.hdr.pa_unmap.pages);
+ }
+
+ if (rdata->rx.buf.pa.pages)
+ put_page(rdata->rx.buf.pa.pages);
+
+ if (rdata->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
+ rdata->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&rdata->tx, 0, sizeof(rdata->tx));
+ memset(&rdata->rx, 0, sizeof(rdata->rx));
+
+ rdata->mapped_as_page = 0;
+
+ if (rdata->state_saved) {
+ rdata->state_saved = 0;
+ rdata->state.incomplete = 0;
+ rdata->state.context_next = 0;
+ rdata->state.skb = NULL;
+ rdata->state.len = 0;
+ rdata->state.error = 0;
+ }
+}
+
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct skb_frag_struct *frag;
+ dma_addr_t skb_dma;
+ unsigned int start_index, cur_index;
+ unsigned int offset, tso, vlan, datalen, len;
+ unsigned int i;
+
+ DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ packet = &ring->packet_data;
+ packet->rdesc_count = 0;
+ packet->length = 0;
+
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (packet->mss != ring->tx.cur_mss)) ||
+ (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
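+	/* Illustrative: if ring->cur is 10 and a new MSS calls for a TSO
+	 * context descriptor, slot 10 is skipped here and data mapping
+	 * starts at slot 11; xgbe_dev_xmit later fills slot 10 with the
+	 * context descriptor.
+	 */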
+
+ if (tso) {
+ DBGPR(" TSO packet\n");
+
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ packet->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = packet->header_len;
+
+ offset = packet->header_len;
+
+ packet->length += packet->header_len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ DBGPR(" mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen,
+ XGBE_TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ rdata->mapped_as_page = 1;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so rdata is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
+ rdata->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ packet->rdesc_count = cur_index - start_index;
+
+ DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
+
+ return packet->rdesc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ rdata = XGBE_GET_DESC_DATA(ring, start_index++);
+ xgbe_unmap_rdata(pdata, rdata);
+ }
+
+ DBGPR("<--xgbe_map_tx_skb: count=0\n");
+
+ return 0;
+}
+
+void xgbe_a0_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+{
+ DBGPR("-->xgbe_a0_init_function_ptrs_desc\n");
+
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+ desc_if->map_rx_buffer = xgbe_map_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+
+ DBGPR("<--xgbe_a0_init_function_ptrs_desc\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-dev.c
new file mode 100644
index 0000000..f6a3a58
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-dev.c
@@ -0,0 +1,2964 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
+ */
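+	/*
+	 * Worked example (illustrative clock, not a value taken from this
+	 * driver): with a 125 MHz system clock and usec = 100,
+	 *   riwt = (100 * (125000000 / 10^6)) / 256 = 12500 / 256 = 48,
+	 * i.e. one watchdog tick spans 256 / 125 MHz ~= 2.048 us.
+	 */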
+ ret = (usec * (rate / 1000000)) / 256;
+
+ DBGPR("<--xgbe_usec_to_riwt\n");
+
+ return ret;
+}
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+ */
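+	/*
+	 * Inverse worked example (same illustrative 125 MHz clock):
+	 * riwt = 48 maps back to (48 * 256) / 125 = 98 usec; the round
+	 * trip is lossy because of integer division.
+	 */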
+ ret = (riwt * 256) / (rate / 1000000);
+
+ DBGPR("<--xgbe_riwt_to_usec\n");
+
+ return ret;
+}
+
+static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+
+ return 0;
+}
+
+static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
+}
+
+static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
+}
+
+static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ return 0;
+}
+
+static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+}
+
+static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ }
+}
+
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
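+	/*
+	 * Note (illustrative size): with a 40-byte hash key this writes
+	 * ten u32 registers at indices 9 down to 0 while the key pointer
+	 * walks forward through the key bytes.
+	 */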
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
+static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+
+ /* Set MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->tx_pause || (pfc && pfc->pfc_en))
+ xgbe_enable_tx_flow_control(pdata);
+ else
+ xgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->rx_pause || (pfc && pfc->pfc_en))
+ xgbe_enable_rx_flow_control(pdata);
+ else
+ xgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ xgbe_config_tx_flow_control(pdata);
+ xgbe_config_rx_flow_control(pdata);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
+ (pfc && pfc->pfc_en) ? 1 : 0);
+}
+
+static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+			/* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
+static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_ier = 0;
+
+ /* Enable Timestamp interrupt */
+ XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
+
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
+}
+
+static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
+ return 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
+ return 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+
+ return 0;
+}
+
+static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
+ return 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+
+ return 0;
+}
+
+static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
+ return 0;
+
+ DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+
+ return 0;
+}
+
+static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
+ return 0;
+
+ DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
+
+ return 0;
+}
+
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+ struct netdev_hw_addr *ha, unsigned int *mac_reg)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+
+ if (ha) {
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
+ *mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+ }
+
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+ *mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+ *mac_reg += MAC_MACA_INC;
+}
+
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int addn_macs;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xgbe_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xgbe_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+ u32 crc;
+ unsigned int i;
+
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
+ memset(hash_table, 0, sizeof(hash_table));
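+	/*
+	 * Illustrative mapping: hash table sizes of 64/128/256 bits give
+	 * shifts of 26/25/24, so the top 6/7/8 bits of the CRC select a
+	 * bit in the table built below.
+	 */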
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+ hash_reg += MAC_HTR_INC;
+ }
+}
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xgbe_set_mac_hash_table(pdata);
+ else
+ xgbe_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
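+	/* Example (illustrative address): 00:11:22:33:44:55 packs to
+	 * mac_addr_lo = 0x33221100 and mac_addr_hi = 0x5544.
+	 */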
+
+ XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS implementation has reversed the devices in
+ * package registers so we need to change 05 to 06 and
+ * 06 to 05 if being read (these registers are readonly
+ * so no need to do this in the write function)
+ */
+ if ((mmd_address & 0xffff) == 0x05)
+ mmd_address = (mmd_address & ~0xffff) | 0x06;
+ else if ((mmd_address & 0xffff) == 0x06)
+ mmd_address = (mmd_address & ~0xffff) | 0x05;
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
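+	/*
+	 * Worked example (illustrative): for MMD 1, register 0x0001,
+	 * mmd_address = 0x10001; the address phase writes 0x100 to the
+	 * select register and the data phase reads 32 bits at mmio
+	 * offset (0x01 << 2) = 0x04.
+	 */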
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* If the PCS is changing modes, match the MAC speed to it */
+ if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
+ ((mmd_address & 0xffff) == MDIO_CTRL2)) {
+ struct phy_device *phydev = pdata->phydev;
+
+ if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
+ /* KX mode */
+ if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ xgbe_set_gmii_speed(pdata);
+ else
+ xgbe_set_gmii_2500_speed(pdata);
+ } else {
+ /* KR mode */
+ xgbe_set_xgmii_speed(pdata);
+ }
+ }
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+}
+
+static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Enable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+ /* Enable VLAN Hash Table filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+ /* Disable VLAN tag inverse matching */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Disable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+ return 0;
+}
+
+#ifndef CRCPOLY_LE
+#define CRCPOLY_LE 0xedb88320
+#endif
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+ u32 poly = CRCPOLY_LE;
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+ u32 crc;
+ u16 vid;
+ __le16 vid_le;
+ u16 vlan_hash_table = 0;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
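+	/* The >> 28 above keeps the top four CRC bits, selecting one of
+	 * the 16 bits in the VLAN hash table register.
+	 */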
+
+ /* Set the VLAN Hash Table filtering register */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+ return 0;
+}
+
+static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset all other control bits (IC, TTSE, B2L & B1L)
+ * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+}
+
+static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ int i;
+ int start_index = ring->cur;
+
+ DBGPR("-->tx_desc_init\n");
+
+	/* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
+ }
+
+ /* Update the total number of Tx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--tx_desc_init\n");
+}
+
+static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ rdata->interrupt ? 1 : 0);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+}
+
+static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int start_index = ring->cur;
+ unsigned int rx_coalesce, rx_frames;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+ rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
+ rx_frames = pdata->rx_frames;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+ /* Set interrupt on completion bit as appropriate */
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
+ rdata->interrupt = 0;
+ else
+ rdata->interrupt = 1;
+
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(rdata);
+ }
+
+ /* Update the total number of Rx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--rx_desc_init\n");
+}
+
+static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+}
+
+static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+}
+
+static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr;
+ u64 nsec;
+
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+ unsigned int mac_tscr)
+{
+	/* Set one-nanosecond accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return 0;
+
+ /* Initialize time registers */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+ xgbe_set_tstamp_time(pdata, 0, 0);
+
+ /* Initialize the timecounter */
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int total_weight, min_weight, weight;
+ unsigned int i;
+
+ if (!ets)
+ return;
+
+ /* Set Tx to deficit weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+ /* Set Traffic Class algorithms */
+ total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+ min_weight = total_weight / 100;
+ if (!min_weight)
+ min_weight = 1;
+
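+	/*
+	 * Illustrative example: with an MTU of 1500 and 8 traffic
+	 * classes, total_weight = 12000 and min_weight = 120; an ETS
+	 * class assigned 25% bandwidth gets a DWRR weight of 3000 below.
+	 */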
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ DBGPR(" TC%u using SP\n", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_SP);
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ weight = total_weight * ets->tc_tx_bw[i] / 100;
+ weight = clamp(weight, min_weight, total_weight);
+
+ DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+ weight);
+ break;
+ }
+ }
+}
+
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int mask, reg, reg_val;
+ unsigned int tc, prio;
+
+ if (!pfc || !ets)
+ return;
+
+ for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
+ mask = 0;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if ((pfc->pfc_en & (1 << prio)) &&
+ (ets->prio_tc[prio] == tc))
+ mask |= (1 << prio);
+ }
+ mask &= 0xff;
+
+ DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+ reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+ reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ }
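+	/*
+	 * Packing note (illustrative, assuming MTL_TCPM_TC_PER_REG == 4):
+	 * each 32-bit MTL_TCPMnR register holds four 8-bit PFC masks, so
+	 * TC5's mask lands in byte 1 of the second register.
+	 */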
+
+ xgbe_config_flow_control(pdata);
+}
+
+static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
+	/* Issue a poll command to the Tx DMA by writing the address
+	 * of the next free descriptor
+	 */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int csum, tso, vlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_set_ic;
+ int start_index = ring->cur;
+ int cur_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_dev_xmit\n");
+
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
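+	/*
+	 * Worked example (illustrative numbers): with tx_frames = 25 and
+	 * coalesce_count going from 24 to 27 (tx_packets = 3),
+	 * 27 % 25 = 2 < 3, so an interrupt is requested for this chain.
+	 */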
+ ring->coalesce_count += packet->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (packet->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ packet->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if this is a TSO packet */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ DBGPR(" TSO context descriptor, mss=%u\n",
+ packet->mss);
+
+ /* Set the MSS size */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, packet->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ TCMSSV, 1);
+
+ ring->tx.cur_mss = packet->mss;
+ }
+
+ if (vlan_context) {
+ DBGPR(" VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Set the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, packet->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Timestamp enablement check */
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (cur_index != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ packet->tcp_payload_len);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
+ }
+
+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ rdata->tx.packets = packet->tx_packets;
+ rdata->tx.bytes = packet->tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_a0_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
+#endif
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
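+	/* This barrier pairs with the tail pointer (doorbell) update done
+	 * by xgbe_tx_start_xmit() below: the device must observe the OWN
+	 * bit before being told that new descriptors are available.
+	 */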
+
+ ring->cur = cur_index + 1;
+ if (!packet->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xgbe_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
+
+ DBGPR(" %s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->rdesc_count - 1),
+ (ring->cur - 1) & (ring->rdesc_count - 1));
+
+ DBGPR("<--xgbe_dev_xmit\n");
+}
+
+static int xgbe_dev_read(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ struct net_device *netdev = channel->pdata->netdev;
+ unsigned int err, etlt, l34t;
+
+ DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Check for data availability */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ rmb();
+
+#ifdef XGMAC_ENABLE_RX_DESC_DUMP
+ xgbe_a0_dump_rx_desc(ring, rdesc, ring->cur);
+#endif
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+ /* Timestamp Context Descriptor */
+ xgbe_get_rx_tstamp(packet, rdesc);
+
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT, 1);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 1);
+
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
+ /* Not all the data has been transferred for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+ etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+
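+	/* Per the ETLT handling below: values 0x05/0x06 report checksum
+	 * errors (CSUM_DONE is cleared so the stack re-verifies), 0x09
+	 * marks a VLAN-tagged frame, and any other non-zero error type is
+	 * counted as a frame error.
+	 */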
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0,
+ OVT);
+ DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ }
+ } else {
+ if ((etlt == 0x05) || (etlt == 0x06))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ else
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+ }
+
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+}
+
+static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+}
+
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ dma_ch_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
+ return 0;
+}
+
+static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
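+		/* Save the currently enabled interrupts so that the
+		 * XGMAC_INT_DMA_ALL case in xgbe_enable_int() can restore
+		 * them later.
+		 */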
+ channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
+ dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
+ return 0;
+}
+
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ unsigned int count = 2000;
+
+ DBGPR("-->xgbe_exit\n");
+
+ /* Issue a software reset */
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ usleep_range(10, 15);
+
+	/* Poll until the software reset completes */
+ while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ DBGPR("<--xgbe_exit\n");
+
+ return 0;
+}
+
+static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, count;
+
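+	/* Tx queue flushing (FTQ) is only performed on version 2.1 and
+	 * later hardware; earlier cores skip it.
+	 */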
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll until the flush of each Tx queue completes */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+{
+ /* Set enhanced addressing mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
+}
+
+static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ unsigned int arcache, awcache;
+
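+	/* Build the AXI read/write cache-type and shareability-domain
+	 * attributes from the platform-provided arcache/awcache/axdomain
+	 * values, then program each register in one shot.
+	 */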
+ arcache = 0;
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+}
+
+static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+ unsigned int queue_count)
+{
+ unsigned int q_fifo_size = 0;
+ enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ /* Calculate Tx/Rx fifo share per queue */
+ switch (fifo_size) {
+ case 0:
+ q_fifo_size = XGBE_FIFO_SIZE_B(128);
+ break;
+ case 1:
+ q_fifo_size = XGBE_FIFO_SIZE_B(256);
+ break;
+ case 2:
+ q_fifo_size = XGBE_FIFO_SIZE_B(512);
+ break;
+ case 3:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(1);
+ break;
+ case 4:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(2);
+ break;
+ case 5:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(4);
+ break;
+ case 6:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(8);
+ break;
+ case 7:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(16);
+ break;
+ case 8:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(32);
+ break;
+ case 9:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(64);
+ break;
+ case 10:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(128);
+ break;
+ case 11:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(256);
+ break;
+ }
+
+	/* The decoded value may exceed the actual amount of fifo RAM, so
+	 * cap it at the supported maximum
+	 */
+ q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
+ q_fifo_size = q_fifo_size / queue_count;
+
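+	/* Example: a fifo_size encoding of 7 decodes to 16KB above; with
+	 * four queues that is 4KB per queue, which selects the
+	 * XGMAC_MTL_FIFO_SIZE_4K programmable value below.
+	 */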
+ /* Set the queue fifo size programmable value */
+ if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_512;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ return p_fifo;
+}
+
+static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+ pdata->tx_q_count);
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+ pdata->rx_q_count);
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int prio_queues;
+ unsigned int ppq, ppq_extra, prio;
+ unsigned int mask;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+
+ if (i < qptc_extra) {
+ DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+ pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
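+	/* Each Rx queue owns an 8-bit priority-mask field in the RQC2
+	 * registers, MAC_RQC2_Q_PER_REG queues per register; hence the
+	 * "<< 3" (multiply by 8) when packing reg_val below.
+	 */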
+ reg = MAC_RQC2R;
+ reg_val = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ if (i < ppq_extra) {
+ DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ reg += MAC_RQC2_INC;
+ reg_val = 0;
+ }
+
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+}
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ /* Activate flow control when less than 4k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+
+ /* De-activate flow control when more than 6k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ }
+}
+
+static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+ }
+}
+
+static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->phy_speed) {
+ case SPEED_10000:
+ xgbe_set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ xgbe_set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ xgbe_set_gmii_speed(pdata);
+ break;
+ }
+}
+
+static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xgbe_enable_rx_csum(pdata);
+ else
+ xgbe_disable_rx_csum(pdata);
+}
+
+static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+{
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+ /* Set the current VLAN Hash Table register value */
+ xgbe_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xgbe_enable_rx_vlan_filtering(pdata);
+ else
+ xgbe_disable_rx_vlan_filtering(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xgbe_enable_rx_vlan_stripping(pdata);
+ else
+ xgbe_disable_rx_vlan_stripping(pdata);
+}
+
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+	}
+
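+	/* 64-bit counters are split across two 32-bit registers: read the
+	 * low word, then merge in the high word from reg_lo + 4.
+	 */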
+ val = XGMAC_IOREAD(pdata, reg_lo);
+
+ if (read_hi)
+ val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+ return val;
+}
+
+static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
+ stats->txoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
+ stats->txframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
+ stats->txmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
+ stats->tx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
+ stats->tx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
+ stats->tx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
+ stats->tx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
+ stats->tx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
+ stats->tx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
+ stats->txunicastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
+ stats->txmulticastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
+ stats->txunderflowerror +=
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
+ stats->txoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
+ stats->txframecount_g +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
+ stats->txpauseframes +=
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
+ stats->txvlanframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
+ stats->rxframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
+ stats->rxoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
+ stats->rxoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
+ stats->rxbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
+ stats->rxmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
+ stats->rxcrcerror +=
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
+ stats->rxrunterror +=
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
+ stats->rxjabbererror +=
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
+ stats->rxundersize_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
+ stats->rxoversize_g +=
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
+ stats->rx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
+ stats->rx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
+ stats->rx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
+ stats->rx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
+ stats->rx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
+ stats->rx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
+ stats->rxunicastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
+ stats->rxlengtherror +=
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
+ stats->rxoutofrangetype +=
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
+ stats->rxpauseframes +=
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
+ stats->rxfifooverflow +=
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
+ stats->rxvlanframes_gb +=
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
+ stats->rxwatchdogerror +=
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+ /* Freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
+
+ stats->txoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
+}
+
+static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+{
+ /* Set counters to reset on read */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
+
+ /* Reset the counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+}
+
+static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
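+	/* (Queues below DMA_DSRX_FIRST_QUEUE are reported in DMA_DSR0; the
+	 * remainder are packed DMA_DSRX_QPR per register starting at
+	 * DMA_DSR1, each status field DMA_DSR_Q_WIDTH bits wide.)
+	 */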
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
+static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+
+ /*TODO: Poll to be sure the channels have stopped?
+ while (count--) {
+ if (XGMAC_IOREAD_BITS(pdata, DMA_DSR0, TPS) == 6)
+ break;
+ mdelay(1);
+ }
+ */
+}
+
+static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
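+	/* (each queue has a 2-bit enable field in MAC_RQC0R; 0x02 marks the
+	 * queue enabled for generic/DCB traffic, per the usual XGMAC
+	 * encoding)
+	 */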
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Disable each Rx queue */
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+
+ /*TODO: Poll to be sure the channels have stopped?
+ while (count--) {
+ dma_sr0 = XGMAC_IOREAD_BITS(pdata, DMA_DSR0, RPS);
+ if (dma_sr0 == 3 || dma_sr0 == 4)
+ break;
+ mdelay(1);
+ }
+ */
+}
+
+static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+
+ /*TODO: Poll to be sure the channels have stopped?
+ while (count--) {
+ if (XGMAC_IOREAD_BITS(pdata, DMA_DSR0, TPS) == 6)
+ break;
+ mdelay(1);
+ }
+ */
+}
+
+static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+}
+
+static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+
+ /*TODO: Poll to be sure the channels have stopped?
+ while (count--) {
+ dma_sr0 = XGMAC_IOREAD_BITS(pdata, DMA_DSR0, RPS);
+ if (dma_sr0 == 3 || dma_sr0 == 4)
+ break;
+ mdelay(1);
+ }
+ */
+}
+
+static int xgbe_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_init\n");
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialize DMA related features
+ */
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+ xgbe_config_pblx8(pdata);
+ xgbe_config_tx_pbl_val(pdata);
+ xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+
+ /*
+ * Initialize MTL related features
+ */
+ xgbe_config_mtl_mode(pdata);
+ xgbe_config_queue_mapping(pdata);
+ xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control_threshold(pdata);
+	/* TODO: enable forwarding of error packets and undersized good
+	 * packets (FEP and FUP)
+	 */
+ xgbe_config_dcb_tc(pdata);
+ xgbe_config_dcb_pfc(pdata);
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /*
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
+ xgbe_config_mac_speed(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+}
+
+void xgbe_a0_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+{
+	DBGPR("-->xgbe_a0_init_function_ptrs_dev\n");
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+ hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
+ hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
+ hw_if->add_mac_addresses = xgbe_add_mac_addresses;
+ hw_if->set_mac_address = xgbe_set_mac_address;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+
+ hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+ hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+ hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+ hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
+
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+ hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+ hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+ hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+ hw_if->enable_rx = xgbe_enable_rx;
+ hw_if->disable_rx = xgbe_disable_rx;
+
+ hw_if->powerup_tx = xgbe_powerup_tx;
+ hw_if->powerdown_tx = xgbe_powerdown_tx;
+ hw_if->powerup_rx = xgbe_powerup_rx;
+ hw_if->powerdown_rx = xgbe_powerdown_rx;
+
+ hw_if->dev_xmit = xgbe_dev_xmit;
+ hw_if->dev_read = xgbe_dev_read;
+ hw_if->enable_int = xgbe_enable_int;
+ hw_if->disable_int = xgbe_disable_int;
+ hw_if->init = xgbe_init;
+ hw_if->exit = xgbe_exit;
+
+ /* Descriptor related Sequences have to be initialized here */
+ hw_if->tx_desc_init = xgbe_tx_desc_init;
+ hw_if->rx_desc_init = xgbe_rx_desc_init;
+ hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+ hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+ hw_if->is_last_desc = xgbe_is_last_desc;
+ hw_if->is_context_desc = xgbe_is_context_desc;
+ hw_if->tx_start_xmit = xgbe_tx_start_xmit;
+
+ /* For FLOW ctrl */
+ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+
+ /* For RX coalescing */
+ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+ hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+ hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+ hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+ hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+ hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+ hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+ hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+ hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+ hw_if->config_pblx8 = xgbe_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+ hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+
+ /* For PTP config */
+ hw_if->config_tstamp = xgbe_config_tstamp;
+ hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+ hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+ hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+ hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+
+ /* For Data Center Bridging config */
+ hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+ hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
+	DBGPR("<--xgbe_a0_init_function_ptrs_dev\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-drv.c
new file mode 100644
index 0000000..acaeaf5
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-drv.c
@@ -0,0 +1,2204 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include <linux/clk.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
+static void xgbe_set_rx_mode(struct net_device *);
+
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+ int ret = -ENOMEM;
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ goto err_channel;
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the DMA interrupt (offset 1) */
+ ret = platform_get_irq(pdata->pdev, i + 1);
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "platform_get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel = channel_mem;
+ pdata->channel_count = count;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_mem);
+
+err_channel:
+ return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->channel)
+ return;
+
+ kfree(pdata->channel->rx_ring);
+ kfree(pdata->channel->tx_ring);
+ kfree(pdata->channel);
+
+ pdata->channel = NULL;
+ pdata->channel_count = 0;
+}
+
+static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+{
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
+
+static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+ struct xgbe_ring *ring, unsigned int count)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+
+ if (count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_if.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
+static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
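+	/* Round up to the next XGBE_RX_BUF_ALIGN boundary */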
+ rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+ ~(XGBE_RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ enum xgbe_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_if->enable_int(channel, int_id);
+ }
+}
+
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ enum xgbe_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_if->disable_int(channel, int_id);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+ unsigned int mac_isr, mac_tssr;
+ unsigned int i;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts. So for polling mode, we just need to check for
+ * this register to be non-zero
+ */
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ if (!dma_isr)
+ goto isr_done;
+
+ DBGPR(" DMA_ISR = %08x\n", dma_isr);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel + i;
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+
+ /* If we get a TI or RI interrupt that means per channel DMA
+ * interrupts are not enabled, so we use the private data napi
+ * structure, not the per channel napi structure
+ */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+ }
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+ /* Clear all interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+ mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
+ if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+ /* Read Tx Timestamp to clear interrupt */
+ pdata->tx_tstamp =
+ hw_if->get_tx_tstamp(pdata);
+ schedule_work(&pdata->tx_tstamp_work);
+ }
+ }
+ }
+
+ DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
+
+isr_done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+ struct xgbe_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+{
+ struct xgbe_channel *channel = container_of(timer,
+ struct xgbe_channel,
+ tx_timer);
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+ disable_irq(channel->dma_irq);
+ else
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(napi);
+ }
+
+ channel->tx_timer_active = 0;
+
+ DBGPR("<--xgbe_tx_timer\n");
+
+ return HRTIMER_NORESTART;
+}
+
+static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_init_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s adding tx timer\n", channel->name);
+ hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ channel->tx_timer.function = xgbe_tx_timer;
+ }
+
+ DBGPR("<--xgbe_init_tx_timers\n");
+}
+
+static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s deleting tx timer\n", channel->name);
+ channel->tx_timer_active = 0;
+ hrtimer_cancel(&channel->tx_timer);
+ }
+
+ DBGPR("<--xgbe_stop_tx_timers\n");
+}
+
+void xgbe_a0_get_all_hw_features(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ DBGPR("-->xgbe_a0_get_all_hw_features\n");
+
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* The Queue and Channel counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+
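+	/* Override the hardware-reported traffic class count (fixed for this A0 driver) */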
+#define XGBE_TC_CNT 2
+ hw_feat->tc_cnt = XGBE_TC_CNT;
+
+ DBGPR("<--xgbe_a0_get_all_hw_features\n");
+}
+
+static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
+}
+
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
+
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
+}
+
+void xgbe_a0_init_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_a0_init_tx_coalesce\n");
+
+ pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
+
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_a0_init_tx_coalesce\n");
+}
+
+void xgbe_a0_init_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_a0_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+
+ DBGPR("<--xgbe_a0_init_rx_coalesce\n");
+}
+
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_data\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+ desc_if->unmap_rdata(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_tx_data\n");
+}
+
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_data\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+ desc_if->unmap_rdata(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_rx_data\n");
+}
+
+static void xgbe_adjust_link(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct phy_device *phydev = pdata->phydev;
+ int new_state = 0;
+
+ if (!phydev)
+ return;
+
+ if (phydev->link) {
+ /* Flow control support */
+ if (pdata->pause_autoneg) {
+ if (phydev->pause || phydev->asym_pause) {
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ } else {
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ }
+ }
+
+ if (pdata->tx_pause != pdata->phy_tx_pause) {
+ hw_if->config_tx_flow_control(pdata);
+ pdata->phy_tx_pause = pdata->tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy_rx_pause) {
+ hw_if->config_rx_flow_control(pdata);
+ pdata->phy_rx_pause = pdata->rx_pause;
+ }
+
+ /* Speed support */
+ if (phydev->speed != pdata->phy_speed) {
+ new_state = 1;
+
+ switch (phydev->speed) {
+ case SPEED_10000:
+ hw_if->set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ hw_if->set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ hw_if->set_gmii_speed(pdata);
+ break;
+ }
+ pdata->phy_speed = phydev->speed;
+ }
+
+ if (phydev->link != pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 1;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct phy_device *phydev = pdata->phydev;
+ int ret;
+
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_tx_pause = pdata->tx_pause;
+ pdata->phy_rx_pause = pdata->rx_pause;
+
+ ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
+ pdata->phy_mode);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct failed\n");
+ return ret;
+ }
+
+ if (!phydev->drv || (phydev->drv->phy_id == 0)) {
+ netdev_err(netdev, "phy_id not valid\n");
+ ret = -ENODEV;
+ goto err_phy_connect;
+ }
+ DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
+ dev_name(&phydev->dev), phydev->link);
+
+ return 0;
+
+err_phy_connect:
+ phy_disconnect(phydev);
+
+ return ret;
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->phydev)
+ return;
+
+ phy_disconnect(pdata->phydev);
+}
+
+int xgbe_a0_powerdown(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_a0_powerdown\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered down\n");
+ DBGPR("<--xgbe_a0_powerdown\n");
+ return -EINVAL;
+ }
+
+ phy_stop(pdata->phydev);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata, 0);
+
+ /* Powerdown Tx/Rx */
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_a0_powerdown\n");
+
+ return 0;
+}
+
+int xgbe_a0_powerup(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_a0_powerup\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered up\n");
+ DBGPR("<--xgbe_a0_powerup\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ pdata->power_down = 0;
+
+ phy_start(pdata->phydev);
+
+ /* Enable Tx/Rx */
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+ xgbe_napi_enable(pdata, 0);
+ netif_tx_start_all_queues(netdev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_a0_powerup\n");
+
+ return 0;
+}
+
+static int xgbe_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_start\n");
+
+ xgbe_set_rx_mode(netdev);
+
+ hw_if->init(pdata);
+
+ phy_start(pdata->phydev);
+
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ xgbe_init_tx_timers(pdata);
+
+ xgbe_napi_enable(pdata, 1);
+ netif_tx_start_all_queues(netdev);
+
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
+}
+
+static void xgbe_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop\n");
+
+ phy_stop(pdata->phydev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata, 1);
+
+ xgbe_stop_tx_timers(pdata);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+
+ DBGPR("<--xgbe_stop\n");
+}
+
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int i;
+
+ DBGPR("-->xgbe_restart_dev\n");
+
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
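+	/* Wait for any in-flight interrupt handlers to complete */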
+ synchronize_irq(pdata->dev_irq);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ synchronize_irq(channel->dma_irq);
+ }
+
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ xgbe_start(pdata);
+
+ DBGPR("<--xgbe_restart_dev\n");
+}
+
+static void xgbe_restart(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ restart_work);
+
+ rtnl_lock();
+
+ xgbe_restart_dev(pdata);
+
+ rtnl_unlock();
+}
+
+static void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 nsec;
+ unsigned long flags;
+
+ if (pdata->tx_tstamp) {
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ pdata->tx_tstamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(nsec);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ pdata->tx_tstamp_skb = NULL;
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
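+	/* No flag bits are defined for hwtstamp_config, so reject any */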
+ if (config.flags)
+ return -EINVAL;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
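+		/* Fall through - also apply the PTP v1 settings */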
+ /* PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
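+		/* Fall through - also apply the PTP v1 settings */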
+ /* PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
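+		/* Fall through - also apply the PTP v1 settings */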
+ /* PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ pdata->hw_if.config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
+
+static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ skb_tx_timestamp(skb);
+}
+
+static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ if (skb_vlan_tag_present(skb))
+ packet->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ int ret;
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE))
+ return 0;
+
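+	/* Make sure the packet headers are writable (copied if the skb is cloned) */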
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ packet->tcp_header_len = tcp_hdrlen(skb);
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+ DBGPR(" packet->mss=%u\n", packet->mss);
+
+ /* Update the number of packets that will ultimately be transmitted
+ * along with the extra bytes for each extra packet
+ */
+ packet->tx_packets = skb_shinfo(skb)->gso_segs;
+ packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
+ return 0;
+}
+
+static int xgbe_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ DBGPR(" TSO packet to be processed\n");
+
+ return 1;
+}
+
+static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ packet->skb = skb;
+
+ context_desc = 0;
+ packet->rdesc_count = 0;
+
+ packet->tx_packets = 1;
+ packet->tx_bytes = skb->len;
+
+ if (xgbe_is_tso(skb)) {
+ /* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ /* TSO requires an extra descriptor for TSO header */
+ packet->rdesc_count++;
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE, 1);
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+			       CSUM_ENABLE, 1);
+	}
+
+ if (skb_vlan_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ }
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ PTP, 1);
+
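+	/* One Tx descriptor is needed per XGBE_TX_MAX_BUF_SIZE chunk of data */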
+ for (len = skb_headlen(skb); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xgbe_open(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel = NULL;
+ unsigned int i = 0;
+ int ret;
+
+ DBGPR("-->xgbe_open\n");
+
+ /* Initialize the phy */
+ ret = xgbe_phy_init(pdata);
+ if (ret)
+ return ret;
+
+ /* Enable the clocks */
+ ret = clk_prepare_enable(pdata->sysclk);
+ if (ret) {
+ netdev_alert(netdev, "dma clk_prepare_enable failed\n");
+ goto err_phy_init;
+ }
+
+ ret = clk_prepare_enable(pdata->ptpclk);
+ if (ret) {
+ netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
+ goto err_sysclk;
+ }
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ goto err_ptpclk;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the channel and ring structures */
+ ret = xgbe_alloc_channels(pdata);
+ if (ret)
+ goto err_ptpclk;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_channels;
+
+ /* Initialize the device restart and Tx timestamp work struct */
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+
+ /* Request interrupts */
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ goto err_rings;
+ }
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev,
+ "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+ }
+
+ ret = xgbe_start(pdata);
+ if (ret)
+ goto err_start;
+
+ DBGPR("<--xgbe_open\n");
+
+ return 0;
+
+err_start:
+ hw_if->exit(pdata);
+
+err_irq:
+ if (pdata->per_channel_irq) {
+		/* 'i' is unsigned, so it wraps to UINT_MAX below zero and the loop exits */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+err_rings:
+ desc_if->free_ring_resources(pdata);
+
+err_channels:
+ xgbe_free_channels(pdata);
+
+err_ptpclk:
+ clk_disable_unprepare(pdata->ptpclk);
+
+err_sysclk:
+ clk_disable_unprepare(pdata->sysclk);
+
+err_phy_init:
+ xgbe_phy_exit(pdata);
+
+ return ret;
+}
+
+static int xgbe_close(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Free the ring descriptors and buffers */
+ desc_if->free_ring_resources(pdata);
+
+ /* Release the interrupts */
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+
+ /* Disable the clocks */
+ clk_disable_unprepare(pdata->ptpclk);
+ clk_disable_unprepare(pdata->sysclk);
+
+ /* Release the phy */
+ xgbe_phy_exit(pdata);
+
+ DBGPR("<--xgbe_close\n");
+
+ return 0;
+}
+
+static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
+ int ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+ channel = pdata->channel + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+
+ ret = NETDEV_TX_OK;
+
+ if (skb->len == 0) {
+ netdev_err(netdev, "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Calculate preliminary packet info */
+ memset(packet, 0, sizeof(*packet));
+ xgbe_packet_info(pdata, ring, skb, packet);
+
+ /* Check that there are enough descriptors available */
+ ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
+ if (ret)
+ goto tx_netdev_return;
+
+ ret = xgbe_prep_tso(skb, packet);
+ if (ret) {
+ netdev_err(netdev, "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+ xgbe_prep_vlan(skb, packet);
+
+ if (!desc_if->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ xgbe_prep_tx_tstamp(pdata, skb, packet);
+
+ /* Report on the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, packet->tx_bytes);
+
+ /* Configure required descriptor fields for transmission */
+ hw_if->dev_xmit(channel);
+
+#ifdef XGMAC_ENABLE_TX_PKT_DUMP
+ xgbe_a0_print_pkt(netdev, skb, true);
+#endif
+
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
+
+ ret = NETDEV_TX_OK;
+
+tx_netdev_return:
+ return ret;
+}
+
+static void xgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int pr_mode, am_mode;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ hw_if->set_promiscuous_mode(pdata, pr_mode);
+ hw_if->set_all_multicast_mode(pdata, am_mode);
+
+ hw_if->add_mac_addresses(pdata);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+}
+
+static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct sockaddr *saddr = addr;
+
+ DBGPR("-->xgbe_set_mac_address\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_if->set_mac_address(pdata, netdev->dev_addr);
+
+ DBGPR("<--xgbe_set_mac_address\n");
+
+ return 0;
+}
+
+static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ case SIOCSHWTSTAMP:
+ ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_change_mtu\n");
+
+ ret = xgbe_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xgbe_restart_dev(pdata);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+
+ return s;
+}
+
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ set_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xgbe_poll_controller(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_poll_controller\n");
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xgbe_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
+
+ DBGPR("<--xgbe_poll_controller\n");
+}
+#endif /* End CONFIG_NET_POLL_CONTROLLER */
+
+static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int offset, queue;
+ u8 i;
+
+ if (tc && (tc != pdata->hw_feat.tc_cnt))
+ return -EINVAL;
+
+ if (tc) {
+ netdev_set_num_tc(netdev, tc);
+ for (i = 0, queue = 0, offset = 0; i < tc; i++) {
+ while ((queue < pdata->tx_q_count) &&
+ (pdata->q2tc_map[queue] == i))
+ queue++;
+
+ DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
+ netdev_set_tc_queue(netdev, i, queue - offset, offset);
+ offset = queue;
+ }
+ } else {
+ netdev_reset_tc(netdev);
+ }
+
+ return 0;
+}
+
+static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_if->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_if->disable_rss(pdata);
+ if (ret)
+ return ret;
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ hw_if->enable_rx_csum(pdata);
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ hw_if->disable_rx_csum(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_if->enable_rx_vlan_stripping(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+ hw_if->disable_rx_vlan_stripping(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_if->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_if->disable_rx_vlan_filtering(pdata);
+
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+
+ return 0;
+}
+
+static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+ .ndo_start_xmit = xgbe_xmit,
+ .ndo_set_rx_mode = xgbe_set_rx_mode,
+ .ndo_set_mac_address = xgbe_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xgbe_ioctl,
+ .ndo_change_mtu = xgbe_change_mtu,
+ .ndo_get_stats64 = xgbe_get_stats64,
+ .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgbe_poll_controller,
+#endif
+ .ndo_setup_tc = xgbe_setup_tc,
+ .ndo_set_features = xgbe_set_features,
+};
+
+struct net_device_ops *xgbe_a0_get_netdev_ops(void)
+{
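+	/* The ops table is const; cast away constness for callers holding a non-const pointer */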
+ return (struct net_device_ops *)&xgbe_netdev_ops;
+}
+
+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+
+ while (ring->dirty != ring->cur) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset rdata values */
+ desc_if->unmap_rdata(pdata, rdata);
+
+ if (desc_if->map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ hw_if->rx_desc_reset(rdata);
+
+ ring->dirty++;
+ }
+
+	/* Update the Rx Tail Pointer Register with the address of
+	 * the last cleaned entry
+	 */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+}
+
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata,
+ unsigned int *len)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ u8 *packet;
+ unsigned int copy_len;
+
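+	/* Allocate an skb and copy the packet header into its linear area */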
+ skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ packet = page_address(rdata->rx.hdr.pa.pages) +
+ rdata->rx.hdr.pa.pages_offset;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = min(rdata->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ *len -= copy_len;
+
+ return skb;
+}
+
+static int xgbe_tx_poll(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ int processed = 0;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+
+ DBGPR("-->xgbe_tx_poll\n");
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
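+	/* Reclaim completed Tx descriptors, at most XGBE_TX_DESC_MAX_PROC per call */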
+ while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+ (ring->dirty != ring->cur)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+ rdesc = rdata->rdesc;
+
+ if (!hw_if->tx_complete(rdesc))
+ break;
+
+		/* Make sure descriptor fields are read after reading the OWN
+		 * bit
+		 */
+ rmb();
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_a0_dump_tx_desc(ring, ring->dirty, 1, 0);
+#endif
+
+ if (hw_if->is_last_desc(rdesc)) {
+ tx_packets += rdata->tx.packets;
+ tx_bytes += rdata->tx.bytes;
+ }
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_if->unmap_rdata(pdata, rdata);
+ hw_if->tx_desc_reset(rdata);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if (!processed)
+ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_tx_wake_queue(txq);
+ }
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct net_device *netdev = pdata->netdev;
+ struct napi_struct *napi;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *hwtstamps;
+ unsigned int incomplete, error, context_next, context;
+ unsigned int len, put_len, max_len;
+ unsigned int received = 0;
+ int packet_count = 0;
+
+ DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ packet = &ring->packet_data;
+ while (packet_count < budget) {
+ DBGPR(" cur = %d\n", ring->cur);
+
+		/* On the first loop iteration, restore previously saved state if any */
+ if (!received && rdata->state_saved) {
+ incomplete = rdata->state.incomplete;
+ context_next = rdata->state.context_next;
+ skb = rdata->state.skb;
+ error = rdata->state.error;
+ len = rdata->state.len;
+ } else {
+ memset(packet, 0, sizeof(*packet));
+ incomplete = 0;
+ context_next = 0;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
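+		/* Refill Rx buffers once an eighth of the ring is dirty */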
+ if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
+ xgbe_rx_refresh(channel);
+
+ if (hw_if->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+
+ incomplete = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ INCOMPLETE);
+ context_next = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT);
+ context = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT);
+
+ /* Earlier error, just drain the remaining data */
+ if ((incomplete || context_next) && error)
+ goto read_again;
+
+ if (error || packet->errors) {
+ if (packet->errors)
+ DBGPR("Error in received packet\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (!context) {
+ put_len = rdata->rx.len - len;
+ len += put_len;
+
+ if (!skb) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.hdr.dma,
+ rdata->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb = xgbe_create_skb(pdata, rdata, &put_len);
+ if (!skb) {
+ error = 1;
+ goto skip_data;
+ }
+ }
+
+ if (put_len) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.buf.dma,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ put_len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
+ }
+
+skip_data:
+ if (incomplete || context_next)
+ goto read_again;
+
+ if (!skb)
+ goto next_packet;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ DBGPR("packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+#ifdef XGMAC_ENABLE_RX_PKT_DUMP
+ xgbe_a0_print_pkt(netdev, skb, false);
+#endif
+
+ skb_checksum_none_assert(skb);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CSUM_DONE))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
+ u64 nsec;
+
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ packet->rx_tstamp);
+ hwtstamps = skb_hwtstamps(skb);
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ }
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RSS_HASH))
+ skb_set_hash(skb, packet->rss_hash,
+ packet->rss_hash_type);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+ skb_mark_napi_id(skb, napi);
+
+ netdev->last_rx = jiffies;
+ napi_gro_receive(napi, skb);
+
+next_packet:
+ packet_count++;
+ }
+
+ /* Check if we need to save state before leaving */
+ if (received && (incomplete || context_next)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdata->state_saved = 1;
+ rdata->state.incomplete = incomplete;
+ rdata->state.context_next = context_next;
+ rdata->state.skb = skb;
+ rdata->state.len = len;
+ rdata->state.error = error;
+ }
+
+ DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
+
+ return packet_count;
+}
+
+static int xgbe_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+ napi);
+ int processed = 0;
+
+ DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xgbe_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_all_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
+ napi);
+ struct xgbe_channel *channel;
+ int ring_budget;
+ int processed, last_processed;
+ unsigned int i;
+
+ DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
+
+ processed = 0;
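+	/* Split the poll budget evenly across the Rx rings */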
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xgbe_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+
+ DBGPR("<--xgbe_all_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+void xgbe_a0_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
+ unsigned int count, unsigned int flag)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ while (count--) {
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ idx++;
+ }
+}
+
+void xgbe_a0_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+ unsigned int idx)
+{
+ pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+}
+
+void xgbe_a0_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_alert(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+
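+	/* Hex dump 32 bytes per line: double space every 16 bytes, single space every 4 */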
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-ethtool.c
new file mode 100644
index 0000000..165ff1c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-ethtool.c
@@ -0,0 +1,616 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+struct xgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_size;
+ int stat_offset;
+};
+
+#define XGMAC_MMC_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
+ offsetof(struct xgbe_prv_data, mmc_stats._var), \
+ }
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+ XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+ XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+ XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+ XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+ XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+ XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+ XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+ XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+ XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+};
+
+#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ memcpy(data, xgbe_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+{
+ int ret;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XGBE_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ DBGPR("<--%s\n", __func__);
+
+ return ret;
+}
+
+static void xgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_get_pauseparam\n");
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->tx_pause = pdata->tx_pause;
+ pause->rx_pause = pdata->rx_pause;
+
+ DBGPR("<--xgbe_get_pauseparam\n");
+}
+
+static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_set_pauseparam\n");
+
+ DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
+ pause->autoneg, pause->tx_pause, pause->rx_pause);
+
+ pdata->pause_autoneg = pause->autoneg;
+ if (pause->autoneg) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+
+ } else {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ pdata->tx_pause = pause->tx_pause;
+ pdata->rx_pause = pause->rx_pause;
+ }
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_pauseparam\n");
+
+ return ret;
+}
+
+static int xgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_get_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->transceiver = XCVR_EXTERNAL;
+
+ DBGPR("<--xgbe_get_settings\n");
+
+ return ret;
+}
+
+static int xgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ u32 speed;
+ int ret;
+
+ DBGPR("-->xgbe_set_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ if (cmd->phy_address != phydev->addr)
+ return -EINVAL;
+
+ if ((cmd->autoneg != AUTONEG_ENABLE) &&
+ (cmd->autoneg != AUTONEG_DISABLE))
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ switch (speed) {
+ case SPEED_10000:
+ case SPEED_2500:
+ case SPEED_1000:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ }
+
+ cmd->advertising &= phydev->supported;
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ return -EINVAL;
+
+ ret = 0;
+ phydev->autoneg = cmd->autoneg;
+ phydev->speed = speed;
+ phydev->duplex = cmd->duplex;
+ phydev->advertising = cmd->advertising;
+
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_settings\n");
+
+ return ret;
+}
+
+static void xgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
+ drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int riwt;
+
+ DBGPR("-->xgbe_get_coalesce\n");
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ riwt = pdata->rx_riwt;
+ ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ DBGPR("<--xgbe_get_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames, tx_usecs;
+
+ DBGPR("-->xgbe_set_coalesce\n");
+
+	/* Check for unsupported parameters */
+ if ((ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs_irq) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->use_adaptive_rx_coalesce) ||
+ (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) ||
+ (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) ||
+ (ec->tx_coalesce_usecs_low) ||
+ (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) ||
+ (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ /* Can only change rx-frames when interface is down (see
+ * rx_descriptor_init in xgbe-dev.c)
+ */
+ rx_frames = pdata->rx_frames;
+ if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
+ netdev_alert(netdev,
+ "interface must be down to change rx-frames\n");
+ return -EINVAL;
+ }
+
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+
+ /* Use smallest possible value if conversion resulted in zero */
+ if (ec->rx_coalesce_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+ rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
+ netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
+ rx_usecs);
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->rx_desc_count) {
+ netdev_alert(netdev, "rx-frames is limited to %d frames\n",
+ pdata->rx_desc_count);
+ return -EINVAL;
+ }
+
+ tx_usecs = ec->tx_coalesce_usecs;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+ if (tx_frames > pdata->tx_desc_count) {
+ netdev_alert(netdev, "tx-frames is limited to %d frames\n",
+ pdata->tx_desc_count);
+ return -EINVAL;
+ }
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+ pdata->tx_usecs = tx_usecs;
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_set_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = pdata->rx_ring_count;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return sizeof(pdata->rss_key);
+}
+
+static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return ARRAY_SIZE(pdata->rss_table);
+}
+
+static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
+ }
+
+ if (key)
+ memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int ret;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (ret)
+ return ret;
+ }
+
+ if (key) {
+ ret = hw_if->set_rss_hash_key(pdata, key);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pdata->ptp_clock)
+ ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_settings = xgbe_get_settings,
+ .set_settings = xgbe_set_settings,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = xgbe_get_coalesce,
+ .set_coalesce = xgbe_set_coalesce,
+ .get_pauseparam = xgbe_get_pauseparam,
+ .set_pauseparam = xgbe_set_pauseparam,
+ .get_strings = xgbe_get_strings,
+ .get_ethtool_stats = xgbe_get_ethtool_stats,
+ .get_sset_count = xgbe_get_sset_count,
+ .get_rxnfc = xgbe_get_rxnfc,
+ .get_rxfh_key_size = xgbe_get_rxfh_key_size,
+ .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
+ .get_rxfh = xgbe_get_rxfh,
+ .set_rxfh = xgbe_set_rxfh,
+ .get_ts_info = xgbe_get_ts_info,
+};
+
+struct ethtool_ops *xgbe_a0_get_ethtool_ops(void)
+{
+ return (struct ethtool_ops *)&xgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-main.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-main.c
new file mode 100644
index 0000000..a85fb49
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-main.c
@@ -0,0 +1,643 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(XGBE_DRV_VERSION);
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
+unsigned int speed = 0;
+module_param(speed, uint, 0444);
+MODULE_PARM_DESC(speed, " Select operating speed (1=1GbE, 2=2.5GbE, 10=10GbE, any other value implies auto-negotiation)");
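+/* For example, loading the module with "speed=2" disables auto-negotiation
+ * and fixes the link at 2.5GbE; see the mapping in xgbe_default_config().
+ */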
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_default_config\n");
+
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_16;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_16;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+
+ if (speed == 10) {
+ pdata->default_autoneg = AUTONEG_DISABLE;
+ pdata->default_speed = SPEED_10000;
+ } else if (speed == 2) {
+ pdata->default_autoneg = AUTONEG_DISABLE;
+ pdata->default_speed = SPEED_2500;
+ } else if (speed == 1) {
+ pdata->default_autoneg = AUTONEG_DISABLE;
+ pdata->default_speed = SPEED_1000;
+ } else {
+ pdata->default_autoneg = AUTONEG_ENABLE;
+ pdata->default_speed = SPEED_10000;
+ }
+
+ DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_a0_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_a0_init_function_ptrs_desc(&pdata->desc_if);
+}
+
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ struct acpi_device *adev = pdata->adev;
+ struct device *dev = pdata->dev;
+ u32 property;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long data;
+ int cca;
+ int ret;
+
+ /* Obtain the system clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_DMA_FREQ);
+ return ret;
+ }
+ pdata->sysclk_rate = property;
+
+ /* Obtain the PTP clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_PTP_FREQ);
+ return ret;
+ }
+ pdata->ptpclk_rate = property;
+
+ /* Retrieve the device cache coherency value */
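+	/* _CCA may be provided by an ancestor rather than the device itself,
+	 * so walk up the ACPI namespace until the method is found.
+	 */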
+ handle = adev->handle;
+ do {
+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+ if (!ACPI_FAILURE(status)) {
+ cca = data;
+ break;
+ }
+
+ status = acpi_get_parent(handle, &handle);
+ } while (!ACPI_FAILURE(status));
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "error obtaining acpi coherency value\n");
+ return -EINVAL;
+ }
+ pdata->coherent = !!cca;
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+
+ /* Obtain the system clock setting */
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ return PTR_ERR(pdata->sysclk);
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ return PTR_ERR(pdata->ptpclk);
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ /* Retrieve the device cache coherency value */
+ pdata->coherent = of_dma_is_coherent(dev->of_node);
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int xgbe_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct xgbe_hw_if *hw_if;
+ struct xgbe_desc_if *desc_if;
+ struct net_device *netdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const char *phy_mode;
+ unsigned int i;
+ int ret;
+
+ DBGPR("--> xgbe_probe\n");
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->pdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, netdev);
+
+ spin_lock_init(&pdata->lock);
+ mutex_init(&pdata->xpcs_mutex);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+
+ /* Check if we should use ACPI or DT */
+ pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+ pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Retrieve the MAC address */
+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+ pdata->mac_addr,
+ sizeof(pdata->mac_addr));
+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+ &phy_mode);
+ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+ /* Check for per channel interrupt support */
+ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+ pdata->per_channel_irq = 1;
+
+ /* Obtain device settings unique to ACPI/OF */
+ if (pdata->use_acpi)
+ ret = xgbe_acpi_support(pdata);
+ else
+ ret = xgbe_of_support(pdata);
+ if (ret)
+ goto err_io;
+
+ /* Set the DMA coherency values */
+ if (pdata->coherent) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
+
+ /* Set the DMA mask */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ goto err_io;
+ }
+
+ /* Get the device interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq 0 failed\n");
+ goto err_io;
+ }
+ pdata->dev_irq = ret;
+
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+ hw_if = &pdata->hw_if;
+ desc_if = &pdata->desc_if;
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Populate the hardware features */
+ xgbe_a0_get_all_hw_features(pdata);
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues
+ */
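+	/* For example (hypothetical counts), eight online CPUs and four
+	 * hardware Tx channels yield four Tx rings and four Tx queues.
+	 */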
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_q_count = pdata->tx_ring_count;
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real tx queue count\n");
+ goto err_io;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real rx queue count\n");
+ goto err_io;
+ }
+
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
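+	/* Spread the lookup table entries across the active Rx rings in
+	 * round-robin order, e.g. 0, 1, 2, 3, 0, 1, ... with four rings.
+	 */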
+ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->rx_ring_count);
+
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+
+	/* Prepare to register with MDIO */
+ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
+ if (!pdata->mii_bus_id) {
+ dev_err(dev, "failed to allocate mii bus id\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+ ret = xgbe_a0_mdio_register(pdata);
+ if (ret)
+ goto err_bus_id;
+
+ /* Set device operations */
+ netdev->netdev_ops = xgbe_a0_get_netdev_ops();
+ netdev->ethtool_ops = xgbe_a0_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+ netdev->dcbnl_ops = xgbe_a0_get_dcbnl_ops();
+#endif
+
+ /* Set device features */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ xgbe_a0_init_rx_coalesce(pdata);
+ xgbe_a0_init_tx_coalesce(pdata);
+
+ netif_carrier_off(netdev);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_reg_netdev;
+ }
+
+ xgbe_a0_ptp_register(pdata);
+
+ xgbe_a0_debugfs_init(pdata);
+
+ netdev_notice(netdev, "net device enabled\n");
+
+ DBGPR("<-- xgbe_probe\n");
+
+ return 0;
+
+err_reg_netdev:
+ xgbe_a0_mdio_unregister(pdata);
+
+err_bus_id:
+ kfree(pdata->mii_bus_id);
+
+err_io:
+ free_netdev(netdev);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_remove\n");
+
+ xgbe_a0_debugfs_exit(pdata);
+
+ xgbe_a0_ptp_unregister(pdata);
+
+ unregister_netdev(netdev);
+
+ xgbe_a0_mdio_unregister(pdata);
+
+ kfree(pdata->mii_bus_id);
+
+ free_netdev(netdev);
+
+ DBGPR("<--xgbe_remove\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int xgbe_suspend(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (!netif_running(netdev)) {
+		DBGPR("<--xgbe_suspend\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_a0_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_resume\n");
+
+ if (!netif_running(netdev)) {
+		DBGPR("<--xgbe_resume\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_a0_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_a0_acpi_match[] = {
+ { "AMDI8000", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_a0_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgbe_a0_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v0a", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_a0_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
+
+static struct platform_driver xgbe_a0_driver = {
+ .driver = {
+ .name = "amd-xgbe-a0",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = xgbe_a0_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = xgbe_a0_of_match,
+#endif
+ .pm = &xgbe_pm_ops,
+ },
+ .probe = xgbe_probe,
+ .remove = xgbe_remove,
+};
+
+module_platform_driver(xgbe_a0_driver);
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-mdio.c
new file mode 100644
index 0000000..b84d048
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-mdio.c
@@ -0,0 +1,312 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data;
+
+ DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
+ prtad, mmd_reg);
+
+ mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+
+ DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+
+ return mmd_data;
+}
+
+static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
+ u16 mmd_val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data = mmd_val;
+
+ DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
+ prtad, mmd_reg, mmd_data);
+
+ hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+
+ DBGPR_MDIO("<--xgbe_mdio_write\n");
+
+ return 0;
+}
+
+void xgbe_a0_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
+ int i;
+
+ dev_alert(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+	dev_alert(dev, "Phy Id (PHYS ID 1 %#04x) = %#04x\n", MDIO_DEVID1,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+	dev_alert(dev, "Phy Id (PHYS ID 2 %#04x) = %#04x\n", MDIO_DEVID2,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+	dev_alert(dev, "Devices in Package (%#04x) = %#04x\n", MDIO_DEVS1,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+	dev_alert(dev, "Devices in Package (%#04x) = %#04x\n", MDIO_DEVS2,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_alert(dev, "MMD Device Mask = %#x\n",
+ phydev->c45_ids.devices_in_package);
+ for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
+ dev_alert(dev, " MMD %d: ID = %#08x\n", i,
+ phydev->c45_ids.device_ids[i]);
+
+ dev_alert(dev, "\n*************************************************\n");
+}
+
+int xgbe_a0_mdio_register(struct xgbe_prv_data *pdata)
+{
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_a0_mdio_register\n");
+
+ mii = mdiobus_alloc();
+ if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ /* Register on the MDIO bus (don't probe any PHYs) */
+ mii->name = XGBE_PHY_NAME;
+ mii->read = xgbe_mdio_read;
+ mii->write = xgbe_mdio_write;
+ snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
+ mii->priv = pdata;
+ mii->phy_mask = ~0;
+ mii->parent = pdata->dev;
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ goto err_mdiobus_alloc;
+ }
+ DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
+
+ /* Probe the PCS using Clause 45 */
+ phydev = get_phy_device(mii, XGBE_PRTAD, true);
+ if (IS_ERR(phydev) || !phydev ||
+ !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
+ dev_err(pdata->dev, "get_phy_device failed\n");
+		ret = IS_ERR(phydev) ? PTR_ERR(phydev) : -ENOLINK;
+ goto err_mdiobus_register;
+ }
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+
+ ret = phy_device_register(phydev);
+ if (ret) {
+ dev_err(pdata->dev, "phy_device_register failed\n");
+ goto err_phy_device;
+ }
+ if (!phydev->dev.driver) {
+ dev_err(pdata->dev, "phy driver probe failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ /* Add a reference to the PHY driver so it can't be unloaded */
+ pdata->phy_module = phydev->dev.driver->owner;
+ if (!try_module_get(pdata->phy_module)) {
+ dev_err(pdata->dev, "try_module_get failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ pdata->mii = mii;
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ phydev->autoneg = pdata->default_autoneg;
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ phydev->speed = pdata->default_speed;
+ phydev->duplex = DUPLEX_FULL;
+
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+ }
+
+ pdata->phydev = phydev;
+
+ DBGPHY_REGS(pdata);
+
+ DBGPR("<--xgbe_a0_mdio_register\n");
+
+ return 0;
+
+err_phy_device:
+ phy_device_free(phydev);
+
+err_mdiobus_register:
+ mdiobus_unregister(mii);
+
+err_mdiobus_alloc:
+ mdiobus_free(mii);
+
+ return ret;
+}
+
+void xgbe_a0_mdio_unregister(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_a0_mdio_unregister\n");
+
+ pdata->phydev = NULL;
+
+ module_put(pdata->phy_module);
+ pdata->phy_module = NULL;
+
+ mdiobus_unregister(pdata->mii);
+ pdata->mii->priv = NULL;
+
+ mdiobus_free(pdata->mii);
+ pdata->mii = NULL;
+
+ DBGPR("<--xgbe_a0_mdio_unregister\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe-a0/xgbe-ptp.c
new file mode 100644
index 0000000..c53c7b2
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe-ptp.c
@@ -0,0 +1,284 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+{
+ struct xgbe_prv_data *pdata = container_of(cc,
+ struct xgbe_prv_data,
+ tstamp_cc);
+ u64 nsec;
+
+ nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+ return nsec;
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 adjust;
+ u32 addend, diff;
+ unsigned int neg_adjust = 0;
+
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
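+	/* Scale the base addend by delta in parts per billion:
+	 * diff = addend * |delta| / 10^9, then add or subtract the result.
+	 * E.g. with a (hypothetical) addend of 0x33333333 and a delta of
+	 * +1000 ppb, diff works out to 858 and the clock runs ~1 ppm faster.
+	 */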
+ adjust = pdata->tstamp_addend;
+ adjust *= delta;
+ diff = div_u64(adjust, 1000000000UL);
+
+ addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+ pdata->tstamp_addend + diff;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ nsec += delta;
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ *ts = ns_to_timespec(nsec);
+
+ return 0;
+}
+
+static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec_to_ns(ts);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+void xgbe_a0_ptp_register(struct xgbe_prv_data *pdata)
+{
+ struct ptp_clock_info *info = &pdata->ptp_clock_info;
+ struct ptp_clock *clock;
+ struct cyclecounter *cc = &pdata->tstamp_cc;
+ u64 dividend;
+
+ snprintf(info->name, sizeof(info->name), "%s",
+ netdev_name(pdata->netdev));
+ info->owner = THIS_MODULE;
+ info->max_adj = pdata->ptpclk_rate;
+ info->adjfreq = xgbe_adjfreq;
+ info->adjtime = xgbe_adjtime;
+ info->gettime = xgbe_gettime;
+ info->settime = xgbe_settime;
+ info->enable = xgbe_enable;
+
+ clock = ptp_clock_register(info, pdata->dev);
+ if (IS_ERR(clock)) {
+ dev_err(pdata->dev, "ptp_clock_register failed\n");
+ return;
+ }
+
+ pdata->ptp_clock = clock;
+
+ /* Calculate the addend:
+	 *   addend = 2^32 / (PTP ref clock / 50MHz)
+	 *          = (2^32 * 50MHz) / PTP ref clock
+ */
+ dividend = 50000000;
+ dividend <<= 32;
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
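+	/* E.g. a (hypothetical) 250MHz PTP reference clock gives
+	 * addend = (2^32 * 50000000) / 250000000 = 2^32 / 5, which
+	 * truncates to 0x33333333.
+	 */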
+
+ /* Setup the timecounter */
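+	/* get_tstamp_time() already returns nanoseconds (see xgbe_cc_read
+	 * above), so no cyclecounter scaling is needed: mult = 1, shift = 0.
+	 */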
+ cc->read = xgbe_cc_read;
+ cc->mask = CLOCKSOURCE_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Disable all timestamping to start */
+ XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+ pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
+void xgbe_a0_ptp_unregister(struct xgbe_prv_data *pdata)
+{
+ if (pdata->ptp_clock)
+ ptp_clock_unregister(pdata->ptp_clock);
+}
diff --git a/drivers/net/ethernet/amd/xgbe-a0/xgbe.h b/drivers/net/ethernet/amd/xgbe-a0/xgbe.h
new file mode 100644
index 0000000..dd8500d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe-a0/xgbe.h
@@ -0,0 +1,868 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <net/dcbnl.h>
+
+#define XGBE_DRV_NAME "amd-xgbe"
+#define XGBE_DRV_VERSION "0.0.0-a"
+#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define XGBE_TX_DESC_CNT 512
+#define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT 512
+
+#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
+
+#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN 64
+#define XGBE_SKB_ALLOC_SIZE 256
+#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
+
+#define XGBE_MAX_DMA_CHANNELS 16
+#define XGBE_MAX_QUEUES 16
+#define XGBE_DMA_STOP_TIMEOUT 5
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define XGBE_DMA_OS_AXDOMAIN 0x2
+#define XGBE_DMA_OS_ARCACHE 0xb
+#define XGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN 0x3
+#define XGBE_DMA_SYS_ARCACHE 0x0
+#define XGBE_DMA_SYS_AWCACHE 0x0
+
+#define XGBE_DMA_INTERRUPT_MASK 0x31c7
+
+#define XGMAC_MIN_PACKET 60
+#define XGMAC_STD_PACKET_MTU 1500
+#define XGMAC_MAX_STD_PACKET 1518
+#define XGMAC_JUMBO_PACKET_MTU 9000
+#define XGMAC_MAX_JUMBO_PACKET 9018
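+
+/* Derivation note: maximum frame size = MTU + 14-byte Ethernet header +
+ * 4-byte FCS, i.e. 1500 + 18 = 1518 and 9000 + 18 = 9018.
+ */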
+
+/* MDIO bus phy name */
+#define XGBE_PHY_NAME "amd_xgbe_phy_a0"
+#define XGBE_PRTAD 0
+
+/* Common property names */
+#define XGBE_MAC_ADDR_PROPERTY "mac-address"
+#define XGBE_PHY_MODE_PROPERTY "phy-mode"
+#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK "dma_clk"
+#define XGBE_PTP_CLOCK "ptp_clk"
+
+/* ACPI property names */
+#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+
+/* Timestamp support - values based on a 50MHz PTP clock
+ * (50MHz clock => 20 nsec period)
+ */
+#define XGBE_TSTAMP_SSINC 20
+#define XGBE_TSTAMP_SNSINC 0
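+
+/* Worked example (based on the 50MHz figure above): the sub-second
+ * increment is one clock period, 10^9 ns / 50,000,000 Hz = 20 ns, so
+ * SSINC = 20 with no sub-nanosecond remainder (SNSINC = 0).
+ */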
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT 1
+#define XGMAC_IOCTL_CONTEXT 2
+
+#define XGBE_FIFO_MAX 81920
+#define XGBE_FIFO_SIZE_B(x) (x)
+#define XGBE_FIFO_SIZE_KB(x) ((x) * 1024)
+
+#define XGBE_TC_MIN_QUANTUM 10
+
+/* Helper macro for descriptor handling
+ * Always use XGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and must be ANDed with
+ * (descriptor count - 1) of the ring to index to the proper
+ * descriptor data.
+ */
+#define XGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
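+
+/* Illustrative sketch (not driver code): rdesc_count is a power of two, so
+ * masking a free-running index with (rdesc_count - 1) wraps it onto the
+ * ring. For a 512-entry ring:
+ *
+ *	XGBE_GET_DESC_DATA(ring, 511);	// ring->rdata[511]
+ *	XGBE_GET_DESC_DATA(ring, 512);	// wraps to ring->rdata[0]
+ *	XGBE_GET_DESC_DATA(ring, 513);	// wraps to ring->rdata[1]
+ */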
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS 50
+#define XGMAC_INIT_DMA_TX_FRAMES 25
+
+#define XGMAC_MAX_DMA_RIWT 0xff
+#define XGMAC_INIT_DMA_RX_USECS 30
+#define XGMAC_INIT_DMA_RX_FRAMES 25
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Maximum MAC address hash table size (256 bits = 8 32-bit registers) */
+#define XGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE 40
+#define XGBE_RSS_MAX_TABLE_SIZE 256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define XGBE_RSS_HASH_KEY_TYPE 1
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+ struct sk_buff *skb;
+
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ unsigned int rdesc_count;
+ unsigned int length;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+ struct xgbe_page_alloc pa;
+ struct xgbe_page_alloc pa_unmap;
+
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
+/* Tx-related ring data */
+struct xgbe_tx_ring_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related ring data */
+struct xgbe_rx_ring_data {
+ struct xgbe_buffer_data hdr; /* Header locations */
+ struct xgbe_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+ struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
+ dma_addr_t rdesc_dma; /* DMA address of descriptor */
+
+ struct sk_buff *skb; /* Virtual address of SKB */
+ dma_addr_t skb_dma; /* DMA address of SKB data */
+ unsigned int skb_dma_len; /* Length of SKB DMA area */
+
+ struct xgbe_tx_ring_data tx; /* Tx-related data */
+ struct xgbe_rx_ring_data rx; /* Rx-related data */
+
+ unsigned int interrupt; /* Interrupt indicator */
+
+ unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+ * context descriptor) has not been DMA'd yet, the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ unsigned int incomplete;
+ unsigned int context_next;
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
+};
+
+struct xgbe_ring {
+ /* Ring lock - used just for TX rings at the moment */
+ spinlock_t lock;
+
+ /* Per packet related information */
+ struct xgbe_packet_data packet_data;
+
+ /* Virtual/DMA addresses and count of allocated descriptor memory */
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int rdesc_count;
+
+ /* Array of descriptor data corresponding to the descriptor memory
+ * (always use the XGBE_GET_DESC_DATA macro to access this data)
+ */
+ struct xgbe_ring_data *rdata;
+
+ /* Page allocation for RX buffers */
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
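+
+ /* Illustrative note (not driver code): cur and dirty are free-running,
+ * so the number of in-flight descriptors is (cur - dirty), which stays
+ * correct across unsigned wraparound, e.g.:
+ *
+ *	in_flight = ring->cur - ring->dirty;
+ *	tx_free = ring->rdesc_count - in_flight;
+ */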
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int queue_stopped;
+ unsigned int xmit_more;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+ };
+} ____cacheline_aligned;
+
+/* Structure used to describe the descriptor rings associated with
+ * a DMA channel.
+ */
+struct xgbe_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xgbe_prv_data *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ /* Per channel interrupt irq number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct hrtimer tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+} ____cacheline_aligned;
+
+enum xgbe_int {
+ XGMAC_INT_DMA_CH_SR_TI,
+ XGMAC_INT_DMA_CH_SR_TPS,
+ XGMAC_INT_DMA_CH_SR_TBU,
+ XGMAC_INT_DMA_CH_SR_RI,
+ XGMAC_INT_DMA_CH_SR_RBU,
+ XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_TI_RI,
+ XGMAC_INT_DMA_CH_SR_FBE,
+ XGMAC_INT_DMA_ALL,
+};
+
+enum xgbe_int_state {
+ XGMAC_INT_STATE_SAVE,
+ XGMAC_INT_STATE_RESTORE,
+};
+
+enum xgbe_mtl_fifo_size {
+ XGMAC_MTL_FIFO_SIZE_256 = 0x00,
+ XGMAC_MTL_FIFO_SIZE_512 = 0x01,
+ XGMAC_MTL_FIFO_SIZE_1K = 0x03,
+ XGMAC_MTL_FIFO_SIZE_2K = 0x07,
+ XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
+ XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
+ XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
+ XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
+ XGMAC_MTL_FIFO_SIZE_64K = 0xff,
+ XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
+ XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
+};
+
+struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* Rx Stats */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+};
+
+struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+ int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*add_mac_addresses)(struct xgbe_prv_data *);
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+
+ int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*update_vlan_hash_table)(struct xgbe_prv_data *);
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+ int (*set_gmii_speed)(struct xgbe_prv_data *);
+ int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
+ int (*set_xgmii_speed)(struct xgbe_prv_data *);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+ void (*enable_rx)(struct xgbe_prv_data *);
+ void (*disable_rx)(struct xgbe_prv_data *);
+
+ void (*powerup_tx)(struct xgbe_prv_data *);
+ void (*powerdown_tx)(struct xgbe_prv_data *);
+ void (*powerup_rx)(struct xgbe_prv_data *);
+ void (*powerdown_rx)(struct xgbe_prv_data *);
+
+ int (*init)(struct xgbe_prv_data *);
+ int (*exit)(struct xgbe_prv_data *);
+
+ int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
+ int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
+ void (*dev_xmit)(struct xgbe_channel *);
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_reset)(struct xgbe_ring_data *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+ void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct xgbe_prv_data *);
+ int (*config_rx_flow_control)(struct xgbe_prv_data *);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xgbe_prv_data *);
+ int (*config_tx_coalesce)(struct xgbe_prv_data *);
+ unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
+ unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_pblx8)(struct xgbe_prv_data *);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+ void (*read_mmc_stats)(struct xgbe_prv_data *);
+
+ /* For Timestamp config */
+ int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
+ void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
+ void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
+ unsigned int nsec);
+ u64 (*get_tstamp_time)(struct xgbe_prv_data *);
+ u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
+
+ /* For Data Center Bridging config */
+ void (*config_dcb_tc)(struct xgbe_prv_data *);
+ void (*config_dcb_pfc)(struct xgbe_prv_data *);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xgbe_prv_data *);
+ int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
+};
+
+struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
+ struct xgbe_ring_data *);
+ void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+};
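+
+/* Usage sketch (illustrative, not driver code): hw_if and desc_if are ops
+ * tables populated by the xgbe_a0_init_function_ptrs_*() routines declared
+ * at the end of this header, then invoked indirectly, e.g.:
+ *
+ *	xgbe_a0_init_function_ptrs_dev(&pdata->hw_if);
+ *	xgbe_a0_init_function_ptrs_desc(&pdata->desc_if);
+ *	pdata->hw_if.enable_tx(pdata);
+ */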
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advanced Timestamping High Word */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ struct acpi_device *adev;
+ struct device *dev;
+
+ /* ACPI or DT flag */
+ unsigned int use_acpi;
+
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing mutex */
+ struct mutex xpcs_mutex;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ int dev_irq;
+ unsigned int per_channel_irq;
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_desc_if desc_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
+ /* MDIO settings */
+ struct module *phy_module;
+ char *mii_bus_id;
+ struct mii_bus *mii;
+ int mdio_mmd;
+ struct phy_device *phydev;
+ int default_autoneg;
+ int default_speed;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+ unsigned int phy_tx_pause;
+ unsigned int phy_rx_pause;
+
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ struct clk *sysclk;
+ unsigned long sysclk_rate;
+ struct clk *ptpclk;
+ unsigned long ptpclk_rate;
+
+ /* Timestamp support */
+ spinlock_t tstamp_lock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_clock *ptp_clock;
+ struct hwtstamp_config tstamp_config;
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ unsigned int tstamp_addend;
+ struct work_struct tx_tstamp_work;
+ struct sk_buff *tx_tstamp_skb;
+ u64 tx_tstamp;
+
+ /* DCB support */
+ struct ieee_ets *ets;
+ struct ieee_pfc *pfc;
+ unsigned int q2tc_map[XGBE_MAX_QUEUES];
+ unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+ /* Device restart work structure */
+ struct work_struct restart_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
+#endif
+};
+
+/* Function prototypes */
+
+void xgbe_a0_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_a0_init_function_ptrs_desc(struct xgbe_desc_if *);
+struct net_device_ops *xgbe_a0_get_netdev_ops(void);
+struct ethtool_ops *xgbe_a0_get_ethtool_ops(void);
+#ifdef CONFIG_AMD_XGBE_DCB
+const struct dcbnl_rtnl_ops *xgbe_a0_get_dcbnl_ops(void);
+#endif
+
+int xgbe_a0_mdio_register(struct xgbe_prv_data *);
+void xgbe_a0_mdio_unregister(struct xgbe_prv_data *);
+void xgbe_a0_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_a0_ptp_register(struct xgbe_prv_data *);
+void xgbe_a0_ptp_unregister(struct xgbe_prv_data *);
+void xgbe_a0_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
+ unsigned int);
+void xgbe_a0_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+ unsigned int);
+void xgbe_a0_print_pkt(struct net_device *, struct sk_buff *, bool);
+void xgbe_a0_get_all_hw_features(struct xgbe_prv_data *);
+int xgbe_a0_powerup(struct net_device *, unsigned int);
+int xgbe_a0_powerdown(struct net_device *, unsigned int);
+void xgbe_a0_init_rx_coalesce(struct xgbe_prv_data *);
+void xgbe_a0_init_tx_coalesce(struct xgbe_prv_data *);
+
+#ifdef CONFIG_DEBUG_FS
+void xgbe_a0_debugfs_init(struct xgbe_prv_data *);
+void xgbe_a0_debugfs_exit(struct xgbe_prv_data *);
+#else
+static inline void xgbe_a0_debugfs_init(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_a0_debugfs_exit(struct xgbe_prv_data *pdata) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_DESC_DUMP
+#define XGMAC_ENABLE_RX_DESC_DUMP
+#endif
+
+/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_PKT_DUMP
+#define XGMAC_ENABLE_RX_PKT_DUMP
+#endif
+
+/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+#if 0
+#define YDEBUG
+#define YDEBUG_MDIO
+#endif
+
+/* For debug prints */
+#ifdef YDEBUG
+#define DBGPR(x...) pr_alert(x)
+#define DBGPHY_REGS(x...) xgbe_a0_dump_phy_registers(x)
+#else
+#define DBGPR(x...) do { } while (0)
+#define DBGPHY_REGS(x...) do { } while (0)
+#endif
+
+#ifdef YDEBUG_MDIO
+#define DBGPR_MDIO(x...) pr_alert(x)
+#else
+#define DBGPR_MDIO(x...) do { } while (0)
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5d093dc..edb4218 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -82,6 +82,7 @@ static const char version[] =
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/acpi.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2473,6 +2474,14 @@ static struct dev_pm_ops smc_drv_pm_ops = {
.resume = smc_drv_resume,
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id smc91x_acpi_match[] = {
+ { "LNRO0003", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match);
+#endif
+
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
.remove = smc_drv_remove,
@@ -2480,6 +2489,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
.of_match_table = of_match_ptr(smc91x_match),
+ .acpi_match_table = ACPI_PTR(smc91x_acpi_match),
},
};
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 501ea76..92e7644 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -34,4 +34,5 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
+obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy-a0.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
diff --git a/drivers/net/phy/amd-xgbe-phy-a0.c b/drivers/net/phy/amd-xgbe-phy-a0.c
new file mode 100644
index 0000000..93faf9e
--- /dev/null
+++ b/drivers/net/phy/amd-xgbe-phy-a0.c
@@ -0,0 +1,1829 @@
+/*
+ * AMD 10Gb Ethernet PHY driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.0.0-a");
+MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+
+#define XGBE_PHY_ID 0x7996ced0
+#define XGBE_PHY_MASK 0xfffffff0
+
+#define XGBE_PHY_SERDES_RETRY 32
+#define XGBE_PHY_CHANNEL_PROPERTY "amd,serdes-channel"
+#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
+#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+
+#define XGBE_PHY_SPEEDS 3
+#define XGBE_PHY_SPEED_1000 0
+#define XGBE_PHY_SPEED_2500 1
+#define XGBE_PHY_SPEED_10000 2
+
+#define XGBE_AN_INT_CMPLT 0x01
+#define XGBE_AN_INC_LINK 0x02
+#define XGBE_AN_PG_RCV 0x04
+#define XGBE_AN_INT_MASK 0x07
+
+#define XNP_MCF_NULL_MESSAGE 0x001
+#define XNP_ACK_PROCESSED BIT(12)
+#define XNP_MP_FORMATTED BIT(13)
+#define XNP_NP_EXCHANGE BIT(15)
+
+#define XGBE_PHY_RATECHANGE_COUNT 500
+
+#define XGBE_PHY_KR_TRAINING_START 0x01
+#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
+
+#define XGBE_PHY_FEC_ENABLE 0x01
+#define XGBE_PHY_FEC_FORWARD 0x02
+#define XGBE_PHY_FEC_MASK 0x03
+
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FEC_ABILITY
+#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
+#endif
+
+#ifndef MDIO_PMA_10GBR_FEC_CTRL
+#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_AN_KR_CTRL
+#define MDIO_AN_KR_CTRL 0x8003
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+#ifndef MDIO_KR_CTRL_PDETECT
+#define MDIO_KR_CTRL_PDETECT 0x01
+#endif
+
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
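+
+/* Usage sketch (hypothetical values): extract or update a 4-bit field at
+ * bit position 7 without touching the other bits:
+ *
+ *	u16 reg = 0x1234;
+ *	u16 field = GET_BITS(reg, 7, 4);  // bits [10:7]
+ *	SET_BITS(reg, 7, 4, 0xa);         // field becomes 0xa
+ */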
+
+#define XCMU_IOREAD(_priv, _reg) \
+ ioread16((_priv)->cmu_regs + _reg)
+
+#define XCMU_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->cmu_regs + _reg)
+
+#define XRXTX_IOREAD(_priv, _reg) \
+ ioread16((_priv)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
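+
+/* Expansion sketch: the _reg##_##_field token pasting resolves to the
+ * _INDEX and _WIDTH constants defined below, so for example
+ * XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL, 0xa) performs a
+ * read-modify-write of RXTX_REG5 using RXTX_REG5_TXAMP_CNTL_INDEX (7)
+ * and RXTX_REG5_TXAMP_CNTL_WIDTH (4), updating only the Tx amplitude
+ * field.
+ */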
+
+/* SerDes CMU register offsets */
+#define CMU_REG15 0x003c
+#define CMU_REG16 0x0040
+
+/* SerDes CMU register entry bit positions and sizes */
+#define CMU_REG16_TX_RATE_CHANGE_BASE 15
+#define CMU_REG16_RX_RATE_CHANGE_BASE 14
+#define CMU_REG16_RATE_CHANGE_DECR 2
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG2 0x0008
+#define RXTX_REG3 0x000c
+#define RXTX_REG5 0x0014
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG53 0x00d4
+#define RXTX_REG114 0x01c8
+#define RXTX_REG115 0x01cc
+#define RXTX_REG142 0x0238
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG2_RESETB_INDEX 15
+#define RXTX_REG2_RESETB_WIDTH 1
+#define RXTX_REG3_TX_DATA_RATE_INDEX 14
+#define RXTX_REG3_TX_DATA_RATE_WIDTH 2
+#define RXTX_REG3_TX_WORD_MODE_INDEX 11
+#define RXTX_REG3_TX_WORD_MODE_WIDTH 3
+#define RXTX_REG5_TXAMP_CNTL_INDEX 7
+#define RXTX_REG5_TXAMP_CNTL_WIDTH 4
+#define RXTX_REG6_RX_DATA_RATE_INDEX 9
+#define RXTX_REG6_RX_DATA_RATE_WIDTH 2
+#define RXTX_REG6_RX_WORD_MODE_INDEX 11
+#define RXTX_REG6_RX_WORD_MODE_WIDTH 3
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG53_RX_PLLSELECT_INDEX 15
+#define RXTX_REG53_RX_PLLSELECT_WIDTH 1
+#define RXTX_REG53_TX_PLLSELECT_INDEX 14
+#define RXTX_REG53_TX_PLLSELECT_WIDTH 1
+#define RXTX_REG53_PI_SPD_SEL_CDR_INDEX 10
+#define RXTX_REG53_PI_SPD_SEL_CDR_WIDTH 4
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG115_FORCE_LAT_CAL_START_INDEX 2
+#define RXTX_REG115_FORCE_LAT_CAL_START_WIDTH 1
+#define RXTX_REG115_FORCE_SUM_CAL_START_INDEX 1
+#define RXTX_REG115_FORCE_SUM_CAL_START_WIDTH 1
+#define RXTX_REG142_SUM_CALIB_DONE_INDEX 15
+#define RXTX_REG142_SUM_CALIB_DONE_WIDTH 1
+#define RXTX_REG142_SUM_CALIB_ERR_INDEX 14
+#define RXTX_REG142_SUM_CALIB_ERR_WIDTH 1
+#define RXTX_REG142_LAT_CALIB_DONE_INDEX 11
+#define RXTX_REG142_LAT_CALIB_DONE_WIDTH 1
+
+#define RXTX_FULL_RATE 0x0
+#define RXTX_HALF_RATE 0x1
+#define RXTX_FIFTH_RATE 0x3
+#define RXTX_66BIT_WORD 0x7
+#define RXTX_10BIT_WORD 0x1
+#define RXTX_10G_BLWC 0x0
+#define RXTX_1G_BLWC 0x1
+#define RXTX_10G_TX_AMP 0xa
+#define RXTX_1G_TX_AMP 0xf
+#define RXTX_10G_CDR 0x7
+#define RXTX_1G_CDR 0x2
+#define RXTX_10G_PLL 0x1
+#define RXTX_1G_PLL 0x0
+#define RXTX_10G_PQ 0x1e
+#define RXTX_1G_PQ 0xa
+
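+/* The CMU register block appears to be shared by all SerDes channels (note
+ * the per-channel bit offsets in CMU_REG16 above), so access is serialized
+ * with a single driver-wide lock.
+ */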
+DEFINE_SPINLOCK(cmu_lock);
+
+static const u32 amd_xgbe_phy_serdes_blwc[] = {
+ RXTX_1G_BLWC,
+ RXTX_1G_BLWC,
+ RXTX_10G_BLWC,
+};
+
+static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
+ RXTX_1G_CDR,
+ RXTX_1G_CDR,
+ RXTX_10G_CDR,
+};
+
+static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
+ RXTX_1G_PQ,
+ RXTX_1G_PQ,
+ RXTX_10G_PQ,
+};
+
+static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
+ RXTX_1G_TX_AMP,
+ RXTX_1G_TX_AMP,
+ RXTX_10G_TX_AMP,
+};
+
+enum amd_xgbe_phy_an {
+ AMD_XGBE_AN_READY = 0,
+ AMD_XGBE_AN_PAGE_RECEIVED,
+ AMD_XGBE_AN_INCOMPAT_LINK,
+ AMD_XGBE_AN_COMPLETE,
+ AMD_XGBE_AN_NO_LINK,
+ AMD_XGBE_AN_ERROR,
+};
+
+enum amd_xgbe_phy_rx {
+ AMD_XGBE_RX_BPA = 0,
+ AMD_XGBE_RX_XNP,
+ AMD_XGBE_RX_COMPLETE,
+ AMD_XGBE_RX_ERROR,
+};
+
+enum amd_xgbe_phy_mode {
+ AMD_XGBE_MODE_KR,
+ AMD_XGBE_MODE_KX,
+};
+
+enum amd_xgbe_phy_speedset {
+ AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
+ AMD_XGBE_PHY_SPEEDSET_2500_10000,
+};
+
+struct amd_xgbe_phy_priv {
+ struct platform_device *pdev;
+ struct acpi_device *adev;
+ struct device *dev;
+
+ struct phy_device *phydev;
+
+ /* SerDes related mmio resources */
+ struct resource *rxtx_res;
+ struct resource *cmu_res;
+
+ /* SerDes related mmio registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *cmu_regs; /* SerDes CMU CSRs */
+
+ int an_irq;
+ char an_irq_name[IFNAMSIZ + 32];
+ struct work_struct an_irq_work;
+ unsigned int an_irq_allocated;
+
+ unsigned int serdes_channel;
+ unsigned int speed_set;
+
+ /* Maintain link status for re-starting auto-negotiation */
+ unsigned int link;
+
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, second for 2.5GbE and third for 10GbE
+ */
+ u32 serdes_blwc[XGBE_PHY_SPEEDS];
+ u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
+ u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
+ u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum amd_xgbe_phy_an an_result;
+ enum amd_xgbe_phy_an an_state;
+ enum amd_xgbe_phy_rx kr_state;
+ enum amd_xgbe_phy_rx kx_state;
+ struct work_struct an_work;
+ struct workqueue_struct *an_workqueue;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+};
+
+static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret |= XGBE_PHY_KR_TRAINING_ENABLE;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ usleep_range(75, 100);
+
+ ret &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ return 0;
+}
+
+static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u16 val, mask;
+
+ /* Assert Rx and Tx ratechange in CMU_reg16 */
+ val = XCMU_IOREAD(priv, CMU_REG16);
+
+ mask = (1 << (CMU_REG16_TX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR))) |
+ (1 << (CMU_REG16_RX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR)));
+ val |= mask;
+
+ XCMU_IOWRITE(priv, CMU_REG16, val);
+}
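+
+/* Worked example (illustrative): with a base of bit 15 (Tx) and bit 14 (Rx)
+ * decremented by 2 per channel, channel 0 toggles CMU_reg16 bits 15/14,
+ * channel 1 bits 13/12, channel 2 bits 11/10 and channel 3 bits 9/8.
+ */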
+
+static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u16 val, mask;
+ unsigned int wait;
+
+ /* Release Rx and Tx ratechange for proper channel in CMU_reg16 */
+ val = XCMU_IOREAD(priv, CMU_REG16);
+
+ mask = (1 << (CMU_REG16_TX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR))) |
+ (1 << (CMU_REG16_RX_RATE_CHANGE_BASE -
+ (priv->serdes_channel * CMU_REG16_RATE_CHANGE_DECR)));
+ val &= ~mask;
+
+ XCMU_IOWRITE(priv, CMU_REG16, val);
+
+ /* Wait for Rx and Tx ready in CMU_reg15 */
+ mask = (1 << priv->serdes_channel) |
+ (1 << (priv->serdes_channel + 8));
+ wait = XGBE_PHY_RATECHANGE_COUNT;
+ while (wait--) {
+ udelay(50);
+
+ val = XCMU_IOREAD(priv, CMU_REG15);
+ if ((val & mask) == mask)
+ return;
+ }
+
+ netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
+ val);
+}
+
+static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KR/10G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBR;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED10G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 10G speed */
+ spin_lock(&cmu_lock);
+
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_DATA_RATE, RXTX_FULL_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_WORD_MODE, RXTX_66BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL,
+ priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_DATA_RATE, RXTX_FULL_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_WORD_MODE, RXTX_66BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+ priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, RX_PLLSELECT, RXTX_10G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, TX_PLLSELECT, RXTX_10G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, PI_SPD_SEL_CDR,
+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+ priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ spin_unlock(&cmu_lock);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KX/1G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED1G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 2.5G speed */
+ spin_lock(&cmu_lock);
+
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_DATA_RATE, RXTX_HALF_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_WORD_MODE, RXTX_10BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL,
+ priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_DATA_RATE, RXTX_HALF_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_WORD_MODE, RXTX_10BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+ priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, RX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, TX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, PI_SPD_SEL_CDR,
+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+ priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ spin_unlock(&cmu_lock);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KX/1G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED1G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 1G speed */
+ spin_lock(&cmu_lock);
+
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_DATA_RATE, RXTX_FIFTH_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG3, TX_WORD_MODE, RXTX_10BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG5, TXAMP_CNTL,
+ priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_DATA_RATE, RXTX_FIFTH_RATE);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RX_WORD_MODE, RXTX_10BIT_WORD);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
+ priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, RX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, TX_PLLSELECT, RXTX_1G_PLL);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG53, PI_SPD_SEL_CDR,
+ priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
+ priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ spin_unlock(&cmu_lock);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
+ enum amd_xgbe_phy_mode *mode)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+ *mode = AMD_XGBE_MODE_KR;
+ else
+ *mode = AMD_XGBE_MODE_KX;
+
+ return 0;
+}
+
+static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
+{
+ enum amd_xgbe_phy_mode mode;
+
+ if (amd_xgbe_phy_cur_mode(phydev, &mode))
+ return false;
+
+ return (mode == AMD_XGBE_MODE_KR);
+}
+
+static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* If we are in KR switch to KX, and vice-versa */
+ if (amd_xgbe_phy_in_kr_mode(phydev)) {
+ if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ else
+ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+ } else {
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ }
+
+ return ret;
+}
+
+static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
+ enum amd_xgbe_phy_mode mode)
+{
+ enum amd_xgbe_phy_mode cur_mode;
+ int ret;
+
+ ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
+ if (ret)
+ return ret;
+
+ if (mode != cur_mode)
+ ret = amd_xgbe_phy_switch_mode(phydev);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
+ bool restart)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ ret |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ ret |= MDIO_AN_CTRL1_RESTART;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
+{
+ return amd_xgbe_phy_set_an(phydev, true, true);
+}
+
+static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
+{
+ return amd_xgbe_phy_set_an(phydev, false, false);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ad_reg, lp_reg, ret;
+
+ *state = AMD_XGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!amd_xgbe_phy_in_kr_mode(phydev))
+ return AMD_XGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret &= ~XGBE_PHY_FEC_MASK;
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ ret |= priv->fec_ability;
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
+
+ /* Start KR training */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
+ ret |= XGBE_PHY_KR_TRAINING_START;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ ret);
+ }
+
+ return AMD_XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ u16 msg;
+
+ *state = AMD_XGBE_RX_XNP;
+
+ msg = XNP_MCF_NULL_MESSAGE;
+ msg |= XNP_MP_FORMATTED;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AMD_XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ unsigned int link_support;
+ int ret, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
+ if (!(ret & link_support))
+ return AMD_XGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
+ amd_xgbe_an_tx_xnp(phydev, state) :
+ amd_xgbe_an_tx_training(phydev, state);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
+ amd_xgbe_an_tx_xnp(phydev, state) :
+ amd_xgbe_an_tx_training(phydev, state);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_rx *state;
+ int ret;
+
+ state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
+ : &priv->kx_state;
+
+ switch (*state) {
+ case AMD_XGBE_RX_BPA:
+ ret = amd_xgbe_an_rx_bpa(phydev, state);
+ break;
+
+ case AMD_XGBE_RX_XNP:
+ ret = amd_xgbe_an_rx_xnp(phydev, state);
+ break;
+
+ default:
+ ret = AMD_XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Be sure we aren't looping trying to negotiate */
+ if (amd_xgbe_phy_in_kr_mode(phydev)) {
+ priv->kr_state = AMD_XGBE_RX_ERROR;
+
+ if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
+ !(phydev->supported & SUPPORTED_2500baseX_Full))
+ return AMD_XGBE_AN_NO_LINK;
+
+ if (priv->kx_state != AMD_XGBE_RX_BPA)
+ return AMD_XGBE_AN_NO_LINK;
+ } else {
+ priv->kx_state = AMD_XGBE_RX_ERROR;
+
+ if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
+ return AMD_XGBE_AN_NO_LINK;
+
+ if (priv->kr_state != AMD_XGBE_RX_BPA)
+ return AMD_XGBE_AN_NO_LINK;
+ }
+
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret)
+ return AMD_XGBE_AN_ERROR;
+
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (ret)
+ return AMD_XGBE_AN_ERROR;
+
+ ret = amd_xgbe_phy_restart_an(phydev);
+ if (ret)
+ return AMD_XGBE_AN_ERROR;
+
+ return AMD_XGBE_AN_INCOMPAT_LINK;
+}
+
+static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
+{
+ struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
+
+ /* Interrupt reason must be read and cleared outside of IRQ context */
+ disable_irq_nosync(priv->an_irq);
+
+ queue_work(priv->an_workqueue, &priv->an_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_xgbe_an_irq_work(struct work_struct *work)
+{
+ struct amd_xgbe_phy_priv *priv = container_of(work,
+ struct amd_xgbe_phy_priv,
+ an_irq_work);
+
+ /* Avoid a race between enabling the IRQ and exiting the work by
+ * waiting for the work to finish and then queueing it
+ */
+ flush_work(&priv->an_work);
+ queue_work(priv->an_workqueue, &priv->an_work);
+}
+
+static void amd_xgbe_an_state_machine(struct work_struct *work)
+{
+ struct amd_xgbe_phy_priv *priv = container_of(work,
+ struct amd_xgbe_phy_priv,
+ an_work);
+ struct phy_device *phydev = priv->phydev;
+ enum amd_xgbe_phy_an cur_state = priv->an_state;
+ int int_reg, int_mask;
+
+ mutex_lock(&priv->an_mutex);
+
+ /* Read the interrupt */
+ int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
+ if (!int_reg)
+ goto out;
+
+next_int:
+ if (int_reg < 0) {
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ int_mask = XGBE_AN_INT_MASK;
+ } else if (int_reg & XGBE_AN_PG_RCV) {
+ priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
+ int_mask = XGBE_AN_PG_RCV;
+ } else if (int_reg & XGBE_AN_INC_LINK) {
+ priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
+ int_mask = XGBE_AN_INC_LINK;
+ } else if (int_reg & XGBE_AN_INT_CMPLT) {
+ priv->an_state = AMD_XGBE_AN_COMPLETE;
+ int_mask = XGBE_AN_INT_CMPLT;
+ } else {
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ int_mask = 0;
+ }
+
+ /* Clear the interrupt to be processed */
+ int_reg &= ~int_mask;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
+
+ priv->an_result = priv->an_state;
+
+again:
+ cur_state = priv->an_state;
+
+ switch (priv->an_state) {
+ case AMD_XGBE_AN_READY:
+ priv->an_supported = 0;
+ break;
+
+ case AMD_XGBE_AN_PAGE_RECEIVED:
+ priv->an_state = amd_xgbe_an_page_received(phydev);
+ priv->an_supported++;
+ break;
+
+ case AMD_XGBE_AN_INCOMPAT_LINK:
+ priv->an_supported = 0;
+ priv->parallel_detect = 0;
+ priv->an_state = amd_xgbe_an_incompat_link(phydev);
+ break;
+
+ case AMD_XGBE_AN_COMPLETE:
+ priv->parallel_detect = priv->an_supported ? 0 : 1;
+ netdev_dbg(phydev->attached_dev, "%s successful\n",
+ priv->an_supported ? "Auto negotiation"
+ : "Parallel detection");
+ break;
+
+ case AMD_XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ }
+
+ if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
+ /* Disable auto-negotiation for now - it will be
+ * re-enabled once a link is established
+ */
+ amd_xgbe_phy_disable_an(phydev);
+
+ int_reg = 0;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
+ netdev_err(phydev->attached_dev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ int_reg = 0;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ }
+
+ if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
+ priv->an_result = priv->an_state;
+ priv->an_state = AMD_XGBE_AN_READY;
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ priv->kx_state = AMD_XGBE_RX_BPA;
+ }
+
+ if (cur_state != priv->an_state)
+ goto again;
+
+ if (int_reg)
+ goto next_int;
+
+out:
+ enable_irq(priv->an_irq);
+
+ mutex_unlock(&priv->an_mutex);
+}
+
+static int amd_xgbe_an_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Set up Advertisement register 3 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->supported & SUPPORTED_10000baseR_FEC)
+ ret |= 0xc000;
+ else
+ ret &= ~0xc000;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
+
+ /* Set up Advertisement register 2 next */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret |= 0x80;
+ else
+ ret &= ~0x80;
+
+ if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+ (phydev->supported & SUPPORTED_2500baseX_Full))
+ ret |= 0x20;
+ else
+ ret &= ~0x20;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
+
+ /* Set up Advertisement register 1 last */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->supported & SUPPORTED_Pause)
+ ret |= 0x400;
+ else
+ ret &= ~0x400;
+
+ if (phydev->supported & SUPPORTED_Asym_Pause)
+ ret |= 0x800;
+ else
+ ret &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ ret &= ~XNP_NP_EXCHANGE;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
+{
+ int count, ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_CTRL1_RESET;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ count = 50;
+ do {
+ msleep(20);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while ((ret & MDIO_CTRL1_RESET) && --count);
+
+ if (ret & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ /* Disable auto-negotiation for now */
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear auto-negotiation interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_config_init(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ struct net_device *netdev = phydev->attached_dev;
+ int ret;
+
+ if (!priv->an_irq_allocated) {
+ /* Allocate the auto-negotiation workqueue and interrupt */
+ snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
+ "%s-pcs", netdev_name(netdev));
+
+ priv->an_workqueue =
+ create_singlethread_workqueue(priv->an_irq_name);
+ if (!priv->an_workqueue) {
+ netdev_err(netdev, "phy workqueue creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_request_irq(priv->dev, priv->an_irq,
+ amd_xgbe_an_isr, 0, priv->an_irq_name,
+ priv);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ destroy_workqueue(priv->an_workqueue);
+ return ret;
+ }
+
+ priv->an_irq_allocated = 1;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
+ if (ret < 0)
+ return ret;
+ priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
+
+ /* Initialize supported features */
+ phydev->supported = SUPPORTED_Autoneg;
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->supported |= SUPPORTED_Backplane;
+ phydev->supported |= SUPPORTED_10000baseKR_Full;
+ switch (priv->speed_set) {
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ phydev->supported |= SUPPORTED_1000baseKX_Full;
+ break;
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ phydev->supported |= SUPPORTED_2500baseX_Full;
+ break;
+ }
+
+ if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
+ phydev->supported |= SUPPORTED_10000baseR_FEC;
+
+ phydev->advertising = phydev->supported;
+
+ /* Set initial mode - call the mode setting routines
+ * directly to ensure we are properly configured
+ */
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ else if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ else if (phydev->supported & SUPPORTED_2500baseX_Full)
+ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+ else
+ ret = -EINVAL;
+ if (ret < 0)
+ return ret;
+
+ /* Set up advertisement registers based on current settings */
+ ret = amd_xgbe_an_init(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable auto-negotiation interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable auto-negotiation */
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Validate/Set specified speed */
+ switch (phydev->speed) {
+ case SPEED_10000:
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
+ break;
+
+ case SPEED_2500:
+ case SPEED_1000:
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* Validate duplex mode */
+ if (phydev->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+}
+
+static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return amd_xgbe_phy_setup_forced(phydev);
+
+ /* Make sure we have the AN MMD present */
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+ /* Disable auto-negotiation interrupt */
+ disable_irq(priv->an_irq);
+
+ /* Start auto-negotiation in a supported mode */
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
+ else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+ (phydev->supported & SUPPORTED_2500baseX_Full))
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
+ else
+ ret = -EINVAL;
+ if (ret < 0) {
+ enable_irq(priv->an_irq);
+ return ret;
+ }
+
+ /* Disable and stop any in-progress auto-negotiation */
+ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clear any auto-negotiation interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ priv->an_result = AMD_XGBE_AN_READY;
+ priv->an_state = AMD_XGBE_AN_READY;
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ priv->kx_state = AMD_XGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(priv->an_irq);
+
+ /* Set up advertisement registers based on current settings */
+ ret = amd_xgbe_an_init(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable parallel detection and start auto-negotiation */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_KR_CTRL_PDETECT;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
+
+ return amd_xgbe_phy_restart_an(phydev);
+}
+
+static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ mutex_lock(&priv->an_mutex);
+
+ ret = __amd_xgbe_phy_config_aneg(phydev);
+
+ mutex_unlock(&priv->an_mutex);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ return (priv->an_result == AMD_XGBE_AN_COMPLETE);
+}
+
+static int amd_xgbe_phy_update_link(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ unsigned int check_again, autoneg;
+ int ret;
+
+ /* If we're doing auto-negotiation, don't report link down */
+ if (priv->an_state != AMD_XGBE_AN_READY) {
+ phydev->link = 1;
+ return 0;
+ }
+
+ /* Since the device can be in the wrong mode when a link is
+ * (re-)established (cable connected after the interface is
+ * up, etc.), the link status may report no link. If there
+ * is no link, try switching modes and checking the status
+ * again if auto-negotiation is enabled.
+ */
+ check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
+again:
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (!phydev->link) {
+ if (check_again) {
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (ret < 0)
+ return ret;
+ check_again = 0;
+ goto again;
+ }
+ }
+
+ autoneg = (phydev->link && !priv->link) ? 1 : 0;
+ priv->link = phydev->link;
+ if (autoneg) {
+ /* Link is (back) up, re-start auto-negotiation */
+ ret = amd_xgbe_phy_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amd_xgbe_phy_read_status(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret, ad_ret, lp_ret;
+
+ ret = amd_xgbe_phy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ if ((phydev->autoneg == AUTONEG_ENABLE) &&
+ !priv->parallel_detect) {
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+ if (!amd_xgbe_phy_aneg_done(phydev))
+ return 0;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_ret < 0)
+ return ad_ret;
+ lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_ret < 0)
+ return lp_ret;
+
+ ad_ret &= lp_ret;
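+ /* D10/D11 of the clause 73 base page advertise pause and asym pause */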
+ phydev->pause = (ad_ret & 0x400) ? 1 : 0;
+ phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_ADVERTISE + 1);
+ if (ad_ret < 0)
+ return ad_ret;
+ lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_ret < 0)
+ return lp_ret;
+
+ ad_ret &= lp_ret;
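+ /* D23 of the base page is the 10GBASE-KR technology ability */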
+ if (ad_ret & 0x80) {
+ phydev->speed = SPEED_10000;
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
+ if (ret)
+ return ret;
+ } else {
+ switch (priv->speed_set) {
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+
+ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
+ if (ret)
+ return ret;
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+ } else {
+ if (amd_xgbe_phy_in_kr_mode(phydev)) {
+ phydev->speed = SPEED_10000;
+ } else {
+ switch (priv->speed_set) {
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+ }
+ phydev->duplex = DUPLEX_FULL;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ }
+
+ return 0;
+}
+
+static int amd_xgbe_phy_suspend(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ goto unlock;
+
+ priv->lpm_ctrl = ret;
+
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_resume(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ mutex_lock(&phydev->lock);
+
+ priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
+
+ mutex_unlock(&phydev->lock);
+
+ return 0;
+}
+
+static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ if (type == resource_type(r))
+ count++;
+ }
+
+ return count;
+}
+
+static int amd_xgbe_phy_probe(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv;
+ struct platform_device *phy_pdev;
+ struct device *dev, *phy_dev;
+ unsigned int phy_resnum, phy_irqnum;
+ int ret;
+
+ if (!phydev->bus || !phydev->bus->parent)
+ return -EINVAL;
+
+ dev = phydev->bus->parent;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = to_platform_device(dev);
+ priv->adev = ACPI_COMPANION(dev);
+ priv->dev = dev;
+ priv->phydev = phydev;
+ mutex_init(&priv->an_mutex);
+ INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
+ INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
+
+ if (!priv->adev || acpi_disabled) {
+ struct device_node *bus_node;
+ struct device_node *phy_node;
+
+ bus_node = priv->dev->of_node;
+ phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev, "unable to parse phy-handle\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ phy_resnum = 0;
+ phy_irqnum = 0;
+ } else {
+ /* In ACPI, the XGBE and PHY resources are grouped
+ * together, with the PHY resources at the end
+ */
+ phy_pdev = priv->pdev;
+ phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
+ IORESOURCE_MEM) - 2;
+ phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
+ IORESOURCE_IRQ) - 1;
+ }
+ phy_dev = &phy_pdev->dev;
+
+ /* Get the device mmio areas */
+ priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
+ priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
+ if (IS_ERR(priv->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(priv->rxtx_regs);
+ goto err_put;
+ }
+
+ /* All xgbe phy devices share the CMU registers, so retrieve
+ * the resource and do the ioremap directly rather than using
+ * the devm_ioremap_resource call
+ */
+ priv->cmu_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
+ phy_resnum++);
+ if (!priv->cmu_res) {
+ dev_err(dev, "cmu invalid resource\n");
+ ret = -EINVAL;
+ goto err_rxtx;
+ }
+ priv->cmu_regs = devm_ioremap_nocache(dev, priv->cmu_res->start,
+ resource_size(priv->cmu_res));
+ if (!priv->cmu_regs) {
+ dev_err(dev, "cmu ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_rxtx;
+ }
+
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ goto err_cmu;
+ }
+ if (priv->adev && !acpi_disabled && !phy_irqnum) {
+ struct irq_data *d = irq_get_irq_data(ret);
+ if (!d) {
+ dev_err(dev, "unable to set AN interrupt\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+#ifdef CONFIG_ACPI
+ ret = acpi_register_gsi(dev, d->hwirq - 2,
+ ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+#else
+ ret = -EINVAL;
+#endif
+ if (ret < 0) {
+ dev_err(dev, "unable to set AN interrupt\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+ priv->an_irq = ret;
+
+ /* Get the device serdes channel property */
+ ret = device_property_read_u32(phy_dev, XGBE_PHY_CHANNEL_PROPERTY,
+ &priv->serdes_channel);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_CHANNEL_PROPERTY);
+ goto err_cmu;
+ }
+
+ /* Get the device speed set property */
+ ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
+ &priv->speed_set);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_SPEEDSET_PROPERTY);
+ goto err_cmu;
+ }
+
+ switch (priv->speed_set) {
+ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ break;
+ default:
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_SPEEDSET_PROPERTY);
+ ret = -EINVAL;
+ goto err_cmu;
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_BLWC_PROPERTY,
+ priv->serdes_blwc,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_BLWC_PROPERTY);
+ goto err_cmu;
+ }
+ } else {
+ memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
+ sizeof(priv->serdes_blwc));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_CDR_RATE_PROPERTY,
+ priv->serdes_cdr_rate,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_CDR_RATE_PROPERTY);
+ goto err_cmu;
+ }
+ } else {
+ memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
+ sizeof(priv->serdes_cdr_rate));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_PQ_SKEW_PROPERTY,
+ priv->serdes_pq_skew,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_PQ_SKEW_PROPERTY);
+ goto err_cmu;
+ }
+ } else {
+ memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
+ sizeof(priv->serdes_pq_skew));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_TX_AMP_PROPERTY,
+ priv->serdes_tx_amp,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_TX_AMP_PROPERTY);
+ goto err_cmu;
+ }
+ } else {
+ memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
+ sizeof(priv->serdes_tx_amp));
+ }
+
+ priv->link = 1;
+
+ phydev->priv = priv;
+
+ if (!priv->adev || acpi_disabled)
+ platform_device_put(phy_pdev);
+
+ return 0;
+
+err_cmu:
+ devm_iounmap(dev, priv->cmu_regs);
+
+err_rxtx:
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+err_put:
+ if (!priv->adev || acpi_disabled)
+ platform_device_put(phy_pdev);
+
+err_priv:
+ devm_kfree(dev, priv);
+
+ return ret;
+}
+
+static void amd_xgbe_phy_remove(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ struct device *dev = priv->dev;
+
+ if (priv->an_irq_allocated) {
+ devm_free_irq(dev, priv->an_irq, priv);
+
+ flush_workqueue(priv->an_workqueue);
+ destroy_workqueue(priv->an_workqueue);
+ }
+
+ devm_iounmap(dev, priv->cmu_regs);
+
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+ devm_kfree(dev, priv);
+}
+
+static int amd_xgbe_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
+}
+
+static struct phy_driver amd_xgbe_phy_a0_driver[] = {
+ {
+ .phy_id = XGBE_PHY_ID,
+ .phy_id_mask = XGBE_PHY_MASK,
+ .name = "AMD XGBE PHY A0",
+ .features = 0,
+ .probe = amd_xgbe_phy_probe,
+ .remove = amd_xgbe_phy_remove,
+ .soft_reset = amd_xgbe_phy_soft_reset,
+ .config_init = amd_xgbe_phy_config_init,
+ .suspend = amd_xgbe_phy_suspend,
+ .resume = amd_xgbe_phy_resume,
+ .config_aneg = amd_xgbe_phy_config_aneg,
+ .aneg_done = amd_xgbe_phy_aneg_done,
+ .read_status = amd_xgbe_phy_read_status,
+ .match_phy_device = amd_xgbe_match_phy_device,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+module_phy_driver(amd_xgbe_phy_a0_driver);
+
+static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids_a0[] = {
+ { XGBE_PHY_ID, XGBE_PHY_MASK },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids_a0);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7a8f1c5..32e0a73 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -22,6 +22,13 @@ config PCI_MSI_IRQ_DOMAIN
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
+config PCI_ECAM
+ bool "Enhanced Configuration Access Mechanism (ECAM)"
+ depends on PCI
+
+config PCI_ECAM_GENERIC
+ bool
+
config PCI_DEBUG
bool "PCI Debugging"
depends on PCI && DEBUG_KERNEL
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 73e4af4..ce7b630 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -41,6 +41,11 @@ obj-$(CONFIG_SPARC_LEON) += setup-irq.o
obj-$(CONFIG_M68K) += setup-irq.o
#
+# Enhanced Configuration Access Mechanism (ECAM)
+#
+obj-$(CONFIG_PCI_ECAM) += ecam.o
+
+#
# ACPI Related PCI FW Functions
# ACPI _DSM provided firmware instance and string name
#
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
new file mode 100644
index 0000000..bcb0c2f
--- /dev/null
+++ b/drivers/pci/ecam.c
@@ -0,0 +1,361 @@
+/*
+ * Arch agnostic direct PCI config space access via
+ * ECAM (Enhanced Configuration Access Mechanism)
+ *
+ * Per-architecture code takes care of the mappings, region validation and
+ * accesses themselves.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/ecam.h>
+
+#define PREFIX "PCI ECAM: "
+
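+/*
+ * pci_ecam_list is traversed under rcu_read_lock() by the config space
+ * accessors; updates are serialized by pci_ecam_lock and use the
+ * list_*_rcu() helpers.
+ */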
+static DEFINE_MUTEX(pci_ecam_lock);
+
+LIST_HEAD(pci_ecam_list);
+
+extern struct acpi_mcfg_fixup __start_acpi_mcfg_fixups[];
+extern struct acpi_mcfg_fixup __end_acpi_mcfg_fixups[];
+
+#ifdef CONFIG_PCI_ECAM_GENERIC
+int pci_ecam_read(unsigned int seg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *value)
+{
+ struct pci_ecam_region *cfg;
+ char __iomem *addr;
+
+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
+err: *value = -1;
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ cfg = pci_ecam_lookup(seg, bus);
+ if (!cfg || !cfg->virt) {
+ rcu_read_unlock();
+ goto err;
+ }
+
+ if (cfg->read)
+ (*cfg->read)(cfg, bus, devfn, reg, len, value);
+ else {
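+ /* ECAM: bus in address bits [27:20], devfn in [19:12], reg in [11:0] */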
+ addr = cfg->virt + (PCI_ECAM_BUS_OFFSET(bus) | (devfn << 12));
+ *value = pci_mmio_read(len, addr + reg);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+int pci_ecam_write(unsigned int seg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 value)
+{
+ struct pci_ecam_region *cfg;
+ char __iomem *addr;
+
+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
+ return -EINVAL;
+
+ rcu_read_lock();
+ cfg = pci_ecam_lookup(seg, bus);
+ if (!cfg || !cfg->virt) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (cfg->write)
+ (*cfg->write)(cfg, bus, devfn, reg, len, value);
+ else {
+ addr = cfg->virt + (PCI_ECAM_BUS_OFFSET(bus) | (devfn << 12));
+ pci_mmio_write(len, addr + reg, value);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void __iomem *pci_ecam_ioremap(struct pci_ecam_region *cfg)
+{
+ void __iomem *addr;
+ u64 start, size;
+ int num_buses;
+
+ start = cfg->address + PCI_ECAM_BUS_OFFSET(cfg->start_bus);
+ num_buses = cfg->end_bus - cfg->start_bus + 1;
+ size = PCI_ECAM_BUS_OFFSET(num_buses);
+ addr = ioremap_nocache(start, size);
+ if (addr)
+ addr -= PCI_ECAM_BUS_OFFSET(cfg->start_bus);
+ return addr;
+}
+
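+/* Returns 1 when every region was mapped, 0 on failure (mappings are torn down) */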
+int __init pci_ecam_arch_init(void)
+{
+ struct pci_ecam_region *cfg;
+
+ list_for_each_entry(cfg, &pci_ecam_list, list)
+ if (pci_ecam_arch_map(cfg)) {
+ pci_ecam_arch_free();
+ return 0;
+ }
+
+ return 1;
+}
+
+void __init pci_ecam_arch_free(void)
+{
+ struct pci_ecam_region *cfg;
+
+ list_for_each_entry(cfg, &pci_ecam_list, list)
+ pci_ecam_arch_unmap(cfg);
+}
+
+int pci_ecam_arch_map(struct pci_ecam_region *cfg)
+{
+ cfg->virt = pci_ecam_ioremap(cfg);
+ if (!cfg->virt) {
+ pr_err(PREFIX "can't map ECAM at %pR\n", &cfg->res);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void pci_ecam_arch_unmap(struct pci_ecam_region *cfg)
+{
+ if (cfg && cfg->virt) {
+ iounmap(cfg->virt + PCI_ECAM_BUS_OFFSET(cfg->start_bus));
+ cfg->virt = NULL;
+ }
+}
+#endif
+
+static u32
+pci_ecam_generic_read(int len, void __iomem *addr)
+{
+ u32 data = 0;
+
+ switch (len) {
+ case 1:
+ data = readb(addr);
+ break;
+ case 2:
+ data = readw(addr);
+ break;
+ case 4:
+ data = readl(addr);
+ break;
+ }
+
+ return data;
+}
+
+static void
+pci_ecam_generic_write(int len, void __iomem *addr, u32 value)
+{
+ switch (len) {
+ case 1:
+ writeb(value, addr);
+ break;
+ case 2:
+ writew(value, addr);
+ break;
+ case 4:
+ writel(value, addr);
+ break;
+ }
+}
+
+static struct pci_ecam_mmio_ops pci_ecam_mmio_default = {
+ .read = pci_ecam_generic_read,
+ .write = pci_ecam_generic_write,
+};
+
+static struct pci_ecam_mmio_ops *pci_ecam_mmio = &pci_ecam_mmio_default;
+
+void
+pci_ecam_register_mmio(struct pci_ecam_mmio_ops *ops)
+{
+ pci_ecam_mmio = ops;
+}
+
+u32
+pci_mmio_read(int len, void __iomem *addr)
+{
+ if (!pci_ecam_mmio) {
+ pr_err("PCI config space has no accessors !");
+ return 0;
+ }
+
+ return pci_ecam_mmio->read(len, addr);
+}
+
+void
+pci_mmio_write(int len, void __iomem *addr, u32 value)
+{
+ if (!pci_ecam_mmio) {
+ pr_err("PCI config space has no accessors !");
+ return;
+ }
+
+ pci_ecam_mmio->write(len, addr, value);
+}
+
+static void __init pci_ecam_remove(struct pci_ecam_region *cfg)
+{
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ list_del(&cfg->list);
+ kfree(cfg);
+}
+
+void __init pci_ecam_free_all(void)
+{
+ struct pci_ecam_region *cfg, *tmp;
+
+ pci_ecam_arch_free();
+ list_for_each_entry_safe(cfg, tmp, &pci_ecam_list, list)
+ pci_ecam_remove(cfg);
+}
+
+void pci_ecam_list_add_sorted(struct pci_ecam_region *new)
+{
+ struct pci_ecam_region *cfg;
+
+ /* keep list sorted by segment and starting bus number */
+ list_for_each_entry_rcu(cfg, &pci_ecam_list, list) {
+ if (cfg->segment > new->segment ||
+ (cfg->segment == new->segment &&
+ cfg->start_bus >= new->start_bus)) {
+ list_add_tail_rcu(&new->list, &cfg->list);
+ return;
+ }
+ }
+ list_add_tail_rcu(&new->list, &pci_ecam_list);
+}
+
+struct pci_ecam_region *pci_ecam_alloc(int segment, int start,
+ int end, u64 addr)
+{
+ struct pci_ecam_region *new;
+ struct resource *res;
+
+ if (addr == 0)
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->address = addr;
+ new->segment = segment;
+ new->start_bus = start;
+ new->end_bus = end;
+
+ res = &new->res;
+ res->start = addr + PCI_ECAM_BUS_OFFSET(start);
+ res->end = addr + PCI_ECAM_BUS_OFFSET(end + 1) - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ snprintf(new->name, PCI_ECAM_RESOURCE_NAME_LEN,
+ "PCI ECAM %04x [bus %02x-%02x]", segment, start, end);
+ res->name = new->name;
+
+ return new;
+}
+
+struct pci_ecam_region *pci_ecam_add(int segment, int start,
+ int end, u64 addr)
+{
+ struct pci_ecam_region *new;
+
+ new = pci_ecam_alloc(segment, start, end, addr);
+ if (new) {
+ mutex_lock(&pci_ecam_lock);
+ pci_ecam_list_add_sorted(new);
+ mutex_unlock(&pci_ecam_lock);
+
+ pr_info(PREFIX
+ "ECAM for domain %04x [bus %02x-%02x] at %pR "
+ "(base %#lx)\n",
+ segment, start, end, &new->res, (unsigned long)addr);
+ }
+
+ return new;
+}
+
+struct pci_ecam_region *pci_ecam_lookup(int segment, int bus)
+{
+ struct pci_ecam_region *cfg;
+
+ list_for_each_entry_rcu(cfg, &pci_ecam_list, list)
+ if (cfg->segment == segment &&
+ cfg->start_bus <= bus && bus <= cfg->end_bus)
+ return cfg;
+
+ return NULL;
+}
+
+/* Delete ECAM information for host bridges */
+int pci_ecam_delete(u16 seg, u8 start, u8 end)
+{
+ struct pci_ecam_region *cfg;
+
+ mutex_lock(&pci_ecam_lock);
+ list_for_each_entry_rcu(cfg, &pci_ecam_list, list)
+ if (cfg->segment == seg && cfg->start_bus == start &&
+ cfg->end_bus == end) {
+ list_del_rcu(&cfg->list);
+ synchronize_rcu();
+ pci_ecam_arch_unmap(cfg);
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ mutex_unlock(&pci_ecam_lock);
+ kfree(cfg);
+ return 0;
+ }
+ mutex_unlock(&pci_ecam_lock);
+
+ return -ENOENT;
+}
+
+int pci_ecam_inject(struct pci_ecam_region *cfg)
+{
+ struct pci_ecam_region *cfg_conflict;
+ int err = 0;
+
+ mutex_lock(&pci_ecam_lock);
+ cfg_conflict = pci_ecam_lookup(cfg->segment, cfg->start_bus);
+ if (cfg_conflict) {
+ if (cfg_conflict->end_bus < cfg->end_bus)
+ pr_info(FW_INFO "ECAM for "
+ "domain %04x [bus %02x-%02x] "
+ "only partially covers this bridge\n",
+ cfg_conflict->segment, cfg_conflict->start_bus,
+ cfg_conflict->end_bus);
+ err = -EEXIST;
+ goto out;
+ }
+
+ if (pci_ecam_arch_map(cfg)) {
+ pr_warn("fail to map ECAM %pR.\n", &cfg->res);
+ err = -ENOMEM;
+ goto out;
+ } else {
+ pci_ecam_list_add_sorted(cfg);
+ pr_info("ECAM at %pR (base %#lx)\n",
+ &cfg->res, (unsigned long)cfg->address);
+ }
+out:
+ mutex_unlock(&pci_ecam_lock);
+ return err;
+}
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index ee082c0..c37a3f3 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -29,6 +29,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/ecam.h>
#define PCIECORE_CTLANDSTATUS 0x50
#define PIM1_1L 0x80
@@ -468,6 +470,160 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
return 0;
}
+#ifdef CONFIG_ACPI
+struct xgene_mcfg_info {
+ void __iomem *csr_base;
+};
+
+/*
+ * When address bits [17:16] are 2'b01, the configuration access is
+ * treated as Type 1 and forwarded to the external PCIe device.
+ */
+static void __iomem *__get_cfg_base(struct pci_ecam_region *cfg,
+ unsigned int bus)
+{
+ if (bus > cfg->start_bus)
+ return cfg->virt + AXI_EP_CFG_ACCESS;
+
+ return cfg->virt;
+}
+
+/*
+ * For configuration requests, the RTDID register supplies the bus,
+ * device and function numbers of the header fields.
+ */
+static void __set_rtdid_reg(struct pci_ecam_region *cfg,
+ unsigned int bus, unsigned int devfn)
+{
+ struct xgene_mcfg_info *info = cfg->data;
+ unsigned int b, d, f;
+ u32 rtdid_val = 0;
+
+ b = bus;
+ d = PCI_SLOT(devfn);
+ f = PCI_FUNC(devfn);
+
+ if (bus != cfg->start_bus)
+ rtdid_val = (b << 8) | (d << 3) | f;
+
+ writel(rtdid_val, info->csr_base + RTDID);
+ /* read the register back to ensure flush */
+ readl(info->csr_base + RTDID);
+}
+
+static int xgene_raw_pci_read(struct pci_ecam_region *cfg, unsigned int bus,
+ unsigned int devfn, int offset, int len, u32 *val)
+{
+ void __iomem *addr;
+
+ if (bus == cfg->start_bus) {
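+ /* Only devfn 0 (the root port itself) exists on the root bus */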
+ if (devfn != 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* see xgene_pcie_hide_rc_bars() above */
+ if (offset == PCI_BASE_ADDRESS_0 ||
+ offset == PCI_BASE_ADDRESS_1) {
+ *val = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+
+ __set_rtdid_reg(cfg, bus, devfn);
+ addr = __get_cfg_base(cfg, bus) + (offset & ~0x3);
+ *val = readl(addr);
+ if (len <= 2)
+ *val = (*val >> (8 * (offset & 3))) & ((1 << (len * 8)) - 1);
+
+ /* FIXME.
+ * Something wrong with Configuration Request Retry Status
+ * on this hw. Pretend it isn't supported until the problem
+ * gets sorted out properly.
+ */
+ if (len == 2 && bus == cfg->start_bus && offset == (0x40 + PCI_EXP_RTCAP))
+ *val &= ~PCI_EXP_RTCAP_CRSVIS;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int xgene_raw_pci_write(struct pci_ecam_region *cfg, unsigned int bus,
+ unsigned int devfn, int offset, int len, u32 val)
+{
+ void __iomem *addr;
+ u32 mask, tmp;
+
+ if (bus == cfg->start_bus && devfn != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ __set_rtdid_reg(cfg, bus, devfn);
+ addr = __get_cfg_base(cfg, bus) + (offset & ~0x3);
+
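+ /* Sub-dword writes are done as a read-modify-write of the aligned dword */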
+ if (len == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ } else
+ mask = ~(((1 << (len * 8)) - 1) << ((offset & 0x3) * 8));
+
+ tmp = readl(addr) & mask;
+ tmp |= val << ((offset & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static acpi_status find_csr_base(struct acpi_resource *acpi_res, void *data)
+{
+ struct pci_ecam_region *cfg = data;
+ struct xgene_mcfg_info *info = cfg->data;
+ struct acpi_resource_fixed_memory32 *fixed32;
+
+ if (acpi_res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
+ fixed32 = &acpi_res->data.fixed_memory32;
+ info->csr_base = ioremap(fixed32->address,
+ fixed32->address_length);
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
+}
+
+static int xgene_mcfg_fixup(struct acpi_pci_root *root,
+ struct pci_ecam_region *cfg)
+{
+ struct acpi_device *device = root->device;
+ struct xgene_mcfg_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ cfg->data = info;
+
+ acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ find_csr_base, cfg);
+
+ if (!info->csr_base) {
+ kfree(info);
+ cfg->data = NULL;
+ return -ENODEV;
+ }
+
+ cfg->read = xgene_raw_pci_read;
+ cfg->write = xgene_raw_pci_write;
+
+ /* actual last bus reachable through this mmconfig */
+ cfg->end_bus = root->secondary.end;
+
+ /* firmware should have done this */
+ xgene_raw_pci_write(cfg, cfg->start_bus, 0, PCI_PRIMARY_BUS, 4,
+ cfg->start_bus | ((cfg->start_bus + 1) << 8) |
+ (cfg->end_bus << 16));
+
+ return 0;
+}
+DECLARE_ACPI_MCFG_FIXUP("APM ", "XGENE ", xgene_mcfg_fixup);
+#endif /* CONFIG_ACPI */
+
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index c3e7dfc..1bed857 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -41,8 +41,7 @@ static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
{
struct irq_domain *domain = NULL;
- if (dev->bus->msi)
- domain = dev->bus->msi->domain;
+ domain = dev_get_msi_domain(&dev->dev);
if (!domain)
domain = arch_get_pci_msi_domain(dev);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index f092993..75bfb85 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
@@ -59,3 +60,22 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
+
+void pci_set_phb_of_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device_node *np;
+
+ if (!bus->dev.of_node)
+ return;
+ /* Start looking for a phandle to an MSI controller. */
+ np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+ /*
+ * If we don't have an msi-parent property, look for a domain
+ * directly attached to the host bridge.
+ */
+ if (!np)
+ np = bus->dev.of_node;
+ dev_set_msi_domain(&bus->dev, irq_find_host(np));
+#endif
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 4890639..04b676b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -9,9 +9,11 @@
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
@@ -628,3 +630,37 @@ static int __init acpi_pci_init(void)
return 0;
}
arch_initcall(acpi_pci_init);
+
+#ifdef CONFIG_PCI_MSI
+void pci_acpi_set_phb_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device_node np;
+
+ if (acpi_disabled)
+ return;
+
+ np.data = kzalloc(sizeof(unsigned int), GFP_KERNEL);
+ if (!np.data)
+ return;
+
+ /*
+ * Since ACPI 5.1 currently does not define a way to associate
+ * an MSI frame ID with a device, we can only support a single
+ * MSI frame; the ID 0 is used as a default. Alternatively, we
+ * should query the ID from the device's DSDT.
+ *
+ * FIXME when the ACPI spec is fixed!
+ */
+ *((u32 *)(np.data)) = 0;
+
+ /*
+ * FIXME: This is currently a hack until we have a better way
+ * to find the MSI domain using msi_frame_id.
+ */
+ dev_set_msi_domain(&bus->dev, irq_find_host(&np));
+#endif
+}
+#endif /* CONFIG_PCI_MSI */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 81f06e8..ae3fce5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -25,6 +25,7 @@
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
+#include <linux/acpi.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"
@@ -4504,7 +4505,7 @@ int pci_get_new_domain_nr(void)
void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
{
static int use_dt_domains = -1;
- int domain = of_get_pci_domain_nr(parent->of_node);
+ int domain;
/*
* Check DT domain and use_dt_domains values.
@@ -4532,17 +4533,22 @@ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
* invalidating the domain value (domain = -1) and printing a
* corresponding error.
*/
- if (domain >= 0 && use_dt_domains) {
- use_dt_domains = 1;
- } else if (domain < 0 && use_dt_domains != 1) {
- use_dt_domains = 0;
- domain = pci_get_new_domain_nr();
+ if (acpi_disabled) {
+ domain = of_get_pci_domain_nr(parent->of_node);
+ if (domain >= 0 && use_dt_domains) {
+ use_dt_domains = 1;
+ } else if (domain < 0 && use_dt_domains != 1) {
+ use_dt_domains = 0;
+ domain = pci_get_new_domain_nr();
+ } else {
+ dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
+ parent->of_node->full_name);
+ domain = -1;
+ }
} else {
- dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
- parent->of_node->full_name);
- domain = -1;
+ struct pci_sysdata *sd = bus->sysdata;
+ domain = sd->domain;
}
-
bus->domain_nr = domain;
}
#endif
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8d2f400..324cdce 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"
@@ -660,6 +661,22 @@ static void pci_set_bus_speed(struct pci_bus *bus)
}
}
+void __weak pcibios_set_phb_msi_domain(struct pci_bus *bus)
+{
+ pci_set_phb_of_msi_domain(bus);
+ pci_acpi_set_phb_msi_domain(bus);
+}
+
+static void pci_set_bus_msi_domain(struct pci_bus *bus)
+{
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge)
+ pcibios_set_phb_msi_domain(bus);
+ else
+ dev_set_msi_domain(&bus->dev, dev_get_msi_domain(&bridge->dev));
+}
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -713,6 +730,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
bridge->subordinate = child;
add_dev:
+ pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
WARN_ON(ret < 0);
@@ -1507,6 +1525,17 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_enable_acs(dev);
}
+static void pci_set_msi_domain(struct pci_dev *dev)
+{
+ /*
+ * If no domain has been set through the pcibios callback,
+ * inherit the default from the bus device.
+ */
+ if (!dev_get_msi_domain(&dev->dev))
+ dev_set_msi_domain(&dev->dev,
+ dev_get_msi_domain(&dev->bus->dev));
+}
+
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
int ret;
@@ -1547,6 +1576,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
+ /* Setup MSI irq domain */
+ pci_set_msi_domain(dev);
+
/* Notifier could use PCI capabilities */
dev->match_driver = false;
ret = device_add(&dev->dev);
@@ -1937,6 +1969,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
b->bridge = get_device(&bridge->dev);
device_enable_async_suspend(b->bridge);
pci_set_bus_of_node(b);
+ pci_set_bus_msi_domain(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b24aa01..50fe279 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -419,4 +419,10 @@ config DA_CONSOLE
help
This enables a console on a Dash channel.
+config SBSAUART_TTY
+ tristate "SBSA UART TTY Driver"
+ help
+ Console and system TTY driver for the SBSA UART, which is defined
+ in the Server Base System Architecture document for ARM64 servers.
+
endif # TTY
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 58ad1c0..c3211c0 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -29,5 +29,6 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
obj-$(CONFIG_DA_TTY) += metag_da.o
+obj-$(CONFIG_SBSAUART_TTY) += sbsauart.o
obj-y += ipwireless/
diff --git a/drivers/tty/sbsauart.c b/drivers/tty/sbsauart.c
new file mode 100644
index 0000000..0f44624
--- /dev/null
+++ b/drivers/tty/sbsauart.c
@@ -0,0 +1,358 @@
+/*
+ * SBSA (Server Base System Architecture) Compatible UART driver
+ *
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/amba/serial.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+struct sbsa_tty {
+ struct tty_port port;
+ spinlock_t lock;
+ void __iomem *base;
+ u32 irq;
+ int opencount;
+ struct console console;
+};
+
+static struct tty_driver *sbsa_tty_driver;
+static struct sbsa_tty *sbsa_tty;
+
+#define SBSAUART_CHAR_MASK 0xFF
+
+static void sbsa_raw_putc(struct uart_port *port, int c)
+{
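+ /* Busy-wait until the TX FIFO has room, then write the character */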
+ while (readw(port->membase + UART01x_FR) & UART01x_FR_TXFF)
+ ;
+ writew(c & 0xFF, port->membase + UART01x_DR);
+}
+
+static void sbsa_uart_early_write(struct console *con, const char *buf,
+ unsigned count)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, buf, count, sbsa_raw_putc);
+}
+
+static int __init sbsa_uart_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = sbsa_uart_early_write;
+ return 0;
+}
+EARLYCON_DECLARE(sbsauart, sbsa_uart_early_console_setup);
+
+static void sbsa_tty_do_write(const char *buf, unsigned count)
+{
+ unsigned long irq_flags;
+ struct sbsa_tty *qtty = sbsa_tty;
+ void __iomem *base = qtty->base;
+ unsigned n;
+
+ spin_lock_irqsave(&qtty->lock, irq_flags);
+ for (n = 0; n < count; n++) {
+ while (readw(base + UART01x_FR) & UART01x_FR_TXFF)
+ ;
+ writew(buf[n], base + UART01x_DR);
+ }
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
+}
+
+static void sbsauart_fifo_to_tty(struct sbsa_tty *qtty)
+{
+ void __iomem *base = qtty->base;
+ unsigned int flag, max_count = 32;
+ u16 status, ch;
+
+ while (max_count--) {
+ status = readw(base + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ break;
+
+ /* Take chars from the FIFO and update status */
+ ch = readw(base + UART01x_DR);
+ flag = TTY_NORMAL;
+
+ if (ch & UART011_DR_BE)
+ flag = TTY_BREAK;
+ else if (ch & UART011_DR_PE)
+ flag = TTY_PARITY;
+ else if (ch & UART011_DR_FE)
+ flag = TTY_FRAME;
+ else if (ch & UART011_DR_OE)
+ flag = TTY_OVERRUN;
+
+ ch &= SBSAUART_CHAR_MASK;
+
+ tty_insert_flip_char(&qtty->port, ch, flag);
+ }
+
+ tty_schedule_flip(&qtty->port);
+
+ /* Clear the RX and receive-timeout IRQs */
+ writew(UART011_RXIC | UART011_RTIC, base + UART011_ICR);
+}
+
+static irqreturn_t sbsa_tty_interrupt(int irq, void *dev_id)
+{
+ struct sbsa_tty *qtty = sbsa_tty;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&qtty->lock, irq_flags);
+ sbsauart_fifo_to_tty(qtty);
+ spin_unlock_irqrestore(&qtty->lock, irq_flags);
+
+ return IRQ_HANDLED;
+}
+
+static int sbsa_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct sbsa_tty *qtty = sbsa_tty;
+
+ return tty_port_open(&qtty->port, tty, filp);
+}
+
+static void sbsa_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ tty_port_close(tty->port, tty, filp);
+}
+
+static void sbsa_tty_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(tty->port);
+}
+
+static int sbsa_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ sbsa_tty_do_write(buf, count);
+ return count;
+}
+
+static int sbsa_tty_write_room(struct tty_struct *tty)
+{
+ return 32;
+}
+
+static void sbsa_tty_console_write(struct console *co, const char *b,
+ unsigned count)
+{
+ sbsa_tty_do_write(b, count);
+
+ if (b[count - 1] == '\n')
+ sbsa_tty_do_write("\r", 1);
+}
+
+static struct tty_driver *sbsa_tty_console_device(struct console *c,
+ int *index)
+{
+ *index = c->index;
+ return sbsa_tty_driver;
+}
+
+static int sbsa_tty_console_setup(struct console *co, char *options)
+{
+ if ((unsigned)co->index > 0)
+ return -ENODEV;
+ if (sbsa_tty->base == NULL)
+ return -ENODEV;
+ return 0;
+}
+
+static struct tty_port_operations sbsa_port_ops = {
+};
+
+static const struct tty_operations sbsa_tty_ops = {
+ .open = sbsa_tty_open,
+ .close = sbsa_tty_close,
+ .hangup = sbsa_tty_hangup,
+ .write = sbsa_tty_write,
+ .write_room = sbsa_tty_write_room,
+};
+
+static int sbsa_tty_create_driver(void)
+{
+ int ret;
+ struct tty_driver *tty;
+
+ sbsa_tty = kzalloc(sizeof(*sbsa_tty), GFP_KERNEL);
+ if (sbsa_tty == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_sbsa_tty_failed;
+ }
+ tty = alloc_tty_driver(1);
+ if (tty == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_tty_driver_failed;
+ }
+ tty->driver_name = "sbsauart";
+ tty->name = "ttySBSA";
+ tty->type = TTY_DRIVER_TYPE_SERIAL;
+ tty->subtype = SERIAL_TYPE_NORMAL;
+ tty->init_termios = tty_std_termios;
+ tty->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(tty, &sbsa_tty_ops);
+ ret = tty_register_driver(tty);
+ if (ret)
+ goto err_tty_register_driver_failed;
+
+ sbsa_tty_driver = tty;
+ return 0;
+
+err_tty_register_driver_failed:
+ put_tty_driver(tty);
+err_alloc_tty_driver_failed:
+ kfree(sbsa_tty);
+ sbsa_tty = NULL;
+err_alloc_sbsa_tty_failed:
+ return ret;
+}
+
+static void sbsa_tty_delete_driver(void)
+{
+ tty_unregister_driver(sbsa_tty_driver);
+ put_tty_driver(sbsa_tty_driver);
+ sbsa_tty_driver = NULL;
+ kfree(sbsa_tty);
+ sbsa_tty = NULL;
+}
+
+static int sbsa_tty_probe(struct platform_device *pdev)
+{
+ struct sbsa_tty *qtty;
+ int ret = -EINVAL;
+ struct resource *r;
+ struct device *ttydev;
+ void __iomem *base;
+ u32 irq;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL)
+ return -EINVAL;
+
+ base = ioremap(r->start, resource_size(r));
+ if (base == NULL) {
+ pr_err("sbsa_tty: unable to remap base\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL)
+ goto err_unmap;
+
+ irq = r->start;
+
+ if (pdev->id > 0)
+ goto err_unmap;
+
+ ret = sbsa_tty_create_driver();
+ if (ret)
+ goto err_unmap;
+
+ qtty = sbsa_tty;
+ spin_lock_init(&qtty->lock);
+ tty_port_init(&qtty->port);
+ qtty->port.ops = &sbsa_port_ops;
+ qtty->base = base;
+ qtty->irq = irq;
+
+ /* Clear and Mask all IRQs */
+ writew(0, base + UART011_IMSC);
+ writew(0xFFFF, base + UART011_ICR);
+
+ ret = request_irq(irq, sbsa_tty_interrupt, IRQF_SHARED,
+ "sbsa_tty", pdev);
+ if (ret)
+ goto err_request_irq_failed;
+
+ /* Unmask the RX IRQ */
+ writew(UART011_RXIM | UART011_RTIM, base + UART011_IMSC);
+
+ ttydev = tty_port_register_device(&qtty->port, sbsa_tty_driver,
+ 0, &pdev->dev);
+ if (IS_ERR(ttydev)) {
+ ret = PTR_ERR(ttydev);
+ goto err_tty_register_device_failed;
+ }
+
+ strcpy(qtty->console.name, "ttySBSA");
+ qtty->console.write = sbsa_tty_console_write;
+ qtty->console.device = sbsa_tty_console_device;
+ qtty->console.setup = sbsa_tty_console_setup;
+ qtty->console.flags = CON_PRINTBUFFER;
+ /* if no console= on cmdline, make this the console device */
+ if (!console_set_on_cmdline)
+ qtty->console.flags |= CON_CONSDEV;
+ qtty->console.index = pdev->id;
+ register_console(&qtty->console);
+
+ return 0;
+
+err_tty_register_device_failed:
+ free_irq(irq, pdev);
+err_request_irq_failed:
+ sbsa_tty_delete_driver();
+err_unmap:
+ iounmap(base);
+ return ret;
+}
+
+static int sbsa_tty_remove(struct platform_device *pdev)
+{
+ struct sbsa_tty *qtty;
+
+ qtty = sbsa_tty;
+ unregister_console(&qtty->console);
+ tty_unregister_device(sbsa_tty_driver, pdev->id);
+ iounmap(qtty->base);
+ qtty->base = NULL;
+ free_irq(qtty->irq, pdev);
+ sbsa_tty_delete_driver();
+ return 0;
+}
+
+static const struct acpi_device_id sbsa_acpi_match[] = {
+ { "ARMH0011", 0 },
+ { }
+};
+
+static struct platform_driver sbsa_tty_platform_driver = {
+ .probe = sbsa_tty_probe,
+ .remove = sbsa_tty_remove,
+ .driver = {
+ .name = "sbsa_tty",
+ .acpi_match_table = ACPI_PTR(sbsa_acpi_match),
+ }
+};
+
+module_platform_driver(sbsa_tty_platform_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 2ab229d..27e2e8f 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -394,8 +394,18 @@ static int dw8250_probe_acpi(struct uart_8250_port *up,
if (!p->uartclk)
if (device_property_read_u32(p->dev, "clock-frequency",
- &p->uartclk))
- return -EINVAL;
+ &p->uartclk)) {
+ if (strncmp("APMC0D08", id->id, 8))
+ return -EINVAL;
+ /*
+ * Temp hack for Mustang to continue working
+ * with older firmware.
+ */
+ dev_info(p->dev,
+ "clock-frequency not found in ACPI tables.");
+ dev_info(p->dev, "Updated firmware needed!\n");
+ p->uartclk = 50000000;
+ }
p->iotype = UPIO_MEM32;
p->serial_in = dw8250_serial_in32;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8d94c19..04930e2 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2238,7 +2238,15 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
}
}
+ /*
+ * Temp hack to avoid the need for console= on the cmdline;
+ * this can go away when we switch completely to ACPI.
+ */
+ if (amba_reg.cons && !console_set_on_cmdline && uap->port.line == 0)
+ amba_reg.cons->flags |= CON_CONSDEV;
ret = uart_add_one_port(&amba_reg, &uap->port);
+ if (amba_reg.cons && !console_set_on_cmdline && uap->port.line == 0)
+ amba_reg.cons->flags &= ~CON_CONSDEV;
if (ret) {
amba_ports[i] = NULL;
uart_unregister_driver(&amba_reg);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 0e11d61..88ffbee 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -83,14 +83,13 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- /* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- else
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ /* Try setting the coherent_dma_mask to 64 bits, then try 32 bits */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ }
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index cad5698..88b6cd3 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -70,8 +70,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_mmio.h>
#include <linux/virtio_ring.h>
-
-
+#include <linux/acpi.h>
/* The alignment to use between consumer and producer parts of vring.
* Currently hardcoded to the page size. */
@@ -666,12 +665,21 @@ static struct of_device_id virtio_mmio_match[] = {
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+ { "LNRO0005", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+#endif
+
static struct platform_driver virtio_mmio_driver = {
.probe = virtio_mmio_probe,
.remove = virtio_mmio_remove,
.driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
+ .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
},
};
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 95ee430..eff4035 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -204,7 +204,7 @@ arch_initcall(register_xen_pci_notifier);
#ifdef CONFIG_PCI_MMCONFIG
static int __init xen_mcfg_late(void)
{
- struct pci_mmcfg_region *cfg;
+ struct pci_ecam_region *cfg;
int rc;
if (!xen_initial_domain())
@@ -213,11 +213,11 @@ static int __init xen_mcfg_late(void)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return 0;
- if (list_empty(&pci_mmcfg_list))
+ if (list_empty(&pci_ecam_list))
return 0;
/* Check whether they are in the right area. */
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ list_for_each_entry(cfg, &pci_ecam_list, list) {
struct physdev_pci_mmcfg_reserved r;
r.address = cfg->address;
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 273de70..b52c0dc 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -51,6 +51,7 @@
#define METHOD_NAME__BBN "_BBN"
#define METHOD_NAME__CBA "_CBA"
#define METHOD_NAME__CID "_CID"
+#define METHOD_NAME__CLS "_CLS"
#define METHOD_NAME__CRS "_CRS"
#define METHOD_NAME__DDN "_DDN"
#define METHOD_NAME__HID "_HID"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 61e32ec..1fec6f5 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -69,6 +69,8 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
int rev, int func, union acpi_object *argv4);
+acpi_status acpi_check_coherency(acpi_handle handle, int *val);
+
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
union acpi_object *argv4, acpi_object_type type)
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 444671e..dd86c5f 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -3,11 +3,15 @@
#include <linux/io.h>
+#include <asm/acpi.h>
+
+#ifndef acpi_os_ioremap
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
return ioremap_cache(phys, size);
}
+#endif
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b034f10..ab3dac8 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1148,7 +1148,7 @@ struct acpi_device_info {
u32 name; /* ACPI object Name */
acpi_object_type type; /* ACPI object Type */
u8 param_count; /* If a method, required parameter count */
- u8 valid; /* Indicates which optional fields are valid */
+ u16 valid; /* Indicates which optional fields are valid */
u8 flags; /* Miscellaneous info */
u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */
@@ -1157,6 +1157,7 @@ struct acpi_device_info {
struct acpi_pnp_device_id hardware_id; /* _HID value */
struct acpi_pnp_device_id unique_id; /* _UID value */
struct acpi_pnp_device_id subsystem_id; /* _SUB value */
+ struct acpi_pnp_device_id cls; /* _CLS value */
struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */
};
@@ -1174,6 +1175,7 @@ struct acpi_device_info {
#define ACPI_VALID_CID 0x20
#define ACPI_VALID_SXDS 0x40
#define ACPI_VALID_SXWS 0x80
+#define ACPI_VALID_CLS 0x100
/* Flags for _STA return value (current_status above) */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b95dc32..4188a4d 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,7 +196,7 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */
u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int phys_id, u32 acpi_id);
+phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ac78910..472d6b8 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -276,6 +276,13 @@
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
+ /* ACPI quirks */ \
+ .acpi_fixup : AT(ADDR(.acpi_fixup) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_acpi_mcfg_fixups) = .; \
+ *(.acpi_fixup_mcfg) \
+ VMLINUX_SYMBOL(__end_acpi_mcfg_fixups) = .; \
+ } \
+ \
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7c55dd5..d7fcc50 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -318,17 +318,19 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k) ((k)->arch.vgic.ready)
-int vgic_v2_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params);
+int vgic_v2_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params);
+int vgic_v2_acpi_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
-int vgic_v3_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params);
+int vgic_v3_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params);
#else
-static inline int vgic_v3_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
+static inline int vgic_v3_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params)
{
return -ENODEV;
}
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 24c7aa8..de4e86f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -73,6 +73,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOAPIC,
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
+ ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -146,9 +147,14 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
+#ifndef PHYS_CPUID_INVALID
+typedef u32 phys_cpuid_t;
+#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
+#endif
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h
new file mode 100644
index 0000000..e4e8a81
--- /dev/null
+++ b/include/linux/acpi_irq.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_ACPI_IRQ_H
+#define _LINUX_ACPI_IRQ_H
+
+#include <asm/irq.h>
+
+#ifndef acpi_irq_init
+static inline void acpi_irq_init(void) { }
+#endif
+
+#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 9c78d15..2b2e1f8 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -244,4 +244,10 @@ extern void clocksource_of_init(void);
static inline void clocksource_of_init(void) {}
#endif
+#ifdef CONFIG_ACPI
+void acpi_generic_timer_init(void);
+#else
+static inline void acpi_generic_timer_init(void) { }
+#endif
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index 0eb8ee2..66730dd 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -690,6 +690,7 @@ struct acpi_dev_node {
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
+ * @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -750,6 +751,9 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *msi_domain; /* MSI domain device uses */
+#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
@@ -837,6 +841,22 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ return dev->msi_domain;
+#else
+ return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ dev->msi_domain = d;
+#endif
+}
+
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index f820f0a..8e1a28d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -109,6 +109,7 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
+const u8 *dmi_get_smbios_entry_area(int *size);
#else
@@ -140,6 +141,8 @@ static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
+static inline const u8 *dmi_get_smbios_entry_area(int *size)
+ { return NULL; }
#endif
diff --git a/include/linux/ecam.h b/include/linux/ecam.h
new file mode 100644
index 0000000..4d42eff
--- /dev/null
+++ b/include/linux/ecam.h
@@ -0,0 +1,81 @@
+#ifndef __ECAM_H
+#define __ECAM_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/acpi.h>
+
+/* "PCI ECAM %04x [bus %02x-%02x]" */
+#define PCI_ECAM_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct acpi_pci_root;
+struct pci_ecam_region;
+
+typedef int (*acpi_mcfg_fixup_t)(struct acpi_pci_root *root,
+ struct pci_ecam_region *cfg);
+
+struct pci_ecam_region {
+ struct list_head list;
+ struct resource res;
+ int (*read)(struct pci_ecam_region *cfg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *value);
+ int (*write)(struct pci_ecam_region *cfg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 value);
+ acpi_mcfg_fixup_t fixup;
+ void *data;
+ u64 address;
+ char __iomem *virt;
+ u16 segment;
+ u8 start_bus;
+ u8 end_bus;
+ char name[PCI_ECAM_RESOURCE_NAME_LEN];
+};
+
+struct acpi_mcfg_fixup {
+ char oem_id[7];
+ char oem_table_id[9];
+ acpi_mcfg_fixup_t hook;
+};
+
+/* Designate a routine to fix up buggy MCFG */
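+/* Fixups are selected by matching oem_id/oem_table_id against the MCFG table header */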
+#define DECLARE_ACPI_MCFG_FIXUP(oem_id, table_id, hook) \
+ static const struct acpi_mcfg_fixup __acpi_fixup_##hook __used \
+ __attribute__((__section__(".acpi_fixup_mcfg"), aligned((sizeof(void *))))) \
+ = { {oem_id}, {table_id}, hook };
+
+
+struct pci_ecam_mmio_ops {
+ u32 (*read)(int len, void __iomem *addr);
+ void (*write)(int len, void __iomem *addr, u32 value);
+};
+
+struct pci_ecam_region *pci_ecam_lookup(int segment, int bus);
+struct pci_ecam_region *pci_ecam_alloc(int segment, int start,
+ int end, u64 addr);
+int pci_ecam_inject(struct pci_ecam_region *cfg);
+struct pci_ecam_region *pci_ecam_add(int segment, int start,
+ int end, u64 addr);
+void pci_ecam_list_add_sorted(struct pci_ecam_region *new);
+void pci_ecam_free_all(void);
+int pci_ecam_delete(u16 seg, u8 start, u8 end);
+
+/* Arch specific calls */
+int pci_ecam_arch_init(void);
+void pci_ecam_arch_free(void);
+int pci_ecam_arch_map(struct pci_ecam_region *cfg);
+void pci_ecam_arch_unmap(struct pci_ecam_region *cfg);
+extern u32 pci_mmio_read(int len, void __iomem *addr);
+extern void pci_mmio_write(int len, void __iomem *addr, u32 value);
+extern void pci_ecam_register_mmio(struct pci_ecam_mmio_ops *ops);
+
+extern struct list_head pci_ecam_list;
+
+#define PCI_ECAM_BUS_OFFSET(bus) ((bus) << 20)
+
+int pci_ecam_read(unsigned int seg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *value);
+int pci_ecam_write(unsigned int seg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 value);
+
+#endif /* __KERNEL__ */
+#endif /* __ECAM_H */
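
For reference, a platform quirk registered through DECLARE_ACPI_MCFG_FIXUP() would look roughly like the sketch below. The OEM strings and the quirk body are invented for illustration; they are matched against the MCFG table's OEM ID (6 bytes) and OEM table ID (8 bytes):

    #include <linux/ecam.h>

    /* Hypothetical quirk: this firmware's ECAM window cannot be used
     * directly, so config reads go through a custom accessor. */
    static int acme_ecam_read(struct pci_ecam_region *cfg, unsigned int bus,
                              unsigned int devfn, int reg, int len, u32 *value)
    {
            *value = ~0;        /* platform-specific access would go here */
            return 0;
    }

    static int acme_mcfg_fixup(struct acpi_pci_root *root,
                               struct pci_ecam_region *cfg)
    {
            cfg->read = acme_ecam_read;    /* override the default ECAM op */
            return 0;
    }

    DECLARE_ACPI_MCFG_FIXUP("ACME  ", "ROADRUN ", acme_mcfg_fixup);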
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h
new file mode 100644
index 0000000..8776eec
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-acpi.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARM_GIC_ACPI_H_
+#define ARM_GIC_ACPI_H_
+
+#ifdef CONFIG_ACPI
+
+/*
+ * These sizes are hard-coded here because the MADT, unlike the FDT, does
+ * not carry the region sizes. In practice that is no loss: the sizes can
+ * be inferred from the GIC specification.
+ */
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+
+struct acpi_table_header;
+struct irq_domain;
+
+int gic_v2_acpi_init(struct acpi_table_header *table, struct irq_domain **domain);
+void acpi_gic_init(void);
+#else
+static inline void acpi_gic_init(void) { }
+#endif
+
+#endif /* ARM_GIC_ACPI_H_ */
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 71d706d..0b45062 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -55,6 +55,8 @@
(GICD_INT_DEF_PRI << 8) |\
GICD_INT_DEF_PRI)
+#define GIC_DIST_SOFTINT_NSATT 0x8000
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
#define GICH_VMCR 0x8
@@ -110,6 +112,11 @@ static inline void gic_init(unsigned int nr, int start,
int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
+struct acpi_table_header;
+
+int gicv2m_acpi_init(struct acpi_table_header *table,
+ struct irq_domain *parent);
+
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e530533..9a42522 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -189,6 +189,7 @@ struct css_device_id {
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
+ __u32 cls;
};
#define PNP_ID_LEN 8
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ac4a68..01b648f 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -108,9 +108,6 @@ struct msi_controller {
struct device *dev;
struct device_node *of_node;
struct list_head list;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *domain;
-#endif
int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
@@ -188,6 +185,7 @@ struct msi_domain_info {
void *handler_data;
const char *handler_name;
void *data;
+ u32 acpi_msi_frame_id;
};
/* Flags for msi_domain_info */
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 24c7728..3e95ec8 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -77,9 +77,12 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
+void pci_acpi_set_phb_msi_domain(struct pci_bus *bus);
+
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
+static inline void pci_acpi_set_phb_msi_domain(struct pci_bus *bus) { }
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_APEI
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 211e9da..0027171 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1651,6 +1651,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq, int active);
+void pcibios_set_phb_msi_domain(struct pci_bus *bus);
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
@@ -1838,6 +1839,7 @@ void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
+void pci_set_phb_of_msi_domain(struct pci_bus *bus);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1858,6 +1860,7 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline void pci_set_phb_of_msi_domain(struct pci_bus *bus) {}
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
#endif /* CONFIG_OF */
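
Together with the pci-acpi.h hunk above, the natural arch-side override of the new pcibios_set_phb_msi_domain() hook is a dispatch between the OF and ACPI helpers; a sketch, assuming the PCI core supplies a __weak default:

    #include <linux/acpi.h>
    #include <linux/pci.h>
    #include <linux/pci-acpi.h>

    /* Wire up the host bridge's MSI domain from whichever firmware
     * interface the system booted with. */
    void pcibios_set_phb_msi_domain(struct pci_bus *bus)
    {
            if (acpi_disabled)
                    pci_set_phb_of_msi_domain(bus);
            else
                    pci_acpi_set_phb_msi_domain(bus);
    }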
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 3e18163..2c43e96 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -9,11 +9,13 @@
* This file contains common code to support Message Signalled Interrupt for
* PCI compatible and non PCI compatible devices.
*/
+#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
+#include <linux/of.h>
/* Temparory solution for building, will be removed later */
#include <linux/pci.h>
@@ -124,11 +126,33 @@ static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
+/*
+ * TODO: SURAVEE: temporary hack that matches domains by MSI frame ID;
+ * a cleaner solution is being discussed with Marc.
+ */
+static int msi_domain_match(struct irq_domain *d, struct device_node *node)
+{
+ struct msi_domain_info *info;
+
+ if (acpi_disabled)
+ return (d->of_node != NULL) && (d->of_node == node);
+
+ info = msi_get_domain_info(d);
+ if (!info || !(node->data))
+ return 0;
+
+ if (info->acpi_msi_frame_id == *((u32 *)(node->data)))
+ return 1;
+
+ return 0;
+}
+
static struct irq_domain_ops msi_domain_ops = {
.alloc = msi_domain_alloc,
.free = msi_domain_free,
.activate = msi_domain_activate,
.deactivate = msi_domain_deactivate,
+ .match = msi_domain_match,
};
#ifdef GENERIC_MSI_DOMAIN_OPS
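
Until that cleaner solution lands, a lookup against this match hook passes the MADT MSI frame ID through device_node::data instead of an OF node pointer. A sketch of both ends, with tag_msi_domain() and find_msi_domain_by_frame() invented for illustration:

    #include <linux/irqdomain.h>
    #include <linux/msi.h>
    #include <linux/of.h>

    /* Provider side: stamp the domain with the frame ID from the MADT. */
    static void tag_msi_domain(struct irq_domain *d, u32 frame_id)
    {
            msi_get_domain_info(d)->acpi_msi_frame_id = frame_id;
    }

    /* Consumer side: wrap the frame ID in a throwaway device_node so
     * msi_domain_match() can compare it against each domain. */
    static struct irq_domain *find_msi_domain_by_frame(u32 *frame_id)
    {
            struct device_node fake = { .data = frame_id };

            return irq_find_host(&fake);
    }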
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index f282516..7f68268 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -63,6 +63,7 @@ int main(void)
DEVID(acpi_device_id);
DEVID_FIELD(acpi_device_id, id);
+ DEVID_FIELD(acpi_device_id, cls);
DEVID(pnp_device_id);
DEVID_FIELD(pnp_device_id, id);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e614ef6..ba5998c 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -511,12 +511,21 @@ static int do_serio_entry(const char *filename,
}
ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
-/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */
+/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
+ * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
+ *
+ * NOTE: Each driver should use only one of the following: _HID, _CID, or _CLS.
+ */
static int do_acpi_entry(const char *filename,
void *symval, char *alias)
{
DEF_FIELD_ADDR(symval, acpi_device_id, id);
- sprintf(alias, "acpi*:%s:*", *id);
+ DEF_FIELD_ADDR(symval, acpi_device_id, cls);
+
+ if (id && strlen((const char *)*id))
+ sprintf(alias, "acpi*:%s:*", *id);
+ else if (cls)
+ sprintf(alias, "acpi*:%06x:*", *cls);
return 1;
}
ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
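
On the driver side, a class-matched table entry then generates an alias of the second form above. A sketch (0x010601 is PCI base-class 01/storage, sub-class 06/SATA, prog-if 01/AHCI, the motivating user of _CLS matching; the table name is invented):

    #include <linux/acpi.h>
    #include <linux/module.h>

    /* Matches any device whose _CLS evaluates to 010601, i.e. the
     * generated modalias is "acpi*:010601:*". */
    static const struct acpi_device_id ahci_acpi_match[] = {
            { .cls = 0x010601 },
            { }
    };
    MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);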
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 6e54f35..691e868 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -21,9 +21,11 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
+#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
+#include <asm/acpi.h>
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@@ -247,60 +249,91 @@ static const struct of_device_id arch_timer_of_match[] = {
{},
};
-int kvm_timer_hyp_init(void)
+static int kvm_timer_ppi_parse_dt(unsigned int *ppi)
{
struct device_node *np;
- unsigned int ppi;
- int err;
-
- timecounter = arch_timer_get_timecounter();
- if (!timecounter)
- return -ENODEV;
np = of_find_matching_node(NULL, arch_timer_of_match);
if (!np) {
- kvm_err("kvm_arch_timer: can't find DT node\n");
return -ENODEV;
}
- ppi = irq_of_parse_and_map(np, 2);
- if (!ppi) {
- kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
- err = -EINVAL;
- goto out;
+ *ppi = irq_of_parse_and_map(np, 2);
+ if (*ppi == 0) {
+ of_node_put(np);
+ return -EINVAL;
}
- err = request_percpu_irq(ppi, kvm_arch_timer_handler,
- "kvm guest timer", kvm_get_running_vcpus());
- if (err) {
- kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
- ppi, err);
- goto out;
- }
+	of_node_put(np);
+	return 0;
+}
- host_vtimer_irq = ppi;
+/* provided by the arch timer driver, drivers/clocksource/arm_arch_timer.c */
+extern int arch_timer_ppi[];
- err = __register_cpu_notifier(&kvm_timer_cpu_nb);
- if (err) {
- kvm_err("Cannot register timer CPU notifier\n");
- goto out_free;
- }
+static int kvm_timer_ppi_parse_acpi(unsigned int *ppi)
- wqueue = create_singlethread_workqueue("kvm_arch_timer");
- if (!wqueue) {
- err = -ENOMEM;
- goto out_free;
- }
+{
+	/* entry 2 of arch_timer_ppi[] is VIRT_PPI, the virtual timer PPI */
+ *ppi = arch_timer_ppi[2];
- kvm_info("%s IRQ%d\n", np->name, ppi);
- on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
+	if (*ppi == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+int kvm_timer_hyp_init(void)
+{
+ unsigned int ppi;
+ int err;
+
+ timecounter = arch_timer_get_timecounter();
+ if (!timecounter)
+ return -ENODEV;
+
+ /* PPI DT parsing */
+ err = kvm_timer_ppi_parse_dt(&ppi);
- goto out;
+ /* if DT parsing fails, try ACPI next */
+ if (err && !acpi_disabled)
+ err = kvm_timer_ppi_parse_acpi(&ppi);
+
+ if (err) {
+		kvm_err("kvm_timer_hyp_init: can't find or configure "
+			"the virtual timer interrupt\n");
+ return err;
+ }
+
+ /* configure IRQ handler */
+ err = request_percpu_irq(ppi, kvm_arch_timer_handler,
+ "kvm guest timer", kvm_get_running_vcpus());
+ if (err) {
+ kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
+ ppi, err);
+ goto out;
+ }
+
+ host_vtimer_irq = ppi;
+
+ err = __register_cpu_notifier(&kvm_timer_cpu_nb);
+ if (err) {
+ kvm_err("Cannot register timer CPU notifier\n");
+ goto out_free;
+ }
+
+ wqueue = create_singlethread_workqueue("kvm_arch_timer");
+ if (!wqueue) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ kvm_info("timer IRQ%d\n", ppi);
+ on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
+
+ goto out;
out_free:
- free_percpu_irq(ppi, kvm_get_running_vcpus());
+ free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
- of_node_put(np);
- return err;
+ return err;
}
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index a0a7b5d..964363f 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -19,6 +19,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
+#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -26,6 +27,7 @@
#include <linux/irqchip/arm-gic.h>
+#include <asm/acpi.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
@@ -159,7 +161,7 @@ static const struct vgic_ops vgic_v2_ops = {
static struct vgic_params vgic_v2_params;
/**
- * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
+ * vgic_v2_dt_probe - probe for a GICv2 compatible interrupt controller in DT
* @node: pointer to the DT node
* @ops: address of a pointer to the GICv2 operations
* @params: address of a pointer to HW-specific parameters
@@ -168,7 +170,7 @@ static struct vgic_params vgic_v2_params;
* in *ops and the HW parameters in *params. Returns an error code
* otherwise.
*/
-int vgic_v2_probe(struct device_node *vgic_node,
+int vgic_v2_dt_probe(struct device_node *vgic_node,
const struct vgic_ops **ops,
const struct vgic_params **params)
{
@@ -222,11 +224,22 @@ int vgic_v2_probe(struct device_node *vgic_node,
}
if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+#if 0
kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
(unsigned long long)resource_size(&vcpu_res),
PAGE_SIZE);
ret = -ENXIO;
goto out_unmap;
+#else
+ /*
+	 * The check fails on arm64 with a 64K page size and certain
+	 * firmware; warn instead of failing until firmware takes care
+	 * of the problem.
+ */
+ kvm_info("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+ (unsigned long long)resource_size(&vcpu_res),
+ PAGE_SIZE);
+	kvm_info("Update DT to assign GICV a multiple of the kernel page size\n");
+#endif
}
vgic->can_emulate_gicv2 = true;
@@ -249,3 +262,72 @@ out:
of_node_put(vgic_node);
return ret;
}
+
+/* GICC entry cached from the MADT by gic_get_acpi_header() */
+static struct acpi_madt_generic_interrupt *vgic_acpi;
+static void gic_get_acpi_header(struct acpi_subtable_header *header)
+{
+ vgic_acpi = (struct acpi_madt_generic_interrupt *)header;
+}
+
+int vgic_v2_acpi_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params)
+{
+ struct vgic_params *vgic = &vgic_v2_params;
+ int irq_mode, ret;
+
+ /* MADT table */
+ ret = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ (acpi_tbl_entry_handler)gic_get_acpi_header, 0);
+	if (ret <= 0) {
+ pr_err("Failed to get MADT VGIC CPU entry\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* IRQ trigger mode */
+ irq_mode = (vgic_acpi->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+	/* According to the GIC-400 manual, all PPIs are active-low and
+	 * level sensitive; register the IRQ as active-low.
+ */
+ vgic->maint_irq = acpi_register_gsi(NULL, vgic_acpi->vgic_interrupt,
+ irq_mode, ACPI_ACTIVE_LOW);
+ if (!vgic->maint_irq) {
+ pr_err("Cannot register VGIC ACPI maintenance irq\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* GICH resource */
+ vgic->vctrl_base = ioremap(vgic_acpi->gich_base_address, SZ_8K);
+ if (!vgic->vctrl_base) {
+ pr_err("cannot ioremap GICH memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
+ vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
+
+ ret = create_hyp_io_mappings(vgic->vctrl_base,
+ vgic->vctrl_base + SZ_8K,
+ vgic_acpi->gich_base_address);
+ if (ret) {
+ kvm_err("Cannot map GICH into hyp\n");
+ goto out;
+ }
+
+ vgic->vcpu_base = vgic_acpi->gicv_base_address;
+
+ kvm_info("GICH base=0x%llx, GICV base=0x%llx, IRQ=%d\n",
+ (unsigned long long)vgic_acpi->gich_base_address,
+ (unsigned long long)vgic_acpi->gicv_base_address,
+ vgic->maint_irq);
+
+ vgic->type = VGIC_V2;
+ *ops = &vgic_v2_ops;
+ *params = vgic;
+
+out:
+ return ret;
+}
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 3a62d8a..6f27fe1 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -203,7 +203,7 @@ static const struct vgic_ops vgic_v3_ops = {
static struct vgic_params vgic_v3_params;
/**
- * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
+ * vgic_v3_dt_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
* @ops: address of a pointer to the GICv3 operations
* @params: address of a pointer to HW-specific parameters
@@ -212,9 +212,9 @@ static struct vgic_params vgic_v3_params;
* in *ops and the HW parameters in *params. Returns an error code
* otherwise.
*/
-int vgic_v3_probe(struct device_node *vgic_node,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
+int vgic_v3_dt_probe(struct device_node *vgic_node,
+ const struct vgic_ops **ops,
+ const struct vgic_params **params)
{
int ret = 0;
u32 gicv_idx;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 0cc6ab6..c3fb126 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -25,9 +25,11 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>
+#include <linux/acpi.h>
#include <linux/irqchip/arm-gic.h>
+#include <asm/acpi.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
@@ -1865,8 +1867,8 @@ static struct notifier_block vgic_cpu_nb = {
};
static const struct of_device_id vgic_ids[] = {
- { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
- { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+ { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_dt_probe, },
+ { .compatible = "arm,gic-v3", .data = vgic_v3_dt_probe, },
{},
};
@@ -1876,20 +1878,26 @@ int kvm_vgic_hyp_init(void)
const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
const struct vgic_params **);
struct device_node *vgic_node;
- int ret;
-
- vgic_node = of_find_matching_node_and_match(NULL,
- vgic_ids, &matched_id);
- if (!vgic_node) {
- kvm_err("error: no compatible GIC node found\n");
- return -ENODEV;
+ int ret = -ENODEV;
+
+ /* probe VGIC */
+ if ((vgic_node = of_find_matching_node_and_match(NULL,
+ vgic_ids, &matched_id))) {
+ /* probe VGIC in DT */
+ vgic_probe = matched_id->data;
+ ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+	} else if (!acpi_disabled) {
+ /* probe VGIC in ACPI */
+ ret = vgic_v2_acpi_probe(&vgic_ops, &vgic);
}
- vgic_probe = matched_id->data;
- ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
- if (ret)
+ if (ret) {
+ kvm_err("error: no compatible GIC info found\n");
return ret;
+ }
+	/* register the maintenance IRQ handler */
ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
"vgic", kvm_get_running_vcpus());
if (ret) {