From b805379cc6ad000e4aec1799f5f348cb22ed5bd0 Mon Sep 17 00:00:00 2001 From: linmin Date: Thu, 18 Apr 2024 14:39:33 +0800 Subject: [PATCH 005/219] feat(SMMU support):Support SMMU Changelogs: 1.arch/riscv/Kconfig: Do not select SWIOTLB because SMMU is used. 2.win2030_defconfig: Select CONFIG_ARM_SMMU_V3 3.Define wfe and __iomb which are needed by arm-smmu-v3.c 4.dma-noncoherent.c: Setup dma ops for devices that are behind SMMU. 5.iommu/Kconfig and Makefile: Support IOMMU_IO_PGTABLE_LPAE and ARM_SMMU_V3 for ARCH_ESWIN_EIC770X_SOC_FAMILY 6.arm-smmu-v3.c: Set disable_bypass = false by default, thus the access from the devices which are not behind SMMU can be bypassed by SMMU. Added SMMU IRQ clear handler for EIC770X because the oneshot irq is not supported by EIC770X, the irq clearing on the EIC770X specific regs must be performed. Added reset API for EIC770X. Support duplicated streamID. 7.arm-smmu-v3.h: Added new members for supporting duplicated streamID. 8.eswin-win2030-sid.c: New file added for streamID configuration and TBU power operation. 9.drivers/soc/sifive/Kconfig: select IOMMU_DMA if IOMMU_SUPPORT, so that iommu can work correctly. 
--- arch/riscv/Kconfig | 2 +- arch/riscv/configs/win2030_defconfig | 1 + arch/riscv/include/asm/barrier.h | 4 + arch/riscv/include/asm/io.h | 3 + arch/riscv/mm/dma-noncoherent.c | 8 + drivers/iommu/Kconfig | 4 +- drivers/iommu/Makefile | 2 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 416 +++++++++ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 48 ++ drivers/iommu/eswin/Makefile | 2 + drivers/iommu/eswin/eswin-win2030-sid.c | 886 ++++++++++++++++++++ drivers/soc/sifive/Kconfig | 1 + 12 files changed, 1373 insertions(+), 4 deletions(-) create mode 100644 drivers/iommu/eswin/Makefile create mode 100644 drivers/iommu/eswin/eswin-win2030-sid.c diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 1304992232ad..baefecd75016 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -334,7 +334,7 @@ config ARCH_RV64I bool "RV64I" select 64BIT select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 - select SWIOTLB if MMU + select SWIOTLB if MMU && !ARCH_ESWIN_EIC770X_SOC_FAMILY endchoice diff --git a/arch/riscv/configs/win2030_defconfig b/arch/riscv/configs/win2030_defconfig index 0fc2a5f9522b..2e2e4e419bd3 100644 --- a/arch/riscv/configs/win2030_defconfig +++ b/arch/riscv/configs/win2030_defconfig @@ -223,6 +223,7 @@ CONFIG_VIRTIO_MMIO=y CONFIG_STAGING=y CONFIG_COMMON_CLK_WIN2030=y CONFIG_MAILBOX=y +CONFIG_ARM_SMMU_V3=y CONFIG_RPMSG_VIRTIO=y CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY=y CONFIG_EXTCON=y diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 110752594228..600abcec3631 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -71,6 +71,10 @@ do { \ */ #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +#define wfe() do { } while (0) +#endif + #include #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 42497d487a17..0c3c9507dca1 100644 --- a/arch/riscv/include/asm/io.h +++ 
b/arch/riscv/include/asm/io.h @@ -52,6 +52,9 @@ #define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); #define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +#define __iomb() mb() +#endif /* * Accesses from a single hart to a single I/O address must be ordered. This * allows us to use the raw read macros, but we still need to fence before and diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index 898a9c8facd4..75d3f1e6f884 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -11,6 +11,9 @@ #include #include #include +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +#include +#endif static bool noncoherent_supported __ro_after_init; int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN; @@ -149,6 +152,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, dev_driver_string(dev), dev_name(dev)); dev->dma_coherent = coherent; + + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + if (iommu) + iommu_setup_dma_ops(dev, dma_base, size); + #endif } void riscv_noncoherent_supported(void) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d57c5adf932e..68ce46410fea 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -28,7 +28,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE - depends on ARM || ARM64 || COMPILE_TEST + depends on ARM || ARM64 || ARCH_ESWIN_EIC770X_SOC_FAMILY || COMPILE_TEST depends on !GENERIC_ATOMIC64 # for cmpxchg64() help Enable support for the ARM long descriptor pagetable format. @@ -389,7 +389,7 @@ config ARM_SMMU_QCOM_DEBUG config ARM_SMMU_V3 tristate "ARM Ltd. 
System MMU Version 3 (SMMUv3) Support" - depends on ARM64 + depends on ARM64 || ARCH_ESWIN_EIC770X_SOC_FAMILY select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select GENERIC_MSI_IRQ diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 769e43d780ce..42b2d74869aa 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ +obj-y += amd/ intel/ arm/ iommufd/ eswin/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 68b81f9c2f4b..c383426a0655 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -30,8 +30,26 @@ #include "arm-smmu-v3.h" #include "../../dma-iommu.h" #include "../../iommu-sva.h" +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +#include +#include +#include +#include + +#define ESWIN_SMMU_IRQ_CLEAR_REG 1 + +/* smmu interrupt clear bits */ +#define TCU_U84_EVENT_Q_IRPT_NS_CLR_BIT 9 +#define TCU_U84_PRI_Q_IRPT_NS_CLR_BIT 10 +#define TCU_U84_CMD_SYNC_IRPT_NS_CLR_BIT 11 +#define TCU_U84_GLOBAL_IRPT_NS_CLR_BIT 13 +#endif +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static bool disable_bypass = false; +#else static bool disable_bypass = true; +#endif module_param(disable_bypass, bool, 0444); MODULE_PARM_DESC(disable_bypass, "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU."); @@ -74,6 +92,21 @@ struct arm_smmu_option_prop { DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa); DEFINE_MUTEX(arm_smmu_asid_lock); +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static unsigned long get_tcu_node_status(struct arm_smmu_device *smmu) +{ + unsigned long reg_val; + unsigned long 
tcu_node_status = 0; + int i; + + for (i = 0; i < 62; i++) { + reg_val = readl_relaxed(smmu->s_base + ARM_SMMU_TCU_NODE_STATUSn_OFFSET + (4*i)); + tcu_node_status |= (reg_val & 0x1) << i; + } + return tcu_node_status; +} +#endif + /* * Special value used by SVA when a process dies, to quiesce a CD without * disabling it. @@ -840,11 +873,20 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, llq.prod = queue_inc_prod_n(&llq, n); ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); if (ret) { + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + dev_err_ratelimited(smmu->dev, + "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x], TCU_NODE_STATUS=0x%016lx\n", + llq.prod, + readl_relaxed(cmdq->q.prod_reg), + readl_relaxed(cmdq->q.cons_reg), + get_tcu_node_status(smmu)); + #else dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", llq.prod, readl_relaxed(cmdq->q.prod_reg), readl_relaxed(cmdq->q.cons_reg)); + #endif } /* @@ -1348,7 +1390,14 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? 
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + if (ste_live) { + dev_dbg(master->dev, "%s:%d, smmu_dbg, duplicated stream, sharing same ste, return!\n", __func__, __LINE__); + return; + } + #else BUG_ON(ste_live); + #endif dst[1] = cpu_to_le64( FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | @@ -1367,7 +1416,14 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, } if (s2_cfg) { + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + if (ste_live) { + dev_dbg(master->dev, "%s:%d, smmu_dbg, duplicated stream, sharing same ste, return!\n", __func__, __LINE__); + return; + } + #else BUG_ON(ste_live); + #endif dst[2] = cpu_to_le64( FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | @@ -1560,6 +1616,33 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt) return ret; } +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static void eswin_smmu_irq_clear(struct arm_smmu_device *smmu, int clearbit) +{ + int bitmask; + bitmask = BIT(clearbit); + + regmap_write(smmu->regmap, smmu->smmu_irq_clear_reg, bitmask); +} + +static irqreturn_t eswin_smmu_irq_clear_handler(int irq, void *dev) +{ + struct arm_smmu_device *smmu = dev; + + if (irq == smmu->evtq.q.irq) { + eswin_smmu_irq_clear(smmu, TCU_U84_EVENT_Q_IRPT_NS_CLR_BIT); + } + else if (irq == smmu->priq.q.irq) { + eswin_smmu_irq_clear(smmu, TCU_U84_PRI_Q_IRPT_NS_CLR_BIT); + } + else { + return IRQ_NONE; + } + + return IRQ_WAKE_THREAD; +} +#endif + static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) { int i, ret; @@ -1664,6 +1747,10 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) u32 gerror, gerrorn, active; struct arm_smmu_device *smmu = dev; + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + eswin_smmu_irq_clear(smmu, TCU_U84_GLOBAL_IRPT_NS_CLR_BIT); + #endif + gerror = 
readl_relaxed(smmu->base + ARM_SMMU_GERROR); gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); @@ -2386,6 +2473,15 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master) pci_disable_pasid(pdev); } +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static void arm_smmu_attach_ref_release(struct kref *kref) +{ + /* no related data needs to be released when kref is 0 */ + pr_debug("smmu_dbg, %s\n", __func__); + return; +} +#endif + static void arm_smmu_detach_dev(struct arm_smmu_master *master) { unsigned long flags; @@ -2402,6 +2498,19 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master) master->domain = NULL; master->ats_enabled = false; + + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + /* Eswin, for generic dev only.If other masters are still attached to this domain, + * do NOT allow to change the ste right now + */ + if (!dev_is_pci(master->dev)) { + if (!kref_put(&smmu_domain->attach_refcount, arm_smmu_attach_ref_release)) { + dev_dbg(master->dev, "smmu_dbg, %s:%d, other masters are still on smmu_domain, return!\n", + __func__, __LINE__); + return; + } + } + #endif arm_smmu_install_ste_for_dev(master); } @@ -2430,7 +2539,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return -EBUSY; } + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + /* Eswin, to support duplicated streamID within one group/smmu_domain, skip detach for generic devices*/ + if (dev_is_pci(dev)) { + arm_smmu_detach_dev(master); + } + #else arm_smmu_detach_dev(master); + #endif mutex_lock(&smmu_domain->init_mutex); @@ -2441,6 +2557,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) smmu_domain->smmu = NULL; goto out_unlock; } + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + if (!dev_is_pci(dev)) { //Eswin, to support generic devices share the same streamID, i.e in the same domain + kref_init(&smmu_domain->attach_refcount); + } + #endif } else if (smmu_domain->smmu != 
smmu) { ret = -EINVAL; goto out_unlock; @@ -2453,6 +2574,17 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ret = -EINVAL; goto out_unlock; } + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + else if (!dev_is_pci(dev)){ + /* Eswin,for supportting generic devices sharing the same streamID dev. + * This domain has already been initialized by the first dev that was + * attached to this domain previously.So, only needs to increase attach_refcount. + */ + kref_get(&smmu_domain->attach_refcount); + dev_dbg(dev, "smmu_dbg, generic dev sharing the same domain, attach_refcount = %d after add 1\n", + kref_read(&smmu_domain->attach_refcount)); + } + #endif master->domain = smmu_domain; @@ -2615,8 +2747,15 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, break; } } + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + if (ret) { //duplicated streamID found, skip inserting new_stream to smmu->streams + ret = 0; + continue; + } + #else if (ret) break; + #endif rb_link_node(&new_stream->node, parent_node, new_node); rb_insert_color(&new_stream->node, &smmu->streams); @@ -2723,6 +2862,146 @@ static void arm_smmu_release_device(struct device *dev) kfree(master); } +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static void arm_smmu_group_lookup_delete(void *iommu_data) +{ + struct arm_smmu_group *smmu_group = iommu_data; + struct arm_smmu_device *smmu; + + if (WARN_ON_ONCE(smmu_group == NULL)) + return; + + if (IS_ERR(smmu_group)) + return; + + smmu = smmu_group->smmu; + mutex_lock(&smmu->smmu_groups_mutex); + rb_erase(&smmu_group->node, &smmu->smmu_groups); + mutex_unlock(&smmu->smmu_groups_mutex); + + kfree(smmu_group); +} + +static struct iommu_group *arm_smmu_group_lookup(struct device *dev) +{ + struct arm_smmu_device *smmu; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct rb_node *node; + struct arm_smmu_group *smmu_group; + u32 
sid; + + lockdep_assert_held(&smmu->smmu_groups_mutex); + if (!master) + return NULL; + + /* pick the first sid, since only one sid for each device is allowed */ + sid = fwspec->ids[0]; + smmu = master->smmu; + + node = smmu->smmu_groups.rb_node; + while (node) { + smmu_group = rb_entry(node, struct arm_smmu_group, node); + if (smmu_group->streamid < sid) + node = node->rb_right; + else if (smmu_group->streamid > sid) + node = node->rb_left; + else { + return iommu_group_ref_get(smmu_group->group); // Found, iommu_group refcnt add, then return iommu_group + } + } + + return NULL; +} + +static struct arm_smmu_group *arm_smmu_insert_to_group_lookup(struct device *dev, struct iommu_group *group) +{ + struct arm_smmu_device *smmu; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct arm_smmu_group *new_smmu_group, *cur_smmu_group; + struct rb_node **new_node, *parent_node = NULL; + + lockdep_assert_held(&smmu->smmu_groups_mutex); + if (!master) + return ERR_PTR(-EFAULT); + + smmu = master->smmu; + + new_smmu_group = kzalloc(sizeof(*new_smmu_group), GFP_KERNEL); + if (!new_smmu_group) + return ERR_PTR(-ENOMEM); + /* pick the first sid, since only one sid for each device is allowed */ + new_smmu_group->smmu = smmu; + new_smmu_group->streamid = fwspec->ids[0]; + new_smmu_group->group = group; + + new_node = &(smmu->smmu_groups.rb_node); + while (*new_node) { + cur_smmu_group = rb_entry(*new_node, struct arm_smmu_group, + node); + parent_node = *new_node; + if (cur_smmu_group->streamid > new_smmu_group->streamid) { + new_node = &((*new_node)->rb_left); + } else if (cur_smmu_group->streamid < new_smmu_group->streamid) { + new_node = &((*new_node)->rb_right); + } else { + dev_warn(dev, + "group %u already in tree\n", + cur_smmu_group->streamid); + kfree(new_smmu_group); + return ERR_PTR(-EINVAL); + } + } + + rb_link_node(&new_smmu_group->node, parent_node, new_node); + 
rb_insert_color(&new_smmu_group->node, &smmu->smmu_groups); + + return new_smmu_group; +} + +static struct iommu_group *arm_smmu_device_group(struct device *dev) +{ + struct iommu_group *group; + struct arm_smmu_device *smmu; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_group *smmu_group = NULL; + + if (!master) + return ERR_PTR(-EFAULT); + + smmu = master->smmu; + /* + * We don't support devices sharing stream IDs other than PCI RID + * aliases, since the necessary ID-to-device lookup becomes rather + * impractical given a potential sparse 32-bit stream ID space. + */ + if (dev_is_pci(dev)) + group = pci_device_group(dev); + else { + mutex_lock(&smmu->smmu_groups_mutex); + group = arm_smmu_group_lookup(dev); + if (!group) { + dev_dbg(dev, "smmu_dbg, generic dev,group was NOT found in lut, alloc new group!\n"); + group = generic_device_group(dev); + + if (group) { + smmu_group = arm_smmu_insert_to_group_lookup(dev, group); + if (!IS_ERR(smmu_group)) { + iommu_group_set_iommudata(group, smmu_group, arm_smmu_group_lookup_delete); + dev_dbg(dev, "smmu_dbg, generic dev,new smmu_group added in lut\n"); + } + } + } + else { + dev_dbg(dev, "smmu_dbg, generic dev,group was found in lut!\n"); + } + mutex_unlock(&smmu->smmu_groups_mutex); + } + + return group; +} +#else static struct iommu_group *arm_smmu_device_group(struct device *dev) { struct iommu_group *group; @@ -2739,6 +3018,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) return group; } +#endif static int arm_smmu_enable_nesting(struct iommu_domain *domain) { @@ -3099,6 +3379,11 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu) mutex_init(&smmu->streams_mutex); smmu->streams = RB_ROOT; + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + mutex_init(&smmu->smmu_groups_mutex); + smmu->smmu_groups = RB_ROOT; + #endif + ret = arm_smmu_init_queues(smmu); if (ret) return ret; @@ -3205,10 +3490,17 @@ static void 
arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) /* Request interrupt lines */ irq = smmu->evtq.q.irq; if (irq) { + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + ret = devm_request_threaded_irq(smmu->dev, irq, eswin_smmu_irq_clear_handler, + arm_smmu_evtq_thread, + IRQF_ONESHOT, + "arm-smmu-v3-evtq", smmu); + #else ret = devm_request_threaded_irq(smmu->dev, irq, NULL, arm_smmu_evtq_thread, IRQF_ONESHOT, "arm-smmu-v3-evtq", smmu); + #endif if (ret < 0) dev_warn(smmu->dev, "failed to enable evtq irq\n"); } else { @@ -3228,11 +3520,19 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_PRI) { irq = smmu->priq.q.irq; if (irq) { + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + ret = devm_request_threaded_irq(smmu->dev, irq, eswin_smmu_irq_clear_handler, + arm_smmu_priq_thread, + IRQF_ONESHOT, + "arm-smmu-v3-priq", + smmu); + #else ret = devm_request_threaded_irq(smmu->dev, irq, NULL, arm_smmu_priq_thread, IRQF_ONESHOT, "arm-smmu-v3-priq", smmu); + #endif if (ret < 0) dev_warn(smmu->dev, "failed to enable priq irq\n"); @@ -3523,8 +3823,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (reg & IDR0_HYP) { smmu->features |= ARM_SMMU_FEAT_HYP; + #ifdef CONFIG_ARM64 if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN)) smmu->features |= ARM_SMMU_FEAT_E2H; + #endif } /* @@ -3797,6 +4099,87 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu) iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); } +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static int eswin_smmu_reset_release(struct arm_smmu_device *smmu) +{ + int ret = 0; + int i; + char tbu_rst_name[16] = {0}; + struct device *dev = smmu->dev; + struct eswin_smmu_reset_control *eswin_smmu_rst_ctl_p = &smmu->eswin_smmu_rst_ctl; + + dev_dbg(dev, "Try %s !\n", __func__); + + eswin_smmu_rst_ctl_p->smmu_axi_rst = devm_reset_control_get_optional(dev, "axi_rst"); + if 
(IS_ERR_OR_NULL(eswin_smmu_rst_ctl_p->smmu_axi_rst)) { + dev_err(dev, "Failed to get eswin smmu_axi_rst handle\n"); + return -EFAULT; + } + + eswin_smmu_rst_ctl_p->smmu_cfg_rst = devm_reset_control_get_optional(dev, "cfg_rst"); + if (IS_ERR_OR_NULL(eswin_smmu_rst_ctl_p->smmu_cfg_rst)) { + dev_err(dev, "Failed to get eswin smmu_cfg_rst handle\n"); + return -EFAULT; + } + + for(i = 0; i < ESWIN_MAX_TBU_COUNT; i++) { + snprintf(tbu_rst_name, sizeof(tbu_rst_name), "tbu%d_rst", i); + eswin_smmu_rst_ctl_p->tbu_rst[i] = devm_reset_control_get_optional(dev, tbu_rst_name); + if (IS_ERR_OR_NULL(eswin_smmu_rst_ctl_p->tbu_rst[i])) { + dev_err(dev, "Failed to get eswin %s handle\n", tbu_rst_name); + return -EFAULT; + } + } + + // The order of the reset must be TCU_cfg_rst ---> TCU_axi_rst ---> TBU_rst + ret = reset_control_reset(eswin_smmu_rst_ctl_p->smmu_cfg_rst); + WARN_ON(0 != ret); + + ret = reset_control_reset(eswin_smmu_rst_ctl_p->smmu_axi_rst); + WARN_ON(0 != ret); + + for(i = 0; i < ESWIN_MAX_TBU_COUNT; i++) { + ret = reset_control_reset(eswin_smmu_rst_ctl_p->tbu_rst[i]); + WARN_ON(0 != ret); + } + + dev_dbg(dev, "%s successfully!\n", __func__); + + return ret; +} + +static int eswin_smmu_reset_assert(struct arm_smmu_device *smmu) +{ + int ret = 0; + int i; + struct device *dev = smmu->dev; + struct eswin_smmu_reset_control *eswin_smmu_rst_ctl_p = &smmu->eswin_smmu_rst_ctl; + + dev_dbg(dev, "Try %s !\n", __func__); + + for(i = 0; i < ESWIN_MAX_TBU_COUNT; i++) { + if (eswin_smmu_rst_ctl_p->tbu_rst[i]) { + ret = reset_control_assert(eswin_smmu_rst_ctl_p->tbu_rst[i]); + WARN_ON(0 != ret); + } + } + + if (eswin_smmu_rst_ctl_p->smmu_axi_rst) { + ret = reset_control_assert(eswin_smmu_rst_ctl_p->smmu_axi_rst); + WARN_ON(0 != ret); + } + + if (eswin_smmu_rst_ctl_p->smmu_cfg_rst) { + ret = reset_control_assert(eswin_smmu_rst_ctl_p->smmu_cfg_rst); + WARN_ON(0 != ret); + } + + dev_dbg(dev, "%s successfully!\n", __func__); + + return ret; +} +#endif + static int 
arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -3849,6 +4232,34 @@ static int arm_smmu_device_probe(struct platform_device *pdev) smmu->page1 = smmu->base; } + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + /* eswin, map the tcu microarchitectural register region */ + smmu->s_base = arm_smmu_ioremap(dev, ioaddr + ARM_SMMU_S_BASE, ARM_SMMU_S_AND_TCU_MICRO_REG_SZ); + if (IS_ERR(smmu->s_base)) + return PTR_ERR(smmu->s_base); + + /* eswin, release the reset of smmu */ + ret = eswin_smmu_reset_release(smmu); + if (ret) { + dev_err(dev, "failed to release the reset of SMMU\n"); + return ret; + } + + /* eswin, syscon devie is used for clearing the smmu interrupt */ + smmu->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "eswin,syscfg"); + if (IS_ERR(smmu->regmap)) { + dev_err(smmu->dev, "No syscfg phandle specified\n"); + return PTR_ERR(smmu->regmap); + } + + ret = of_property_read_u32_index(dev->of_node, "eswin,syscfg", ESWIN_SMMU_IRQ_CLEAR_REG, + &smmu->smmu_irq_clear_reg); + if (ret) { + dev_err(dev, "can't get SMMU irq clear reg offset (%d)\n", ret); + return ret; + } + #endif + /* Interrupt lines */ irq = platform_get_irq_byname_optional(pdev, "combined"); @@ -3913,6 +4324,11 @@ static void arm_smmu_device_remove(struct platform_device *pdev) arm_smmu_device_disable(smmu); iopf_queue_free(smmu->evtq.iopf); ida_destroy(&smmu->vmid_map); + + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + /* eswin, hold the reset of the smmu */ + eswin_smmu_reset_assert(smmu); + #endif } static void arm_smmu_device_shutdown(struct platform_device *pdev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 9915850dd4db..25a73a177b59 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -160,6 +160,19 @@ #define ARM_SMMU_REG_SZ 0xe00 +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +/* Secure programming register */ +#define 
ARM_SMMU_S_BASE 0x8000 +#define ARM_SMMU_S_INIT_OFFSET (0x803c - ARM_SMMU_S_BASE) +/* TCU Microarchitectural registers */ +#define ARM_SMMU_TCU_QOS_OFFSET (0x8e04 - ARM_SMMU_S_BASE) +#define ARM_SMMU_TCU_NODE_STATUSn_OFFSET (0x9400 - ARM_SMMU_S_BASE) + +#define ARM_SMMU_S_AND_TCU_MICRO_REG_SZ (0x9800 - ARM_SMMU_S_BASE) + +#define ARM_SMMU_TCU_QOS_VALUE 0xffffffff +#endif + /* Common MSI config fields */ #define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) #define MSI_CFG2_SH GENMASK(5, 4) @@ -620,11 +633,23 @@ struct arm_smmu_strtab_cfg { u32 strtab_base_cfg; }; +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +#define ESWIN_MAX_TBU_COUNT 8 +struct eswin_smmu_reset_control { + struct reset_control* smmu_axi_rst; + struct reset_control* smmu_cfg_rst; + struct reset_control* tbu_rst[ESWIN_MAX_TBU_COUNT]; +}; +#endif + /* An SMMUv3 instance */ struct arm_smmu_device { struct device *dev; void __iomem *base; void __iomem *page1; + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + void __iomem *s_base; + #endif #define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0) #define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1) @@ -682,6 +707,17 @@ struct arm_smmu_device { struct rb_root streams; struct mutex streams_mutex; + + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + /* eswin, to support duplicated streamID */ + struct rb_root smmu_groups; + struct mutex smmu_groups_mutex; + + /* eswin, syscon device is used for clearing the smmu interrupt*/ + struct regmap *regmap; + int smmu_irq_clear_reg; + struct eswin_smmu_reset_control eswin_smmu_rst_ctl; + #endif }; struct arm_smmu_stream { @@ -690,6 +726,15 @@ struct arm_smmu_stream { struct rb_node node; }; +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +struct arm_smmu_group { + u32 streamid; + struct arm_smmu_device *smmu; + struct iommu_group *group; + struct rb_node node; +}; +#endif + /* SMMU private data for each master */ struct arm_smmu_master { struct arm_smmu_device *smmu; @@ -734,6 +779,9 @@ struct arm_smmu_domain { 
spinlock_t devices_lock; struct list_head mmu_notifiers; + #if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) + struct kref attach_refcount; + #endif }; static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) diff --git a/drivers/iommu/eswin/Makefile b/drivers/iommu/eswin/Makefile new file mode 100644 index 000000000000..563a2ca1a91d --- /dev/null +++ b/drivers/iommu/eswin/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) += eswin-win2030-sid.o diff --git a/drivers/iommu/eswin/eswin-win2030-sid.c b/drivers/iommu/eswin/eswin-win2030-sid.c new file mode 100644 index 000000000000..24fc76fe7272 --- /dev/null +++ b/drivers/iommu/eswin/eswin-win2030-sid.c @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 ESWIN. All rights reserved. + * Author: Lin Min + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void trigger_waveform_ioremap_resource(void); + +#define DYMN_CSR_EN_REG_OFFSET 0x0 +#define DYMN_CSR_GNT_REG_OFFSET 0x4 + +#define MCPU_SP0_DYMN_CSR_EN_BIT 3 +#define MCPU_SP0_DYMN_CSR_GNT_BIT 3 + +#define AWSMMUSID GENMASK(31, 24) // The sid of write operation +#define AWSMMUSSID GENMASK(23, 16) // The ssid of write operation +#define ARSMMUSID GENMASK(15, 8) // The sid of read operation +#define ARSMMUSSID GENMASK(7, 0) // The ssid of read operation + +struct win2030_sid_client { + const char *name; + unsigned int sid; + unsigned int reg_offset; +}; + +struct win2030_sid_soc { + const struct win2030_sid_client *clients; + unsigned int num_clients; +}; + +/* The syscon registers for tbu power up(down) must be configured so that + tcu can be aware of tbu up and down. 
+ + */ +struct tbu_reg_cfg_info { + unsigned int reg_offset; + unsigned int qreqn_pd_bit; + unsigned int qacceptn_pd_bit; +}; + +struct tbu_priv { + atomic_t refcount; + int nid; + const struct win2030_tbu_client *tbu_client_p; + struct mutex tbu_priv_lock; +}; + +struct win2030_tbu_client { + /* tbu_id: bit[3:0] is for major ID, bit[7:4] is for minor ID; + For example, tbu of dsp3 is tbu7_3, the tbu_ID is 0x73. It measn tbu7_3 + */ + u32 tbu_id; + struct tbu_reg_cfg_info tbu_reg_info; + int (*tbu_power_ctl_register) (int nid, struct tbu_priv *tbu_priv_p, bool is_powerUp); +}; + +struct win2030_tbu_soc { + const struct win2030_tbu_client *tbu_clients; + unsigned int num_tbuClients; +}; + + + +struct tbu_power_soc { + struct tbu_priv *tbu_priv_array; + unsigned int num_tbuClients; +}; + +struct win2030_sid { + void __iomem *regs; + resource_size_t start; + const struct win2030_sid_soc *soc; + struct mutex eswin_dynm_sid_cfg_en_lock; + struct tbu_power_soc *tbu_power_soc; + struct mutex tbu_reg_lock; +}; +struct win2030_sid *syscon_sid_cfg[MAX_NUMNODES] = {NULL}; + +static int win2030_tbu_power_ctl_register(int nid, struct tbu_priv *tbu_priv_p, bool is_powerUp); + +static int win2030_tbu_powr_priv_init(struct tbu_power_soc **tbu_power_soc_pp, int nid); +static int ioremap_tcu_resource(void); +void print_tcu_node_status(const char *call_name, int call_line); +static int __init tcu_proc_init(void); + +int win2030_dynm_sid_enable(int nid) +{ + unsigned long reg_val; + struct win2030_sid *mc = NULL; + + if (nid == NUMA_NO_NODE) { + #ifdef CONFIG_NUMA + pr_err("%s:%d, NUMA_NO_NODE\n", __func__, __LINE__); + return -EFAULT; + #else + pr_info("%s:%d, NUMA_NO_NODE, single DIE\n", __func__, __LINE__); + nid = 0; + #endif + } + + mc = syscon_sid_cfg[nid]; + if (mc == NULL) + return -EFAULT; + + mutex_lock(&mc->eswin_dynm_sid_cfg_en_lock); + reg_val = readl(mc->regs + DYMN_CSR_EN_REG_OFFSET); + set_bit(MCPU_SP0_DYMN_CSR_EN_BIT, ®_val); + writel(reg_val, mc->regs + 
DYMN_CSR_EN_REG_OFFSET); + + while(1) { + reg_val = readl(mc->regs + DYMN_CSR_GNT_REG_OFFSET) & (1 << MCPU_SP0_DYMN_CSR_GNT_BIT); + if (reg_val) + break; + + msleep(10); + } + reg_val = readl(mc->regs + DYMN_CSR_EN_REG_OFFSET); + clear_bit(MCPU_SP0_DYMN_CSR_EN_BIT, ®_val); + writel(reg_val, mc->regs + DYMN_CSR_EN_REG_OFFSET); + mutex_unlock(&mc->eswin_dynm_sid_cfg_en_lock); + + return 0; +} +EXPORT_SYMBOL(win2030_dynm_sid_enable); + +int win2030_aon_sid_cfg(struct device *dev) +{ + int ret = 0; + struct regmap *regmap; + int aon_sid_reg; + u32 rdwr_sid_ssid; + u32 sid; + int i,sid_count; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct device_node *np_syscon; + int syscon_cell_size = 0; + + /* not behind smmu, use the default reset value(0x0) of the reg as streamID*/ + if (fwspec == NULL) { + dev_info(dev, "dev is not behind smmu, skip configuration of sid\n"); + return 0; + } + + regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "eswin,syscfg"); + if (IS_ERR(regmap)) { + dev_err(dev, "No eswin,syscfg phandle specified\n"); + return -1; + } + + np_syscon = of_parse_phandle(dev->of_node, "eswin,syscfg", 0); + if (np_syscon) { + if (of_property_read_u32(np_syscon, "#syscon-cells", &syscon_cell_size)) { + of_node_put(np_syscon); + dev_err(dev, "failed to get #syscon-cells of sys_con\n"); + return -1; + } + of_node_put(np_syscon); + } + + sid_count = of_count_phandle_with_args(dev->of_node, + "eswin,syscfg", "#syscon-cells"); + + dev_dbg(dev, "sid_count=%d, fwspec->num_ids=%d, syscon_cell_size=%d\n", + sid_count, fwspec->num_ids, syscon_cell_size); + + if (sid_count < 0) { + dev_err(dev, "failed to parse eswin,syscfg property!\n"); + return -1; + } + + if (fwspec->num_ids != sid_count) { + dev_err(dev, "num_ids(%d) is NOT equal to num of sid regs(%d)!\n", + fwspec->num_ids, sid_count); + return -1; + } + + for (i = 0; i < sid_count; i++) { + sid = fwspec->ids[i]; + ret = of_property_read_u32_index(dev->of_node, "eswin,syscfg", (syscon_cell_size 
+ 1)*i+1, + &aon_sid_reg); + if (ret) { + dev_err(dev, "can't get sid cfg reg offset in sys_con(errno:%d)\n", ret); + return ret; + } + + /* make the reading sid the same as writing sid, ssid is fixed to zero */ + rdwr_sid_ssid = FIELD_PREP(AWSMMUSID, sid); + rdwr_sid_ssid |= FIELD_PREP(ARSMMUSID, sid); + rdwr_sid_ssid |= FIELD_PREP(AWSMMUSSID, 0); + rdwr_sid_ssid |= FIELD_PREP(ARSMMUSSID, 0); + regmap_write(regmap, aon_sid_reg, rdwr_sid_ssid); + + ret = win2030_dynm_sid_enable(dev_to_node(dev)); + if (ret < 0) + dev_err(dev, "failed to config streamID(%d) for %s!\n", sid, of_node_full_name(dev->of_node)); + else + dev_info(dev, "success to config dma streamID(%d) for %s!\n", sid, of_node_full_name(dev->of_node)); + } + + return ret; +} +EXPORT_SYMBOL(win2030_aon_sid_cfg); + +static int of_parse_syscon_nodes(struct device_node *np, int *nid_p) +{ + #ifdef CONFIG_NUMA + int nid; + int r; + + r = of_property_read_u32(np, "numa-node-id", &nid); + if (r) + return -EINVAL; + + pr_debug("Syscon on %u\n", nid); + if (nid >= MAX_NUMNODES) { + pr_warn("Node id %u exceeds maximum value\n", nid); + return -EINVAL; + } + else + *nid_p = nid; + #else + *nid_p = 0; + #endif + + pr_debug("%s, nid = %d\n", __func__, *nid_p); + + return 0; +} + +#if 0 +static int win2030_program_sid(int nid) +{ + unsigned int i; + u32 rdwr_sid_ssid; + struct win2030_sid *mc = NULL; + int ret = 0; + + if (nid == NUMA_NO_NODE) { + #ifdef CONFIG_NUMA + pr_err("%s:%d, NUMA_NO_NODE\n", __func__, __LINE__); + return -EFAULT; + #else + pr_debug("%s:%d, NUMA_NO_NODE, single DIE\n", __func__, __LINE__); + nid = 0; + #endif + } + + mc = syscon_sid_cfg[nid]; + if (mc == NULL) + return -EFAULT; + + for (i = 0; i < mc->soc->num_clients; i++) { + const struct win2030_sid_client *client = &mc->soc->clients[i]; + + /* make the reading sid the same as writing sid, ssid is fixed to zero */ + rdwr_sid_ssid = FIELD_PREP(AWSMMUSID, client->sid); + rdwr_sid_ssid |= FIELD_PREP(ARSMMUSID, client->sid); + rdwr_sid_ssid |= 
FIELD_PREP(AWSMMUSSID, 0); + rdwr_sid_ssid |= FIELD_PREP(ARSMMUSSID, 0); + pr_debug("smmu_dbg, setting SID %u for %s\n", client->sid, + client->name); + writel(rdwr_sid_ssid, mc->regs + client->reg_offset); + + rdwr_sid_ssid = readl(mc->regs + client->reg_offset); + + pr_debug( "smmu_dbg, client %s: rdwr_sid_ssid: 0x%x\n", + client->name, rdwr_sid_ssid); + } + + ret = win2030_dynm_sid_enable(nid); + if (ret < 0) + pr_err( "smmu_dbg, %s 0x%0llx fail!\n", __func__, mc->start); + else + pr_info( "smmu_dbg, %s 0x%0llx done!\n", __func__, mc->start); + + return ret; +} +#endif + +#if IS_ENABLED(CONFIG_ARCH_ESWIN_EIC770X_SOC_FAMILY) +static const struct win2030_sid_client win2030_sid_clients[] = { + { + .name = "scpu", + .sid = WIN2030_SID_SCPU, + .reg_offset = SCPU_SID_REG_OFFSET, + }, + /* remove the configuration for lcpu, it should be configured by lcpu driver + * since there are clk&reset needs to be configured before setting the + streamID register in sys_con. + */ + //{ + // .name = "lcpu", + // .sid = WIN2030_SID_LCPU, + // .reg_offset = LCPU_SID_REG_OFFSET, + // }, + { + .name = "dma1", + .sid = WIN2030_SID_DMA1, + .reg_offset = DMA1_SID_REG_OFFSET, + }, { + .name = "crypt", + .sid = WIN2030_SID_CRYPT, + .reg_offset = CRYPT_SID_REG_OFFSET, + } +}; + +static const struct win2030_sid_soc win2030_sid_soc = { + .num_clients = ARRAY_SIZE(win2030_sid_clients), + .clients = win2030_sid_clients, +}; +#endif + +static const struct of_device_id win2030_sid_of_match[] = { + { .compatible = "eswin,win2030-scu-sys-con", .data = &win2030_sid_soc }, + { /* sentinel */ } +}; + +static int __init win2030_init_streamID(void) +{ + const struct of_device_id *match; + struct device_node *root, *child = NULL; + struct resource regs; + struct win2030_sid *mc = NULL; + int nid; + int ret = 0; + + /* Mapping trigger reg base for capturing wave in zebu */ + trigger_waveform_ioremap_resource(); + + root = of_find_node_by_name(NULL, "soc"); + for_each_child_of_node(root, child) { + match = 
of_match_node(win2030_sid_of_match, child); + if (match && of_node_get(child)) { + if (of_address_to_resource(child, 0, ®s) < 0) { + pr_err("failed to get scu register\n"); + of_node_put(child); + ret = -ENXIO; + break; + } + if (of_parse_syscon_nodes(child, &nid) < 0) { + pr_err("failed to get numa-node-id\n"); + of_node_put(child); + ret = -ENXIO; + break; + } + + /* program scu sreamID related registers */ + mc = kzalloc(sizeof(*mc), GFP_KERNEL); + if (!mc) { + of_node_put(child); + pr_err("failed to kzalloc\n"); + ret = -ENOMEM; + break; + } + + mc->soc = match->data; + mc->regs = ioremap(regs.start, resource_size(®s)); + if (IS_ERR(mc->regs)) { + pr_err("failed to ioremap scu reges\n"); + of_node_put(child); + ret = PTR_ERR(mc->regs); + kfree(mc); + break; + } + mc->start = regs.start; + mutex_init(&mc->eswin_dynm_sid_cfg_en_lock); + + if (win2030_tbu_powr_priv_init(&mc->tbu_power_soc, nid)) { + pr_err("failed to kzalloc for tbu_power_priv_arry\n"); + of_node_put(child); + iounmap(mc->regs); + kfree(mc); + ret = -ENOMEM; + WARN_ON(1); + break; + } + mutex_init(&mc->tbu_reg_lock); + + syscon_sid_cfg[nid] = mc; + pr_debug("%s, syscon_sid_cfg[%d] addr is 0x%px\n", __func__, nid, syscon_sid_cfg[nid]); + + /* sid configuration was moved into each driver, so skip win2030_program_sid*/ + // win2030_program_sid(nid); + + of_node_put(child); + } + } + of_node_put(root); + + return ret; +} + +early_initcall(win2030_init_streamID); + + + +static const struct win2030_tbu_client win2030_tbu_clients[] = { + { + .tbu_id = WIN2030_TBUID_0x0, // ISP, DW200 share the tbu0 + .tbu_reg_info = {0x3d8, 7, 6}, + .tbu_power_ctl_register = win2030_tbu_power_ctl_register, + }, + { + .tbu_id = WIN2030_TBUID_0x10, // tbu1_0 is only for video decoder + .tbu_reg_info = {0x3d4, 31, 30}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x11, // tbu1_1 is only video encoder + .tbu_reg_info = {0x3d4, 23, 22}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = 
WIN2030_TBUID_0x12, // tbu1_2 is only Jpeg encoder + .tbu_reg_info = {0x3d4, 7, 6}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x13, // tbu1_3 is only Jpeg decoder + .tbu_reg_info = {0x3d4, 15, 14}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x2, // Ethernet, sata, usb, dma0, emmc, sd, sdio share the tbu2 + .tbu_reg_info = {0x3d8, 15, 14}, + .tbu_power_ctl_register = win2030_tbu_power_ctl_register, + }, + { + .tbu_id = WIN2030_TBUID_0x3, // tbu3 is only for pcie + .tbu_reg_info = {0x3d8, 23, 22}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x4, // scpu, crypto, lpcpu, dma1 share the tbu4 + .tbu_reg_info = {0x3d8, 31, 30}, + .tbu_power_ctl_register = win2030_tbu_power_ctl_register, + }, + { + .tbu_id = WIN2030_TBUID_0x5, // tbu5 is only NPU + .tbu_reg_info = {0x3d0, 15, 14}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x70, // tbu7_0 is only dsp0 + .tbu_reg_info = {0x3f8, 7, 6}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x71, // tbu7_1 is only dsp1 + .tbu_reg_info = {0x3f8, 15, 14}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x72, // tbu7_2 is only dsp2 + .tbu_reg_info = {0x3f8, 23, 22}, + .tbu_power_ctl_register = NULL, + }, + { + .tbu_id = WIN2030_TBUID_0x73, // tbu7_3 is only dsp3 + .tbu_reg_info = {0x3f8, 31, 30}, + .tbu_power_ctl_register = NULL, + }, +}; + +static const struct win2030_tbu_soc win2030_tbu_soc = { + .num_tbuClients = ARRAY_SIZE(win2030_tbu_clients), + .tbu_clients = win2030_tbu_clients, +}; + +static int __do_win2030_tbu_power_ctl(int nid, bool is_powerUp, const struct tbu_reg_cfg_info *tbu_reg_info_p) +{ + int ret = 0; + unsigned long reg_val; + struct win2030_sid *mc = NULL; + int loop_cnt = 0; + // int bitmask; = BIT(clearbit); + mc = syscon_sid_cfg[nid]; + if (mc == NULL) + return -EFAULT; + + mutex_lock(&mc->tbu_reg_lock); + if (is_powerUp) { + reg_val = readl(mc->regs + 
tbu_reg_info_p->reg_offset); + set_bit(tbu_reg_info_p->qreqn_pd_bit, ®_val); + writel(reg_val, mc->regs + tbu_reg_info_p->reg_offset); + pr_debug("reg_offset=0x%03x, tbu_val=0x%x\n", + tbu_reg_info_p->reg_offset, readl(mc->regs + tbu_reg_info_p->reg_offset)); + pr_debug("%s, power up!\n", __func__); + } + else { + reg_val = readl(mc->regs + tbu_reg_info_p->reg_offset); + clear_bit(tbu_reg_info_p->qreqn_pd_bit, ®_val); + writel(reg_val, mc->regs + tbu_reg_info_p->reg_offset); + do { + reg_val = readl(mc->regs + tbu_reg_info_p->reg_offset); + pr_debug("reg_offset=0x%03x, tbu_val=0x%lx, BIT(qacceptn_pd_bit)=0x%lx\n", + tbu_reg_info_p->reg_offset, reg_val, BIT(tbu_reg_info_p->qacceptn_pd_bit)); + if ((reg_val & BIT(tbu_reg_info_p->qacceptn_pd_bit)) == 0) { + pr_debug("%s, power down!\n", __func__); + break; + } + mdelay(10); + loop_cnt++; + if (loop_cnt > 10) { + WARN_ON(1); // it should never happen. + break; + } + }while (1); + + if(loop_cnt > 10) { + ret = -1; + } + } + mutex_unlock(&mc->tbu_reg_lock); + + return ret; +} + +#define do_win2030_tbu_power_up(nid, tbu_reg_info_p) __do_win2030_tbu_power_ctl(nid, true, tbu_reg_info_p) +#define do_win2030_tbu_power_down(nid, tbu_reg_info_p) __do_win2030_tbu_power_ctl(nid, false, tbu_reg_info_p) + + + +static int tbu_power_down_ref_release(atomic_t *ref) +{ + int ret = 0; + struct tbu_priv *tbu_priv_p = container_of(ref, struct tbu_priv, refcount); + int nid = tbu_priv_p->nid; + const struct tbu_reg_cfg_info *tbu_reg_info_p = &tbu_priv_p->tbu_client_p->tbu_reg_info; + + WARN_ON(!tbu_priv_p); + if (!tbu_priv_p) + return -1; + + ret = do_win2030_tbu_power_down(nid, tbu_reg_info_p); + + return ret; +} + +static int win2030_tbu_power_ctl_register(int nid, struct tbu_priv *tbu_priv_p, bool is_powerUp) +{ + int ret = 0; + const struct win2030_tbu_client *tbu_client_p = tbu_priv_p->tbu_client_p; + const struct tbu_reg_cfg_info *tbu_reg_info_p = &tbu_priv_p->tbu_client_p->tbu_reg_info; + unsigned int old_refcount; + + 
mutex_lock(&tbu_priv_p->tbu_priv_lock); + old_refcount = atomic_read(&tbu_priv_p->refcount); + + pr_debug("%s, nid=%d, is_powerUp=%d, tbu_priv_p addr is 0x%px\n", + __func__, nid, is_powerUp, tbu_priv_p); + if (is_powerUp == false) { //power down + if (unlikely(0 == old_refcount)) { + pr_debug("%s, tbu_id 0x%02x is down already!\n", __func__, tbu_client_p->tbu_id); + goto tbu_finish; + } + + if (atomic_sub_return(1, &tbu_priv_p->refcount) == 0) { + ret = tbu_power_down_ref_release(&tbu_priv_p->refcount); + } + else { + pr_debug("Can't power down tbu 0x%02x, it's used by other modules right now!\n", + tbu_client_p->tbu_id); + } + + } + else { //power up + if (0 == old_refcount) { + ret = do_win2030_tbu_power_up(nid, tbu_reg_info_p); + } + else { + pr_debug("tbu 0x%02x is already power up!", tbu_client_p->tbu_id); + } + atomic_add(1, &tbu_priv_p->refcount); + } + +tbu_finish: + mutex_unlock(&tbu_priv_p->tbu_priv_lock); + + return ret; + +} + +static int win2030_tbu_powr_priv_init(struct tbu_power_soc **tbu_power_soc_pp, int nid) +{ + int ret = 0; + int i; + unsigned int num_tbuClients = win2030_tbu_soc.num_tbuClients; + struct tbu_power_soc *tbu_power_soc_p; + struct tbu_priv *tbu_priv_p; + unsigned int alloc_size; + + pr_debug("%s:%d\n", __func__, __LINE__); + + tbu_power_soc_p = kzalloc(sizeof(struct tbu_power_soc), GFP_KERNEL); + if (!tbu_power_soc_p) + return -ENOMEM; + pr_debug("%s:%d, tbu_power_soc_p(0x%px)\n", __func__, __LINE__, tbu_power_soc_p); + + alloc_size = num_tbuClients * sizeof(struct tbu_priv); + tbu_priv_p = kzalloc(alloc_size, GFP_KERNEL); + if (!tbu_priv_p) { + ret = -ENOMEM; + goto err_tbu_priv_p; + } + tbu_power_soc_p->tbu_priv_array = tbu_priv_p; + pr_debug("%s:%d, num_tbu=%d,sizeof(struct tbu_priv)=0x%lx, alloc_size=0x%x, tbu_priv_p=0x%px\n", + __func__, __LINE__, num_tbuClients, sizeof(struct tbu_priv), alloc_size, tbu_priv_p); + + for (i = 0; i < win2030_tbu_soc.num_tbuClients; i++) { + tbu_priv_p->nid = nid; + 
atomic_set(&tbu_priv_p->refcount, 0); + tbu_priv_p->tbu_client_p = &win2030_tbu_soc.tbu_clients[i]; + mutex_init(&tbu_priv_p->tbu_priv_lock); + pr_debug("%s, nid %d, tbu 0x%02x, tbu_priv_p(0x%px), sizeof(struct tbu_priv)=0x%lx\n", __func__, nid, tbu_priv_p->tbu_client_p->tbu_id, tbu_priv_p, sizeof(struct tbu_priv)); + tbu_priv_p++; + } + tbu_power_soc_p->num_tbuClients = num_tbuClients; + + *tbu_power_soc_pp = tbu_power_soc_p; + + ret = ioremap_tcu_resource(); + if (ret) { + WARN_ON(1); + } + ret = tcu_proc_init(); + if (ret) { + pr_err("failed to create proc for tcu!!!\n"); + } + print_tcu_node_status(__func__, __LINE__); + pr_info("%s finished!\n", __func__); + + return 0; + +err_tbu_priv_p: + kfree(tbu_power_soc_p); + + return ret; + +} + +static int win2030_get_tbu_priv(int nid, u32 tbu_id, struct tbu_priv **tbu_priv_pp) +{ + int i; + struct win2030_sid *mc = syscon_sid_cfg[nid]; + struct tbu_power_soc *tbu_power_soc_p = mc->tbu_power_soc; + struct tbu_priv *tbu_priv_p = tbu_power_soc_p->tbu_priv_array; + + pr_debug("%s, syscon_sid_cfg[%d] addr is 0x%px, tbu_id=0x%02x, tbu_power_soc_p is 0x%px\n", + __func__, nid, syscon_sid_cfg[nid], tbu_id, tbu_power_soc_p); + + for (i = 0; i < tbu_power_soc_p->num_tbuClients; i++) { + if (tbu_id == tbu_priv_p->tbu_client_p->tbu_id) { + *tbu_priv_pp = tbu_priv_p; + pr_debug("%s, found tbu_id 0x%02x, tbu_priv_array[%d] tbu_priv_p is 0x%px\n", + __func__, tbu_id, i, tbu_priv_p); + return 0; + } + tbu_priv_p++; + } + + return -1; +} + +/*********************************************************************************************** + win2030_tbu_power(struct device *dev, bool is_powerUp) is for powering up or down + the tbus of the device module which is under smmu. + Drivers should call win2030_tbu_power(dev, true) when probing afer clk of the tbu is on, + and call call win2030_tbu_power(dev, false) when removing driver before clk of the tbu is off. 
+ + Input: + struct device *dev The struct device of the driver that calls this API. + bool is_powerUp true: power up the tbus; false: power down the tbus. + Return: + zero: successfully power up/down + none zero: faild to power up/down +***********************************************************************************************/ +int win2030_tbu_power(struct device *dev, bool is_powerUp) +{ + int ret = 0; + struct device_node *node = dev->of_node; + int nid = dev_to_node(dev); + u32 tbu_id; + const struct win2030_tbu_client *tbu_client_p = NULL; + struct tbu_priv *tbu_priv_p; + struct property *prop; + const __be32 *cur; + int tbu_num = 0; + + if (nid == NUMA_NO_NODE) { + #ifdef CONFIG_NUMA + pr_err("%s:%d, NUMA_NO_NODE\n", __func__, __LINE__); + return -EFAULT; + #else + pr_info("%s:%d, NUMA_NO_NODE, single DIE\n", __func__, __LINE__); + nid = 0; + #endif + } + + pr_debug("%s called!\n", __func__); + of_property_for_each_u32(node, "tbus", prop, cur, tbu_id) { + pr_debug("tbus = <0x%02x>\n", tbu_id); + if (0 == win2030_get_tbu_priv(nid, tbu_id, &tbu_priv_p)) { + tbu_client_p = tbu_priv_p->tbu_client_p; + if (tbu_client_p->tbu_power_ctl_register) { + ret = tbu_client_p->tbu_power_ctl_register(nid, tbu_priv_p, is_powerUp); + if (ret) + return ret; + } + else { + ret = __do_win2030_tbu_power_ctl(nid, is_powerUp, &tbu_client_p->tbu_reg_info); + if (ret) + return ret; + } + tbu_num++; + } + else if (tbu_id == WIN2030_TBUID_0xF00) { + tbu_num++; + } + else { + pr_err("tbu power ctl failed!, Couldn't find tbu 0x%x\n", tbu_id); + return -1; + } + } + + if (tbu_num == 0) { + pr_err("Err,tbu NOT defined in dts!!!!\n"); + WARN_ON(1); + } + + return ret; +} +EXPORT_SYMBOL(win2030_tbu_power); + +int win2030_tbu_power_by_dev_and_node(struct device *dev, struct device_node *node, bool is_powerUp) +{ + int ret = 0; + int nid = dev_to_node(dev); + u32 tbu_id; + const struct win2030_tbu_client *tbu_client_p = NULL; + struct tbu_priv *tbu_priv_p; + struct property *prop; + const 
__be32 *cur; + int tbu_num = 0; + + if (nid == NUMA_NO_NODE) { + #ifdef CONFIG_NUMA + pr_err("%s:%d, NUMA_NO_NODE\n", __func__, __LINE__); + return -EFAULT; + #else + pr_info("%s:%d, NUMA_NO_NODE, single DIE\n", __func__, __LINE__); + nid = 0; + #endif + } + + pr_debug("%s called!\n", __func__); + of_property_for_each_u32(node, "tbus", prop, cur, tbu_id) { + pr_debug("tbus = <0x%02x>\n", tbu_id); + if (0 == win2030_get_tbu_priv(nid, tbu_id, &tbu_priv_p)) { + tbu_client_p = tbu_priv_p->tbu_client_p; + if (tbu_client_p->tbu_power_ctl_register) { + ret = tbu_client_p->tbu_power_ctl_register(nid, tbu_priv_p, is_powerUp); + if (ret) + return ret; + } + else { + ret = __do_win2030_tbu_power_ctl(nid, is_powerUp, &tbu_client_p->tbu_reg_info); + if (ret) + return ret; + } + tbu_num++; + } + else if (tbu_id == WIN2030_TBUID_0xF00) { + tbu_num++; + } + else { + pr_err("tbu power ctl failed!, Couldn't find tbu 0x%x\n", tbu_id); + return -1; + } + } + + if (tbu_num == 0) { + pr_err("Err,tbu NOT defined in dts!!!!\n"); + WARN_ON(1); + } + + return ret; +} +EXPORT_SYMBOL(win2030_tbu_power_by_dev_and_node); + +#define WAVE_TRIGGER_REG_OFFSET 0x668 +#define WAVE_TRIGGER_REG_BASE 0x51810000 +static void __iomem *trigger_reg_base; +static void trigger_waveform_ioremap_resource(void) +{ + trigger_reg_base = ioremap(WAVE_TRIGGER_REG_BASE, PAGE_SIZE); +} + +void trigger_waveform_start(void) +{ + printk("trigger waveform capture!\n"); + + writel(0x8000, trigger_reg_base + WAVE_TRIGGER_REG_OFFSET); +} +EXPORT_SYMBOL(trigger_waveform_start); + +void trigger_waveform_stop(void) +{ + writel(0x0, trigger_reg_base + WAVE_TRIGGER_REG_OFFSET); +} +EXPORT_SYMBOL(trigger_waveform_stop); + +void *tcu_base; +#define TCU_NODE_STATUSn 0x9400 +static int ioremap_tcu_resource(void) +{ + tcu_base = ioremap(0x50c00000, 0x40000); + if (IS_ERR(tcu_base)) { + pr_err("failed to ioremap tcu reges\n"); + return PTR_ERR(tcu_base); + } + return 0; +} + +static int get_tcu_node_status(unsigned long 
*tcu_node_status_p) +{ + unsigned long reg_val; + unsigned long tcu_node_status = 0; + int i; + + for (i = 0; i < 62; i++) { + reg_val = readl(tcu_base + TCU_NODE_STATUSn + (4*i)); + tcu_node_status |= (reg_val & 0x1) << i; + } + *tcu_node_status_p = tcu_node_status; + + return 0; +} + +void print_tcu_node_status(const char *call_name, int call_line) +{ + unsigned long tcu_node_status = 0; + + get_tcu_node_status(&tcu_node_status); + + pr_info("---%s:%d, TCU_NODE_STATUS=0x%016lx\n", call_name, call_line, tcu_node_status); +} +EXPORT_SYMBOL(print_tcu_node_status); + +static int tcu_proc_show(struct seq_file *m, void *v) +{ + unsigned long tcu_node_status = 0; + + get_tcu_node_status(&tcu_node_status); + seq_printf(m, "TCU Node Status:0x%016lx\n", tcu_node_status); + + return 0; +} + +static int __init tcu_proc_init(void) +{ + char proc_name[64]; + + sprintf(proc_name, "%s_info", "tcu"); + pr_debug("%s, proc_name:%s\n", __func__, proc_name); + if (NULL == proc_create_single_data(proc_name, 0, NULL, tcu_proc_show, NULL)) { + return -1; + } + + return 0; +} diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index 290b961eb729..d78b8c3f72a8 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -19,6 +19,7 @@ config ARCH_ESWIN_EIC770X_SOC_FAMILY select ESWIN_MC select ESWIN_RSV_MEMBLOCK select ESWIN_CODACACHE_CONTROLLER + select IOMMU_DMA if IOMMU_SUPPORT menu "ESWIN EIC770X SoC Family Selection" depends on ARCH_ESWIN_EIC770X_SOC_FAMILY -- 2.47.0