From: Thierry Reding <tred...@nvidia.com>

Parse the reg property in the device tree and detect the number of
instances represented by a device tree node. This is subsequently needed
to support single-instance SMMUs with the Tegra implementation, because
additional programming is needed to properly configure the SID override
registers in the memory controller.

Signed-off-by: Thierry Reding <tred...@nvidia.com>
---
Changes in v3:
- move .probe_finalize initialization to later patch

Changes in v2:
- provide a separate implementation to simplify single instances

 drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 57 ++++++++++++++------
 1 file changed, 41 insertions(+), 16 deletions(-)
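
For reference, the instance count that this patch derives from platform
resources corresponds one-to-one to the reg entries of the SMMU device
tree node. Below is a minimal sketch (not part of this patch) of counting
those entries directly via the OF address API; the helper name
count_smmu_instances() is made up for illustration, and MAX_SMMU_INSTANCES
refers to the macro introduced by this patch:

#include <linux/of.h>
#include <linux/of_address.h>

/*
 * Illustration only: count the "reg" entries of the SMMU device tree
 * node. of_address_to_resource() fails once the index runs past the
 * last entry, so the loop stops at the number of instances. The patch
 * itself obtains the same information via platform_get_resource().
 */
static unsigned int count_smmu_instances(struct device_node *np)
{
	struct resource res;
	unsigned int count = 0;

	while (count < MAX_SMMU_INSTANCES &&
	       of_address_to_resource(np, count, &res) == 0)
		count++;

	return count;
}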

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 29117444e5a0..23889090eb01 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -20,13 +20,19 @@
  * The third instance usage is through standard arm-smmu driver itself and
  * is out of scope of this implementation.
  */
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
 
 struct nvidia_smmu {
-       struct arm_smmu_device  smmu;
-       void __iomem            *bases[NUM_SMMU_INSTANCES];
+       struct arm_smmu_device smmu;
+       void __iomem *bases[MAX_SMMU_INSTANCES];
+       unsigned int num_instances;
 };
 
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+       return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
 static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
                                             unsigned int inst, int page)
 {
@@ -47,9 +53,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
                                  int page, int offset, u32 val)
 {
+       struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        unsigned int i;
 
-       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+       for (i = 0; i < nvidia->num_instances; i++) {
                void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
                writel_relaxed(val, reg);
@@ -67,9 +74,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
                                    int page, int offset, u64 val)
 {
+       struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        unsigned int i;
 
-       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+       for (i = 0; i < nvidia->num_instances; i++) {
                void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
                writeq_relaxed(val, reg);
@@ -79,6 +87,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
                                 int sync, int status)
 {
+       struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        unsigned int delay;
 
        arm_smmu_writel(smmu, page, sync, 0);
@@ -90,7 +99,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
                        u32 val = 0;
                        unsigned int i;
 
-                       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+                       for (i = 0; i < nvidia->num_instances; i++) {
                                void __iomem *reg;
 
                                reg = nvidia_smmu_page(smmu, i, page) + status;
@@ -112,9 +121,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 
 static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
 {
+       struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        unsigned int i;
 
-       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+       for (i = 0; i < nvidia->num_instances; i++) {
                u32 val;
                void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
                                    ARM_SMMU_GR0_sGFSR;
@@ -157,8 +167,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
        unsigned int inst;
        irqreturn_t ret = IRQ_NONE;
        struct arm_smmu_device *smmu = dev;
+       struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 
-       for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+       for (inst = 0; inst < nvidia->num_instances; inst++) {
                irqreturn_t irq_ret;
 
                irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
@@ -202,11 +213,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
        struct arm_smmu_device *smmu;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain;
+       struct nvidia_smmu *nvidia;
 
        smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
        smmu = smmu_domain->smmu;
+       nvidia = to_nvidia_smmu(smmu);
 
-       for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+       for (inst = 0; inst < nvidia->num_instances; inst++) {
                irqreturn_t irq_ret;
 
                /*
@@ -235,12 +248,16 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
        .context_fault = nvidia_smmu_context_fault,
 };
 
+static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+};
+
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 {
        struct resource *res;
        struct device *dev = smmu->dev;
        struct nvidia_smmu *nvidia_smmu;
        struct platform_device *pdev = to_platform_device(dev);
+       unsigned int i;
 
        nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
        if (!nvidia_smmu)
@@ -248,16 +265,24 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 
        /* Instance 0 is ioremapped by arm-smmu.c. */
        nvidia_smmu->bases[0] = smmu->base;
+       nvidia_smmu->num_instances++;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res)
-               return ERR_PTR(-ENODEV);
+       for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+               if (!res)
+                       break;
 
-       nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
-       if (IS_ERR(nvidia_smmu->bases[1]))
-               return ERR_CAST(nvidia_smmu->bases[1]);
+               nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
+               if (IS_ERR(nvidia_smmu->bases[i]))
+                       return ERR_CAST(nvidia_smmu->bases[i]);
+
+               nvidia_smmu->num_instances++;
+       }
 
-       nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+       if (nvidia_smmu->num_instances == 1)
+               nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
+       else
+               nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
 
        return &nvidia_smmu->smmu;
 }
-- 
2.31.1
