Re: [PATCH 5/9] iommu/arm-smmu: tegra: Detect number of instances at runtime

2021-03-25 Thread Robin Murphy

On 2021-03-25 13:03, Thierry Reding wrote:

From: Thierry Reding 

Parse the reg property in device tree and detect the number of instances
represented by a device tree node. This is subsequently needed in order
to support single-instance SMMUs with the Tegra implementation because
additional programming is needed to properly configure the SID override
registers in the memory controller.

Signed-off-by: Thierry Reding 
---
  drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 49 ++--
  1 file changed, 34 insertions(+), 15 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 29117444e5a0..5b1170b028f0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -20,13 +20,19 @@
   * The third instance usage is through standard arm-smmu driver itself and
   * is out of scope of this implementation.
   */
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
  
  struct nvidia_smmu {
-   struct arm_smmu_device  smmu;
-   void __iomem *bases[NUM_SMMU_INSTANCES];
+   struct arm_smmu_device smmu;
+   void __iomem *bases[MAX_SMMU_INSTANCES];
+   unsigned int num_instances;
  };

Surely it would make more sense to just add a second set of 
implementation ops without all the overrides that aren't needed for a 
single instance?
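
For illustration, one way this could look -- assuming an ops table along the
lines of the existing nvidia_smmu_impl in this driver and the nvidia_smmu_*
callbacks shown in the diff; the nvidia_smmu_single_impl name below is a
hypothetical sketch, not the eventual implementation:

static const struct arm_smmu_impl nvidia_smmu_single_impl = {
	/*
	 * No read_reg/write_reg/tlb_sync mirroring is needed for a single
	 * instance; only genuinely Tegra-specific setup (e.g. the memory
	 * controller SID-override programming mentioned in the commit
	 * message) would need a hook here.
	 */
};

/* The existing table keeps the mirroring overrides for two instances. */
static const struct arm_smmu_impl nvidia_smmu_impl = {
	.read_reg = nvidia_smmu_read_reg,
	.write_reg = nvidia_smmu_write_reg,
	.read_reg64 = nvidia_smmu_read_reg64,
	.write_reg64 = nvidia_smmu_write_reg64,
	.reset = nvidia_smmu_reset,
	.tlb_sync = nvidia_smmu_tlb_sync,
	.global_fault = nvidia_smmu_global_fault,
	.context_fault = nvidia_smmu_context_fault,
};

The probe path would then pick one of the two tables depending on how many
reg entries it found.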


Also note that the binding currently requires the Tegra-specific 
compatible to have exactly two regions.


Robin.


[PATCH 5/9] iommu/arm-smmu: tegra: Detect number of instances at runtime

2021-03-25 Thread Thierry Reding
From: Thierry Reding 

Parse the reg property in device tree and detect the number of instances
represented by a device tree node. This is subsequently needed in order
to support single-instance SMMUs with the Tegra implementation because
additional programming is needed to properly configure the SID override
registers in the memory controller.

Signed-off-by: Thierry Reding 
---
 drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 49 ++--
 1 file changed, 34 insertions(+), 15 deletions(-)
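
Not part of the patch text (the later hunks are cut off below): a minimal
sketch of how the instance count might be detected from the reg entries at
probe time, assuming the implementation-init path has the platform device at
hand and the driver's existing includes; the helper name and its exact
placement are illustrative only.

/*
 * Hypothetical helper: walk the MMIO reg entries and record how many
 * instances this node describes. Instance 0 reuses the mapping the core
 * arm-smmu probe already created; further entries are optional, up to
 * MAX_SMMU_INSTANCES.
 */
static int nvidia_smmu_parse_instances(struct nvidia_smmu *nvidia,
				       struct platform_device *pdev)
{
	struct device *dev = nvidia->smmu.dev;
	unsigned int i;

	nvidia->bases[0] = nvidia->smmu.base;
	nvidia->num_instances = 1;

	for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;

		nvidia->bases[i] = devm_ioremap_resource(dev, res);
		if (IS_ERR(nvidia->bases[i]))
			return PTR_ERR(nvidia->bases[i]);

		nvidia->num_instances++;
	}

	return 0;
}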

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 29117444e5a0..5b1170b028f0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -20,13 +20,19 @@
  * The third instance usage is through standard arm-smmu driver itself and
  * is out of scope of this implementation.
  */
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
 
 struct nvidia_smmu {
-   struct arm_smmu_device  smmu;
-   void __iomem *bases[NUM_SMMU_INSTANCES];
+   struct arm_smmu_device smmu;
+   void __iomem *bases[MAX_SMMU_INSTANCES];
+   unsigned int num_instances;
 };
 
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+   return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
 static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
 unsigned int inst, int page)
 {
@@ -47,9 +53,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
  int page, int offset, u32 val)
 {
+   struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
 
-   for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+   for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
writel_relaxed(val, reg);
@@ -67,9 +74,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
int page, int offset, u64 val)
 {
+   struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
 
-   for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+   for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
writeq_relaxed(val, reg);
@@ -79,6 +87,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 int sync, int status)
 {
+   struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int delay;
 
arm_smmu_writel(smmu, page, sync, 0);
@@ -90,7 +99,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
u32 val = 0;
unsigned int i;
 
-   for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+   for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg;
 
reg = nvidia_smmu_page(smmu, i, page) + status;
@@ -112,9 +121,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 
 static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
 {
+   struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
 
-   for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+   for (i = 0; i < nvidia->num_instances; i++) {
u32 val;
void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
ARM_SMMU_GR0_sGFSR;
@@ -157,8 +167,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu = dev;
+   struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 
-   for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+   for (inst = 0; inst < nvidia->num_instances; inst++) {
irqreturn_t irq_ret;
 
irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
@@ -202,11 +213,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
struct arm_smmu_device *smmu;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain;
+   struct nvidia_smmu *nvidia;
 
smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
smmu = smmu_domain->smmu;
+   nvidia = to_nvidia_smmu(smmu);
 
-   for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+   for (inst = 0; inst < nvidia->num_instances; inst++) {
irqreturn_t irq_ret;
 
/*
@@ -241,6 +254,7 @@ struct arm_smmu_device