The driver currently calls the arm_smmu_get_cmdq() helper in several
places, even though all of them are reached from the same caller --
the arm_smmu_cmdq_issue_cmdlist() function.

Pass the cmdq pointer down to these functions instead, so that
arm_smmu_get_cmdq() is called only once per cmdlist submission.

This also helps the NVIDIA implementation, which maintains its own
cmdq pointers and needs to redirect the cmdq pointer from the default
arm_smmu->cmdq to one of its own queues, based on the opcodes it finds
while scanning the cmdlist for commands its queues cannot handle (see
the sketch after the diffstat below).

Signed-off-by: Nicolin Chen <nicol...@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
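
For illustration only, with the cmdq pointer threaded through, a vendor
implementation could select its queue once at the top of
arm_smmu_cmdq_issue_cmdlist() along these lines. This is a rough sketch
and not part of this patch: nvidia_smmu_get_cmdq(), struct nvidia_smmu
and its vcmdq member are hypothetical names; only CMDQ_0_OP,
CMDQ_ENT_DWORDS and the TLBI opcodes come from the existing driver.

	/* Sketch: pick a vendor cmdq based on the opcodes in the cmdlist */
	static struct arm_smmu_cmdq *
	nvidia_smmu_get_cmdq(struct arm_smmu_device *smmu, u64 *cmds, int n)
	{
		struct nvidia_smmu *nsmmu =
			container_of(smmu, struct nvidia_smmu, smmu);
		int i;

		for (i = 0; i < n; i++) {
			u64 opcode = FIELD_GET(CMDQ_0_OP,
					       cmds[i * CMDQ_ENT_DWORDS]);

			switch (opcode) {
			case CMDQ_OP_TLBI_NH_ASID:
			case CMDQ_OP_TLBI_NH_VA:
				/* Supported by the vendor queue */
				continue;
			default:
				/* Unsupported command: use the default cmdq */
				return &smmu->cmdq;
			}
		}

		return nsmmu->vcmdq;
	}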

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6878a83582b9..216f3442aac4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -584,11 +584,11 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
 
 /* Wait for the command queue to become non-full */
 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+                                            struct arm_smmu_cmdq *cmdq,
                                             struct arm_smmu_ll_queue *llq)
 {
        unsigned long flags;
        struct arm_smmu_queue_poll qp;
-       struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
        int ret = 0;
 
        /*
@@ -619,11 +619,11 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+                                         struct arm_smmu_cmdq *cmdq,
                                          struct arm_smmu_ll_queue *llq)
 {
        int ret = 0;
        struct arm_smmu_queue_poll qp;
-       struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
        u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
 
        queue_poll_init(smmu, &qp);
@@ -643,10 +643,10 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+                                              struct arm_smmu_cmdq *cmdq,
                                               struct arm_smmu_ll_queue *llq)
 {
        struct arm_smmu_queue_poll qp;
-       struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
        u32 prod = llq->prod;
        int ret = 0;
 
@@ -693,12 +693,13 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
 }
 
 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+                                        struct arm_smmu_cmdq *cmdq,
                                         struct arm_smmu_ll_queue *llq)
 {
        if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
-               return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+               return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
 
-       return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+       return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
 }
 
 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
@@ -755,7 +756,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 
                while (!queue_has_space(&llq, n + sync)) {
                        local_irq_restore(flags);
-                       if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+                       if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
                                dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
                        local_irq_save(flags);
                }
@@ -831,7 +832,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
        /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
        if (sync) {
                llq.prod = queue_inc_prod_n(&llq, n);
-               ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+               ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
                if (ret) {
                        dev_err_ratelimited(smmu->dev,
                                            "CMD_SYNC timeout at 0x%08x [hwprod 
0x%08x, hwcons 0x%08x]\n",
-- 
2.17.1
