The SMMUv3 can handle invalidations targeted at TLB entries with shared
ASIDs.  If the implementation supports broadcast TLB maintenance, enable
it and keep track of it in a feature bit. The SMMU will then take into
account the following CPU instructions for ASIDs in the shared set:

* TLBI VAE1IS(ASID, VA)
* TLBI ASIDE1IS(ASID)
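
For illustration only (not part of the patch), a rough sketch of how the
CPU side issues these two invalidations to the Inner Shareable domain,
written with raw inline assembly rather than the kernel's __tlbi()
helpers; the helper names below are made up for the example. With BTM
enabled (CR2.PTM clear), the SMMU is expected to take these broadcasts
into account for the shared ASIDs:

  /* Illustrative only: broadcast-invalidate one VA for a given ASID. */
  static void example_inval_va(unsigned long asid, unsigned long va)
  {
          /* ASID in bits [63:48], VA[55:12] in bits [43:0] */
          unsigned long arg = (asid << 48) | ((va >> 12) & ((1UL << 44) - 1));

          asm volatile("dsb ishst" : : : "memory");      /* publish PTE update */
          asm volatile("tlbi vae1is, %0" : : "r" (arg));
          asm volatile("dsb ish" : : : "memory");        /* wait for completion */
  }

  /* Illustrative only: broadcast-invalidate all entries for an ASID. */
  static void example_inval_asid(unsigned long asid)
  {
          unsigned long arg = asid << 48;

          asm volatile("dsb ishst" : : : "memory");
          asm volatile("tlbi aside1is, %0" : : "r" (arg));
          asm volatile("dsb ish" : : : "memory");
  }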

Signed-off-by: Jean-Philippe Brucker <[email protected]>
---
 drivers/iommu/arm-smmu-v3.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 27376e1193c1..b23f69aa242e 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -64,6 +64,7 @@
 #define IDR0_ASID16                    (1 << 12)
 #define IDR0_ATS                       (1 << 10)
 #define IDR0_HYP                       (1 << 9)
+#define IDR0_BTM                       (1 << 5)
 #define IDR0_COHACC                    (1 << 4)
 #define IDR0_TTF_SHIFT                 2
 #define IDR0_TTF_MASK                  0x3
@@ -659,6 +660,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HYP              (1 << 12)
 #define ARM_SMMU_FEAT_STALL_FORCE      (1 << 13)
 #define ARM_SMMU_FEAT_E2H              (1 << 14)
+#define ARM_SMMU_FEAT_BTM              (1 << 15)
        u32                             features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
@@ -2730,11 +2732,14 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
        writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
 
        /* CR2 (random crap) */
-       reg = CR2_PTM | CR2_RECINVSID;
+       reg = CR2_RECINVSID;
 
        if (smmu->features & ARM_SMMU_FEAT_E2H)
                reg |= CR2_E2H;
 
+       if (!(smmu->features & ARM_SMMU_FEAT_BTM))
+               reg |= CR2_PTM;
+
        writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
 
        /* Stream table */
@@ -2837,6 +2842,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 {
        u32 reg;
        bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
+       bool vhe = cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN);
 
        /* IDR0 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2886,11 +2892,20 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 
        if (reg & IDR0_HYP) {
                smmu->features |= ARM_SMMU_FEAT_HYP;
-               if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+               if (vhe)
                        smmu->features |= ARM_SMMU_FEAT_E2H;
        }
 
        /*
+        * If the CPU is using VHE, but the SMMU doesn't support it, the SMMU
+        * will create TLB entries for the NH-EL1 world and will miss the
+        * broadcast TLB invalidations that target the EL2-E2H world. Don't
+        * enable BTM in that case.
+        */
+       if (reg & IDR0_BTM && (!vhe || reg & IDR0_HYP))
+               smmu->features |= ARM_SMMU_FEAT_BTM;
+
+       /*
         * The coherency feature as set by FW is used in preference to the ID
         * register, but warn on mismatch.
         */
-- 
2.13.3
