From: Rob Clark <robdcl...@chromium.org>

The brute-force iommu_flush_iotlb_all() was good enough for unmap, but
in some cases a map operation could require removing a table pte entry
and replacing it with a block entry.  This also requires TLB
invalidation.  Missing this was resulting in an obscure iova fault on
what should be a valid buffer address.

Thanks to Robin Murphy for helping me understand the cause of the fault.

Cc: Robin Murphy <robin.mur...@arm.com>
Fixes: b145c6e65eb0 ("drm/msm: Add support to create a local pagetable")
Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/gpu/drm/msm/msm_iommu.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)
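
Purely as illustration, here is a minimal userspace-only toy sketch of the
failure mode described above, i.e. a map replacing a now-empty last-level
table with a block entry.  All of the names (toy_pte, toy_walk_cache,
toy_map_block, ...) are invented for illustration; only the flush step
marked in toy_map_block() corresponds to the invalidation this patch wires
up via msm_iommu_tlb_flush_walk().

/*
 * Toy model, not kernel code: a one-entry "walk cache" standing in for
 * what the SMMU caches after walking a table pointer.
 */
#include <stdio.h>
#include <stdlib.h>

enum toy_pte_type { PTE_INVALID, PTE_TABLE, PTE_BLOCK };

struct toy_pte {
        enum toy_pte_type type;
        void *target;                   /* next-level table or phys block */
};

/* Models the IOMMU's cached copy of a previously walked table pointer. */
static struct toy_pte toy_walk_cache;

static void toy_tlb_flush_walk(void)
{
        /* The step this patch adds: drop the stale cached table pointer. */
        toy_walk_cache.type = PTE_INVALID;
        toy_walk_cache.target = NULL;
}

static void toy_map_block(struct toy_pte *pte, void *phys)
{
        if (pte->type == PTE_TABLE) {
                /*
                 * Map is replacing a (now empty) last-level table with a
                 * block entry.  Freeing the table alone is not enough:
                 * without a walk-cache invalidation the hardware can keep
                 * following its cached pointer to the freed table and then
                 * fault on an iova that is, in fact, validly mapped.
                 */
                free(pte->target);
                toy_tlb_flush_walk();
        }
        pte->type = PTE_BLOCK;
        pte->target = phys;
}

int main(void)
{
        struct toy_pte top = { PTE_TABLE, calloc(512, sizeof(struct toy_pte)) };

        toy_walk_cache = top;           /* hardware caches the table pointer */
        toy_map_block(&top, (void *)0x80000000UL);
        printf("walk cache is now %s\n",
               toy_walk_cache.type == PTE_INVALID ? "invalid" : "stale!");
        return 0;
}

On the cookie change further down: the cookie passed to
alloc_io_pgtable_ops() is what io-pgtable hands back to the tlb_flush_*
callbacks, which is why the allocation now passes the pagetable itself
rather than iommu->domain.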

diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5cc8d358cc97..d5512037c38b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
        struct msm_mmu base;
        struct msm_mmu *parent;
        struct io_pgtable_ops *pgtbl_ops;
+       const struct iommu_flush_ops *tlb;
+       struct device *iommu_dev;
        unsigned long pgsize_bitmap;    /* Bitmap of page sizes in use */
        phys_addr_t ttbr;
        u32 asid;
@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
 
 static void msm_iommu_tlb_flush_all(void *cookie)
 {
+       struct msm_iommu_pagetable *pagetable = cookie;
+       struct adreno_smmu_priv *adreno_smmu;
+
+       if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+               return;
+
+       adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+       pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+       pm_runtime_put_autosuspend(pagetable->iommu_dev);
 }
 
 static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
                size_t granule, void *cookie)
 {
+       struct msm_iommu_pagetable *pagetable = cookie;
+       struct adreno_smmu_priv *adreno_smmu;
+
+       if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+               return;
+
+       adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+       pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+       pm_runtime_put_autosuspend(pagetable->iommu_dev);
 }
 
 static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 {
 }
 
-static const struct iommu_flush_ops null_tlb_ops = {
+static const struct iommu_flush_ops tlb_ops = {
        .tlb_flush_all = msm_iommu_tlb_flush_all,
        .tlb_flush_walk = msm_iommu_tlb_flush_walk,
        .tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
        /* The incoming cfg will have the TTBR1 quirk enabled */
        ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
-       ttbr0_cfg.tlb = &null_tlb_ops;
+       ttbr0_cfg.tlb = &tlb_ops;
 
        pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
-               &ttbr0_cfg, iommu->domain);
+               &ttbr0_cfg, pagetable);
 
        if (!pagetable->pgtbl_ops) {
                kfree(pagetable);
@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
        /* Needed later for TLB flush */
        pagetable->parent = parent;
+       pagetable->tlb = ttbr1_cfg->tlb;
+       pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
        pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
        pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
 
-- 
2.43.0
