On 23/08/2019 03:12, Rob Herring wrote:
There is no point in resuming the h/w just to do flush operations and
doing so takes several locks which cause lockdep issues with the shrinker.
Rework the flush operations to only happen when the h/w is already awake.
This avoids taking any locks associated with resuming.

Cc: Tomeu Vizoso <tomeu.viz...@collabora.com>
Cc: Steven Price <steven.pr...@arm.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzw...@collabora.com>
Cc: David Airlie <airl...@linux.ie>
Cc: Daniel Vetter <dan...@ffwll.ch>
Signed-off-by: Rob Herring <r...@kernel.org>
---
v2: new patch

  drivers/gpu/drm/panfrost/panfrost_mmu.c | 41 ++++++++++++-------------
  1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 842bdd7cf6be..ccf671a9c3fb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -220,6 +220,23 @@ static size_t get_pgsize(u64 addr, size_t size)
        return SZ_2M;
  }
+void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+                             struct panfrost_mmu *mmu,
+                             u64 iova, size_t size)
+{
+       if (mmu->as < 0)
+               return;
+
+       /* Flush the PTs only if we're already awake */
+       if (!pm_runtime_get_if_in_use(pfdev->dev))
+               return;

Is the MMU guaranteed to be reset on resume such that the TLBs will always come up clean? Otherwise there are potentially corners where stale entries that we skip here might hang around if the hardware lies about powering things down.

Robin.

+
+       mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
+
+       pm_runtime_mark_last_busy(pfdev->dev);
+       pm_runtime_put_autosuspend(pfdev->dev);
+}
+
  static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                      u64 iova, int prot, struct sg_table *sgt)
  {
@@ -246,11 +263,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                }
        }
- mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova,
-                           AS_COMMAND_FLUSH_PT);
-
        mutex_unlock(&mmu->lock);
+ panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
+
        return 0;
  }
@@ -259,7 +275,6 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct sg_table *sgt;
-       int ret;
        int prot = IOMMU_READ | IOMMU_WRITE;
if (WARN_ON(bo->is_mapped))
@@ -272,14 +287,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);
- ret = pm_runtime_get_sync(pfdev->dev);
-       if (ret < 0)
-               return ret;
-
        mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-
-       pm_runtime_mark_last_busy(pfdev->dev);
-       pm_runtime_put_autosuspend(pfdev->dev);
        bo->is_mapped = true;
return 0;
@@ -293,17 +301,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
        u64 iova = bo->node.start << PAGE_SHIFT;
        size_t len = bo->node.size << PAGE_SHIFT;
        size_t unmapped_len = 0;
-       int ret;
if (WARN_ON(!bo->is_mapped))
                return;
dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);

- ret = pm_runtime_get_sync(pfdev->dev);
-       if (ret < 0)
-               return;
-
        mutex_lock(&bo->mmu->lock);
while (unmapped_len < len) {
@@ -318,13 +321,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
                unmapped_len += pgsize;
        }
- mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT,
-                           bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
-
        mutex_unlock(&bo->mmu->lock);
- pm_runtime_mark_last_busy(pfdev->dev);
-       pm_runtime_put_autosuspend(pfdev->dev);
+       panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
        bo->is_mapped = false;
  }
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to