At the moment, we have a dependency on the RPM acting as a barrier itself in
both i915_gem_release_all_mmaps() and i915_gem_restore_fences().
i915_gem_restore_fences() is also called along !runtime pm paths, but we
can move the marking of lost fences alongside releasing the mmaps into a
common i915_gem_runtime_suspend(). This has the advantage of locating
all the tricky barrier dependencies in one location.

Suggested-by: Daniel Vetter <daniel.vet...@ffwll.ch>
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vet...@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_drv.c       |  6 ++----
 drivers/gpu/drm/i915/i915_drv.h       |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c       | 25 +++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_fence.c | 12 +++++-------
 4 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d08d6617e3b6..6473bf58fbe5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2267,10 +2267,8 @@ static int vlv_resume_prepare(struct drm_i915_private 
*dev_priv,
 
        vlv_check_no_gt_access(dev_priv);
 
-       if (rpm_resume) {
+       if (rpm_resume)
                intel_init_clock_gating(dev);
-               i915_gem_restore_fences(dev);
-       }
 
        return ret;
 }
@@ -2296,7 +2294,7 @@ static int intel_runtime_suspend(struct device *kdev)
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
-       i915_gem_release_all_mmaps(dev_priv);
+       i915_gem_runtime_suspend(dev_priv);
 
        intel_guc_suspend(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7c686145861b..cc703c38b397 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3125,9 +3125,10 @@ void i915_vma_destroy(struct i915_vma *vma);
 
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __sg_page_count(struct scatterlist *sg)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8819fafbcc7a..d904e5f5d146 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1954,10 +1954,10 @@ out:
        intel_runtime_pm_put(i915);
 }
 
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj, *on;
+       int i;
 
        /*
         * Only called during RPM suspend. All users of the userfault_list
@@ -1972,6 +1972,27 @@ i915_gem_release_all_mmaps(struct drm_i915_private 
*dev_priv)
                drm_vma_node_unmap(&obj->base.vma_node,
                                   obj->base.dev->anon_inode->i_mapping);
        }
+
+       /* The fence will be lost when the device powers down. If any were
+        * in use by hardware (i.e. they are pinned), we should not be powering
+        * down! All other fences will be reacquired by the user upon waking.
+        */
+       for (i = 0; i < dev_priv->num_fence_regs; i++) {
+               struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+               struct i915_vma *vma;
+
+               if (WARN_ON(reg->pin_count))
+                       continue;
+
+               vma = fetch_and_zero(&reg->vma);
+               if (!vma)
+                       continue;
+
+               GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+
+               list_move(&reg->link, &dev_priv->mm.fence_list);
+               vma->fence = NULL;
+       }
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c 
b/drivers/gpu/drm/i915/i915_gem_fence.c
index f7081f4b5d22..230edc821823 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma)
        struct drm_i915_fence_reg *fence;
        struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 
+       /* Note that we revoke fences on runtime suspend. Therefore the user
+        * must keep the device awake whilst using the fence.
+        */
        assert_rpm_wakelock_held(to_i915(vma->vm->dev));
 
        /* Just update our place in the LRU if our fence is getting reused. */
@@ -368,19 +371,14 @@ i915_vma_get_fence(struct i915_vma *vma)
  * @dev: DRM device
  *
  * Restore the hw fence state to match the software tracking again, to be 
called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
  */
 void i915_gem_restore_fences(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
-       /* Note that this may be called outside of struct_mutex, by
-        * runtime suspend/resume. The barrier we require is enforced by
-        * rpm itself - all access to fences/GTT are only within an rpm
-        * wakeref, and to acquire that wakeref you must pass through here.
-        */
-
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
                struct i915_vma *vma = reg->vma;
-- 
2.9.3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to