We use the context->pin_mutex to serialise updates to the OA config and
to the register values written into each new context. Document this
relationship and assert that we do hold the context->pin_mutex, as it is
used by gen8_configure_all_contexts() to serialise updates to the OA
config itself.

v2: Add a white-lie for when we call intel_gt_resume() from init.
v3: Lie while we have the context pinned inside atomic reset.
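
For reference, the annotation boils down to borrowing lockdep's
held-lock bookkeeping around a section that is serialised by other
means. A minimal sketch of the pattern, with a hypothetical struct foo,
foo_is_pinned() and foo_update_reg_state() standing in for the
intel_context equivalents:

	/*
	 * We cannot take pin_mutex here (e.g. under atomic reset), but
	 * exclusivity is guaranteed by other means -- the context is
	 * pinned and cannot be unpinned beneath us -- so tell lockdep
	 * the lock is notionally held across the critical section.
	 */
	static void foo_reset(struct foo *f)
	{
		GEM_BUG_ON(!foo_is_pinned(f));

		mutex_acquire(&f->pin_mutex.dep_map, 0, 0, _THIS_IP_);
		foo_update_reg_state(f); /* may lockdep_assert_held() */
		mutex_release(&f->pin_mutex.dep_map, 0, _THIS_IP_);
	}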

Signed-off-by: Chris Wilson <[email protected]>
Cc: Lionel Landwerlin <[email protected]>
Reviewed-by: Lionel Landwerlin <[email protected]> #v1
---
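On the consumer side, writers of the per-context register state can
then document the contract directly and let lockdep enforce it; a
sketch with a hypothetical writer (the real assertion lands in
i915_oa_init_reg_state() below):

	static void foo_init_reg_state(struct intel_context *ce)
	{
		/*
		 * Whoever writes the OA config into the context image
		 * must hold (or annotate, as above) ce->pin_mutex.
		 */
		lockdep_assert_held(&ce->pin_mutex);

		/* ... emit OA/NOA register values into ce's state ... */
	}
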
 drivers/gpu/drm/i915/gt/intel_gt_pm.c | 7 ++++++-
 drivers/gpu/drm/i915/gt/intel_lrc.c   | 9 +++++++++
 drivers/gpu/drm/i915/i915_perf.c      | 3 +++
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 1363e069ec83..aa6cf0152ce7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 #include "i915_params.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -142,8 +143,12 @@ int intel_gt_resume(struct intel_gt *gt)
                intel_engine_pm_get(engine);
 
                ce = engine->kernel_context;
-               if (ce)
+               if (ce) {
+                       GEM_BUG_ON(!intel_context_is_pinned(ce));
+                       mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
                        ce->ops->reset(ce);
+                       mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
+               }
 
                engine->serial++; /* kernel context lost */
                err = engine->resume(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7d460b1842dd..d746a8fc332c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2440,6 +2440,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
        ce = rq->hw_context;
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+       GEM_BUG_ON(!intel_context_is_pinned(ce));
+       mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
+
        rq = active_request(rq);
        if (!rq) {
                ce->ring->head = ce->ring->tail;
@@ -2499,6 +2503,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
                  engine->name, ce->ring->head, ce->ring->tail);
        intel_ring_update_space(ce->ring);
        __execlists_update_reg_state(ce, engine);
+       mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 
 unwind:
        /* Push back any incomplete requests for replay after the reset. */
@@ -3969,6 +3974,9 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
                            u32 head,
                            bool scrub)
 {
+       GEM_BUG_ON(!intel_context_is_pinned(ce));
+       mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
+
        /*
         * We want a simple context + ring to execute the breadcrumb update.
         * We cannot rely on the context being intact across the GPU hang,
@@ -3993,6 +4001,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
        intel_ring_update_space(ce->ring);
 
        __execlists_update_reg_state(ce, engine);
+       mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2c9f46e12622..c1b764233761 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2305,6 +2305,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 {
        struct i915_perf_stream *stream;
 
+       /* perf.exclusive_stream serialised by gen8_configure_all_contexts() */
+       lockdep_assert_held(&ce->pin_mutex);
+
        if (engine->class != RENDER_CLASS)
                return;
 
-- 
2.23.0
