In preparation for more careful handling of incomplete preemption during
reset by execlists, we move the existing code wholesale to the backends
under a couple of new reset vfuncs.
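
The three hooks added to struct intel_engine_cs are, condensed (the
comments here only summarise how the core and the backends use them):

	struct {
		/* Quiesce submission and return the active (hung) request. */
		struct i915_request *(*prepare)(struct intel_engine_cs *engine);
		/* Program the CS to resume from the hung request's breadcrumb. */
		void (*reset)(struct intel_engine_cs *engine,
			      struct i915_request *rq);
		/* Re-enable request submission once the reset is complete. */
		void (*finish)(struct intel_engine_cs *engine);
	} reset;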

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiar...@intel.com>
Cc: Michel Thierry <michel.thie...@intel.com>
Cc: Jeff McGee <jeff.mc...@intel.com>
Reviewed-by: Jeff McGee <jeff.mc...@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         | 47 +++-----------------------
 drivers/gpu/drm/i915/intel_lrc.c        | 59 +++++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 23 +++++++++++--
 drivers/gpu/drm/i915/intel_ringbuffer.h |  9 +++--
 4 files changed, 88 insertions(+), 50 deletions(-)
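
As a rough sketch (simplified, with error handling and locking elided),
the core reset path in i915_gem.c now drives the backends as:

	/* i915_gem_reset_prepare_engine() */
	rq = engine->reset.prepare(engine);	/* quiesce, find the active request */

	/* i915_gem_reset_engine() */
	engine->reset.reset(engine, rq);	/* set up CS to resume from rq's breadcrumb */

	/* i915_gem_reset_finish_engine() */
	engine->reset.finish(engine);		/* re-enable submission */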

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9650a7b10c5f..038867c96809 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2917,7 +2917,7 @@ static bool engine_stalled(struct intel_engine_cs *engine)
 struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
-       struct i915_request *request = NULL;
+       struct i915_request *request;
 
        /*
         * During the reset sequence, we must prevent the engine from
@@ -2940,40 +2940,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         */
        kthread_park(engine->breadcrumbs.signaler);
 
-       /*
-        * Prevent request submission to the hardware until we have
-        * completed the reset in i915_gem_reset_finish(). If a request
-        * is completed by one engine, it may then queue a request
-        * to a second via its execlists->tasklet *just* as we are
-        * calling engine->init_hw() and also writing the ELSP.
-        * Turning off the execlists->tasklet until the reset is over
-        * prevents the race.
-        *
-        * Note that this needs to be a single atomic operation on the
-        * tasklet (flush existing tasks, prevent new tasks) to prevent
-        * a race between reset and set-wedged. It is not, so we do the best
-        * we can atm and make sure we don't lock the machine up in the more
-        * common case of recursively being called from set-wedged from inside
-        * i915_reset.
-        */
-       if (!atomic_read(&engine->execlists.tasklet.count))
-               tasklet_kill(&engine->execlists.tasklet);
-       tasklet_disable(&engine->execlists.tasklet);
-
-       /*
-        * We're using worker to queue preemption requests from the tasklet in
-        * GuC submission mode.
-        * Even though tasklet was disabled, we may still have a worker queued.
-        * Let's make sure that all workers scheduled before disabling the
-        * tasklet are completed before continuing with the reset.
-        */
-       if (engine->i915->guc.preempt_wq)
-               flush_workqueue(engine->i915->guc.preempt_wq);
-
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
-
-       request = i915_gem_find_active_request(engine);
+       request = engine->reset.prepare(engine);
        if (request && request->fence.error == -EIO)
                request = ERR_PTR(-EIO); /* Previous reset failed! */
 
@@ -3114,13 +3081,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
        if (request)
                request = i915_gem_reset_request(engine, request);
 
-       if (request) {
-               DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-                                engine->name, request->global_seqno);
-       }
-
        /* Setup the CS to resume from the breadcrumb of the hung request */
-       engine->reset_hw(engine, request);
+       engine->reset.reset(engine, request);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
@@ -3172,7 +3134,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-       tasklet_enable(&engine->execlists.tasklet);
+       engine->reset.finish(engine);
+
        kthread_unpark(engine->breadcrumbs.signaler);
 
        intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 51e930323626..42f9af625e88 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1732,8 +1732,48 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
        return init_workarounds_ring(engine);
 }
 
-static void reset_common_ring(struct intel_engine_cs *engine,
-                             struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       /*
+        * Prevent request submission to the hardware until we have
+        * completed the reset in i915_gem_reset_finish(). If a request
+        * is completed by one engine, it may then queue a request
+        * to a second via its execlists->tasklet *just* as we are
+        * calling engine->init_hw() and also writing the ELSP.
+        * Turning off the execlists->tasklet until the reset is over
+        * prevents the race.
+        *
+        * Note that this needs to be a single atomic operation on the
+        * tasklet (flush existing tasks, prevent new tasks) to prevent
+        * a race between reset and set-wedged. It is not, so we do the best
+        * we can atm and make sure we don't lock the machine up in the more
+        * common case of recursively being called from set-wedged from inside
+        * i915_reset.
+        */
+       if (!atomic_read(&execlists->tasklet.count))
+               tasklet_kill(&execlists->tasklet);
+       tasklet_disable(&execlists->tasklet);
+
+       /*
+        * We're using worker to queue preemption requests from the tasklet in
+        * GuC submission mode.
+        * Even though tasklet was disabled, we may still have a worker queued.
+        * Let's make sure that all workers scheduled before disabling the
+        * tasklet are completed before continuing with the reset.
+        */
+       if (engine->i915->guc.preempt_wq)
+               flush_workqueue(engine->i915->guc.preempt_wq);
+
+       return i915_gem_find_active_request(engine);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+                           struct i915_request *request)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct intel_context *ce;
@@ -1742,6 +1782,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        GEM_TRACE("%s seqno=%x\n",
                  engine->name, request ? request->global_seqno : 0);
 
+       /* The submission tasklet must be disabled by engine->reset.prepare() */
+       GEM_BUG_ON(!atomic_read(&execlists->tasklet.count));
+
        /* See execlists_cancel_requests() for the irq/spinlock split. */
        local_irq_save(flags);
 
@@ -1802,6 +1845,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        unwind_wa_tail(request);
 }
 
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+       tasklet_enable(&engine->execlists.tasklet);
+
+       GEM_TRACE("%s\n", engine->name);
+}
+
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
@@ -2126,7 +2176,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overriden by each engine. */
        engine->init_hw = gen8_init_common_ring;
-       engine->reset_hw = reset_common_ring;
+
+       engine->reset.prepare = execlists_reset_prepare;
+       engine->reset.reset = execlists_reset;
+       engine->reset.finish = execlists_reset_finish;
 
        engine->context_pin = execlists_context_pin;
        engine->context_unpin = execlists_context_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04d9d9a946a7..5dadbc435c0e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -530,9 +530,20 @@ static int init_ring_common(struct intel_engine_cs *engine)
        return ret;
 }
 
-static void reset_ring_common(struct intel_engine_cs *engine,
-                             struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
 {
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+
+       return i915_gem_find_active_request(engine);
+}
+
+static void reset_ring(struct intel_engine_cs *engine,
+                      struct i915_request *request)
+{
+       GEM_TRACE("%s seqno=%x\n",
+                 engine->name, request ? request->global_seqno : 0);
+
        /*
         * RC6 must be prevented until the reset is complete and the engine
         * reinitialised. If it occurs in the middle of this sequence, the
@@ -595,6 +606,10 @@ static void reset_ring_common(struct intel_engine_cs *engine,
        }
 }
 
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
 static int intel_rcs_ctx_init(struct i915_request *rq)
 {
        int ret;
@@ -1987,7 +2002,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        intel_ring_init_semaphores(dev_priv, engine);
 
        engine->init_hw = init_ring_common;
-       engine->reset_hw = reset_ring_common;
+       engine->reset.prepare = reset_prepare;
+       engine->reset.reset = reset_ring;
+       engine->reset.finish = reset_finish;
 
        engine->context_pin = intel_ring_context_pin;
        engine->context_unpin = intel_ring_context_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2e20627e254b..15d624925594 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -418,8 +418,13 @@ struct intel_engine_cs {
        void            (*irq_disable)(struct intel_engine_cs *engine);
 
        int             (*init_hw)(struct intel_engine_cs *engine);
-       void            (*reset_hw)(struct intel_engine_cs *engine,
-                                   struct i915_request *rq);
+
+       struct {
+               struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+               void (*reset)(struct intel_engine_cs *engine,
+                             struct i915_request *rq);
+               void (*finish)(struct intel_engine_cs *engine);
+       } reset;
 
        void            (*park)(struct intel_engine_cs *engine);
        void            (*unpark)(struct intel_engine_cs *engine);
-- 
2.16.3
