When setting up reset, we may need to recursively prepare an engine, in
which case we should only synchronously flush the tasklets on the
outermost call; the inner calls will then be inside an atomic section
where the tasklet will never be run (and so the sync will never
complete).

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c     | 2 +-
 drivers/gpu/drm/i915/i915_tasklet.h | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 75668b50c81e..3f7ecbff1179 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3036,7 +3036,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         * Turning off the execlists->tasklet until the reset is over
         * prevents the race.
         */
-       i915_tasklet_disable(&engine->execlists.tasklet);
+       i915_tasklet_disable_once(&engine->execlists.tasklet);
 
        /*
         * We're using worker to queue preemption requests from the tasklet in
diff --git a/drivers/gpu/drm/i915/i915_tasklet.h b/drivers/gpu/drm/i915/i915_tasklet.h
index e24e4f77fe8e..b7cbbc2f8f69 100644
--- a/drivers/gpu/drm/i915/i915_tasklet.h
+++ b/drivers/gpu/drm/i915/i915_tasklet.h
@@ -58,6 +58,12 @@ static inline void i915_tasklet_disable(struct i915_tasklet *t)
        tasklet_disable(&t->base);
 }
 
+static inline void i915_tasklet_disable_once(struct i915_tasklet *t)
+{
+       if (atomic_inc_return(&t->base.count) == 1)
+               tasklet_unlock_wait(&t->base);
+}
+
 static inline void i915_tasklet_enable(struct i915_tasklet *t)
 {
        tasklet_enable(&t->base);
-- 
2.17.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to