From: Matthew Brost <matthew.brost@intel.com>

Don't drop ce->guc_active.lock when unwinding a context after reset.
At one point we had to drop this because of a lock inversion but that is
no longer the case. It is much safer to hold the lock so let's do that.

Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0c1e6b465fba..31bbfe5479ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -813,8 +813,6 @@ __unwind_incomplete_requests(struct intel_context *ce)
                        continue;
 
                list_del_init(&rq->sched.link);
-               spin_unlock(&ce->guc_active.lock);
-
                __i915_request_unsubmit(rq);
 
                /* Push the request back into the queue for later resubmission. */
@@ -827,8 +825,6 @@ __unwind_incomplete_requests(struct intel_context *ce)
 
                list_add(&rq->sched.link, pl);
                set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
-               spin_lock(&ce->guc_active.lock);
        }
        spin_unlock(&ce->guc_active.lock);
        spin_unlock_irqrestore(&sched_engine->lock, flags);
-- 
2.25.1

Reply via email to