Previously we relied on some clever tricks to avoid taking a lock when
touching guc_state.sched_state in certain cases. Don't do that; enforce
the use of the lock.
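
For illustration only (not part of this patch), the locking pattern
being enforced is roughly the sketch below. The struct and function
names are simplified placeholders; in the real driver the state lives
in ce->guc_state.sched_state and the lock is ce->guc_state.lock:

	#include <linux/spinlock.h>

	/* Hypothetical, simplified stand-in for the per-context GuC state. */
	struct sched_example {
		spinlock_t lock;
		u32 sched_state;
	};

	static void example_set_state_bit(struct sched_example *s, u32 bit)
	{
		/* Complain (with lockdep enabled) if the caller skipped the lock. */
		lockdep_assert_held(&s->lock);
		s->sched_state |= bit;
	}

	static void example_caller(struct sched_example *s)
	{
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		example_set_state_bit(s, 0x1);
		spin_unlock_irqrestore(&s->lock, flags);
	}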

v2:
 (kernel test robot)
  - Add __maybe_unused to sched_state_is_init()

v3: rebase after the unused code path removal was moved to an earlier
patch.

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
Reported-by: kernel test robot <l...@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospu...@intel.com>
---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 22 ++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e036a171ff17..ca73128d7b4d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -151,11 +151,23 @@ static inline void clr_context_registered(struct intel_context *ce)
 
 static inline void init_sched_state(struct intel_context *ce)
 {
-       /* Only should be called from guc_lrc_desc_pin() */
+       lockdep_assert_held(&ce->guc_state.lock);
        atomic_set(&ce->guc_sched_state_no_lock, 0);
        ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
 }
 
+__maybe_unused
+static bool sched_state_is_init(struct intel_context *ce)
+{
+       /*
+        * XXX: Kernel contexts can have SCHED_STATE_NO_LOCK_REGISTERED after
+        * suspend.
+        */
+       return !(atomic_read(&ce->guc_sched_state_no_lock) &
+                ~SCHED_STATE_NO_LOCK_REGISTERED) &&
+               !(ce->guc_state.sched_state & ~SCHED_STATE_BLOCKED_MASK);
+}
+
 static inline bool
 context_wait_for_deregister_to_register(struct intel_context *ce)
 {
@@ -166,7 +178,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce)
 static inline void
 set_context_wait_for_deregister_to_register(struct intel_context *ce)
 {
-       /* Only should be called from guc_lrc_desc_pin() without lock */
+       lockdep_assert_held(&ce->guc_state.lock);
        ce->guc_state.sched_state |=
                SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
 }
@@ -607,9 +619,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
        bool pending_disable, pending_enable, deregister, destroyed, banned;
 
        xa_for_each(&guc->context_lookup, index, ce) {
-               /* Flush context */
                spin_lock_irqsave(&ce->guc_state.lock, flags);
-               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
                /*
                 * Once we are at this point submission_disabled() is guaranteed
@@ -625,6 +635,8 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
                banned = context_banned(ce);
                init_sched_state(ce);
 
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
                if (pending_enable || destroyed || deregister) {
                        decr_outstanding_submission_g2h(guc);
                        if (deregister)
@@ -1324,6 +1336,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
        int ret = 0;
 
        GEM_BUG_ON(!engine->mask);
+       GEM_BUG_ON(!sched_state_is_init(ce));
 
        /*
         * Ensure LRC + CT vmas are is same region as write barrier is done
@@ -1352,7 +1365,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
        desc->priority = ce->guc_prio;
        desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
        guc_context_policy_init(engine, desc);
-       init_sched_state(ce);
 
        /*
         * The context_lookup xarray is used to determine if the hardware
-- 
2.32.0
