Sadly lockdep records when the irqs are reenabled and then marks up the
fake lock as being irq-unsafe. Our hand is forced and so we must mark up
the entire fake lock critical section as irq-off.

Hopefully this is the last tweak required!

Fixes: d67739268cf0 ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
Signed-off-by: Chris Wilson <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Cc: Mika Kuoppala <[email protected]>
---
 drivers/gpu/drm/i915/gt/intel_engine_pm.c | 28 +++++++++++++++--------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index a372d4ea9370..65b5ca74b394 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -39,27 +39,32 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 #if IS_ENABLED(CONFIG_LOCKDEP)
 
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
        unsigned long flags;
 
        local_irq_save(flags);
        mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
-       local_irq_restore(flags);
+
+       return flags;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+                                         unsigned long flags)
 {
        mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+       local_irq_restore(flags);
 }
 
 #else
 
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
+       return 0;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+                                         unsigned long flags)
 {
 }
 
@@ -68,6 +73,8 @@ static inline void __timeline_mark_unlock(struct intel_context *ce)
 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 {
        struct i915_request *rq;
+       unsigned long flags;
+       bool result = true;
 
        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
@@ -89,12 +96,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
         * retiring the last request, thus all rings should be empty and
         * all timelines idle.
         */
-       __timeline_mark_lock(engine->kernel_context);
+       flags = __timeline_mark_lock(engine->kernel_context);
 
        rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
        if (IS_ERR(rq))
                /* Context switch failed, hope for the best! Maybe reset? */
-               return true;
+               goto out_unlock;
 
        intel_timeline_enter(rq->timeline);
 
@@ -110,9 +117,10 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
        __intel_wakeref_defer_park(&engine->wakeref);
        __i915_request_queue(rq, NULL);
 
-       __timeline_mark_unlock(engine->kernel_context);
-
-       return false;
+       result = false;
+out_unlock:
+       __timeline_mark_unlock(engine->kernel_context, flags);
+       return result;
 }
 
 static int __engine_park(struct intel_wakeref *wf)
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to