execlists_dequeue() is invoked from a function which uses
local_irq_disable() to disable interrupts, so that the spin_lock()
behaves like spin_lock_irq().
This breaks PREEMPT_RT: there, spin_lock() maps to a sleeping lock that
must not be acquired with interrupts disabled, so local_irq_disable() +
spin_lock() is not the same as spin_lock_irq().
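
As a minimal illustration of the two patterns (the lock name below is a
placeholder, not the driver's actual code):

    /* current pattern, via execlists_dequeue_irq() */
    local_irq_disable();
    spin_lock(&lock);        /* sleeping lock on PREEMPT_RT; must not be
                              * acquired with interrupts disabled */
    /* ... */
    spin_unlock(&lock);
    local_irq_enable();

    /* pattern after this patch */
    spin_lock_irq(&lock);    /* !RT: disables interrupts as before;
                              * RT: takes the sleeping lock, interrupts
                              * stay enabled */
    /* ... */
    spin_unlock_irq(&lock);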

execlists_dequeue_irq() and execlists_dequeue() each have only one
caller. If intel_engine_cs::active::lock is acquired and released with
the _irq suffix then it behaves almost as if execlists_dequeue() were
invoked with interrupts disabled. The difference is that the last part
of the function is then invoked with interrupts enabled.
I can't tell whether this makes a difference. From looking at it, it
might work to move the last unlock to the end of the function, as I did
not find anything that would acquire the lock again.
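
A rough sketch of that alternative (only what the previous paragraph
speculates about, not what this patch does), based on the context lines
of execlists_dequeue() visible in the diff below:

    sched_engine->queue_priority_hint = queue_prio(sched_engine);
    i915_sched_engine_reset_on_empty(sched_engine);
    /* ... poke the HW with the newly built ELSP ports ... */
    spin_unlock_irq(&sched_engine->lock); /* unlock moved to the very end */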

Reported-by: Clark Williams <willi...@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Reviewed-by: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
---
 .../drm/i915/gt/intel_execlists_submission.c    | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index a69df5e9e77af..2d5f0c226ad66 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1284,7 +1284,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * and context switches) submission.
         */
 
-       spin_lock(&sched_engine->lock);
+       spin_lock_irq(&sched_engine->lock);
 
        /*
         * If the queue is higher priority than the last
@@ -1384,7 +1384,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               spin_unlock(&sched_engine->lock);
+                               spin_unlock_irq(&sched_engine->lock);
                                return;
                        }
                }
@@ -1410,7 +1410,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                if (last && !can_merge_rq(last, rq)) {
                        spin_unlock(&ve->base.sched_engine->lock);
-                       spin_unlock(&engine->sched_engine->lock);
+                       spin_unlock_irq(&engine->sched_engine->lock);
                        return; /* leave this for another sibling */
                }
 
@@ -1572,7 +1572,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         */
        sched_engine->queue_priority_hint = queue_prio(sched_engine);
        i915_sched_engine_reset_on_empty(sched_engine);
-       spin_unlock(&sched_engine->lock);
+       spin_unlock_irq(&sched_engine->lock);
 
        /*
         * We can skip poking the HW if we ended up with exactly the same set
@@ -1598,13 +1598,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        }
 }
 
-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
-{
-       local_irq_disable(); /* Suspend interrupts across request submission */
-       execlists_dequeue(engine);
-       local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
-}
-
 static void clear_ports(struct i915_request **ports, int count)
 {
        memset_p((void **)ports, NULL, count);
@@ -2425,7 +2418,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
        }
 
        if (!engine->execlists.pending[0]) {
-               execlists_dequeue_irq(engine);
+               execlists_dequeue(engine);
                start_timeslice(engine);
        }
 
-- 
2.34.1