Since commit e2144503bf3b ("drm/i915: Prevent bonded requests from
overtaking each other on preemption") we have restricted requests to run
on their chosen engine across preemption events. Given that
restriction, we know such requests will be resubmitted to the same
physical engine, so we can short-circuit the virtual engine selection
and keep the request on the same engine during unwind.
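
In concrete terms, the unwind loop only needs to compare the request's
execution mask with the engine's mask; a match means the request could
never have migrated elsewhere. A rough sketch of the shape of the check
(mirroring the hunk below, not a complete listing):

        if (rq->execution_mask == engine->mask) {
                /* Restricted to this engine: requeue it locally. */
                list_move(&rq->sched.link, pl);
        } else {
                /* Virtual request: hand it back to its virtual engine. */
                struct intel_engine_cs *owner = rq->hw_context->engine;
                ...
        }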

References: e2144503bf3b ("drm/i915: Prevent bonded requests from overtaking each other on preemption")
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 6 +++---
 drivers/gpu/drm/i915/i915_request.c | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e6bf633b48d5..03732e3f5ec7 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -895,7 +895,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
        list_for_each_entry_safe_reverse(rq, rn,
                                         &engine->active.requests,
                                         sched.link) {
-               struct intel_engine_cs *owner;
 
                if (i915_request_completed(rq))
                        continue; /* XXX */
@@ -910,8 +909,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
                 * engine so that it can be moved across onto another physical
                 * engine as load dictates.
                 */
-               owner = rq->hw_context->engine;
-               if (likely(owner == engine)) {
+               if (likely(rq->execution_mask == engine->mask)) {
                        GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
                        if (rq_prio(rq) != prio) {
                                prio = rq_prio(rq);
@@ -922,6 +920,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
                        list_move(&rq->sched.link, pl);
                        active = rq;
                } else {
+                       struct intel_engine_cs *owner = rq->hw_context->engine;
+
                        /*
                         * Decouple the virtual breadcrumb before moving it
                         * back to the virtual engine -- we don't want the
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 437f9fc6282e..b8a54572a4f8 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -649,6 +649,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq->gem_context = ce->gem_context;
        rq->engine = ce->engine;
        rq->ring = ce->ring;
+       rq->execution_mask = ce->engine->mask;
 
        rcu_assign_pointer(rq->timeline, tl);
        rq->hwsp_seqno = tl->hwsp_seqno;
@@ -671,7 +672,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq->batch = NULL;
        rq->capture_list = NULL;
        rq->flags = 0;
-       rq->execution_mask = ALL_ENGINES;
 
        INIT_LIST_HEAD(&rq->execute_cb);
 
-- 
2.23.0
