On 15/05/2019 14:00, Chris Wilson wrote:
With the disappearance of NEWCLIENT, we no longer need to provide the
priority boost on preemption in order to prevent repeated gazumping,
and we can remove the dead code.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
---
  drivers/gpu/drm/i915/gt/intel_lrc.c | 59 +++++------------------------
  1 file changed, 10 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b5e82171df8f..f263a8374273 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -164,8 +164,6 @@
  #define WA_TAIL_DWORDS 2
  #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define ACTIVE_PRIORITY (I915_PRIORITY_NOSEMAPHORE)
-
  static int execlists_context_deferred_alloc(struct intel_context *ce,
                                            struct intel_engine_cs *engine);
  static void execlists_init_reg_state(u32 *reg_state,
@@ -189,23 +187,12 @@ static int effective_prio(const struct i915_request *rq)
        /*
         * On unwinding the active request, we give it a priority bump
-        * equivalent to a freshly submitted request. This protects it from
-        * being gazumped again, but it would be preferable if we didn't
-        * let it be gazumped in the first place!
-        *
-        * See __unwind_incomplete_requests()
+        * if it has completed waiting on any semaphore. If we know that
+        * the request has already started, we can prevent an unwanted
+        * preempt-to-idle cycle by taking that into account now.
         */
-       if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
-               /*
-                * After preemption, we insert the active request at the
-                * end of the new priority level. This means that we will be
-                * _lower_ priority than the preemptee all things equal (and
-                * so the preemption is valid), so adjust our comparison
-                * accordingly.
-                */
-               prio |= ACTIVE_PRIORITY;
-               prio--;
-       }
+       if (__i915_request_has_started(rq))
+               prio |= I915_PRIORITY_NOSEMAPHORE;
        /* Restrict mere WAIT boosts from triggering preemption */
        return prio | __NO_PREEMPTION;
@@ -371,11 +358,11 @@ static void unwind_wa_tail(struct i915_request *rq)
  }
  static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine, int boost)
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
  {
        struct i915_request *rq, *rn, *active = NULL;
        struct list_head *uninitialized_var(pl);
-       int prio = I915_PRIORITY_INVALID | boost;
+       int prio = I915_PRIORITY_INVALID;
        lockdep_assert_held(&engine->timeline.lock);

@@ -402,31 +389,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine, int boost)
                active = rq;
        }
-       /*
-        * The active request is now effectively the start of a new client
-        * stream, so give it the equivalent small priority bump to prevent
-        * it being gazumped a second time by another peer.
-        *
-        * Note we have to be careful not to apply a priority boost to a request
-        * still spinning on its semaphores. If the request hasn't started, that
-        * means it is still waiting for its dependencies to be signaled, and
-        * if we apply a priority boost to this request, we will boost it past
-        * its signalers and so break PI.
-        *
-        * One consequence of this preemption boost is that we may jump
-        * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
-        * making those priorities non-preemptible. They will be moved forward
-        * in the priority queue, but they will not gain immediate access to
-        * the GPU.
-        */
-       if (~prio & boost && __i915_request_has_started(active)) {
-               prio |= boost;
-               GEM_BUG_ON(active->sched.attr.priority >= prio);
-               active->sched.attr.priority = prio;
-               list_move_tail(&active->sched.link,
-                              i915_sched_lookup_priolist(engine, prio));
-       }
-
        return active;
  }
@@ -436,7 +398,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
        struct intel_engine_cs *engine =
                container_of(execlists, typeof(*engine), execlists);
-       return __unwind_incomplete_requests(engine, 0);
+       return __unwind_incomplete_requests(engine);
  }
  static inline void
@@ -657,8 +619,7 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
        execlists_cancel_port_requests(execlists);
        __unwind_incomplete_requests(container_of(execlists,
                                                  struct intel_engine_cs,
-                                                 execlists),
-                                    ACTIVE_PRIORITY);
+                                                 execlists));
  }
  static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -1911,7 +1872,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
        execlists_cancel_port_requests(execlists);
        /* Push back any incomplete requests for replay after the reset. */
-       rq = __unwind_incomplete_requests(engine, 0);
+       rq = __unwind_incomplete_requests(engine);
        if (!rq)
                goto out_replay;
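
For reference, a standalone userspace sketch (not the kernel code itself) of
the priority encoding that effective_prio() relies on after this patch: the
user-visible priority sits in the high bits, and internal flag bits such as
I915_PRIORITY_NOSEMAPHORE are OR'ed into the low bits before comparison. The
shift and bit values below are illustrative assumptions, not the real
definitions from the i915 headers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's internal priority flag bits. */
#define USER_PRIORITY_SHIFT     2                       /* assumed width */
#define USER_PRIORITY(x)        ((x) << USER_PRIORITY_SHIFT)
#define PRIORITY_WAIT           (1 << 0)                /* assumed value */
#define PRIORITY_NOSEMAPHORE    (1 << 1)                /* assumed value */
#define NO_PREEMPTION           (PRIORITY_WAIT)

/* Mirrors the shape of effective_prio() after this patch. */
static int effective_prio(int prio, bool has_started)
{
        /*
         * A request that has started executing has, by definition, finished
         * waiting on its semaphores, so compare it as if it already carried
         * the NOSEMAPHORE bonus; this avoids a pointless preempt-to-idle
         * cycle when an equal-priority peer gains that bit on readiness.
         */
        if (has_started)
                prio |= PRIORITY_NOSEMAPHORE;

        /* Restrict mere WAIT boosts from triggering preemption. */
        return prio | NO_PREEMPTION;
}

int main(void)
{
        /* A running request vs a queued peer of equal user priority. */
        int running = effective_prio(USER_PRIORITY(0), true);
        int waiter = USER_PRIORITY(0) | PRIORITY_NOSEMAPHORE;

        printf("preempt running request? %s\n",
               waiter > running ? "yes" : "no");        /* prints "no" */
        return 0;
}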

Reviewed-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

Regards,

Tvrtko