[PATCH 6/8] drm/i915: Add kick_backend function to i915_sched_engine

2021-06-17 Thread Matthew Brost
Not all back-ends require a kick after a scheduling update, so make the
kick a call-back function that the back-end can opt-in to. Also move
the current kick function from the scheduler to the execlists file as it
is specific to that back-end.

Signed-off-by: Matthew Brost 
Reviewed-by: Daniele Ceraolo Spurio 
---
 .../drm/i915/gt/intel_execlists_submission.c  | 52 
 drivers/gpu/drm/i915/i915_scheduler.c | 62 +--
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  6 ++
 3 files changed, 60 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8a3d4014fd2c..9487d9e0be62 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3116,10 +3116,61 @@ static bool can_preempt(struct intel_engine_cs *engine)
return engine->class != RENDER_CLASS;
 }
 
+static void kick_execlists(const struct i915_request *rq, int prio)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	struct i915_sched_engine *sched_engine = engine->sched_engine;
+	const struct i915_request *inflight;
+
+	/*
+	 * We only need to kick the tasklet once for the high priority
+	 * new context we add into the queue.
+	 */
+	if (prio <= sched_engine->queue_priority_hint)
+		return;
+
+	rcu_read_lock();
+
+	/* Nothing currently active? We're overdue for a submission! */
+	inflight = execlists_active(&engine->execlists);
+	if (!inflight)
+		goto unlock;
+
+	/*
+	 * If we are already the currently executing context, don't
+	 * bother evaluating if we should preempt ourselves.
+	 */
+	if (inflight->context == rq->context)
+		goto unlock;
+
+	ENGINE_TRACE(engine,
+		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+		     prio,
+		     rq->fence.context, rq->fence.seqno,
+		     inflight->fence.context, inflight->fence.seqno,
+		     inflight->sched.attr.priority);
+
+	sched_engine->queue_priority_hint = prio;
+
+	/*
+	 * Allow preemption of low -> normal -> high, but we do
+	 * not allow low priority tasks to preempt other low priority
+	 * tasks under the impression that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so kiss.
+	 */
+	if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+	rcu_read_unlock();
+}
+
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
engine->submit_request = execlists_submit_request;
engine->sched_engine->schedule = i915_schedule;
+   engine->sched_engine->kick_backend = kick_execlists;
engine->execlists.tasklet.callback = execlists_submission_tasklet;
 }
 
@@ -3702,6 +3753,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
ve->base.request_alloc = execlists_request_alloc;
 
ve->base.sched_engine->schedule = i915_schedule;
+   ve->base.sched_engine->kick_backend = kick_execlists;
ve->base.submit_request = virtual_submit_request;
ve->base.bond_execute = virtual_bond_execute;
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 4bc6969f6a97..035b88f2e4aa 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -157,65 +157,6 @@ sched_lock_engine(const struct i915_sched_node *node,
return locked;
 }
 
-static inline int rq_prio(const struct i915_request *rq)
-{
-	return rq->sched.attr.priority;
-}
-
-static inline bool need_preempt(int prio, int active)
-{
-	/*
-	 * Allow preemption of low -> normal -> high, but we do
-	 * not allow low priority tasks to preempt other low priority
-	 * tasks under the impression that latency for low priority
-	 * tasks does not matter (as much as background throughput),
-	 * so kiss.
-	 */
-	return prio >= max(I915_PRIORITY_NORMAL, active);
-}
-
-static void kick_submission(struct intel_engine_cs *engine,
-			    const struct i915_request *rq,
-			    int prio)
-{
-	const struct i915_request *inflight;
-
-	/*
-	 * We only need to kick the tasklet once for the high priority
-	 * new context we add into the queue.
-	 */
-	if (prio <= engine->sched_engine->queue_priority_hint)
-		return;
-
-	rcu_read_lock();
-
-	/* Nothing currently active? We're overdue for a submission! */
-	inflight = execlists_active(&engine->execlists);
-	if (!inflight)
-		goto unlock;
-
-	/*
-	 * If we are already the currently executing context, don't
-	 * bother evaluating if we should preempt ourselves.
-	 */
-	if (inflight->context == rq->context)
-		goto unlock;
-
-	ENGINE_TRACE(engine,
-		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
-		     prio,
-		     rq->fence.context, rq->fence.seqno,
-		     inflight->fence.context, inflight->fence.seqno,
-		     inflight->sched.attr.priority);
-
-	engine->sched_engine->queue_priority_hint = prio;
-
-	if (need_preempt(prio, rq_prio(inflight)))
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-
-unlock:
-	rcu_read_unlock();
-}
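
The net effect of the patch is to turn an unconditional kick_submission() call in the generic scheduler into an opt-in callback. The call-site hunk in i915_scheduler.c is cut off above, so the NULL-guarded dispatch below is an assumption based on the commit message, and the stand-in types are deliberately minimal rather than copies of the real i915 structures; it is a standalone model of the pattern, not the driver code:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the real i915 types, just enough to model the pattern. */
struct i915_request { int dummy; };

struct i915_sched_engine {
	int queue_priority_hint;
	/* NULL unless the back-end opts in to post-update kicks. */
	void (*kick_backend)(const struct i915_request *rq, int prio);
};

static void kick_execlists_model(const struct i915_request *rq, int prio)
{
	(void)rq;
	printf("execlists back-end kicked at prio %d\n", prio);
}

/* Generic scheduler path: call the kick only if the back-end registered one. */
static void sched_update(struct i915_sched_engine *se,
			 const struct i915_request *rq, int prio)
{
	if (se->kick_backend)
		se->kick_backend(rq, prio);
}

int main(void)
{
	struct i915_request rq = { 0 };
	struct i915_sched_engine execlists = { .kick_backend = kick_execlists_model };
	struct i915_sched_engine other = { .kick_backend = NULL }; /* opts out */

	sched_update(&execlists, &rq, 1);	/* kicks the back-end */
	sched_update(&other, &rq, 1);		/* silently skipped */
	return 0;
}

A back-end that never sets kick_backend pays only a NULL check per scheduling update, which is the point of making the kick opt-in.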

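The preemption threshold that kick_execlists() open-codes (and that the removed need_preempt() helper expressed) is easy to sanity-check in isolation. A standalone sketch, assuming I915_PRIORITY_NORMAL is 0 with negative values meaning low priority (the real constants live in i915_priolist_types.h):

#include <stdio.h>

#define I915_PRIORITY_NORMAL 0	/* assumed value for illustration */
#define max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Mirrors the heuristic above: a new request preempts only if it is at
 * least normal priority AND outranks the in-flight request, so low
 * priority work never preempts other low priority work.
 */
static int need_preempt(int prio, int active)
{
	return prio >= max(I915_PRIORITY_NORMAL, active);
}

int main(void)
{
	printf("%d\n", need_preempt(-1, -2));	/* 0: low vs low, no kick */
	printf("%d\n", need_preempt(0, -2));	/* 1: normal preempts background */
	printf("%d\n", need_preempt(1, 0));	/* 1: high preempts normal */
	printf("%d\n", need_preempt(0, 2));	/* 0: inflight outranks us */
	return 0;
}
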
Re: [PATCH 6/8] drm/i915: Add kick_backend function to i915_sched_engine

2021-06-14 Thread Daniele Ceraolo Spurio
On 6/8/2021 12:17 PM, Matthew Brost wrote:

> Rather than touching execlist specific structures in the generic
> scheduling code, add a callback function in the backend.


I think this could do with better wording to explain the reasoning
more, something like: "Not all back-ends require a kick after a
scheduling update, so make the kick a call-back function that the
back-end can opt-in to. Also move the current kick function from the
scheduler to the execlists file as it is specific to that back-end".
With something like that:


Reviewed-by: Daniele Ceraolo Spurio 

Daniele


[PATCH 6/8] drm/i915: Add kick_backend function to i915_sched_engine

2021-06-08 Thread Matthew Brost
Rather than touching execlist specific structures in the generic
scheduling code, add a callback function in the backend.

Signed-off-by: Matthew Brost 
---
 .../drm/i915/gt/intel_execlists_submission.c  | 52 
 drivers/gpu/drm/i915/i915_scheduler.c | 62 +--
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  6 ++
 3 files changed, 60 insertions(+), 60 deletions(-)
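
The i915_scheduler_types.h hunk (+6 lines in the diffstat) is not visible in any of the copies above. Going by the signature kick_execlists() is registered with, the new member presumably looks something like this sketch (member comment and placement assumed, not the verbatim hunk):

struct i915_sched_engine {
	/* ... existing members ... */

	/**
	 * @kick_backend: kick the back-end after a scheduling update,
	 * set only by back-ends that opt in (NULL otherwise).
	 */
	void	(*kick_backend)(const struct i915_request *rq,
				int prio);
};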
