Re: [Intel-gfx] [PATCH 22/57] drm/i915: Move scheduler queue

2021-02-04 Thread Chris Wilson
Quoting Tvrtko Ursulin (2021-02-04 11:19:00)
> 
> On 01/02/2021 08:56, Chris Wilson wrote:
> > @@ -252,10 +242,6 @@ struct intel_engine_execlists {
> >*/
> >   int queue_priority_hint;
> >   
> > - /**
> > -  * @queue: queue of requests, in priority lists
> > -  */
> > - struct rb_root_cached queue;
> >   struct rb_root_cached virtual;
> 
> Presumably virtual queue will go later in the series since I have seen 
> some patches which improve that algorithm.

There's no commonality yet, so I left it in execlists. All of the
virtual engines do themselves migrate to using the scheduler completely,
but the association between the physical/virtual is still buried inside
execlists.

On the face of it, virtual_requeue() does only talk between the
base scheduler structs, so it looks like it could be easily extracted.
But the guc is going to use a single scheduling channel onto an
out-of-order guc queue, which doesn't allow us to use late greedy virtual
scheduling ourselves (we have no choice on which queue to use).

So not even a second user, let alone a third, to check if the semantics
are general enough.
-Chris
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


Re: [Intel-gfx] [PATCH 22/57] drm/i915: Move scheduler queue

2021-02-04 Thread Chris Wilson
Quoting Tvrtko Ursulin (2021-02-04 11:19:00)
> 
> On 01/02/2021 08:56, Chris Wilson wrote:
> >   bool intel_engine_is_idle(struct intel_engine_cs *engine)
> >   {
> > + struct i915_sched *se = intel_engine_get_scheduler(engine);
> 
> What do you have 'se' stand for?

Originally i915_sched_engine, then working backwards there was just a
single i915_sched struct (but may have a few working together). So it's
CPU runlist with a bit more speciality logic.

This is akin to drm_sched_entity, se could work for that. Other ideas
for the name would be i915_sched_queue.

Anyway se stuck around from the first pass. It was easy to type!
-Chris
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


Re: [Intel-gfx] [PATCH 22/57] drm/i915: Move scheduler queue

2021-02-04 Thread Tvrtko Ursulin



On 01/02/2021 08:56, Chris Wilson wrote:

Extract the scheduling queue from "execlists" into the per-engine
scheduling structs, for reuse by other backends.

Signed-off-by: Chris Wilson 
---
  .../gpu/drm/i915/gem/i915_gem_context_types.h |  2 +-
  drivers/gpu/drm/i915/gem/i915_gem_wait.c  |  1 +
  drivers/gpu/drm/i915/gt/intel_engine_cs.c |  7 ++-
  drivers/gpu/drm/i915/gt/intel_engine_pm.c |  3 +-
  drivers/gpu/drm/i915/gt/intel_engine_types.h  | 14 -
  .../drm/i915/gt/intel_execlists_submission.c  | 29 +-
  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 11 ++--
  drivers/gpu/drm/i915/i915_drv.h   |  1 -
  drivers/gpu/drm/i915/i915_request.h   |  2 +-
  drivers/gpu/drm/i915/i915_scheduler.c | 57 ---
  drivers/gpu/drm/i915/i915_scheduler.h | 15 +
  drivers/gpu/drm/i915/i915_scheduler_types.h   | 14 +
  .../gpu/drm/i915/selftests/i915_scheduler.c   | 13 ++---
  13 files changed, 100 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 085f6a3735e8..d5bc75508048 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -19,7 +19,7 @@
  
  #include "gt/intel_context_types.h"
  
-#include "i915_scheduler.h"

+#include "i915_scheduler_types.h"
  #include "i915_sw_fence.h"
  
  struct pid;

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c 
b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index d79bf16083bd..4d1897c347b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -13,6 +13,7 @@
  #include "dma_resv_utils.h"
  #include "i915_gem_ioctls.h"
  #include "i915_gem_object.h"
+#include "i915_scheduler.h"
  
  static long

  i915_gem_object_wait_fence(struct dma_fence *fence,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c 
b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d7ff84d92936..4c07c6f61924 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -574,7 +574,6 @@ void intel_engine_init_execlists(struct intel_engine_cs 
*engine)
memset(execlists->inflight, 0, sizeof(execlists->inflight));
  
  	execlists->queue_priority_hint = INT_MIN;

-   execlists->queue = RB_ROOT_CACHED;
  }
  
  static void cleanup_status_page(struct intel_engine_cs *engine)

@@ -911,7 +910,7 @@ int intel_engines_init(struct intel_gt *gt)
   */
  void intel_engine_cleanup_common(struct intel_engine_cs *engine)
  {
> > -   GEM_BUG_ON(!list_empty(&engine->sched.requests));
+   i915_sched_fini(intel_engine_get_scheduler(engine));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
  
  	intel_breadcrumbs_free(engine->breadcrumbs);

@@ -1225,6 +1224,8 @@ void __intel_engine_flush_submission(struct 
intel_engine_cs *engine, bool sync)
   */
  bool intel_engine_is_idle(struct intel_engine_cs *engine)
  {
+   struct i915_sched *se = intel_engine_get_scheduler(engine);


What do you have 'se' stand for?


+
/* More white lies, if wedged, hw state is inconsistent */
if (intel_gt_is_wedged(engine->gt))
return true;
@@ -1237,7 +1238,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
intel_engine_flush_submission(engine);
  
  	/* ELSP is empty, but there are ready requests? E.g. after reset */

-   if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+   if (!i915_sched_is_idle(se))
return false;
  
  	/* Ring stopped? */

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c 
b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 6372d7826bc9..3510c9236334 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -4,6 +4,7 @@
   */
  
  #include "i915_drv.h"

+#include "i915_scheduler.h"
  
  #include "intel_breadcrumbs.h"

  #include "intel_context.h"
@@ -276,7 +277,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (engine->park)
engine->park(engine);
  
-	engine->execlists.no_priolist = false;

+   i915_sched_park(intel_engine_get_scheduler(engine));
  
  	/* While gt calls i915_vma_parked(), we have to break the lock cycle */

intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h 
b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 0936b0699cbb..c36bdd957f8f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -153,11 +153,6 @@ struct intel_engine_execlists {
 */
struct timer_list preempt;
  
-	/**

-* @default_priolist: priority list for I915_PRIORITY_NORMAL
-*/
-   struct i915_priolist default_priolist;
-
/**
 * @ccid: identifier for contexts submitted to this engine
 */
@@ -192,11 +187,6 @@ struct intel_engine_execlists {
 */

[Intel-gfx] [PATCH 22/57] drm/i915: Move scheduler queue

2021-02-01 Thread Chris Wilson
Extract the scheduling queue from "execlists" into the per-engine
scheduling structs, for reuse by other backends.

Signed-off-by: Chris Wilson 
---
 .../gpu/drm/i915/gem/i915_gem_context_types.h |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c  |  1 +
 drivers/gpu/drm/i915/gt/intel_engine_cs.c |  7 ++-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c |  3 +-
 drivers/gpu/drm/i915/gt/intel_engine_types.h  | 14 -
 .../drm/i915/gt/intel_execlists_submission.c  | 29 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 11 ++--
 drivers/gpu/drm/i915/i915_drv.h   |  1 -
 drivers/gpu/drm/i915/i915_request.h   |  2 +-
 drivers/gpu/drm/i915/i915_scheduler.c | 57 ---
 drivers/gpu/drm/i915/i915_scheduler.h | 15 +
 drivers/gpu/drm/i915/i915_scheduler_types.h   | 14 +
 .../gpu/drm/i915/selftests/i915_scheduler.c   | 13 ++---
 13 files changed, 100 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h 
b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 085f6a3735e8..d5bc75508048 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -19,7 +19,7 @@
 
 #include "gt/intel_context_types.h"
 
-#include "i915_scheduler.h"
+#include "i915_scheduler_types.h"
 #include "i915_sw_fence.h"
 
 struct pid;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c 
b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index d79bf16083bd..4d1897c347b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -13,6 +13,7 @@
 #include "dma_resv_utils.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
+#include "i915_scheduler.h"
 
 static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c 
b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d7ff84d92936..4c07c6f61924 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -574,7 +574,6 @@ void intel_engine_init_execlists(struct intel_engine_cs 
*engine)
memset(execlists->inflight, 0, sizeof(execlists->inflight));
 
execlists->queue_priority_hint = INT_MIN;
-   execlists->queue = RB_ROOT_CACHED;
 }
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
@@ -911,7 +910,7 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-   GEM_BUG_ON(!list_empty(&engine->sched.requests));
+   i915_sched_fini(intel_engine_get_scheduler(engine));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
 
intel_breadcrumbs_free(engine->breadcrumbs);
@@ -1225,6 +1224,8 @@ void __intel_engine_flush_submission(struct 
intel_engine_cs *engine, bool sync)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
+   struct i915_sched *se = intel_engine_get_scheduler(engine);
+
/* More white lies, if wedged, hw state is inconsistent */
if (intel_gt_is_wedged(engine->gt))
return true;
@@ -1237,7 +1238,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
intel_engine_flush_submission(engine);
 
/* ELSP is empty, but there are ready requests? E.g. after reset */
-   if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+   if (!i915_sched_is_idle(se))
return false;
 
/* Ring stopped? */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c 
b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 6372d7826bc9..3510c9236334 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -4,6 +4,7 @@
  */
 
 #include "i915_drv.h"
+#include "i915_scheduler.h"
 
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
@@ -276,7 +277,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (engine->park)
engine->park(engine);
 
-   engine->execlists.no_priolist = false;
+   i915_sched_park(intel_engine_get_scheduler(engine));
 
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h 
b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 0936b0699cbb..c36bdd957f8f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -153,11 +153,6 @@ struct intel_engine_execlists {
 */
struct timer_list preempt;
 
-   /**
-* @default_priolist: priority list for I915_PRIORITY_NORMAL
-*/
-   struct i915_priolist default_priolist;
-
/**
 * @ccid: identifier for contexts submitted to this engine
 */
@@ -192,11 +187,6 @@ struct intel_engine_execlists {
 */
u32 reset_ccid;
 
-   /**
-* @no_priolist: priority lists disabled
-*/
-