[PATCH 7/8] drm/i915: Update i915_scheduler to operate on i915_sched_engine

2021-06-17 Thread Matthew Brost
Rather than passing around an intel_engine_cs in the scheduling code, pass
around an i915_sched_engine.

v3:
 (Jason Ekstrand)
  Add READ_ONCE around rq->engine in lock_sched_engine

Signed-off-by: Matthew Brost 
Reviewed-by: Jason Ekstrand 
---
 .../drm/i915/gt/intel_execlists_submission.c  | 11 +++--
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  2 +-
 drivers/gpu/drm/i915/i915_scheduler.c | 46 +--
 drivers/gpu/drm/i915/i915_scheduler.h |  2 +-
 4 files changed, 32 insertions(+), 29 deletions(-)
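
At a glance, the change to the priolist helper is mechanical: callers now
hand over the scheduler itself instead of the engine it hangs off. A
minimal before/after sketch of the prototype, illustrative only and
matching the i915_scheduler.c hunk below:

	/* Before: the helper derived the scheduler from the engine. */
	struct list_head *
	i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);

	/* After: callers pass the i915_sched_engine directly. */
	struct list_head *
	i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio);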

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 9487d9e0be62..ffad4d98cec0 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -382,7 +382,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
-   pl = i915_sched_lookup_priolist(engine, prio);
+   pl = i915_sched_lookup_priolist(engine->sched_engine,
+   prio);
}
GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
 
@@ -1096,7 +1097,8 @@ static void defer_active(struct intel_engine_cs *engine)
if (!rq)
return;
 
-   defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+   defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
+rq_prio(rq)));
 }
 
 static bool
@@ -2083,7 +2085,7 @@ static void __execlists_unhold(struct i915_request *rq)
 
 		i915_request_clear_hold(rq);
 		list_move_tail(&rq->sched.link,
-			       i915_sched_lookup_priolist(rq->engine,
+			       i915_sched_lookup_priolist(rq->engine->sched_engine,
 							  rq_prio(rq)));
 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
@@ -2452,7 +2454,8 @@ static void queue_request(struct intel_engine_cs *engine,
 {
 	GEM_BUG_ON(!list_empty(&rq->sched.link));
 	list_add_tail(&rq->sched.link,
-		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
+		      i915_sched_lookup_priolist(engine->sched_engine,
+						 rq_prio(rq)));
 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 60121809e6e2..cb13cc586c67 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -503,7 +503,7 @@ static inline void queue_request(struct intel_engine_cs *engine,
 {
 	GEM_BUG_ON(!list_empty(&rq->sched.link));
 	list_add_tail(&rq->sched.link,
-		      i915_sched_lookup_priolist(engine, prio));
+		      i915_sched_lookup_priolist(engine->sched_engine, prio));
 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 035b88f2e4aa..fa8863df9513 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -61,14 +61,13 @@ static void assert_priolists(struct i915_sched_engine * const sched_engine)
 }
 
 struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
 {
-   struct i915_sched_engine * const sched_engine = engine->sched_engine;
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
 
-	lockdep_assert_held(&engine->sched_engine->lock);
+	lockdep_assert_held(&sched_engine->lock);
assert_priolists(sched_engine);
 
if (unlikely(sched_engine->no_priolist))
@@ -130,13 +129,13 @@ struct sched_cache {
struct list_head *priolist;
 };
 
-static struct intel_engine_cs *
-sched_lock_engine(const struct i915_sched_node *node,
- struct intel_engine_cs *locked,
+static struct i915_sched_engine *
+lock_sched_engine(struct i915_sched_node *node,
+ struct i915_sched_engine *locked,
  struct sched_cache *cache)
 {
const struct i915_request *rq = node_to_request(node);
-   struct intel_engine_cs *engine;
+   struct i915_sched_engine *sched_engine;
 
GEM_BUG_ON(!locked);
 
@@ -146,14 +145,14 @@ sched_lock_engine(const struct i915_sched_node *node,
 * engine lock. The simple ploy we use is to take the lock then
 * check that the rq still belongs to the newly locked engine.
 */
-   while (locked != (engine = READ_ONCE(rq->engine))) {
-
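
The archived message is truncated above, right where the v3 READ_ONCE
change lands. For context, a minimal sketch of what the resulting retry
loop plausibly looks like after the rename — reconstructed from the
visible context lines and the pre-patch sched_lock_engine(), so the exact
body is an assumption, not the verbatim patch:

	/* Sketch under assumptions; not the verbatim hunk from this patch. */
	static struct i915_sched_engine *
	lock_sched_engine(struct i915_sched_node *node,
			  struct i915_sched_engine *locked,
			  struct sched_cache *cache)
	{
		const struct i915_request *rq = node_to_request(node);
		struct i915_sched_engine *sched_engine;

		GEM_BUG_ON(!locked);

		/*
		 * Virtual engines complicate acquiring the engine lock:
		 * rq->engine can change as the request is moved, so re-read it
		 * with READ_ONCE() on every iteration and retry until the lock
		 * we hold matches the scheduler the request now belongs to.
		 */
		while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
			spin_unlock(&locked->lock);
			memset(cache, 0, sizeof(*cache));
			spin_lock(&sched_engine->lock);
			locked = sched_engine;
		}

		GEM_BUG_ON(locked != sched_engine);
		return sched_engine;
	}

The comment in the hunk above explains the design choice: rather than any
heavier synchronization, the code simply takes the lock and re-checks
that the request still belongs to the newly locked scheduler, looping if
it migrated in the meantime.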
