Re: [Intel-gfx] [CI 07/14] drm/i915/selftests: Exercise priority inheritance around an engine loop

2021-02-02 Thread Chris Wilson
Quoting Tvrtko Ursulin (2021-02-02 16:44:26)
> 
> On 02/02/2021 15:14, Chris Wilson wrote:
> > + err = 0;
> > + count = 0;
> > + for_each_uabi_engine(engine, i915) {
> > + if (!intel_engine_has_scheduler(engine))
> > + continue;
> > +
> > + rq = __write_timestamp(engine, obj, count, rq);
> > + if (IS_ERR(rq)) {
> > + err = PTR_ERR(rq);
> > + break;
> > + }
> > +
> > + count++;
> > + }
> 
>   - two of the same by copy&paste error or couldn't be bothered
> with outer loop?

It was just my thought process at the time, I wanted the
A->Z; A->Z pair so that it clear that it was cyclic and just didn't
think of putting it inside another loop.
-Chris
___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


Re: [Intel-gfx] [CI 07/14] drm/i915/selftests: Exercise priority inheritance around an engine loop

2021-02-02 Thread Tvrtko Ursulin



On 02/02/2021 15:14, Chris Wilson wrote:

Exercise rescheduling priority inheritance around a sequence of requests
that wrap around all the engines.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
  .../gpu/drm/i915/selftests/i915_scheduler.c   | 225 ++
  1 file changed, 225 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c 
b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index d095fab2ccec..acc666f755d7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -7,6 +7,7 @@
  
  #include "gt/intel_context.h"

  #include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
  #include "gt/selftest_engine_heartbeat.h"
  #include "selftests/igt_spinner.h"
  #include "selftests/i915_random.h"
@@ -504,10 +505,234 @@ static int igt_priority_chains(void *arg)
return igt_schedule_chains(arg, igt_priority);
  }
  
+static struct i915_request *

+__write_timestamp(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ int slot,
+ struct i915_request *prev)
+{
+   struct i915_request *rq = ERR_PTR(-EINVAL);
+   bool use_64b = INTEL_GEN(engine->i915) >= 8;
+   struct intel_context *ce;
+   struct i915_vma *vma;
+   int err = 0;
+   u32 *cs;
+
+   ce = intel_context_create(engine);
+   if (IS_ERR(ce))
+   return ERR_CAST(ce);
+
+   vma = i915_vma_instance(obj, ce->vm, NULL);
+   if (IS_ERR(vma)) {
+   err = PTR_ERR(vma);
+   goto out_ce;
+   }
+
+   err = i915_vma_pin(vma, 0, 0, PIN_USER);
+   if (err)
+   goto out_ce;
+
+   rq = intel_context_create_request(ce);
+   if (IS_ERR(rq)) {
+   err = PTR_ERR(rq);
+   goto out_unpin;
+   }
+
+   i915_vma_lock(vma);
+   err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+   i915_vma_unlock(vma);
+   if (err)
+   goto out_request;
+
+   if (prev) {
+   err = i915_request_await_dma_fence(rq, >fence);
+   if (err)
+   goto out_request;
+   }
+
+   if (engine->emit_init_breadcrumb) {
+   err = engine->emit_init_breadcrumb(rq);
+   if (err)
+   goto out_request;
+   }
+
+   cs = intel_ring_begin(rq, 4);
+   if (IS_ERR(cs)) {
+   err = PTR_ERR(cs);
+   goto out_request;
+   }
+
+   *cs++ = MI_STORE_REGISTER_MEM + use_64b;
+   *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base));
+   *cs++ = lower_32_bits(vma->node.start) + sizeof(u32) * slot;
+   *cs++ = upper_32_bits(vma->node.start);
+   intel_ring_advance(rq, cs);
+
+   i915_request_get(rq);
+out_request:
+   i915_request_add(rq);
+out_unpin:
+   i915_vma_unpin(vma);
+out_ce:
+   intel_context_put(ce);
+   i915_request_put(prev);
+   return err ? ERR_PTR(err) : rq;
+}
+
+static struct i915_request *create_spinner(struct drm_i915_private *i915,
+  struct igt_spinner *spin)
+{
+   struct intel_engine_cs *engine;
+
+   for_each_uabi_engine(engine, i915) {
+   struct intel_context *ce;
+   struct i915_request *rq;
+
+   if (igt_spinner_init(spin, engine->gt))
+   return ERR_PTR(-ENOMEM);
+
+   ce = intel_context_create(engine);
+   if (IS_ERR(ce))
+   return ERR_CAST(ce);
+
+   rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+   intel_context_put(ce);
+   if (rq == ERR_PTR(-ENODEV))
+   continue;
+   if (IS_ERR(rq))
+   return rq;
+
+   i915_request_get(rq);
+   i915_request_add(rq);
+   return rq;
+   }
+
+   return ERR_PTR(-ENODEV);
+}
+
+static bool has_timestamp(const struct drm_i915_private *i915)
+{
+   return INTEL_GEN(i915) >= 7;
+}
+
+static int __igt_schedule_cycle(struct drm_i915_private *i915,
+   bool (*fn)(struct i915_request *rq,
+  unsigned long v, unsigned long e))
+{
+   struct intel_engine_cs *engine;
+   struct drm_i915_gem_object *obj;
+   struct igt_spinner spin;
+   struct i915_request *rq;
+   unsigned long count, n;
+   u32 *time, last;
+   int err;
+
+   /*
+* Queue a bunch of ordered requests (each waiting on the previous)
+* around the engines a couple of times. Each request will write
+* the timestamp it executes at into the scratch, with the expectation
+* that the timestamp will be in our desired execution order.
+*/
+
+   if (!i915->caps.scheduler || !has_timestamp(i915))
+   return 0;
+
+   obj = i915_gem_object_create_internal(i915, 

[Intel-gfx] [CI 07/14] drm/i915/selftests: Exercise priority inheritance around an engine loop

2021-02-02 Thread Chris Wilson
Exercise rescheduling priority inheritance around a sequence of requests
that wrap around all the engines.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/selftests/i915_scheduler.c   | 225 ++
 1 file changed, 225 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c 
b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index d095fab2ccec..acc666f755d7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -7,6 +7,7 @@
 
 #include "gt/intel_context.h"
 #include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
 #include "gt/selftest_engine_heartbeat.h"
 #include "selftests/igt_spinner.h"
 #include "selftests/i915_random.h"
@@ -504,10 +505,234 @@ static int igt_priority_chains(void *arg)
return igt_schedule_chains(arg, igt_priority);
 }
 
+static struct i915_request *
+__write_timestamp(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ int slot,
+ struct i915_request *prev)
+{
+   struct i915_request *rq = ERR_PTR(-EINVAL);
+   bool use_64b = INTEL_GEN(engine->i915) >= 8;
+   struct intel_context *ce;
+   struct i915_vma *vma;
+   int err = 0;
+   u32 *cs;
+
+   ce = intel_context_create(engine);
+   if (IS_ERR(ce))
+   return ERR_CAST(ce);
+
+   vma = i915_vma_instance(obj, ce->vm, NULL);
+   if (IS_ERR(vma)) {
+   err = PTR_ERR(vma);
+   goto out_ce;
+   }
+
+   err = i915_vma_pin(vma, 0, 0, PIN_USER);
+   if (err)
+   goto out_ce;
+
+   rq = intel_context_create_request(ce);
+   if (IS_ERR(rq)) {
+   err = PTR_ERR(rq);
+   goto out_unpin;
+   }
+
+   i915_vma_lock(vma);
+   err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+   i915_vma_unlock(vma);
+   if (err)
+   goto out_request;
+
+   if (prev) {
+   err = i915_request_await_dma_fence(rq, >fence);
+   if (err)
+   goto out_request;
+   }
+
+   if (engine->emit_init_breadcrumb) {
+   err = engine->emit_init_breadcrumb(rq);
+   if (err)
+   goto out_request;
+   }
+
+   cs = intel_ring_begin(rq, 4);
+   if (IS_ERR(cs)) {
+   err = PTR_ERR(cs);
+   goto out_request;
+   }
+
+   *cs++ = MI_STORE_REGISTER_MEM + use_64b;
+   *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base));
+   *cs++ = lower_32_bits(vma->node.start) + sizeof(u32) * slot;
+   *cs++ = upper_32_bits(vma->node.start);
+   intel_ring_advance(rq, cs);
+
+   i915_request_get(rq);
+out_request:
+   i915_request_add(rq);
+out_unpin:
+   i915_vma_unpin(vma);
+out_ce:
+   intel_context_put(ce);
+   i915_request_put(prev);
+   return err ? ERR_PTR(err) : rq;
+}
+
+static struct i915_request *create_spinner(struct drm_i915_private *i915,
+  struct igt_spinner *spin)
+{
+   struct intel_engine_cs *engine;
+
+   for_each_uabi_engine(engine, i915) {
+   struct intel_context *ce;
+   struct i915_request *rq;
+
+   if (igt_spinner_init(spin, engine->gt))
+   return ERR_PTR(-ENOMEM);
+
+   ce = intel_context_create(engine);
+   if (IS_ERR(ce))
+   return ERR_CAST(ce);
+
+   rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+   intel_context_put(ce);
+   if (rq == ERR_PTR(-ENODEV))
+   continue;
+   if (IS_ERR(rq))
+   return rq;
+
+   i915_request_get(rq);
+   i915_request_add(rq);
+   return rq;
+   }
+
+   return ERR_PTR(-ENODEV);
+}
+
+static bool has_timestamp(const struct drm_i915_private *i915)
+{
+   return INTEL_GEN(i915) >= 7;
+}
+
+static int __igt_schedule_cycle(struct drm_i915_private *i915,
+   bool (*fn)(struct i915_request *rq,
+  unsigned long v, unsigned long e))
+{
+   struct intel_engine_cs *engine;
+   struct drm_i915_gem_object *obj;
+   struct igt_spinner spin;
+   struct i915_request *rq;
+   unsigned long count, n;
+   u32 *time, last;
+   int err;
+
+   /*
+* Queue a bunch of ordered requests (each waiting on the previous)
+* around the engines a couple of times. Each request will write
+* the timestamp it executes at into the scratch, with the expectation
+* that the timestamp will be in our desired execution order.
+*/
+
+   if (!i915->caps.scheduler || !has_timestamp(i915))
+   return 0;
+
+   obj = i915_gem_object_create_internal(i915, SZ_64K);
+   if (IS_ERR(obj))
+   return