Re: [Intel-gfx] [PATCH igt] igt/gem_exec_schedule: Exercise "deep" preemption

2018-04-18 Thread Michał Winiarski
On Wed, Apr 11, 2018 at 11:13:56AM +0100, Chris Wilson wrote:
> In investigating the issue with having to force preemption within the
> executing ELSP[], we want to trigger preemption between all elements of
> that array. To that end, we issue a series of requests with different
> priorities to fill the in-flight ELSP[] and then demand preemption into
> the middle of that series. One can think of even more complicated
> reordering requirements of ELSP[], trying to switch between every
> possible combination of permutations. Rather than check all 2 billion
> combinations, be content with a few.
> 
> v2: Add a different pattern for queued requests. Not only do we need to
> inject a request into the middle of a single context with a queue of
> different priority contexts, but we also want a queue of different
> contexts, as they have different patterns of ELSP[] behaviour.
> 
> Signed-off-by: Chris Wilson 
> Cc: Mika Kuoppala 
> Cc: Michał Winiarski 
> ---
>  tests/gem_exec_schedule.c | 188 ++
>  1 file changed, 169 insertions(+), 19 deletions(-)
> 
> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> index d2f040ab..6ff15b6e 100644
> --- a/tests/gem_exec_schedule.c
> +++ b/tests/gem_exec_schedule.c

[snip]

> @@ -981,12 +1117,26 @@ igt_main
>   igt_subtest_f("preempt-contexts-%s", 
> e->name)
>   preempt(fd, e->exec_id | 
> e->flags, NEW_CTX);
>  
> - igt_subtest_f("preempt-other-%s", 
> e->name)
> - preempt_other(fd, e->exec_id | 
> e->flags);
> -
>   igt_subtest_f("preempt-self-%s", 
> e->name)
>   preempt_self(fd, e->exec_id | 
> e->flags);
>  
> + igt_subtest_f("preempt-other-%s", 
> e->name)
> + preempt_other(fd, e->exec_id | 
> e->flags, 0);
> +
> + igt_subtest_f("preempt-other-chain-%s", 
> e->name)
> + preempt_other(fd, e->exec_id | 
> e->flags, CHAIN);
> +
> + igt_subtest_f("preempt-queue-%s", 
> e->name)
> + preempt_queue(fd, e->exec_id | 
> e->flags, 0);
> +
> + igt_subtest_f("preempt-queue-chain-%s", 
> e->name)
> + preempt_queue(fd, e->exec_id | 
> e->flags, CHAIN);
> + igt_subtest_f("preempt-contexts-%s", 
> e->name)
> + preempt_queue(fd, e->exec_id | 
> e->flags, CONTEXTS);

With the duplicated "preempt-contexts" subtest name above changed to "preempt-queue-contexts".
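
That is, presumably something like the following (a sketch of the requested
rename; carrying it over to the -chain variant is a guess on my part, not
something the review spells out):

    igt_subtest_f("preempt-queue-contexts-%s", e->name)
            preempt_queue(fd, e->exec_id | e->flags, CONTEXTS);

    igt_subtest_f("preempt-queue-contexts-chain-%s", e->name)
            preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);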

Reviewed-by: Michał Winiarski 

-Michał

> +
> + igt_subtest_f("preempt-contexts-chain-%s", e->name)
> +         preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);
> +
>   igt_subtest_group {
>           igt_hang_t hang;
>  
> -- 
> 2.17.0
> 


[Intel-gfx] [PATCH igt] igt/gem_exec_schedule: Exercise "deep" preemption

2018-04-11 Thread Chris Wilson
In investigating the issue with having to force preemption within the
executing ELSP[], we want to trigger preemption between all elements of
that array. To that end, we issue a series of requests with different
priorities to fill the in-flight ELSP[] and then demand preemption into
the middle of that series. One can think of even more complicated
reordering requirements of ELSP[], trying to switch between every
possible combination of permutations. Rather than check all 2 billion
combinations, be content with a few.
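
(In miniature, the pattern being exercised looks something like the sketch
below. This is illustrative only and not part of the patch: the
fill_elsp_then_preempt() name, DEPTH and the exact priority values are made
up; the igt helpers are the ones the test already uses.)

    #define DEPTH 4

    static void fill_elsp_then_preempt(int fd, unsigned int engine)
    {
            igt_spin_t *spin[DEPTH], *mid;
            uint32_t ctx[DEPTH], hi;
            int n;

            /* Fill the in-flight ELSP[] with a descending-priority series,
             * one spinning batch per context. */
            for (n = 0; n < DEPTH; n++) {
                    ctx[n] = gem_context_create(fd);
                    gem_context_set_priority(fd, ctx[n], MAX_PRIO - 2 - 2*n);
                    spin[n] = __igt_spin_batch_new(fd, ctx[n], engine, 0);
            }

            /* Demand preemption into the middle of that series: the new
             * request outranks the tail of the queue but not its head. */
            hi = gem_context_create(fd);
            gem_context_set_priority(fd, hi, MAX_PRIO - 5);
            mid = __igt_spin_batch_new(fd, hi, engine, 0);

            /* Clean up: end the spinners and release the contexts. */
            igt_spin_batch_free(fd, mid);
            for (n = 0; n < DEPTH; n++) {
                    igt_spin_batch_free(fd, spin[n]);
                    gem_context_destroy(fd, ctx[n]);
            }
            gem_context_destroy(fd, hi);
    }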

v2: Add a different pattern for queued requests. Not only do we need to
inject a request into the middle of a single context with a queue of
different priority contexts, but we also want a queue of different
contexts, as they have different patterns of ELSP[] behaviour.

Signed-off-by: Chris Wilson 
Cc: Mika Kuoppala 
Cc: Michał Winiarski 
---
 tests/gem_exec_schedule.c | 188 ++
 1 file changed, 169 insertions(+), 19 deletions(-)

diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index d2f040ab..6ff15b6e 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -373,13 +373,78 @@ static void preempt(int fd, unsigned ring, unsigned flags)
gem_close(fd, result);
 }
 
-static void preempt_other(int fd, unsigned ring)
+#define CHAIN 0x1
+#define CONTEXTS 0x2
+
+static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
+{
+   unsigned other;
+
+   gem_context_set_priority(fd, ctx, prio);
+
+   for_each_physical_engine(fd, other) {
+   if (spin == NULL) {
+   spin = __igt_spin_batch_new(fd, ctx, other, 0);
+   } else {
+   struct drm_i915_gem_exec_object2 obj = {
+   .handle = spin->handle,
+   };
+   struct drm_i915_gem_execbuffer2 eb = {
+   .buffer_count = 1,
+   .buffers_ptr = to_user_pointer(&obj),
+   .rsvd1 = ctx,
+   .flags = other,
+   };
+   gem_execbuf(fd, &eb);
+   }
+   }
+
+   return spin;
+}
+
+static void __preempt_other(int fd,
+   uint32_t *ctx,
+   unsigned int target, unsigned int primary,
+   unsigned flags)
 {
uint32_t result = gem_create(fd, 4096);
uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
-   igt_spin_t *spin[MAX_ENGINES];
-   unsigned int other;
-   unsigned int n, i;
+   unsigned int n, i, other;
+
+   n = 0;
+   store_dword(fd, ctx[LO], primary,
+   result, (n + 1)*sizeof(uint32_t), n + 1,
+   0, I915_GEM_DOMAIN_RENDER);
+   n++;
+
+   if (flags & CHAIN) {
+   for_each_physical_engine(fd, other) {
+   store_dword(fd, ctx[LO], other,
+   result, (n + 1)*sizeof(uint32_t), n + 1,
+   0, I915_GEM_DOMAIN_RENDER);
+   n++;
+   }
+   }
+
+   store_dword(fd, ctx[HI], target,
+   result, (n + 1)*sizeof(uint32_t), n + 1,
+   0, I915_GEM_DOMAIN_RENDER);
+
+   igt_debugfs_dump(fd, "i915_engine_info");
+   gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
+   n++;
+   for (i = 0; i <= n; i++)
+   igt_assert_eq_u32(ptr[i], i);
+
+   munmap(ptr, 4096);
+   gem_close(fd, result);
+}
+
+static void preempt_other(int fd, unsigned ring, unsigned int flags)
+{
+   unsigned int primary;
+   igt_spin_t *spin = NULL;
uint32_t ctx[3];
 
/* On each engine, insert
@@ -396,36 +461,97 @@ static void preempt_other(int fd, unsigned ring)
gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 
ctx[NOISE] = gem_context_create(fd);
+   spin = __noise(fd, ctx[NOISE], 0, NULL);
 
ctx[HI] = gem_context_create(fd);
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
 
+   for_each_physical_engine(fd, primary) {
+   igt_debug("Primary engine: %s\n", e__->name);
+   __preempt_other(fd, ctx, ring, primary, flags);
+
+   }
+
+   igt_assert(gem_bo_busy(fd, spin->handle));
+   igt_spin_batch_free(fd, spin);
+
+   gem_context_destroy(fd, ctx[LO]);
+   gem_context_destroy(fd, ctx[NOISE]);
+   gem_context_destroy(fd, ctx[HI]);
+}
+
+static void __preempt_queue(int fd,
+   unsigned target, unsigned primary,
+   unsigned depth, unsigned flags)
+{
+   uint32_t result = gem_create(fd, 4096);
+   uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+   igt_spin_t *above = NULL, *below = NULL;
+   unsigned int other, n, i;
+   int prio = MAX_PRIO;
+   uint32_t ctx[3] =

[Intel-gfx] [PATCH igt] igt/gem_exec_schedule: Exercise "deep" preemption

2018-02-25 Thread Chris Wilson
In investigating the issue with having to force preemption within the
executing ELSP[], we want to trigger preemption between all elements of
that array. To that end, we issue a series of requests with different
priorities to fill the in-flight ELSP[] and then demand preemption into
the middle of that series. One can think of even more complicated
reordering requirements of ELSP[], trying to switch between every
possible combination of permutations. Rather than check all 2 billion
combinations, be content with a few.

Signed-off-by: Chris Wilson 
Cc: Mika Kuoppala 
Cc: Michał Winiarski 
---
 tests/gem_exec_schedule.c | 172 +-
 1 file changed, 153 insertions(+), 19 deletions(-)

diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 8a69ab5c..87fe4572 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -373,13 +373,77 @@ static void preempt(int fd, unsigned ring, unsigned flags)
gem_close(fd, result);
 }
 
-static void preempt_other(int fd, unsigned ring)
+#define CHAIN 0x1
+
+static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
+{
+   unsigned other;
+
+   gem_context_set_priority(fd, ctx, prio);
+
+   for_each_physical_engine(fd, other) {
+   if (spin == NULL) {
+   spin = __igt_spin_batch_new(fd, ctx, other, 0);
+   } else {
+   struct drm_i915_gem_exec_object2 obj = {
+   .handle = spin->handle,
+   };
+   struct drm_i915_gem_execbuffer2 eb = {
+   .buffer_count = 1,
+   .buffers_ptr = to_user_pointer(&obj),
+   .rsvd1 = ctx,
+   .flags = other,
+   };
+   gem_execbuf(fd, &eb);
+   }
+   }
+
+   return spin;
+}
+
+static void __preempt_other(int fd,
+   uint32_t *ctx,
+   unsigned int target, unsigned int primary,
+   unsigned flags)
 {
uint32_t result = gem_create(fd, 4096);
uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
-   igt_spin_t *spin[MAX_ENGINES];
-   unsigned int other;
-   unsigned int n, i;
+   unsigned int n, i, other;
+
+   n = 0;
+   store_dword(fd, ctx[LO], primary,
+   result, (n + 1)*sizeof(uint32_t), n + 1,
+   0, I915_GEM_DOMAIN_RENDER);
+   n++;
+
+   if (flags & CHAIN) {
+   for_each_physical_engine(fd, other) {
+   store_dword(fd, ctx[LO], other,
+   result, (n + 1)*sizeof(uint32_t), n + 1,
+   0, I915_GEM_DOMAIN_RENDER);
+   n++;
+   }
+   }
+
+   store_dword(fd, ctx[HI], target,
+   result, (n + 1)*sizeof(uint32_t), n + 1,
+   0, I915_GEM_DOMAIN_RENDER);
+
+   igt_debugfs_dump(fd, "i915_engine_info");
+   gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
+
+   n++;
+   for (i = 0; i <= n; i++)
+   igt_assert_eq_u32(ptr[i], i);
+
+   munmap(ptr, 4096);
+   gem_close(fd, result);
+}
+
+static void preempt_other(int fd, unsigned ring, unsigned int flags)
+{
+   unsigned int primary;
+   igt_spin_t *spin = NULL;
uint32_t ctx[3];
 
/* On each engine, insert
@@ -396,36 +460,87 @@ static void preempt_other(int fd, unsigned ring)
gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 
ctx[NOISE] = gem_context_create(fd);
+   spin = __noise(fd, ctx[NOISE], 0, NULL);
 
ctx[HI] = gem_context_create(fd);
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
 
+   for_each_physical_engine(fd, primary) {
+   igt_debug("Primary engine: %s\n", e__->name);
+   __preempt_other(fd, ctx, ring, primary, flags);
+
+   }
+
+   igt_assert(gem_bo_busy(fd, spin->handle));
+   igt_spin_batch_free(fd, spin);
+
+   gem_context_destroy(fd, ctx[LO]);
+   gem_context_destroy(fd, ctx[NOISE]);
+   gem_context_destroy(fd, ctx[HI]);
+}
+
+static void __preempt_queue(int fd,
+   unsigned target, unsigned primary,
+   unsigned depth, unsigned flags)
+{
+   uint32_t result = gem_create(fd, 4096);
+   uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
+   igt_spin_t *above = NULL, *below = NULL;
+   unsigned int other, n, i;
+   int prio = MAX_PRIO;
+   uint32_t ctx[3] = {
+   gem_context_create(fd),
+   gem_context_create(fd),
+   gem_context_create(fd),
+   };
+
+   for (n = 0; n < depth; n++)
+   above = __noise(fd, ctx[NOISE], prio--, above);
+
+   gem_context_set_priority(fd, ctx[HI], pr