Just create several batches of requests and expect it to not fall over!

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/selftests/i915_gem_request.c  | 95 ++++++++++++++++++++++
 .../gpu/drm/i915/selftests/i915_live_selftests.h   |  1 +
 2 files changed, 96 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 20bf10dd85ed..19103d87a4c3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -22,6 +22,8 @@
  *
  */
 
+#include <linux/prime_numbers.h>
+
 #include "i915_selftest.h"
 
 #include "mock_gem_device.h"
@@ -161,3 +163,96 @@ int i915_gem_request_mock_selftests(void)
 
        return err;
 }
+
+static int live_nop_request(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_engine_cs *engine;
+       unsigned int id;
+       int err;
+
+       /* Submit various sized batches of empty requests, to each engine
+        * (individually), and wait for the batch to complete. We can check
+        * the overhead of submitting requests to the hardware.
+        */
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       for_each_engine(engine, i915, id) {
+               IGT_TIMEOUT(end_time);
+               struct drm_i915_gem_request *request;
+               unsigned int reset_count;
+               unsigned long n, prime;
+               ktime_t times[2] = {};
+
+               err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+               if (err) {
+                       pr_err("Failed to idle GPU before %s(%s)\n",
+                              __func__, engine->name);
+                       goto out_unlock;
+               }
+
+               i915->gpu_error.missed_irq_rings = 0;
+               reset_count = i915_reset_count(&i915->gpu_error);
+
+               for_each_prime_number_from(prime, 1, 8192) {
+                       times[1] = ktime_get_raw();
+
+                       for (n = 0; n < prime; n++) {
+                               request = i915_gem_request_alloc(engine,
+                                                                i915->kernel_context);
+                               if (IS_ERR(request)) {
+                                       err = PTR_ERR(request);
+                                       goto out_unlock;
+                               }
+
+                               i915_add_request(request);
+                       }
+                       i915_wait_request(request,
+                                         I915_WAIT_LOCKED,
+                                         MAX_SCHEDULE_TIMEOUT);
+
+                       times[1] = ktime_sub(ktime_get_raw(), times[1]);
+                       if (prime == 1)
+                               times[0] = times[1];
+
+                       if (igt_timeout(end_time,
+                                       "%s(%s) timed out: last batch size %lu\n",
+                                       __func__, engine->name, prime))
+                               break;
+               }
+
+               if (reset_count != i915_reset_count(&i915->gpu_error)) {
+                       pr_err("%s(%s): GPU was reset %d times!\n",
+                              __func__, engine->name,
+                              i915_reset_count(&i915->gpu_error) - reset_count);
+                       err = -EIO;
+                       goto out_unlock;
+               }
+
+               if (i915->gpu_error.missed_irq_rings) {
+                       pr_err("%s(%s): Missed interrupts on rings %lx\n",
+                              __func__, engine->name,
+                              i915->gpu_error.missed_irq_rings);
+                       err = -EIO;
+                       goto out_unlock;
+               }
+
+               pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
+                       engine->name,
+                       ktime_to_ns(times[0]),
+                       prime, div64_u64(ktime_to_ns(times[1]), prime));
+       }
+
+out_unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+}
+
+int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(live_nop_request),
+       };
+       return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index f3e17cb10e05..09bf538826df 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -9,3 +9,4 @@
  * Tests are executed in order by igt/drv_selftest
  */
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
+selftest(requests, i915_gem_request_live_selftests)
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to