On 07/12/2016 13:58, Chris Wilson wrote:
Simple starting point for adding selftests for i915_gem_request: first
mock a device (with engines and contexts) that allows us to construct
and execute a request, along with waiting for the request to complete.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_request.c    | 402 +++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_mock_selftests.h |   1 +
 2 files changed, 403 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 881bed5347fb..6553457adc77 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -1208,3 +1208,405 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
        for_each_engine(engine, dev_priv, id)
                engine_retire_requests(engine);
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "i915_selftest.h"
+
+struct mock_engine {
+       struct intel_engine_cs base;
+
+       spinlock_t hw_lock;
+       struct list_head hw_queue;
+       struct timer_list hw_delay;
+};
+
+struct mock_request {
+       struct drm_i915_gem_request base;
+
+       struct list_head link;
+       unsigned long delay;
+};
+
+static void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno)
+{
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       intel_engine_wakeup(engine);
+}
+
+static void hw_delay_complete(unsigned long data)
+{
+       struct mock_engine *engine = (typeof(engine))data;
+       struct mock_request *request;
+
+       spin_lock(&engine->hw_lock);
+       request = list_first_entry_or_null(&engine->hw_queue,
+                                          typeof(*request),
+                                          link);
+       if (request) {
+               list_del(&request->link);
+               mock_seqno_advance(&engine->base, request->base.global_seqno);
+       }
+
+       request = list_first_entry_or_null(&engine->hw_queue,
+                                          typeof(*request),
+                                          link);
+       if (request)
+               mod_timer(&engine->hw_delay, jiffies + request->delay);
+       spin_unlock(&engine->hw_lock);
+}
+
+static void mock_engine_flush(struct intel_engine_cs *engine)
+{
+       struct mock_engine *mock =
+               container_of(engine, typeof(*mock), base);
+       struct mock_request *request, *rn;
+
+       del_timer_sync(&mock->hw_delay);
+
+       spin_lock_irq(&mock->hw_lock);
+       list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
+               list_del_init(&request->link);
+               mock_seqno_advance(&mock->base, request->base.global_seqno);
+       }
+       spin_unlock_irq(&mock->hw_lock);
+}
+
+static int mock_context_pin(struct intel_engine_cs *engine,
+                           struct i915_gem_context *ctx)
+{
+       i915_gem_context_get(ctx);
+       return 0;
+}
+
+static void mock_context_unpin(struct intel_engine_cs *engine,
+                              struct i915_gem_context *ctx)
+{
+       i915_gem_context_put(ctx);
+}
+
+static int mock_request_alloc(struct drm_i915_gem_request *request)
+{
+       request->ring = request->engine->buffer;
+       return 0;
+}
+
+static int mock_emit_flush(struct drm_i915_gem_request *request,
+                          unsigned int flags)
+{
+       return 0;
+}
+
+static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+                                u32 *flags)
+{
+}
+
+static void mock_submit_request(struct drm_i915_gem_request *request)
+{
+       struct mock_request *mock = container_of(request, typeof(*mock), base);
+       struct mock_engine *engine =
+               container_of(request->engine, typeof(*engine), base);
+
+       i915_gem_request_submit(request);
+
+       spin_lock_irq(&engine->hw_lock);
+       list_add_tail(&mock->link, &engine->hw_queue);
+       if (list_first_entry(&engine->hw_queue, typeof(*mock), link) == mock)
+               mod_timer(&engine->hw_delay, jiffies + mock->delay);
+       spin_unlock_irq(&engine->hw_lock);
+}
+
+static struct drm_i915_gem_request *
+mock_request(struct intel_engine_cs *engine,
+            struct i915_gem_context *context,
+            unsigned long delay)
+{
+       struct drm_i915_gem_request *request;
+       struct mock_request *mock;
+
+       request = i915_gem_request_alloc(engine, context);
+       if (!request)
+               return NULL;
+
+       mock = container_of(request, typeof(*mock), base);
+       INIT_LIST_HEAD(&mock->link);
+       mock->delay = delay;

How about just:

mock = (struct mock_request *)i915_gem_request_alloc(...)

?

Both versions are equally confusing until the reader figures out that the requests kmem cache is sized for struct mock_request, so the allocation really is big enough. Doesn't matter really, just thinking out loud. :)
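(To spell out the equivalence I mean, here is a standalone sketch with made-up names, not tied to the real i915 structs: while the base struct is the first member, both spellings resolve to the same pointer; container_of() is just robust against the layout changing later.)

	#include <linux/kernel.h>	/* container_of() */

	/* Toy stand-ins for drm_i915_gem_request / mock_request. */
	struct base_req { unsigned int global_seqno; };

	struct mock_req {
		struct base_req base;	/* first member, so it sits at offset 0 */
		unsigned long delay;
	};

	static struct mock_req *to_mock(struct base_req *rq)
	{
		/*
		 * With 'base' at offset 0 this yields the same pointer value
		 * as (struct mock_req *)rq; container_of() merely keeps
		 * working if the layout ever changes, at the cost of looking
		 * slightly mysterious until you notice the kmem cache is
		 * sized for the larger struct.
		 */
		return container_of(rq, struct mock_req, base);
	}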

+
+       return &mock->base;
+}
+
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+       struct intel_ring *ring;
+
+       ring = kzalloc(sizeof(*ring) + 4096, GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       ring->engine = engine;
+       ring->size = 4096;
+       ring->effective_size = ring->size;
+       ring->vaddr = (void *)(ring + 1);
+
+       INIT_LIST_HEAD(&ring->request_list);
+       ring->last_retired_head = -1;
+       intel_ring_update_space(ring);
+
+       return ring;
+}
+
+static struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+                                          const char *name)
+{
+       struct mock_engine *engine;
+       static int id;
+
+       engine = kzalloc(sizeof(*engine) + 4096, GFP_KERNEL);
+       if (!engine)
+               return NULL;
+
+       engine->base.buffer = mock_ring(&engine->base);
+       if (!engine->base.buffer) {
+               kfree(engine);
+               return NULL;
+       }
+
+       /* minimal engine setup for requests */
+       engine->base.i915 = i915;
+       engine->base.name = name;
+       engine->base.id = id++;
+       engine->base.status_page.page_addr = (void *)(engine + 1);
+
+       engine->base.context_pin = mock_context_pin;
+       engine->base.context_unpin = mock_context_unpin;
+       engine->base.request_alloc = mock_request_alloc;
+       engine->base.emit_flush = mock_emit_flush;
+       engine->base.emit_breadcrumb = mock_emit_breadcrumb;
+       engine->base.submit_request = mock_submit_request;
+
+       engine->base.timeline =
+               &i915->gt.global_timeline.engine[engine->base.id];
+
+       /* minimal breadcrumbs init */
+       spin_lock_init(&engine->base.breadcrumbs.lock);
+       engine->base.breadcrumbs.mock = true;
+
+       /* fake hw queue */
+       spin_lock_init(&engine->hw_lock);
+       setup_timer(&engine->hw_delay,
+                   hw_delay_complete,
+                   (unsigned long)engine);
+       INIT_LIST_HEAD(&engine->hw_queue);
+
+       return &engine->base;
+}
+
+static void mock_engine_free(struct intel_engine_cs *engine)
+{
+       if (engine->last_context)
+               engine->context_unpin(engine, engine->last_context);
+
+       kfree(engine->buffer);
+       kfree(engine);
+}
+
+static void mock_ppgtt_cleanup(struct i915_address_space *vm)
+{
+}
+
+static struct i915_hw_ppgtt *
+mock_ppgtt(struct drm_i915_private *i915,
+          const char *name)
+{
+       struct i915_hw_ppgtt *ppgtt;
+
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return NULL;
+
+       kref_init(&ppgtt->ref);
+
+       INIT_LIST_HEAD(&ppgtt->base.active_list);
+       INIT_LIST_HEAD(&ppgtt->base.inactive_list);
+       INIT_LIST_HEAD(&ppgtt->base.unbound_list);
+
+       INIT_LIST_HEAD(&ppgtt->base.global_link);
+       drm_mm_init(&ppgtt->base.mm, 0, ~0);
+       i915_gem_timeline_init(i915, &ppgtt->base.timeline, name);
+
+       ppgtt->base.cleanup = mock_ppgtt_cleanup;
+
+       return ppgtt;
+}
+
+static struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+            const char *name)
+{
+       struct i915_gem_context *ctx;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       kref_init(&ctx->ref);
+       INIT_LIST_HEAD(&ctx->link);
+       ctx->name = name ? kstrdup(name, GFP_KERNEL) : NULL;
+       ctx->i915 = i915;
+
+       ret = ida_simple_get(&i915->context_hw_ida,
+                            0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+       if (ret < 0) {
+               kfree(ctx);
+               return NULL;
+       }
+       ctx->hw_id = ret;
+
+       if (name) {
+               ctx->ppgtt = mock_ppgtt(i915, name);
+               if (!ctx->ppgtt) {
+                       kfree(ctx);
+                       return NULL;
+               }
+       }
+
+       return ctx;
+}
+
+static void mock_retire_work_handler(struct work_struct *work)
+{
+}
+
+static void mock_idle_work_handler(struct work_struct *work)
+{
+}
+
+static struct drm_i915_private *mock_device(void)
+{
+       struct drm_i915_private *i915;
+
+       i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+       if (!i915)
+               return NULL;
+
+       mutex_init(&i915->drm.struct_mutex);
+
+       init_waitqueue_head(&i915->gpu_error.wait_queue);
+       init_waitqueue_head(&i915->gpu_error.reset_queue);
+
+       ida_init(&i915->context_hw_ida);
+
+       INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
+       INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
+
+       INIT_LIST_HEAD(&i915->gt.timelines);
+       i915->gt.awake = true;
+
+       mutex_lock(&i915->drm.struct_mutex);
+       i915_gem_timeline_init__global(i915);
+       i915_gem_timeline_init(i915, &i915->ggtt.base.timeline, "mock");
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       i915->requests = KMEM_CACHE(mock_request,
+                                   SLAB_HWCACHE_ALIGN |
+                                   SLAB_RECLAIM_ACCOUNT |
+                                   SLAB_DESTROY_BY_RCU);
+       if (!i915->requests)
+               goto err_device;
+
+       i915->dependencies = KMEM_CACHE(i915_dependency,
+                                       SLAB_HWCACHE_ALIGN |
+                                       SLAB_RECLAIM_ACCOUNT);
+       if (!i915->dependencies)
+               goto err_requests;
+
+       mkwrite_device_info(i915)->ring_mask = BIT(0);
+       i915->engine[RCS] = mock_engine(i915, "mock");
+       if (!i915->engine[RCS])
+               goto err_dependencies;
+
+       i915->kernel_context = mock_context(i915, NULL);
+       if (!i915->kernel_context)
+               goto err_engine;
+
+       return i915;
+
+err_engine:
+       mock_engine_free(i915->engine[RCS]);
+err_dependencies:
+       kmem_cache_destroy(i915->dependencies);
+err_requests:
+       kmem_cache_destroy(i915->requests);
+err_device:
+       kfree(i915);
+       return NULL;
+}
+
+static void mock_device_free(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, i915, id)
+               mock_engine_flush(engine);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       i915_gem_retire_requests(i915);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       cancel_delayed_work_sync(&i915->gt.retire_work);
+       cancel_delayed_work_sync(&i915->gt.idle_work);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       for_each_engine(engine, i915, id)
+               mock_engine_free(engine);
+
+       i915_gem_context_fini(i915);
+
+       i915_gem_timeline_fini(&i915->ggtt.base.timeline);
+       i915_gem_timeline_fini(&i915->gt.global_timeline);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       kmem_cache_destroy(i915->dependencies);
+       kmem_cache_destroy(i915->requests);
+
+       kfree(i915);
+}
+
+static int igt_add_request(void *ignore)
+{
+       struct drm_i915_private *i915;
+       struct drm_i915_gem_request *request;
+       int err = -ENOMEM;
+
+       i915 = mock_device();
+       if (!i915)
+               goto out;
+
+       mutex_lock(&i915->drm.struct_mutex);
+       request = mock_request(i915->engine[RCS],
+                              i915->kernel_context,
+                              HZ / 10);
+       if (!request)
+               goto out_unlock;
+
+       i915_add_request(request);
+
+       err = 0;
+out_unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+       mock_device_free(i915);
+out:
+       return err;
+}
+
+int i915_gem_request_selftest(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_add_request),
+       };
+
+       return i915_subtests(tests, NULL);
+}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_mock_selftests.h b/drivers/gpu/drm/i915/i915_mock_selftests.h
index 1603fd35d190..9ff379b18c20 100644
--- a/drivers/gpu/drm/i915/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/i915_mock_selftests.h
@@ -8,5 +8,6 @@
  *
  * Tests are executed in reverse order by igt/drv_selftest
  */
+selftest(requests, i915_gem_request_selftest)
 selftest(breadcrumbs, intel_breadcrumbs_selftest)
 selftest(sanitycheck, i915_mock_sanitycheck) /* keep last */


It looks tidy enough. I trust you'll run it through the full memory debugging arsenal just to double-check it doesn't leak anything, but otherwise it looks OK to me.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

Regards,

Tvrtko