Impose the constraint that all vma are pinned for a request prior to
its allocation. This simplifies request construction and should
facilitate unravelling the lock interdependencies later.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/huge_pages.c   |  31 +++--
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 123 +++++++++---------
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  86 ++++++------
 3 files changed, 119 insertions(+), 121 deletions(-)
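
For reviewers, a sketch of the request-construction order the three
selftests converge on is below. It is illustrative only: submit_batch()
is a made-up name, not a driver function; the individual i915 calls are
the ones already used in this patch.

static int submit_batch(struct intel_engine_cs *engine,
                        struct i915_gem_context *ctx,
                        struct i915_vma *vma)
{
        struct i915_request *rq;
        int err;

        /* 1: pin every vma before the request is allocated */
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        /* 2: only now allocate the request */
        rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin;
        }

        /* 3: track the vma on the request, then emit the batch */
        err = i915_vma_move_to_active(vma, rq, 0);
        if (err == 0)
                err = engine->emit_bb_start(rq, vma->node.start,
                                            vma->node.size, 0);

        /* 4: once allocated, the request is always added; skip it on error */
        if (err)
                i915_request_skip(rq, err);
        i915_request_add(rq);

unpin:
        i915_vma_unpin(vma);
        return err;
}

Any failure after i915_request_alloc() skips and adds the request rather
than leaking it, which is the invariant the err_request/cancel_rq labels
in the hunks below enforce.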

diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 26c065c8d2c0..a0c7cbc212ba 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
 {
        struct i915_request *rq;
        struct i915_vma *batch;
-       int flags = 0;
        int err;
 
        GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
        if (err)
                return err;
 
-       rq = i915_request_alloc(engine, ctx);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
        batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto err_request;
+       if (IS_ERR(batch))
+               return PTR_ERR(batch);
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_batch;
        }
 
        err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
                goto err_request;
 
        i915_gem_object_set_active_reference(batch->obj);
-       i915_vma_unpin(batch);
-       i915_vma_close(batch);
 
-       err = engine->emit_bb_start(rq,
-                                   batch->node.start, batch->node.size,
-                                   flags);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_request;
 
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = engine->emit_bb_start(rq,
+                                   batch->node.start, batch->node.size,
+                                   0);
+err_request:
        if (err)
                i915_request_skip(rq, err);
-
-err_request:
        i915_request_add(rq);
+err_batch:
+       i915_vma_unpin(batch);
+       i915_vma_close(batch);
 
        return err;
 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index a48fbe2557ea..b767fab9ce1f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -102,52 +102,87 @@ static u64 hws_address(const struct i915_vma *hws,
        return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
-static int emit_recurse_batch(struct hang *h,
-                             struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
+{
+       int err;
+
+       err = i915_vma_move_to_active(vma, rq, flags);
+       if (err)
+               return err;
+
+       if (!i915_gem_object_has_active_reference(vma->obj)) {
+               i915_gem_object_get(vma->obj);
+               i915_gem_object_set_active_reference(vma->obj);
+       }
+
+       return 0;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = h->i915;
        struct i915_address_space *vm =
-               rq->gem_context->ppgtt ?
-               &rq->gem_context->ppgtt->vm :
-               &i915->ggtt.vm;
+               h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+       struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        unsigned int flags;
        u32 *batch;
        int err;
 
+       if (i915_gem_object_is_active(h->obj)) {
+               struct drm_i915_gem_object *obj;
+               void *vaddr;
+
+               obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+               if (IS_ERR(obj))
+                       return ERR_CAST(obj);
+
+               vaddr = i915_gem_object_pin_map(obj,
+                                               i915_coherent_map_type(h->i915));
+               if (IS_ERR(vaddr)) {
+                       i915_gem_object_put(obj);
+                       return ERR_CAST(vaddr);
+               }
+
+               i915_gem_object_unpin_map(h->obj);
+               i915_gem_object_put(h->obj);
+
+               h->obj = obj;
+               h->batch = vaddr;
+       }
+
        vma = i915_vma_instance(h->obj, vm, NULL);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
 
        hws = i915_vma_instance(h->hws, vm, NULL);
        if (IS_ERR(hws))
-               return PTR_ERR(hws);
+               return ERR_CAST(hws);
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;
 
-       err = i915_vma_move_to_active(vma, rq, 0);
-       if (err)
+       rq = i915_request_alloc(engine, h->ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto unpin_hws;
-
-       if (!i915_gem_object_has_active_reference(vma->obj)) {
-               i915_gem_object_get(vma->obj);
-               i915_gem_object_set_active_reference(vma->obj);
        }
 
-       err = i915_vma_move_to_active(hws, rq, 0);
+       err = move_to_active(vma, rq, 0);
        if (err)
-               goto unpin_hws;
+               goto cancel_rq;
 
-       if (!i915_gem_object_has_active_reference(hws->obj)) {
-               i915_gem_object_get(hws->obj);
-               i915_gem_object_set_active_reference(hws->obj);
-       }
+       err = move_to_active(hws, rq, 0);
+       if (err)
+               goto cancel_rq;
 
        batch = h->batch;
        if (INTEL_GEN(i915) >= 8) {
@@ -212,52 +247,16 @@ static int emit_recurse_batch(struct hang *h,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+cancel_rq:
+       if (err) {
+               i915_request_skip(rq, err);
+               i915_request_add(rq);
+       }
 unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
-       return err;
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
-       struct i915_request *rq;
-       int err;
-
-       if (i915_gem_object_is_active(h->obj)) {
-               struct drm_i915_gem_object *obj;
-               void *vaddr;
-
-               obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
-               if (IS_ERR(obj))
-                       return ERR_CAST(obj);
-
-               vaddr = i915_gem_object_pin_map(obj,
-                                               i915_coherent_map_type(h->i915));
-               if (IS_ERR(vaddr)) {
-                       i915_gem_object_put(obj);
-                       return ERR_CAST(vaddr);
-               }
-
-               i915_gem_object_unpin_map(h->obj);
-               i915_gem_object_put(h->obj);
-
-               h->obj = obj;
-               h->batch = vaddr;
-       }
-
-       rq = i915_request_alloc(engine, h->ctx);
-       if (IS_ERR(rq))
-               return rq;
-
-       err = emit_recurse_batch(h, rq);
-       if (err) {
-               i915_request_add(rq);
-               return ERR_PTR(err);
-       }
-
-       return rq;
+       return err ? ERR_PTR(err) : rq;
 }
 
 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 94fc0e5c8766..80f881423c41 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -80,48 +80,65 @@ static u64 hws_address(const struct i915_vma *hws,
        return hws->node.start + seqno_offset(rq->fence.context);
 }
 
-static int emit_recurse_batch(struct spinner *spin,
-                             struct i915_request *rq,
-                             u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
 {
-       struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+       int err;
+
+       err = i915_vma_move_to_active(vma, rq, flags);
+       if (err)
+               return err;
+
+       if (!i915_gem_object_has_active_reference(vma->obj)) {
+               i915_gem_object_get(vma->obj);
+               i915_gem_object_set_active_reference(vma->obj);
+       }
+
+       return 0;
+}
+
+static struct i915_request *
+spinner_create_request(struct spinner *spin,
+                      struct i915_gem_context *ctx,
+                      struct intel_engine_cs *engine,
+                      u32 arbitration_command)
+{
+       struct i915_address_space *vm = &ctx->ppgtt->vm;
+       struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;
 
        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
 
        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
-               return PTR_ERR(hws);
+               return ERR_CAST(hws);
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;
 
-       err = i915_vma_move_to_active(vma, rq, 0);
-       if (err)
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto unpin_hws;
-
-       if (!i915_gem_object_has_active_reference(vma->obj)) {
-               i915_gem_object_get(vma->obj);
-               i915_gem_object_set_active_reference(vma->obj);
        }
 
-       err = i915_vma_move_to_active(hws, rq, 0);
+       err = move_to_active(vma, rq, 0);
        if (err)
-               goto unpin_hws;
+               goto cancel_rq;
 
-       if (!i915_gem_object_has_active_reference(hws->obj)) {
-               i915_gem_object_get(hws->obj);
-               i915_gem_object_set_active_reference(hws->obj);
-       }
+       err = move_to_active(hws, rq, 0);
+       if (err)
+               goto cancel_rq;
 
        batch = spin->batch;
 
@@ -139,35 +156,18 @@ static int emit_recurse_batch(struct spinner *spin,
 
        i915_gem_chipset_flush(spin->i915);
 
-       err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+cancel_rq:
+       if (err) {
+               i915_request_skip(rq, err);
+               i915_request_add(rq);
+       }
 unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
-       return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
-                      struct i915_gem_context *ctx,
-                      struct intel_engine_cs *engine,
-                      u32 arbitration_command)
-{
-       struct i915_request *rq;
-       int err;
-
-       rq = i915_request_alloc(engine, ctx);
-       if (IS_ERR(rq))
-               return rq;
-
-       err = emit_recurse_batch(spin, rq, arbitration_command);
-       if (err) {
-               i915_request_add(rq);
-               return ERR_PTR(err);
-       }
-
-       return rq;
+       return err ? ERR_PTR(err) : rq;
 }
 
 static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-- 
2.20.0.rc1
