To handle asynchronous migration of batch objects correctly, we need to
couple the fences on the incoming batch into the request rather than
assume that the batch always starts idle.

Signed-off-by: Chris Wilson <[email protected]>
Cc: Matthew Auld <[email protected]>
---
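For reference, both hunks below add the same await-then-activate pattern,
sketched here in isolation (request setup and the err_request unwind from
the selftest are elided; the comments are annotation added here, not part
of the patch):

	i915_vma_lock(batch);

	/*
	 * Order this request after any fence already attached to the
	 * batch object; false = read-only access, so we only wait on
	 * writers still in flight (e.g. an async migration).
	 */
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		/*
		 * Publish this request's fence on the vma (flags 0 =
		 * read), so later users of the batch wait on us.
		 */
		err = i915_vma_move_to_active(batch, rq, 0);

	i915_vma_unlock(batch);
	if (err)
		goto err_request;
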
 drivers/gpu/drm/i915/gt/selftest_workarounds.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index d06d68ac2a3b..999a98f00494 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -565,6 +565,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
                                goto err_request;
                }
 
+               i915_vma_lock(batch);
+               err = i915_request_await_object(rq, batch->obj, false);
+               if (err == 0)
+                       err = i915_vma_move_to_active(batch, rq, 0);
+               i915_vma_unlock(batch);
+               if (err)
+                       goto err_request;
+
                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
@@ -850,6 +858,14 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                        goto err_request;
        }
 
+       i915_vma_lock(batch);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
+       i915_vma_unlock(batch);
+       if (err)
+               goto err_request;
+
        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
 
-- 
2.23.0
