Currently on execlists, we use a local hwsp for the kernel_context,
rather than the engine's HWSP, as this is the default for execlists.
However, seqno rollover requires allocating a new HWSP cacheline, and
may require pinning a new HWSP page in the GGTT. Pinning in the GGTT
is not allowed from within the kernel_context timeline, as doing so
may require re-entering the kernel_context in order to evict from the
GGTT. As we want to avoid requiring a new HWSP for the kernel_context,
we can use the permanently pinned engine's HWSP instead. However, to
do so we must prevent semaphores from reading the kernel_context's
HWSP, as semaphores do not support rollover onto the same cacheline.
Fortunately, the kernel_context is mostly isolated, so is unlikely to
benefit from semaphores anyway.
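
In code terms, the change boils down to two small pieces, sketched
here for orientation (identifiers taken from the diff below; this is
only an outline, not the full listing):

  /* __execlists_context_alloc(): choose the HWSP backing the timeline */
  struct i915_vma *hwsp = NULL;

  /* the kernel (barrier) context reuses the engine's permanently
   * pinned status page instead of a per-timeline cacheline
   */
  if (unlikely(intel_context_is_barrier(ce)))
          hwsp = engine->status_page.vma;

  tl = intel_timeline_create(engine->gt, hwsp);

  /* emit_semaphore_wait(): a request without its own hwsp_cacheline
   * (i.e. one using the shared engine HWSP) cannot be used as a
   * semaphore target, so fall back to a plain fence wait
   */
  if (!rcu_access_pointer(from->hwsp_cacheline))
          goto await_fence;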

Reported-by: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c    | 14 ++++++++++++--
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 12 +++++++++---
 drivers/gpu/drm/i915/i915_request.c    | 14 +++++++++-----
 3 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 70d91ad923ef..902d440ef07d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2964,7 +2964,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
        u32 *cs;
 
-       GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
+       if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+               return 0;
 
        cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
@@ -4616,8 +4617,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
 
        if (!ce->timeline) {
                struct intel_timeline *tl;
+               struct i915_vma *hwsp;
+
+               /*
+                * Use the static global HWSP for the kernel context, and
+                * a dynamically allocated cacheline for everyone else.
+                */
+               hwsp = NULL;
+               if (unlikely(intel_context_is_barrier(ce)))
+                       hwsp = engine->status_page.vma;
 
-               tl = intel_timeline_create(engine->gt, NULL);
+               tl = intel_timeline_create(engine->gt, hwsp);
                if (IS_ERR(tl)) {
                        ret = PTR_ERR(tl);
                        goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index ccd4cd2c202d..6f458f6d5523 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -3494,15 +3494,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
        rq[0] = ERR_PTR(-ENOMEM);
        for_each_engine(master, gt, id) {
                struct i915_sw_fence fence = {};
+               struct intel_context *ce;
 
                if (master->class == class)
                        continue;
 
+               ce = intel_context_create(master);
+               if (IS_ERR(ce)) {
+                       err = PTR_ERR(ce);
+                       goto out;
+               }
+
                memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
 
-               rq[0] = igt_spinner_create_request(&spin,
-                                                  master->kernel_context,
-                                                  MI_NOOP);
+               rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+               intel_context_put(ce);
                if (IS_ERR(rq[0])) {
                        err = PTR_ERR(rq[0]);
                        goto out;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0ecc2cf64216..1adb8cf35f75 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -886,6 +886,12 @@ emit_semaphore_wait(struct i915_request *to,
                    struct i915_request *from,
                    gfp_t gfp)
 {
+       if (!intel_context_use_semaphores(to->context))
+               goto await_fence;
+
+       if (!rcu_access_pointer(from->hwsp_cacheline))
+               goto await_fence;
+
        /* Just emit the first semaphore we see as request space is limited. */
        if (already_busywaiting(to) & from->engine->mask)
                goto await_fence;
@@ -931,12 +937,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       I915_FENCE_GFP);
-       else if (intel_context_use_semaphores(to->context))
-               ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
        else
-               ret = i915_sw_fence_await_dma_fence(&to->submit,
-                                                   &from->fence, 0,
-                                                   I915_FENCE_GFP);
+               ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
        if (ret < 0)
                return ret;
 
@@ -1035,6 +1037,8 @@ __i915_request_await_execution(struct i915_request *to,
 {
        int err;
 
+       GEM_BUG_ON(intel_context_is_barrier(from->context));
+
        /* Submit both requests at the same time */
        err = __await_execution(to, from, hook, I915_FENCE_GFP);
        if (err)
-- 
2.25.0
