gen3 does not fully flush MI stores to memory on MI_FLUSH, such that a
subsequent read from e.g. the sampler can bypass the store and read a
stale value from memory. This is a serious issue when we use MI stores
to rewrite the batches for relocation, as it means that the batch ends
up reading from random user/kernel memory. While it is particularly
sensitive [and detectable] for relocations, reading stale data at any
time is a worry.

We started with a small number of delaying stores and doubled the count
until no more incoherency was seen over a few hours (with and without
background memory pressure); 32 was the magic number.
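
Schematically, the calibration was a doubling search. Below is a
compilable sketch; soak_finds_stale_reads() is a hypothetical stand-in
for the hours-long soak, stubbed here with the observed gen3 result:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for running the incoherency soak with a given
 * number of delaying stores; stubbed with the observed gen3 result.
 */
static bool soak_finds_stale_reads(int wa_stores)
{
	return wa_stores < 32;
}

int main(void)
{
	int count = 1;

	while (soak_finds_stale_reads(count))
		count *= 2;

	printf("delaying stores needed: %d\n", count);
	return 0;
}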

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2018
References: a889580c087a ("drm/i915: Flush GPU relocs harder for gen3")
Signed-off-by: Chris Wilson <[email protected]>
Cc: Mika Kuoppala <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
---
So gen3 requires a delay afterwards to flush the previous stores, while
gen5 is assumed to require a delay between the seqno write and the
MI_USER_INTERRUPT. Here I've made gen5 reuse the gen3 approach, but I
still need to verify that it holds.
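
For illustration, here is a standalone sketch of the command-stream
shape the unified __gen2_emit_breadcrumb() produces. The encodings and
HWS offsets below are made-up stand-ins (the real MI_* values live in
the driver headers); only the shape matters:

#include <stdint.h>
#include <stdio.h>

#define MI_NOOP			0x00000000u /* stand-in encoding */
#define MI_FLUSH		0x04000000u /* stand-in encoding */
#define MI_NO_WRITE_FLUSH	0x00000004u /* stand-in flag */
#define MI_STORE_DWORD_INDEX	0x21000000u /* stand-in encoding */
#define MI_USER_INTERRUPT	0x02000000u /* stand-in encoding */
#define HWS_SCRATCH_ADDR	(48 * 4)    /* hypothetical scratch slot */
#define HWS_SEQNO_ADDR		(52 * 4)    /* hypothetical seqno slot */

static uint32_t *emit_breadcrumb(uint32_t *cs, uint32_t seqno, int count)
{
	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;

	/* 'count' delaying stores to a scratch slot (32 on gen3, 8 on
	 * gen5), each followed by a flush with the write flush
	 * inhibited, to space out the stores before the seqno write.
	 */
	while (count--) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = HWS_SCRATCH_ADDR;
		*cs++ = seqno;
		*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
	}

	/* the seqno write the CPU actually waits on, then the interrupt */
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = HWS_SEQNO_ADDR;
	*cs++ = seqno;
	*cs++ = MI_USER_INTERRUPT;

	return cs;
}

int main(void)
{
	uint32_t ring[64];
	uint32_t *end = emit_breadcrumb(ring, 1, 4); /* small count for demo */

	for (uint32_t *p = ring; p < end; p++)
		printf("0x%08x\n", *p);
	return 0;
}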
---
 drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 39 +++++++++---------------
 1 file changed, 15 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 3fb0dc1fb910..342c476ec872 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -142,19 +142,26 @@ int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
        return 0;
 }
 
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs, int count)
 {
        GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
        GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
 
        *cs++ = MI_FLUSH;
+       *cs++ = MI_NOOP;
+
+       while (count--) {
+               *cs++ = MI_STORE_DWORD_INDEX;
+               *cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
+               *cs++ = rq->fence.seqno;
+               *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+       }
 
        *cs++ = MI_STORE_DWORD_INDEX;
        *cs++ = I915_GEM_HWS_SEQNO_ADDR;
        *cs++ = rq->fence.seqno;
 
        *cs++ = MI_USER_INTERRUPT;
-       *cs++ = MI_NOOP;
 
        rq->tail = intel_ring_offset(rq, cs);
        assert_ring_tail_valid(rq->ring, rq->tail);
@@ -162,31 +169,15 @@ u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
        return cs;
 }
 
-#define GEN5_WA_STORES 8 /* must be at least 1! */
-u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-       int i;
-
-       GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
-       GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
-
-       *cs++ = MI_FLUSH;
-
-       BUILD_BUG_ON(GEN5_WA_STORES < 1);
-       for (i = 0; i < GEN5_WA_STORES; i++) {
-               *cs++ = MI_STORE_DWORD_INDEX;
-               *cs++ = I915_GEM_HWS_SEQNO_ADDR;
-               *cs++ = rq->fence.seqno;
-       }
-
-       *cs++ = MI_USER_INTERRUPT;
-
-       rq->tail = intel_ring_offset(rq, cs);
-       assert_ring_tail_valid(rq->ring, rq->tail);
+       return __gen2_emit_breadcrumb(rq, cs, 32);
+}
 
-       return cs;
+u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+       return __gen2_emit_breadcrumb(rq, cs, 8);
 }
-#undef GEN5_WA_STORES
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT SZ_256K
-- 
2.20.1
