In order to fully serialize access to the fenced region and the update
to the fence register we need to take extreme measures on SNB+, and
write the fence from each cpu taking care to serialise memory accesses
on each.  The usual mb(), or even an mb() on each CPU, is not enough to
ensure that access to the fenced region is coherent across the change in
fence register - however a full blown write-back invalidate (wbinvd) per
processor is sufficient.

Fixes i-g-t/gem_fence_thrash

v2: Bring a bigger gun
v3: Switch the bigger gun for heavier bullets (Arjan van de Ven)
v4: Remove changes for working generations.
v5: Reduce to a per-cpu wbinvd() call prior to updating the fences.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=62191
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Jon Bloomfield <jon.bloomfi...@intel.com>
Tested-by: Jon Bloomfield <jon.bloomfi...@intel.com> (v2)
Cc: sta...@vger.kernel.org
---
 drivers/gpu/drm/i915/i915_gem.c |   26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fa4ea1a..632a050 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2689,17 +2689,33 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
        return fence - dev_priv->fence_regs;
 }
 
+static void i915_gem_write_fence__ipi(void *data)
+{
+       wbinvd();
+}
+
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       int reg = fence_number(dev_priv, fence);
-
-       i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int fence_reg = fence_number(dev_priv, fence);
+
+       /* In order to fully serialize access to the fenced region and
+        * the update to the fence register we need to take extreme
+        * measures on SNB+, and write the fence from each cpu taking
+        * care to serialise memory accesses on each. The usual mb(),
+        * or even a mb() on each CPU is not enough to ensure that access
+        * to the fenced region is coherent across the change in fence
+        * register, but a wbinvd() per processor is sufficient.
+        */
+       if (HAS_LLC(obj->base.dev))
+               on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+       i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
 
        if (enable) {
-               obj->fence_reg = reg;
+               obj->fence_reg = fence_reg;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
-- 
1.7.10.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to