This migrates the fence tracking onto the existing seqno
infrastructure so that the later conversion to tracking via requests is
simplified.

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/i915_drv.h            |  7 ------
 drivers/gpu/drm/i915/i915_gem.c            | 17 ---------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 34 +++++++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  2 +-
 4 files changed, 20 insertions(+), 40 deletions(-)
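
A note on the resulting idiom (illustrative only, not part of the patch):
with fenced_gpu_access and pending_fenced_gpu_access gone, "is the GPU
still using a fence on this object?" reduces to the same seqno comparison
already used for last_read_seqno/last_write_seqno. The sketch below uses
the existing i915_seqno_passed() helper; object_fence_busy() is a made-up
name for illustration and is not added by this patch:

        /* Fence busyness is now derived from the tracked seqno:
         * last_fenced_seqno == 0 means no outstanding fenced GPU access;
         * otherwise the fence stays busy until the GPU passes that seqno.
         */
        static inline bool
        object_fence_busy(const struct drm_i915_gem_object *obj,
                          u32 completed_seqno)
        {
                return obj->last_fenced_seqno &&
                       !i915_seqno_passed(completed_seqno,
                                          obj->last_fenced_seqno);
        }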

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fab97bc3215f..5c3f033ff928 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1777,13 +1777,6 @@ struct drm_i915_gem_object {
         * Only honoured if hardware has relevant pte bit
         */
        unsigned long gt_ro:1;
-
-       /*
-        * Is the GPU currently using a fence to access this buffer,
-        */
-       unsigned int pending_fenced_gpu_access:1;
-       unsigned int fenced_gpu_access:1;
-
        unsigned int cache_level:3;
 
        unsigned int has_aliasing_ppgtt_mapping:1;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6366230c4e32..e808b449ac75 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2163,8 +2163,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
@@ -2183,19 +2181,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
-
-       if (obj->fenced_gpu_access) {
-               obj->last_fenced_seqno = seqno;
-
-               /* Bump MRU to take account of the delayed flush */
-               if (obj->fence_reg != I915_FENCE_REG_NONE) {
-                       struct drm_i915_fence_reg *reg;
-
-                       reg = &dev_priv->fence_regs[obj->fence_reg];
-                       list_move_tail(&reg->lru_list,
-                                      &dev_priv->mm.fence_list);
-               }
-       }
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2231,7 +2216,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        obj->base.write_domain = 0;
 
        obj->last_fenced_seqno = 0;
-       obj->fenced_gpu_access = false;
 
        obj->active = 0;
        drm_gem_object_unreference(&obj->base);
@@ -3176,7 +3160,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
                obj->last_fenced_seqno = 0;
        }
 
-       obj->fenced_gpu_access = false;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87e9599f9e4b..fbac7b51736c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -542,7 +542,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 {
        struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        uint64_t flags;
        int ret;
 
@@ -560,17 +559,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
-       if (has_fenced_gpu_access) {
-               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                       ret = i915_gem_object_get_fence(obj);
-                       if (ret)
-                               return ret;
-
-                       if (i915_gem_object_pin_fence(obj))
-                               entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+       if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+               ret = i915_gem_object_get_fence(obj);
+               if (ret)
+                       return ret;
 
-                       obj->pending_fenced_gpu_access = true;
-               }
+               if (i915_gem_object_pin_fence(obj))
+                       entry->flags |= __EXEC_OBJECT_HAS_FENCE;
        }
 
        if (entry->offset != vma->node.start) {
@@ -658,8 +653,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                obj = vma->obj;
                entry = vma->exec_entry;
 
+               if (!has_fenced_gpu_access)
+                       entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
-                       has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
                need_mappable = need_fence || need_reloc_mappable(vma);
@@ -672,7 +668,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 
                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
-               obj->pending_fenced_gpu_access = false;
        }
        list_splice(&ordered_vmas, vmas);
 
@@ -959,9 +954,11 @@ static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_engine_cs *ring)
 {
+       u32 seqno = intel_ring_get_seqno(ring);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
@@ -970,18 +967,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
-               obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
-                       obj->last_write_seqno = intel_ring_get_seqno(ring);
+                       obj->last_write_seqno = seqno;
 
                        intel_fb_obj_invalidate(obj, ring);
 
                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
                }
+               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+                       obj->last_fenced_seqno = seqno;
+                       if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+                               struct drm_i915_private *dev_priv = to_i915(ring->dev);
+                               list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+                                              &dev_priv->mm.fence_list);
+                       }
+               }
 
                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cb150e8b4336..7e623bf097a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -376,7 +376,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 
                if (ret == 0) {
                        obj->fence_dirty =
-                               obj->fenced_gpu_access ||
+                               obj->last_fenced_seqno ||
                                obj->fence_reg != I915_FENCE_REG_NONE;
 
                        obj->tiling_mode = args->tiling_mode;
-- 
2.1.0.rc1
