When choosing a slot for an execbuffer, we ideally want to use the same
address as last time (so that we don't have to rebind it) and the same
address as expected by the user (so that we don't have to fix up any
relocations pointing to it). Trying first to pin at the incoming
execbuffer->offset from the user, or at the currently bound offset if the
object is already bound elsewhere, should achieve both goals of avoiding
the rebind cost and the relocation penalty. However, if the object is not
currently bound at the requested address, we do not want to arbitrarily
unbind whatever object occupies our chosen position, and so we choose to
rebind/relocate the incoming object instead. After we report the new
position back to the user, the relocations should have settled down by the
next pass.
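
As an illustration of the intent only, here is a minimal standalone
userspace sketch of the flag selection performed in eb_pin_vma() below;
choose_pin_flags() and the SKETCH_* bit values are stand-ins invented for
this sketch, not the driver's actual definitions:

/*
 * Standalone sketch (not kernel code): prefer the user's expected offset,
 * but if the object is already bound elsewhere, retry its current address
 * with a no-evict hint so nothing else is kicked out of our chosen slot.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_OFFSET_MASK	(~0xfffull)	/* assumed 4K page mask */
#define SKETCH_NOEVICT		(1ull << 0)	/* placeholder flag bits */
#define SKETCH_USER		(1ull << 1)
#define SKETCH_NONBLOCK		(1ull << 2)
#define SKETCH_OFFSET_FIXED	(1ull << 3)

static uint64_t choose_pin_flags(uint64_t user_offset,
				 uint64_t node_start, uint64_t node_size)
{
	uint64_t flags = user_offset & SKETCH_OFFSET_MASK;

	/* Already bound, but not where the user expects? Keep the current
	 * binding and refuse to evict whoever holds the preferred slot. */
	if (node_size && flags != node_start)
		flags = node_start | SKETCH_NOEVICT;

	return flags | SKETCH_USER | SKETCH_NONBLOCK | SKETCH_OFFSET_FIXED;
}

int main(void)
{
	/* Object bound at 0x20000 while the user expects 0x10000: we try
	 * 0x20000 with the no-evict hint and report the result back. */
	uint64_t flags = choose_pin_flags(0x10000, 0x20000, 0x1000);

	printf("pin at %#llx, noevict=%d\n",
	       (unsigned long long)(flags & SKETCH_OFFSET_MASK),
	       !!(flags & SKETCH_NOEVICT));
	return 0;
}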

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem.c            | 6 ++++++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 ++++++---
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 1 +
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f35dd6219cb..0398f86683e7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3055,6 +3055,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                vma->node.color = obj->cache_level;
                ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
                if (ret) {
+                       if (flags & PIN_NOEVICT)
+                               goto err_unpin;
+
                        ret = i915_gem_evict_for_vma(vma, flags);
                        if (ret == 0)
                                ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
@@ -3090,6 +3093,9 @@ search_free:
                                                          search_flag,
                                                          alloc_flag);
                if (ret) {
+                       if (flags & PIN_NOEVICT)
+                               goto err_unpin;
+
                        ret = i915_gem_evict_something(vma->vm, size, alignment,
                                                       obj->cache_level,
                                                       start, end,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 71b18fcbd8a7..1fa3b1888b0b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -163,10 +163,14 @@ eb_pin_vma(struct i915_execbuffer *eb,
 {
        u64 flags;
 
-       flags = vma->node.start;
+       flags = entry->offset & PIN_OFFSET_MASK;
+       if (vma->node.size && flags != vma->node.start)
+               flags = vma->node.start | PIN_NOEVICT;
+
        flags |= PIN_USER | PIN_NONBLOCK | PIN_OFFSET_FIXED;
        if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_GTT))
                flags |= PIN_GLOBAL;
+
        if (unlikely(i915_vma_pin(vma, 0, 0, flags)))
                return;
 
@@ -276,8 +280,7 @@ eb_add_vma(struct i915_execbuffer *eb,
                entry->flags |= eb->context_flags;
 
        ret = 0;
-       if (vma->node.size)
-               eb_pin_vma(eb, entry, vma);
+       eb_pin_vma(eb, entry, vma);
        if (eb_vma_misplaced(entry, vma)) {
                eb_unreserve_vma(vma, entry);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ba04b0bf7fe0..9eeb9a37f177 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -645,6 +645,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 #define PIN_MAPPABLE           BIT(1)
 #define PIN_ZONE_4G            BIT(2)
 #define PIN_NONFAULT           BIT(3)
+#define PIN_NOEVICT            BIT(4)
 
 #define PIN_MBZ                        BIT(5) /* I915_VMA_PIN_OVERFLOW */
 #define PIN_GLOBAL             BIT(6) /* I915_VMA_GLOBAL_BIND */
-- 
2.9.3
