We can already clear an object with the blt, so try to do the same to
support copying from one object backing store to another. Really this is
just object -> object, which is not that useful yet, what we really want
is two backing stores, but that will require some vma rework first,
otherwise we are stuck with "tmp" objects.

Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janul...@intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_object_blt.c    | 207 ++++++++++++++++++
 .../gpu/drm/i915/gem/i915_gem_object_blt.h    |   9 +
 .../i915/gem/selftests/i915_gem_object_blt.c  | 106 +++++++++
 drivers/gpu/drm/i915/gt/intel_gpu_commands.h  |   5 +-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |   2 +-
 5 files changed, 326 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index c1e5edd1e359..0361b8c80ad1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -175,6 +175,213 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
        return err;
 }
 
+/*
+ * Build a pooled batch buffer that copies @src to @dst with the blitter,
+ * one S16_MAX-page block per blit command. On success the pinned batch vma
+ * is returned and *p holds the pool node the caller must mark active /
+ * release; on failure an ERR_PTR is returned and the pool node is released.
+ */
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_engine_pool_node **p,
+                                        struct intel_context *ce,
+                                        struct i915_vma *src,
+                                        struct i915_vma *dst)
+{
+       struct drm_i915_private *i915 = ce->vm->i915;
+       const u32 block_size = S16_MAX * PAGE_SIZE;
+       struct intel_engine_pool_node *pool;
+       struct i915_vma *batch;
+       u64 src_offset, dst_offset;
+       u64 count;
+       u64 rem;
+       u32 size;
+       u32 *cmd;
+       int err;
+
+       GEM_BUG_ON(src->size != dst->size);
+
+       /*
+        * Each block emits at most 11 dwords (10 for the blit + 1 for
+        * MI_ARB_CHECK). Round the block count *up* so a trailing partial
+        * block is accounted for; rounding down can overflow the batch
+        * when the exact-multiple estimate already fills the last page.
+        */
+       count = div_u64(round_up(dst->size, block_size), block_size);
+       size = (1 + 11 * count) * sizeof(u32); /* +1 for MI_BATCH_BUFFER_END */
+       size = round_up(size, PAGE_SIZE);
+       pool = intel_engine_pool_get(&ce->engine->pool, size);
+       if (IS_ERR(pool))
+               return ERR_CAST(pool);
+
+       cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto out_put;
+       }
+
+       rem = src->size;
+       src_offset = src->node.start;
+       dst_offset = dst->node.start;
+
+       do {
+               u32 size = min_t(u64, rem, block_size);
+
+               /* The blit height field is s16; one row is one page. */
+               GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+               if (INTEL_GEN(i915) >= 9) {
+                       *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+                       *cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
+                       *cmd++ = 0;
+                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+                       *cmd++ = lower_32_bits(dst_offset);
+                       *cmd++ = upper_32_bits(dst_offset);
+                       *cmd++ = 0;
+                       *cmd++ = PAGE_SIZE;
+                       *cmd++ = lower_32_bits(src_offset);
+                       *cmd++ = upper_32_bits(src_offset);
+               } else if (INTEL_GEN(i915) >= 8) {
+                       *cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+                       *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+                       *cmd++ = 0;
+                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+                       *cmd++ = lower_32_bits(dst_offset);
+                       *cmd++ = upper_32_bits(dst_offset);
+                       *cmd++ = 0;
+                       *cmd++ = PAGE_SIZE;
+                       *cmd++ = lower_32_bits(src_offset);
+                       *cmd++ = upper_32_bits(src_offset);
+               } else {
+                       /* Pre-gen8: 32b addresses, 6-dword packet. */
+                       *cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+                       *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+                       *cmd++ = dst_offset;
+                       *cmd++ = PAGE_SIZE;
+                       *cmd++ = src_offset;
+               }
+
+               /* Allow ourselves to be preempted in between blocks. */
+               *cmd++ = MI_ARB_CHECK;
+
+               src_offset += size;
+               dst_offset += size;
+               rem -= size;
+       } while (rem);
+
+       *cmd = MI_BATCH_BUFFER_END;
+       intel_gt_chipset_flush(ce->vm->gt);
+
+       i915_gem_object_unpin_map(pool->obj);
+
+       batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto out_put;
+       }
+
+       err = i915_vma_pin(batch, 0, 0, PIN_USER);
+       if (unlikely(err))
+               goto out_put;
+
+       *p = pool;
+       return batch;
+
+out_put:
+       intel_engine_pool_put(pool);
+       return ERR_PTR(err);
+}
+
+/*
+ * Copy the contents of @src into @dst on @ce using the blitter. Both
+ * objects must be the same size. Called with struct_mutex held; returns
+ * 0 on success (the request has been submitted, not necessarily retired)
+ * or a negative error code.
+ */
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+                            struct drm_i915_gem_object *dst,
+                            struct intel_context *ce)
+{
+       struct drm_gem_object *objs[] = { &src->base, &dst->base };
+       struct i915_address_space *vm = ce->vm;
+       struct intel_engine_pool_node *pool;
+       struct ww_acquire_ctx acquire;
+       struct i915_vma *vma_src, *vma_dst;
+       struct i915_vma *batch;
+       struct i915_request *rq;
+       int err;
+
+       vma_src = i915_vma_instance(src, vm, NULL);
+       if (IS_ERR(vma_src))
+               return PTR_ERR(vma_src);
+
+       err = i915_vma_pin(vma_src, 0, 0, PIN_USER);
+       if (unlikely(err))
+               return err;
+
+       vma_dst = i915_vma_instance(dst, vm, NULL);
+       if (IS_ERR(vma_dst)) {
+               /*
+                * Must set err here: it still holds 0 from the successful
+                * pin above, so bare "goto out_unpin_src" would return
+                * success on failure.
+                */
+               err = PTR_ERR(vma_dst);
+               goto out_unpin_src;
+       }
+
+       err = i915_vma_pin(vma_dst, 0, 0, PIN_USER);
+       if (unlikely(err))
+               goto out_unpin_src;
+
+       intel_engine_pm_get(ce->engine);
+       batch = intel_emit_vma_copy_blt(&pool, ce, vma_src, vma_dst);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto out_unpin_dst;
+       }
+
+       rq = intel_context_create_request(ce);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto out_batch;
+       }
+
+       i915_vma_lock(batch);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       i915_vma_unlock(batch);
+       if (unlikely(err))
+               goto out_request;
+
+       /* Keep the pool node alive until the request is retired. */
+       err = intel_engine_pool_mark_active(pool, rq);
+       if (unlikely(err))
+               goto out_request;
+
+       err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+       if (unlikely(err))
+               goto out_request;
+
+       /* Flush any CPU dirt the blitter would not snoop. */
+       if (src->cache_dirty & ~src->cache_coherent)
+               i915_gem_clflush_object(src, 0);
+
+       if (dst->cache_dirty & ~dst->cache_coherent)
+               i915_gem_clflush_object(dst, 0);
+
+       err = i915_request_await_object(rq, src, false);
+       if (unlikely(err))
+               goto out_unlock;
+
+       err = i915_vma_move_to_active(vma_src, rq, 0);
+       if (unlikely(err))
+               goto out_unlock;
+
+       err = i915_request_await_object(rq, dst, true);
+       if (unlikely(err))
+               goto out_unlock;
+
+       err = i915_vma_move_to_active(vma_dst, rq, EXEC_OBJECT_WRITE);
+       if (unlikely(err))
+               goto out_unlock;
+
+       if (ce->engine->emit_init_breadcrumb) {
+               err = ce->engine->emit_init_breadcrumb(rq);
+               if (unlikely(err))
+                       goto out_unlock;
+       }
+
+       err = ce->engine->emit_bb_start(rq,
+                                       batch->node.start, batch->node.size,
+                                       0);
+out_unlock:
+       drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+out_request:
+       if (unlikely(err))
+               i915_request_skip(rq, err);
+
+       i915_request_add(rq);
+out_batch:
+       i915_vma_unpin(batch);
+       intel_engine_pool_put(pool);
+out_unpin_dst:
+       i915_vma_unpin(vma_dst);
+       intel_engine_pm_put(ce->engine);
+out_unpin_src:
+       i915_vma_unpin(vma_src);
+       return err;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_gem_object_blt.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index a7425c234d50..5d3e4ed6e060 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -22,4 +22,13 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
                             struct intel_context *ce,
                             u32 value);
 
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_engine_pool_node **p,
+                                        struct intel_context *ce,
+                                        struct i915_vma *src,
+                                        struct i915_vma *dst);
+
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+                            struct drm_i915_gem_object *dst,
+                            struct intel_context *ce);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index c6e1eebe53f5..c21d747e7d05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -103,10 +103,116 @@ static int igt_fill_blt(void *arg)
        return err;
 }
 
+/*
+ * Live selftest: fill a source object with a known pattern, blit it into
+ * a destination object that was seeded with the complementary pattern,
+ * and verify every backed dword of the destination matches the source.
+ * Sizes are randomized each iteration until the IGT timeout expires.
+ */
+static int igt_copy_blt(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+       struct drm_i915_gem_object *src, *dst;
+       struct rnd_state prng;
+       IGT_TIMEOUT(end);
+       u32 *vaddr;
+       int err = 0;
+
+       prandom_seed_state(&prng, i915_selftest.random_seed);
+
+       do {
+               const u32 max_block_size = S16_MAX * PAGE_SIZE;
+               u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+               u32 phys_sz = sz % (max_block_size + 1);
+               u32 val = prandom_u32_state(&prng);
+               u32 i;
+
+               sz = round_up(sz, PAGE_SIZE);
+               phys_sz = round_up(phys_sz, PAGE_SIZE);
+
+               pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+                        phys_sz, sz, val);
+
+               /* huge_gem_object: virtual size sz, only phys_sz backed. */
+               src = huge_gem_object(i915, phys_sz, sz);
+               if (IS_ERR(src)) {
+                       err = PTR_ERR(src);
+                       goto err_flush;
+               }
+
+               vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       goto err_put_src;
+               }
+
+               memset32(vaddr, val,
+                        huge_gem_object_phys_size(src) / sizeof(u32));
+
+               i915_gem_object_unpin_map(src);
+
+               if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+                       src->cache_dirty = true;
+
+               dst = huge_gem_object(i915, phys_sz, sz);
+               if (IS_ERR(dst)) {
+                       err = PTR_ERR(dst);
+                       goto err_put_src;
+               }
+
+               vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       goto err_put_dst;
+               }
+
+               /* Seed dst with the complement so a no-op copy is caught. */
+               memset32(vaddr, val ^ 0xdeadbeaf,
+                        huge_gem_object_phys_size(dst) / sizeof(u32));
+
+               if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+                       dst->cache_dirty = true;
+
+               mutex_lock(&i915->drm.struct_mutex);
+               err = i915_gem_object_copy_blt(src, dst, ce);
+               mutex_unlock(&i915->drm.struct_mutex);
+               if (err)
+                       goto err_unpin;
+
+               /* Wait for the blit and make the result CPU visible. */
+               i915_gem_object_lock(dst);
+               err = i915_gem_object_set_to_cpu_domain(dst, false);
+               i915_gem_object_unlock(dst);
+               if (err)
+                       goto err_unpin;
+
+               for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+                       if (vaddr[i] != val) {
+                               pr_err("vaddr[%u]=%x, expected=%x\n", i,
+                                      vaddr[i], val);
+                               err = -EINVAL;
+                               goto err_unpin;
+                       }
+               }
+
+               i915_gem_object_unpin_map(dst);
+
+               i915_gem_object_put(src);
+               i915_gem_object_put(dst);
+       } while (!time_after(jiffies, end));
+
+       goto err_flush;
+
+err_unpin:
+       i915_gem_object_unpin_map(dst);
+err_put_dst:
+       i915_gem_object_put(dst);
+err_put_src:
+       i915_gem_object_put(src);
+err_flush:
+       /* Random sizes may legitimately exhaust memory; not a failure. */
+       if (err == -ENOMEM)
+               err = 0;
+
+       return err;
+}
+
 int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_fill_blt),
+               SUBTEST(igt_copy_blt),
        };
 
        if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 69f34737325f..af7b9d272144 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -188,8 +188,9 @@
 
 #define COLOR_BLT_CMD                  (2<<29 | 0x40<<22 | (5-2))
 #define XY_COLOR_BLT_CMD               (2 << 29 | 0x50 << 22)
-#define SRC_COPY_BLT_CMD               ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22)|6)
+#define SRC_COPY_BLT_CMD               ((2<<29)|(0x43<<22))
+#define GEN9_XY_FAST_COPY_BLT_CMD      ((2<<29)|(0x42<<22))
+#define XY_SRC_COPY_BLT_CMD            ((2<<29)|(0x53<<22))
 #define XY_MONO_SRC_COPY_IMM_BLT       ((2<<29)|(0x71<<22)|5)
 #define   BLT_WRITE_A                  (2<<20)
 #define   BLT_WRITE_RGB                        (1<<20)
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 78b4235f9c0f..f109a1736ed8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1136,7 +1136,7 @@ i830_emit_bb_start(struct i915_request *rq,
                 * stable batch scratch bo area (so that the CS never
                 * stumbles over its tlb invalidation bug) ...
                 */
-               *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+               *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
                *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
                *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
                *cs++ = cs_offset;
-- 
2.20.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to