When a GPUVA remap is triggered as a consequence of a VM operation
intersecting with existing VAs, one must take into account whether the
split VAs were repeat-mapped when mapping them again.

Signed-off-by: Adrián Larumbe <[email protected]>
---
 drivers/gpu/drm/panthor/panthor_mmu.c | 63 +++++++++++++++++----------
 1 file changed, 39 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index a357063bb9f6..ba322e2029b9 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -2124,41 +2124,52 @@ static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
         DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
         DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
 
-static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
+static int
+panthor_vm_map_range(struct panthor_vm *vm, bool repeat, struct sg_table *sgt,
+                    u64 addr, u64 offset, u64 size, u32 repeat_range, int prot)
 {
-       struct panthor_vm *vm = priv;
-       struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
-       struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
        int ret;
 
-       if (!vma)
-               return -EINVAL;
-
-       panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
+       if (!size)
+               return 0;
 
-       if (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT) {
-               u64 repeat_count = op->map.va.range;
+       if (repeat) {
+               u64 repeat_count = size;
 
-               do_div(repeat_count, op->map.gem.repeat_range);
+               do_div(repeat_count, repeat_range);
 
                if (drm_WARN_ON(&vm->ptdev->base, !repeat_count))
                        return -EINVAL;
 
-               ret = panthor_vm_repeated_map_pages(vm, op->map.va.addr,
-                                                   flags_to_prot(vma->flags),
-                                                   op_ctx->map.sgt,
-                                                   op->map.gem.offset,
-                                                   op->map.gem.repeat_range,
+               ret = panthor_vm_repeated_map_pages(vm, addr, prot, sgt,
+                                                   offset, repeat_range,
                                                    repeat_count);
                if (!ret)
                        vm->base.flags |= DRM_GPUVM_HAS_REPEAT_MAPS;
        } else {
-               ret = panthor_vm_map_pages(vm, op->map.va.addr,
-                                          flags_to_prot(vma->flags),
-                                          op_ctx->map.sgt, op->map.gem.offset,
-                                          op->map.va.range);
+               ret = panthor_vm_map_pages(vm, addr, prot, sgt,
+                                          offset, size);
        }
 
+       return ret;
+}
+
+static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
+{
+       struct panthor_vm *vm = priv;
+       struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
+       struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
+       int ret;
+
+       if (!vma)
+               return -EINVAL;
+
+       panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
+
+       ret = panthor_vm_map_range(vm, op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT,
+                                  op_ctx->map.sgt, op->map.va.addr, op->map.gem.offset,
+                                  op->map.va.range, op->map.gem.repeat_range,
+                                  flags_to_prot(vma->flags));
        if (ret) {
                panthor_vm_op_ctx_return_vma(op_ctx, vma);
                return ret;
@@ -2262,8 +2273,10 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
                u64 offset = op->remap.prev->gem.offset + unmap_start - op->remap.prev->va.addr;
                u64 size = op->remap.prev->va.addr + op->remap.prev->va.range - unmap_start;
 
-               ret = panthor_vm_map_pages(vm, unmap_start, flags_to_prot(unmap_vma->flags),
-                                          bo->base.sgt, offset, size);
+               ret = panthor_vm_map_range(vm, op->remap.prev->flags & DRM_GPUVA_REPEAT,
+                                          bo->base.sgt, op->remap.prev->va.addr, offset,
+                                          size, op->remap.prev->gem.repeat_range,
+                                          flags_to_prot(unmap_vma->flags));
                if (ret)
                        return ret;
 
@@ -2276,8 +2289,10 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
                u64 addr = op->remap.next->va.addr;
                u64 size = unmap_start + unmap_range - op->remap.next->va.addr;

-               ret = panthor_vm_map_pages(vm, addr, flags_to_prot(unmap_vma->flags),
-                                          bo->base.sgt, op->remap.next->gem.offset, size);
+               ret = panthor_vm_map_range(vm, op->remap.next->flags & DRM_GPUVA_REPEAT,
+                                          bo->base.sgt, addr, op->remap.next->gem.offset,
+                                          size, op->remap.next->gem.repeat_range,
+                                          flags_to_prot(unmap_vma->flags));
                if (ret)
                        return ret;
 
-- 
2.53.0

Reply via email to