From: Mary Guillemard <[email protected]>

Currently, memory allocated by the VM_BIND uAPI can only have a
granularity matching PAGE_SIZE (4KiB in the common case).

To improve memory management and to allow big (64KiB) and huge (2MiB)
pages later in the series, pass the page shift around the internals of
UVMM.
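
For illustration, the raw VMM helpers now take the shift explicitly
instead of hard-coding PAGE_SHIFT. A minimal sketch of a caller (the
surrounding variables are placeholders, not a hunk from this patch):

        u8 page_shift = PAGE_SHIFT; /* 12 in the common case; 16 (64KiB)
                                     * and 21 (2MiB) become possible
                                     * later in the series */

        ret = nouveau_uvmm_vmm_map(uvmm, addr, range, page_shift,
                                   bo_offset, kind, mem);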

Signed-off-by: Mary Guillemard <[email protected]>
Co-developed-by: Mohamed Ahmed <[email protected]>
Signed-off-by: Mohamed Ahmed <[email protected]>
---
 drivers/gpu/drm/nouveau/nouveau_uvmm.c | 46 ++++++++++++++++----------
 drivers/gpu/drm/nouveau/nouveau_uvmm.h |  1 +
 2 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 79eefdfd08a2..2cd0835b05e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -107,34 +107,34 @@ nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
 
 static int
 nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
-                    u64 addr, u64 range)
+                    u64 addr, u64 range, u8 page_shift)
 {
        struct nvif_vmm *vmm = &uvmm->vmm.vmm;
 
-       return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
+       return nvif_vmm_raw_get(vmm, addr, range, page_shift);
 }
 
 static int
 nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
-                    u64 addr, u64 range)
+                    u64 addr, u64 range, u8 page_shift)
 {
        struct nvif_vmm *vmm = &uvmm->vmm.vmm;
 
-       return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
+       return nvif_vmm_raw_put(vmm, addr, range, page_shift);
 }
 
 static int
 nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
-                      u64 addr, u64 range, bool sparse)
+                      u64 addr, u64 range, u8 page_shift, bool sparse)
 {
        struct nvif_vmm *vmm = &uvmm->vmm.vmm;
 
-       return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
+       return nvif_vmm_raw_unmap(vmm, addr, range, page_shift, sparse);
 }
 
 static int
 nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
-                    u64 addr, u64 range,
+                    u64 addr, u64 range, u8 page_shift,
                     u64 bo_offset, u8 kind,
                     struct nouveau_mem *mem)
 {
@@ -163,7 +163,7 @@ nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
                return -ENOSYS;
        }
 
-       return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
+       return nvif_vmm_raw_map(vmm, addr, range, page_shift,
                                &args, argc,
                                &mem->mem, bo_offset);
 }
@@ -182,8 +182,9 @@ nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
 {
        u64 addr = uvma->va.va.addr;
        u64 range = uvma->va.va.range;
+       u8 page_shift = uvma->page_shift;
 
-       return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
+       return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range, page_shift);
 }
 
 static int
@@ -193,9 +194,11 @@ nouveau_uvma_map(struct nouveau_uvma *uvma,
        u64 addr = uvma->va.va.addr;
        u64 offset = uvma->va.gem.offset;
        u64 range = uvma->va.va.range;
+       u8 page_shift = uvma->page_shift;
 
        return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
-                                   offset, uvma->kind, mem);
+                                   page_shift, offset, uvma->kind,
+                                   mem);
 }
 
 static int
@@ -203,12 +206,13 @@ nouveau_uvma_unmap(struct nouveau_uvma *uvma)
 {
        u64 addr = uvma->va.va.addr;
        u64 range = uvma->va.va.range;
+       u8 page_shift = uvma->page_shift;
        bool sparse = !!uvma->region;
 
        if (drm_gpuva_invalidated(&uvma->va))
                return 0;
 
-       return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+       return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
 }
 
 static int
@@ -501,7 +505,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
 
                        if (vmm_get_range)
                                nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
-                                                    vmm_get_range);
+                                                    vmm_get_range,
+                                                    PAGE_SHIFT);
                        break;
                }
                case DRM_GPUVA_OP_REMAP: {
@@ -528,6 +533,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
                        u64 ustart = va->va.addr;
                        u64 urange = va->va.range;
                        u64 uend = ustart + urange;
+                       u8 page_shift = uvma_from_va(va)->page_shift;
 
                        /* Nothing to do for mappings we merge with. */
                        if (uend == vmm_get_start ||
@@ -538,7 +544,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
                                u64 vmm_get_range = ustart - vmm_get_start;
 
                                nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
-                                                    vmm_get_range);
+                                                    vmm_get_range,
+                                                    page_shift);
                        }
                        vmm_get_start = uend;
                        break;
@@ -592,6 +599,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
 
        uvma->region = args->region;
        uvma->kind = args->kind;
+       uvma->page_shift = PAGE_SHIFT;
 
        drm_gpuva_map(&uvmm->base, &uvma->va, op);
 
@@ -633,7 +641,8 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
 
                        if (vmm_get_range) {
                                ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
-                                                          vmm_get_range);
+                                                          vmm_get_range,
+                                                          new->map->page_shift);
                                if (ret) {
                                        op_map_prepare_unwind(new->map);
                                        goto unwind;
@@ -689,6 +698,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
                        u64 ustart = va->va.addr;
                        u64 urange = va->va.range;
                        u64 uend = ustart + urange;
+                       u8 page_shift = uvma_from_va(va)->page_shift;
 
                        op_unmap_prepare(u);
 
@@ -704,7 +714,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
                                u64 vmm_get_range = ustart - vmm_get_start;
 
                                ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
-                                                          vmm_get_range);
+                                                          vmm_get_range, page_shift);
                                if (ret) {
                                        op_unmap_prepare_unwind(va);
                                        goto unwind;
@@ -799,10 +809,11 @@ op_unmap_range(struct drm_gpuva_op_unmap *u,
               u64 addr, u64 range)
 {
        struct nouveau_uvma *uvma = uvma_from_va(u->va);
+       u8 page_shift = uvma->page_shift;
        bool sparse = !!uvma->region;
 
        if (!drm_gpuva_invalidated(u->va))
-               nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+               nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
 }
 
 static void
@@ -882,6 +893,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
                        struct drm_gpuva_op_map *n = r->next;
                        struct drm_gpuva *va = r->unmap->va;
                        struct nouveau_uvma *uvma = uvma_from_va(va);
+                       u8 page_shift = uvma->page_shift;
 
                        if (unmap) {
                                u64 addr = va->va.addr;
@@ -893,7 +905,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
                                if (n)
                                        end = n->va.addr;
 
-                               nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
+                               nouveau_uvmm_vmm_put(uvmm, addr, end - addr, page_shift);
                        }
 
                        nouveau_uvma_gem_put(uvma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index 9d3c348581eb..51925711ae90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -33,6 +33,7 @@ struct nouveau_uvma {
 
        struct nouveau_uvma_region *region;
        u8 kind;
+       u8 page_shift;
 };
 
 #define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base)
-- 
2.51.0
