Some platforms have an IOMMU that can map non-contiguous physical memory into
a contiguous GPU virtual address range. We can use this feature to enable big
page mappings on scattered small pages. To achieve that, we also need the
corresponding changes in subdev/mmu.

Signed-off-by: Vince Hsu <[email protected]>
---
 drm/nouveau/nouveau_bo.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drm/nouveau/nouveau_bo.c b/drm/nouveau/nouveau_bo.c
index 77326e344dad..da76ee1121e4 100644
--- a/drm/nouveau/nouveau_bo.c
+++ b/drm/nouveau/nouveau_bo.c
@@ -221,6 +221,11 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        if (drm->client.vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+
+               if ((flags & TTM_PL_FLAG_TT) &&
+                               drm->client.vm->mmu->iommu_capable &&
+                               (size % (1 << drm->client.vm->mmu->lpg_shift)) == 0)
+                       nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
        }
 
        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -1641,6 +1646,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
            (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
             nvbo->page_shift != vma->vm->mmu->lpg_shift))
                nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
+       else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+               vma->vm->mmu->iommu_capable &&
+               nvbo->page_shift == vma->vm->mmu->lpg_shift)
+               nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
 
        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
-- 
2.1.4

_______________________________________________
Nouveau mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/nouveau

Reply via email to