This patch makes the GF100 MMU functions that can be shared with GK20A
non-static and declares them in a new gf100.h header for later use.

Signed-off-by: Vince Hsu <[email protected]>
---
 drm/nouveau/nvkm/subdev/mmu/gf100.c | 28 +++++++---------------
 drm/nouveau/nvkm/subdev/mmu/gf100.h | 46 +++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+), 20 deletions(-)
 create mode 100644 drm/nouveau/nvkm/subdev/mmu/gf100.h
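
As context for reviewers (not part of the patch): a minimal sketch of how a
GK20A MMU backend could reuse the functions exported here. The gk20a_* names
and the per-field values are hypothetical and only mirror the existing
gf100_mmu_ctor(); the actual GK20A implementation is expected to follow in a
later patch.

#include "gf100.h"

struct gk20a_mmu_priv {
        struct nvkm_mmu base;
};

static int
gk20a_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
               struct nvkm_oclass *oclass, void *data, u32 size,
               struct nvkm_object **pobject)
{
        struct gk20a_mmu_priv *priv;
        int ret;

        ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        /* same VA layout as GF100; values mirror gf100_mmu_ctor() */
        priv->base.limit = 1ULL << 40;
        priv->base.dma_bits = 40;
        priv->base.pgt_bits = 27 - 12;
        priv->base.spg_shift = 12;
        priv->base.lpg_shift = 17;

        /* reuse the page-table helpers made public by this patch */
        priv->base.create = gf100_vm_create;
        priv->base.map_pgt = gf100_vm_map_pgt;
        priv->base.map = gf100_vm_map;
        priv->base.map_sg = gf100_vm_map_sg;
        priv->base.unmap = gf100_vm_unmap;
        priv->base.flush = gf100_vm_flush;
        return 0;
}

gf100_vm_addr() stays static inline in the header, so both chips can share
the PTE encoding without an extra export.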

diff --git a/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 294cda37f068..b067ded5d3be 100644
--- a/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -29,6 +29,8 @@
 
 #include <core/gpuobj.h>
 
+#include "gf100.h"
+
 struct gf100_mmu_priv {
        struct nvkm_mmu base;
 };
@@ -74,7 +76,7 @@ const u8 gf100_pte_storage_type_map[256] =
 };
 
 
-static void
+void
 gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
 {
        u32 pde[2] = { 0, 0 };
@@ -88,21 +90,7 @@ gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
        nv_wo32(pgd, (index * 8) + 4, pde[1]);
 }
 
-static inline u64
-gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
-       phys >>= 8;
-
-       phys |= 0x00000001; /* present */
-       if (vma->access & NV_MEM_ACCESS_SYS)
-               phys |= 0x00000002;
-
-       phys |= ((u64)target  << 32);
-       phys |= ((u64)memtype << 36);
-       return phys;
-}
-
-static void
+void
 gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
             struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
@@ -127,7 +115,7 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
        }
 }
 
-static void
+void
 gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
                struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
@@ -144,7 +132,7 @@ gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
        }
 }
 
-static void
+void
 gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
        pte <<= 3;
@@ -155,7 +143,7 @@ gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
        }
 }
 
-static void
+void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
        struct gf100_mmu_priv *priv = (void *)vm->mmu;
@@ -191,7 +179,7 @@ gf100_vm_flush(struct nvkm_vm *vm)
        mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
-static int
+int
 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
                struct nvkm_vm **pvm)
 {
diff --git a/drm/nouveau/nvkm/subdev/mmu/gf100.h b/drm/nouveau/nvkm/subdev/mmu/gf100.h
new file mode 100644
index 000000000000..a66ca45bc755
--- /dev/null
+++ b/drm/nouveau/nvkm/subdev/mmu/gf100.h
@@ -0,0 +1,46 @@
+#ifndef __GF100_MMU_PRIV__
+#define __GF100_MMU_PRIV__
+
+#include <subdev/mmu.h>
+
+struct nv04_mmu_priv {
+       struct nvkm_mmu base;
+       struct nvkm_vm *vm;
+       dma_addr_t null;
+       void *nullp;
+};
+
+int
+gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+               struct nvkm_vm **pvm);
+
+void
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index,
+               struct nvkm_gpuobj *pgt[2]);
+void
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+            struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta);
+void
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+               struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list);
+void
+gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
+
+void
+gf100_vm_flush(struct nvkm_vm *vm);
+
+static inline u64
+gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+       phys >>= 8;
+
+       phys |= 0x00000001; /* present */
+       if (vma->access & NV_MEM_ACCESS_SYS)
+               phys |= 0x00000002;
+
+       phys |= ((u64)target  << 32);
+       phys |= ((u64)memtype << 36);
+       return phys;
+}
+
+#endif
-- 
2.1.4
