Update the shadow page directory alongside the real one: track the
GPU address of each shadow page table in amdgpu_vm_pt, generalize
amdgpu_vm_update_page_directory() to write either the real page
directory or its shadow, and update the shadow first so that it can
be used to recover the page tables after a GPU reset.

Change-Id: I8d0c625c9f1c9a16b8e2e915831590be5a9a5242
Signed-off-by: Chunming Zhou <[email protected]>
---
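For reviewers, a minimal sketch of how the shadow-aware update is
meant to pair with shadow-based recovery after a GPU reset. The
helper below (example_vm_reset_recover) is hypothetical and
illustrative only, not part of this patch; it calls only entry
points that the patch itself touches:

	/*
	 * Illustrative sketch, not part of this patch: restore the
	 * page tables from their shadow copies after a reset, then
	 * re-validate the page directory.  The update writes the
	 * shadow first and then the real directory, so the backup
	 * is never older than what the GPU sees.
	 */
	static int example_vm_reset_recover(struct amdgpu_device *adev,
					    struct amdgpu_vm *vm)
	{
		int r;

		/* Restore page tables from their shadow copies. */
		r = amdgpu_vm_recover_page_table_from_shadow(adev, vm);
		if (r)
			return r;

		/* Shadow PD is written first, then the real PD. */
		return amdgpu_vm_update_page_directory(adev, vm);
	}
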
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 67 +++++++++++++++++++++++-----------
 2 files changed, 46 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 90805b4..c168212 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -883,6 +883,7 @@ struct amdgpu_ring {
 struct amdgpu_vm_pt {
        struct amdgpu_bo_list_entry     entry;
        uint64_t                        addr;
+       uint64_t                        shadow_addr;
 };
 
 struct amdgpu_vm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index daf4098..8eb91a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -597,23 +597,13 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
        return result;
 }
 
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                   struct amdgpu_vm *vm)
+int amdgpu_vm_update_page_directory_or_shadow(struct amdgpu_device *adev,
+                                             struct amdgpu_vm *vm,
+                                             bool shadow)
 {
        struct amdgpu_ring *ring;
-       struct amdgpu_bo *pd = vm->page_directory;
+       struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+               vm->page_directory;
        uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
        uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
        uint64_t last_pde = ~0, last_pt = ~0;
@@ -647,10 +637,17 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                if (bo == NULL)
                        continue;
 
-               pt = amdgpu_bo_gpu_offset(bo);
-               if (vm->page_tables[pt_idx].addr == pt)
-                       continue;
-               vm->page_tables[pt_idx].addr = pt;
+               if (!shadow) {
+                       pt = amdgpu_bo_gpu_offset(bo);
+                       if (vm->page_tables[pt_idx].addr == pt)
+                               continue;
+                       vm->page_tables[pt_idx].addr = pt;
+               } else {
+                       pt = amdgpu_bo_gpu_offset(bo);
+                       if (vm->page_tables[pt_idx].shadow_addr == pt)
+                               continue;
+                       vm->page_tables[pt_idx].shadow_addr = pt;
+               }
 
                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
@@ -678,10 +675,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
        if (vm_update_params.ib->length_dw != 0) {
                amdgpu_ring_pad_ib(ring, vm_update_params.ib);
-               amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
-                                AMDGPU_FENCE_OWNER_VM);
+               if (!shadow)
+                       amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+                                        AMDGPU_FENCE_OWNER_VM);
                WARN_ON(vm_update_params.ib->length_dw > ndw);
-               r = amdgpu_job_submit(job, ring, &vm->entity,
+               r = amdgpu_job_submit(job, ring,
+                                     shadow ? &vm->shadow_entity : &vm->entity,
                                      AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
                        goto error_free;
@@ -702,6 +701,29 @@ error_free:
        return r;
 }
 
+/**
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Updates the shadow page directory first and then the real one,
+ * so the shadow copy is never older than the real page directory.
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+                                   struct amdgpu_vm *vm)
+{
+       int r;
+
+       r = amdgpu_vm_update_page_directory_or_shadow(adev, vm, true);
+       if (r)
+               return r;
+       return amdgpu_vm_update_page_directory_or_shadow(adev, vm, false);
+}
+
 int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
                                             struct amdgpu_vm *vm)
 {
@@ -1410,6 +1432,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                entry->tv.shared = true;
                entry->user_pages = NULL;
                vm->page_tables[pt_idx].addr = 0;
+               vm->page_tables[pt_idx].shadow_addr = 0;
        }
 
        return 0;
-- 
1.9.1
