Validate whether the userq VA is mapped before trying to resume
the queue.
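
For illustration, a minimal userspace sketch of the check this patch
adds; va_entry, va_is_mapped() and queue_vas_mapped() are illustrative
stand-ins, not the amdgpu API. It walks a queue's VA list, accumulates
how many entries are still mapped, and only resumes when at least one
mapping remains:

  #include <stdbool.h>
  #include <stdio.h>

  struct va_entry {
          unsigned long long gpu_addr;
          bool mapped;    /* stand-in for bo_va->userq_va_mapped */
  };

  /* Hypothetical lookup; the driver instead reserves the VM root BO
   * and walks the mapping tree via amdgpu_vm_bo_lookup_mapping(). */
  static bool va_is_mapped(const struct va_entry *e)
  {
          return e->mapped;
  }

  /* Mirrors the any-mapped accumulation in
   * amdgpu_userq_buffer_vas_mapped(): true if any VA is mapped. */
  static bool queue_vas_mapped(const struct va_entry *vas, int n)
  {
          int r = 0;

          for (int i = 0; i < n; i++)
                  r += va_is_mapped(&vas[i]);

          return r != 0;
  }

  int main(void)
  {
          struct va_entry vas[] = { { 0x1000, true }, { 0x2000, false } };

          /* The restore path now skips and invalidates queues that
           * fail this check instead of mapping them blindly. */
          if (!queue_vas_mapped(vas, 2))
                  printf("skip restore: no VA mapping\n");
          else
                  printf("restore queue\n");

          return 0;
  }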

Signed-off-by: Prike Liang <prike.li...@amd.com>
Reviewed-by: Christian König <christian.koe...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 31 ++++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |  2 ++
 2 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 2626a41a8418..fa44a47b2734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -101,15 +101,12 @@ int amdgpu_userq_buffer_va_get(struct amdgpu_usermode_queue *queue,
 bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       u64 user_addr;
        bool r;
 
-       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
-
        if (amdgpu_bo_reserve(vm->root.bo, false))
                return false;
 
-       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
                r = true;
        else
@@ -119,6 +116,24 @@ bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
        return r;
 }
 
+bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
+                                   struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+       int r = 0;
+
+       list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+               r += amdgpu_userq_buffer_va_mapped(vm, va_cursor->gpu_addr);
+               dev_dbg(queue->userq_mgr->adev->dev, "validate the userq mapping: %p va:%llx r:%d\n",
+                       queue, va_cursor->gpu_addr, r);
+       }
+
+       if (r != 0)
+               return true;
+
+       return false;
+}
+
 int amdgpu_userq_buffer_va_put(struct amdgpu_usermode_queue *queue,
                               u64 addr)
 {
@@ -774,11 +789,19 @@ static int
 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
 {
        struct amdgpu_usermode_queue *queue;
+       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
        int queue_id;
        int ret = 0, r;
 
        /* Resume all the queues for this process */
        idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+
+               if (!amdgpu_userq_buffer_vas_mapped(&fpriv->vm, queue)) {
+                       drm_file_err(uq_mgr->file, "trying to restore queue without VA mapping\n");
+                       queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+                       continue;
+               }
+
                r = amdgpu_userq_map_helper(uq_mgr, queue);
                if (r)
                        ret = r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 3623c8f6899a..cd63f7d79a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -157,4 +157,6 @@ int amdgpu_userq_buffer_va_put(struct amdgpu_usermode_queue *queue, u64 addr);
 int amdgpu_userq_buffer_vas_put(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                struct amdgpu_usermode_queue *queue);
+bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
+                                   struct amdgpu_usermode_queue *queue);
 #endif
-- 
2.34.1
