Track the userq object over its lifetime: take a reference on the
buffer counter when the queue is created and drop it when the queue
is destroyed.

Suggested-by: Alex Deucher <alexander.deuc...@amd.com>
Signed-off-by: Prike Liang <prike.li...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 38 +++++++++++++++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 11 +++++++
 2 files changed, 46 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index f9e817acfdea..7b7dae436e5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -81,7 +81,18 @@ int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
 int amdgpu_userq_buffer_va_get(struct amdgpu_usermode_queue *queue,
                               struct amdgpu_bo_va_mapping *va_map, u64 addr)
 {
+       struct amdgpu_userq_va_cursor *va_cursor;
+
+       va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+       if (!va_cursor)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&va_cursor->list);
+       va_cursor->gpu_addr = addr;
+
        atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+       list_add(&va_cursor->list, &queue->userq_va_list);
+
        return 0;
 
 }
@@ -112,15 +124,13 @@ int amdgpu_userq_buffer_va_put(struct 
amdgpu_usermode_queue *queue,
 {
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_vm *vm = queue->vm;
-       u64 user_addr;
        int r;
 
-       user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
        r = amdgpu_bo_reserve(vm->root.bo, false);
        if (r)
                return r;
 
-       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!mapping)
                goto out_err;
 
@@ -133,6 +143,23 @@ int amdgpu_userq_buffer_va_put(struct 
amdgpu_usermode_queue *queue,
        return -EINVAL;
 }
 
+int amdgpu_userq_buffer_vas_put(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm,
+                               struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+       int r = 0;
+
+       list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+               r += amdgpu_userq_buffer_va_put(queue, va_cursor->gpu_addr);
+               dev_dbg(adev->dev, "put the userq:%p va:%llx r:%d\n",
+                       queue, va_cursor->gpu_addr, r);
+               list_del(&va_cursor->list);
+               kfree(va_cursor);
+       }
+       return r;
+}
+
 static int
 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
                          struct amdgpu_usermode_queue *queue)
@@ -195,9 +222,12 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_device *adev = uq_mgr->adev;
        const struct amdgpu_userq_funcs *uq_funcs = 
adev->userq_funcs[queue->queue_type];
 
+       /* Drop the userq reference. */
+       amdgpu_userq_buffer_vas_put(adev, queue->vm, queue);
        uq_funcs->mqd_destroy(uq_mgr, queue);
        amdgpu_userq_fence_driver_free(queue);
        idr_remove(&uq_mgr->userq_idr, queue_id);
+       list_del(&queue->userq_va_list);
        kfree(queue);
 }
 
@@ -518,6 +548,8 @@ amdgpu_userq_create(struct drm_file *filp, union 
drm_amdgpu_userq *args)
                goto unlock;
        }
 
+       INIT_LIST_HEAD(&queue->userq_va_list);
+
        /* Validate the userq virtual address.*/
        if (amdgpu_userq_input_va_validate(queue, &fpriv->vm, 
args->in.queue_va, args->in.queue_size) ||
            amdgpu_userq_input_va_validate(queue, &fpriv->vm, args->in.rptr_va, 
AMDGPU_GPU_PAGE_SIZE ) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 6044afeeb741..3623c8f6899a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -48,6 +48,11 @@ struct amdgpu_userq_obj {
        struct amdgpu_bo *obj;
 };
 
+struct amdgpu_userq_va_cursor {
+       uint64_t                gpu_addr;
+       struct list_head        list;
+};
+
 struct amdgpu_usermode_queue {
        int                     queue_type;
        enum amdgpu_userq_state state;
@@ -69,6 +75,8 @@ struct amdgpu_usermode_queue {
        int                     priority;
        struct dentry           *debugfs_queue;
        uint64_t                generation;
+
+       struct list_head        userq_va_list;
 };
 
 struct amdgpu_userq_funcs {
@@ -146,4 +154,7 @@ int amdgpu_userq_buffer_va_get(struct amdgpu_usermode_queue 
*queue,
                               struct amdgpu_bo_va_mapping *va_map, u64 addr);
 bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr);
 int amdgpu_userq_buffer_va_put(struct amdgpu_usermode_queue *queue, u64 addr);
+int amdgpu_userq_buffer_vas_put(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm,
+                               struct amdgpu_usermode_queue *queue);
 #endif
-- 
2.34.1

Reply via email to