Clean up the amdgpu_userq_create() function's failure handling using
the goto method. This avoids replicating the cleanup code for every
failure condition.
Signed-off-by: Sunil Khatri <[email protected]>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 37 ++++++++++-------------
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 937403beacdc..115f294ae8da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -818,17 +818,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va,
AMDGPU_GPU_PAGE_SIZE) ||
amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va,
AMDGPU_GPU_PAGE_SIZE)) {
r = -EINVAL;
- kfree(queue);
- goto unlock;
+ goto free_queue;
}
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for
queue\n");
- kfree(queue);
r = -EINVAL;
- goto unlock;
+ goto free_queue;
}
queue->doorbell_index = index;
@@ -836,15 +834,13 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_fence_driver_alloc(adev, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
- goto unlock;
+ goto free_queue;
}
r = uq_funcs->mqd_create(queue, &args->in);
if (r) {
drm_file_err(uq_mgr->file, "Failed to create Queue\n");
- amdgpu_userq_fence_driver_free(queue);
- kfree(queue);
- goto unlock;
+ goto clean_fence_driver;
}
/* don't map the queue if scheduling is halted */
@@ -858,10 +854,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- uq_funcs->mqd_destroy(queue);
- amdgpu_userq_fence_driver_free(queue);
- kfree(queue);
- goto unlock;
+ goto clean_mqd;
}
}
@@ -877,12 +870,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (!skip_map_queue)
amdgpu_userq_unmap_helper(queue);
- uq_funcs->mqd_destroy(queue);
- amdgpu_userq_fence_driver_free(queue);
- kfree(queue);
- r = -ENOMEM;
up_read(&adev->reset_domain->sem);
- goto unlock;
+ r = -ENOMEM;
+ goto clean_mqd;
}
r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
@@ -891,11 +881,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (!skip_map_queue)
amdgpu_userq_unmap_helper(queue);
- uq_funcs->mqd_destroy(queue);
- amdgpu_userq_fence_driver_free(queue);
- kfree(queue);
up_read(&adev->reset_domain->sem);
- goto unlock;
+ goto clean_mqd;
}
up_read(&adev->reset_domain->sem);
@@ -911,7 +898,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
args->out.queue_id = qid;
atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return 0;
+clean_mqd:
+ uq_funcs->mqd_destroy(queue);