amdgpu_userq_create() has multiple error paths that jump directly to the
'unlock' label, which only releases the mutex. This leaks resources that
were allocated earlier in the function:

- When amdgpu_userq_fence_driver_alloc() fails, the queue struct,
  doorbell BO, and VA list entries are leaked.
- When xa_store_irq() fails, the MQD and fence driver are leaked in
  addition to the queue struct.
- When kasprintf() fails for the queue debug name, the entire queue with
  all its resources (xa entry, MQD, fence driver, queue struct) is
  leaked.

Fix this by adding cleanup labels in reverse allocation order (erase_xa,
destroy_mqd, free_fence_driver, free_queue) before the existing unlock
label, and routing each error path to the correct label that matches the
resources allocated up to that point.

Signed-off-by: David Baum <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 37 +++++++++++------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 7c4503508..93c44798c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -819,17 +819,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
 	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
 		r = -EINVAL;
-		kfree(queue);
-		goto unlock;
+		goto free_queue;
 	}
 
 	/* Convert relative doorbell offset into absolute doorbell index */
 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
 	if (index == (uint64_t)-EINVAL) {
 		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
-		kfree(queue);
 		r = -EINVAL;
-		goto unlock;
+		goto free_queue;
 	}
 
 	queue->doorbell_index = index;
@@ -837,15 +835,13 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
 	if (r) {
 		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
-		goto unlock;
+		goto free_queue;
 	}
 
 	r = uq_funcs->mqd_create(queue, &args->in);
 	if (r) {
 		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
-		amdgpu_userq_fence_driver_free(queue);
-		kfree(queue);
-		goto unlock;
+		goto free_fence_driver;
 	}
 
 	/* drop this refcount during queue destroy */
@@ -855,21 +851,17 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	down_read(&adev->reset_domain->sem);
 	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue,
 				GFP_KERNEL));
 	if (r) {
-		kfree(queue);
 		up_read(&adev->reset_domain->sem);
-		goto unlock;
+		goto destroy_mqd;
 	}
 
 	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT),
 		     GFP_KERNEL);
 	if (r) {
 		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
-		amdgpu_userq_fence_driver_free(queue);
-		uq_funcs->mqd_destroy(queue);
-		kfree(queue);
 		r = -ENOMEM;
 		up_read(&adev->reset_domain->sem);
-		goto unlock;
+		goto destroy_mqd;
 	}
 	up_read(&adev->reset_domain->sem);
@@ -884,18 +876,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 		r = amdgpu_userq_map_helper(queue);
 		if (r) {
 			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
-			xa_erase(&uq_mgr->userq_xa, qid);
-			amdgpu_userq_fence_driver_free(queue);
-			uq_funcs->mqd_destroy(queue);
-			kfree(queue);
-			goto unlock;
+			goto erase_xa;
 		}
 	}
 
 	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
 	if (!queue_name) {
 		r = -ENOMEM;
-		goto unlock;
+		goto erase_xa;
 	}
 
 #if defined(CONFIG_DEBUG_FS)
@@ -908,7 +896,16 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	args->out.queue_id = qid;
 	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
 
+	goto unlock;
+erase_xa:
+	xa_erase_irq(&uq_mgr->userq_xa, qid);
+destroy_mqd:
+	uq_funcs->mqd_destroy(queue);
+free_fence_driver:
+	amdgpu_userq_fence_driver_free(queue);
+free_queue:
+	kfree(queue);
 unlock:
 	mutex_unlock(&uq_mgr->userq_mutex);
 
 	return r;
-- 
2.50.1 (Apple Git-155)