On 07/07/2023 09:17, Christian König wrote:


Am 06.07.23 um 14:36 schrieb Shashank Sharma:
This patch adds code to clean up any leftover user queues which
a user may have failed to destroy due to a crash or any other
programming error.

Cc: Alex Deucher <alexander.deuc...@amd.com>
Cc: Christian Koenig <christian.koe...@amd.com>
Suggested-by: Bas Nieuwenhuizen <b...@basnieuwenhuizen.nl>
Signed-off-by: Bas Nieuwenhuizen <b...@basnieuwenhuizen.nl>
Signed-off-by: Shashank Sharma <shashank.sha...@amd.com>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 31 ++++++++++++++++---
  1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index 61064266c4f8..6e32e2854a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -57,12 +57,23 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
      return index;
  }
  +static void
+amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+             struct amdgpu_usermode_queue *queue,
+             int queue_id)
+{
+    const struct amdgpu_userq_funcs *uq_funcs = uq_mgr->userq_funcs[queue->queue_type];
+
+    uq_funcs->mqd_destroy(uq_mgr, queue);
+    idr_remove(&uq_mgr->userq_idr, queue_id);
+    kfree(queue);
+}
+
  static int
  amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
  {
      struct amdgpu_fpriv *fpriv = filp->driver_priv;
      struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
-    const struct amdgpu_userq_funcs *uq_funcs;
      struct amdgpu_usermode_queue *queue;
        mutex_lock(&uq_mgr->userq_mutex);
@@ -73,11 +84,8 @@ amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
          mutex_unlock(&uq_mgr->userq_mutex);
          return -EINVAL;
      }
-    uq_funcs = uq_mgr->userq_funcs[queue->queue_type];
-    uq_funcs->mqd_destroy(uq_mgr, queue);
-    idr_remove(&uq_mgr->userq_idr, queue_id);
-    kfree(queue);
  +    amdgpu_userqueue_cleanup(uq_mgr, queue, queue_id);
      mutex_unlock(&uq_mgr->userq_mutex);
      return 0;
  }
@@ -193,8 +201,21 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_devi
      return 0;
  }
  +static int amdgpu_userqueue_cleanup_residue(int queue_id, void *ptr, void *data)
+{
+    struct amdgpu_userq_mgr *uq_mgr = data;
+    struct amdgpu_usermode_queue *queue = ptr;
+
+    amdgpu_userqueue_cleanup(uq_mgr, queue, queue_id);
+    return 0;
+}
+
  void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
  {
+    idr_for_each(&userq_mgr->userq_idr,
+             amdgpu_userqueue_cleanup_residue,
+             userq_mgr);
+

Better to use idr_for_each_entry() here, which avoids the mid-layer callback function.

Apart from that, it would be nice to merge this patch into the original one that adds the user queues, but that's really only a nice-to-have.

Noted, will check that out.

- Shashank


Christian.

idr_destroy(&userq_mgr->userq_idr);
      mutex_destroy(&userq_mgr->userq_mutex);
  }

Reply via email to