On 31-Mar-26 8:29 AM, Prike Liang wrote:
Replace the IDR-based PASID allocator with an XArray. The XArray's
internal state machine and locking handle memory allocation correctly,
removing the need for the external spinlock.


There is already another one -

https://patchwork.freedesktop.org/patch/715381/?series=164079&rev=3

Thanks,
Lijo

Signed-off-by: Prike Liang <[email protected]>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 49 +++++++++++--------------
  1 file changed, 21 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index e495a8fa13fd..7b0afeddbb05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -22,7 +22,7 @@
   */
  #include "amdgpu_ids.h"
-#include <linux/idr.h>
+#include <linux/xarray.h>
  #include <linux/dma-fence-array.h>
@@ -35,13 +35,12 @@
   * PASIDs are global address space identifiers that can be shared
   * between the GPU, an IOMMU and the driver. VMs on different devices
   * may use the same PASID if they share the same address
- * space. Therefore PASIDs are allocated using IDR cyclic allocator
- * (similar to kernel PID allocation) which naturally delays reuse.
- * VMs are looked up from the PASID per amdgpu_device.
+ * space. Therefore PASIDs are allocated using XArray cyclic allocation
+ * which naturally delays reuse. VMs are looked up from the PASID per
+ * amdgpu_device.
   */
-static DEFINE_IDR(amdgpu_pasid_idr);
-static DEFINE_SPINLOCK(amdgpu_pasid_idr_lock);
+static DEFINE_XARRAY_ALLOC(amdgpu_pasid_xa);
+static u32 amdgpu_pasid_next;
/* Helper to free pasid from a fence callback */
  struct amdgpu_pasid_cb {
@@ -52,33 +51,31 @@ struct amdgpu_pasid_cb {
  /**
   * amdgpu_pasid_alloc - Allocate a PASID
   * @bits: Maximum width of the PASID in bits, must be at least 1
- *
- * Uses kernel's IDR cyclic allocator (same as PID allocation).
- * Allocates sequentially with automatic wrap-around.
- *
+ *
+ * Uses XArray cyclic allocator. Allocates sequentially with automatic
+ * wrap-around, delaying PASID reuse naturally.
+ *
   * Returns a positive integer on success. Returns %-EINVAL if bits==0.
   * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
   * memory allocation failure.
   */
  int amdgpu_pasid_alloc(unsigned int bits)
  {
-       int pasid;
+       u32 pasid;
+       int r;
if (bits == 0)
                return -EINVAL;
- spin_lock(&amdgpu_pasid_idr_lock);
-       /* TODO: Need to replace the idr with an xarry, and then
-        * handle the internal locking with ATOMIC safe paths.
-        */
-       pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1,
-                                1U << bits, GFP_ATOMIC);
-       spin_unlock(&amdgpu_pasid_idr_lock);
-
-       if (pasid >= 0)
-               trace_amdgpu_pasid_allocated(pasid);
+       r = xa_alloc_cyclic(&amdgpu_pasid_xa, &pasid, NULL,
+                           XA_LIMIT(1, (1U << bits) - 1),
+                           &amdgpu_pasid_next, GFP_KERNEL);
+       if (r < 0)
+               return r;
+       trace_amdgpu_pasid_allocated(pasid);
        return pasid;
  }
/**
@@ -89,9 +86,7 @@ void amdgpu_pasid_free(u32 pasid)
  {
        trace_amdgpu_pasid_freed(pasid);
- spin_lock(&amdgpu_pasid_idr_lock);
-       idr_remove(&amdgpu_pasid_idr, pasid);
-       spin_unlock(&amdgpu_pasid_idr_lock);
+       xa_erase(&amdgpu_pasid_xa, pasid);
  }
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
@@ -630,11 +625,9 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
  /**
   * amdgpu_pasid_mgr_cleanup - cleanup PASID manager
   *
- * Cleanup the IDR allocator.
+ * Cleanup the XArray allocator.
   */
  void amdgpu_pasid_mgr_cleanup(void)
  {
-       spin_lock(&amdgpu_pasid_idr_lock);
-       idr_destroy(&amdgpu_pasid_idr);
-       spin_unlock(&amdgpu_pasid_idr_lock);
+       xa_destroy(&amdgpu_pasid_xa);
  }

Reply via email to