On 6/17/25 05:07, Alex Deucher wrote:
> Just use kmalloc for the fences in the rare case we need
> an independent fence.
>
> Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
This also means that we can nuke the two different fence implementations here; see amdgpu_job_fence_free(). But this patch alone is

Reviewed-by: Christian König <christian.koe...@amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  3 ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  5 -----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 21 +++------------------
>  3 files changed, 3 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 5e2f086d2c99e..534d999b1433d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -470,9 +470,6 @@ struct amdgpu_sa_manager {
>  	void *cpu_ptr;
>  };
>
> -int amdgpu_fence_slab_init(void);
> -void amdgpu_fence_slab_fini(void);
> -
>  /*
>   * IRQS.
>   */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 7f8fa69300bf4..d645fa9bdff3b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -3113,10 +3113,6 @@ static int __init amdgpu_init(void)
>  	if (r)
>  		goto error_sync;
>
> -	r = amdgpu_fence_slab_init();
> -	if (r)
> -		goto error_fence;
> -
>  	r = amdgpu_userq_fence_slab_init();
>  	if (r)
>  		goto error_fence;
> @@ -3151,7 +3147,6 @@ static void __exit amdgpu_exit(void)
>  	amdgpu_unregister_atpx_handler();
>  	amdgpu_acpi_release();
>  	amdgpu_sync_fini();
> -	amdgpu_fence_slab_fini();
>  	amdgpu_userq_fence_slab_fini();
>  	mmu_notifier_synchronize();
>  	amdgpu_xcp_drv_release();
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index e88848c14491a..5555f3ae08c60 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -41,21 +41,6 @@
>  #include "amdgpu_trace.h"
>  #include "amdgpu_reset.h"
>
> -static struct kmem_cache *amdgpu_fence_slab;
> -
> -int amdgpu_fence_slab_init(void)
> -{
> -	amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
> -	if (!amdgpu_fence_slab)
> -		return -ENOMEM;
> -	return 0;
> -}
> -
> -void amdgpu_fence_slab_fini(void)
> -{
> -	rcu_barrier();
> -	kmem_cache_destroy(amdgpu_fence_slab);
> -}
>  /*
>   * Cast helper
>   */
> @@ -132,8 +117,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
>
>  	if (!af) {
>  		/* create a separate hw fence */
> -		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
> -		if (am_fence == NULL)
> +		am_fence = kmalloc(sizeof(*am_fence), GFP_KERNEL);
> +		if (!am_fence)
>  			return -ENOMEM;
>  	} else {
>  		am_fence = af;
> @@ -806,7 +791,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
>  	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
>
>  	/* free fence_slab if it's separated fence*/
> -	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
> +	kfree(to_amdgpu_fence(f));
>  }
>
>  /**
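For reference, the pattern this boils down to is roughly the sketch below. This is not the amdgpu code itself; the example_fence names are illustrative stand-ins. The point is that a rarely allocated object gains little from its own kmem_cache, so both the allocation and the RCU-deferred free collapse to plain kmalloc()/kfree(), and the module-wide slab init/fini (including the rcu_barrier() on unload) goes away:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative stand-in for the separately allocated fence object. */
struct example_fence {
	struct dma_fence base;
};

/* Allocation: no dedicated slab, just kmalloc(). */
static struct example_fence *example_fence_alloc(void)
{
	return kmalloc(sizeof(struct example_fence), GFP_KERNEL);
}

/* RCU callback: the deferred free is now just kfree(). */
static void example_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	kfree(container_of(f, struct example_fence, base));
}

/* E.g. called from a dma_fence_ops.release callback. */
static void example_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, example_fence_free_rcu);
}

A dedicated slab mostly pays off for objects allocated at high rates; for the rare independent fence the extra bookkeeping is more code than it saves.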