On 11/21/25 11:12, Pierre-Eric Pelloux-Prayer wrote:
> This way the caller can select the one it wants to use.
>
> Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
I'm wondering if it wouldn't make sense to put a pointer to adev into
each amdgpu_ttm_buffer_entity, so that callers don't need to pass adev
separately. But that is maybe something for another patch; a rough
sketch of what I mean is at the end of this mail, after the quoted diff.

For now:
Reviewed-by: Christian König <[email protected]>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c |  3 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  4 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       | 34 +++++++++----------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      | 16 +++++----
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c      |  3 +-
>  5 files changed, 32 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
> index 3636b757c974..a050167e76a4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
> @@ -37,7 +37,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
>
>          stime = ktime_get();
>          for (i = 0; i < n; i++) {
> -                r = amdgpu_copy_buffer(adev, saddr, daddr, size, NULL, &fence,
> +                r = amdgpu_copy_buffer(adev, &adev->mman.default_entity,
> +                                       saddr, daddr, size, NULL, &fence,
>                                         false, 0);
>                  if (r)
>                          goto exit_do_move;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 926a3f09a776..858eb9fa061b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -1322,8 +1322,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
>          if (r)
>                  goto out;
>
> -        r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
> -                               AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
> +        r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv,
> +                               &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
>          if (WARN_ON(r))
>                  goto out;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 3d850893b97f..1d3afad885da 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -359,7 +359,7 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
>                                                       write_compress_disable));
>                  }
>
> -                r = amdgpu_copy_buffer(adev, from, to, cur_size, resv,
> +                r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
>                                         &next, true, copy_flags);
>                  if (r)
>                          goto error;
> @@ -414,8 +414,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>              (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
>                  struct dma_fence *wipe_fence = NULL;
>
> -                r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
> -                                       false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
> +                r = amdgpu_fill_buffer(&adev->mman.move_entity,
> +                                       abo, 0, NULL, &wipe_fence,
> +                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
>                  if (r) {
>                          goto error;
>                  } else if (wipe_fence) {
> @@ -2258,7 +2259,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
>                                            DMA_RESV_USAGE_BOOKKEEP);
>  }
>
> -int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
> +int amdgpu_copy_buffer(struct amdgpu_device *adev,
> +                       struct amdgpu_ttm_buffer_entity *entity,
> +                       uint64_t src_offset,
>                         uint64_t dst_offset, uint32_t byte_count,
>                         struct dma_resv *resv,
>                         struct dma_fence **fence,
> @@ -2282,7 +2285,7 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
>          max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
>          num_loops = DIV_ROUND_UP(byte_count, max_bytes);
>          num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
> -        r = amdgpu_ttm_prepare_job(adev, &adev->mman.move_entity, num_dw,
> +        r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
>                                     resv, vm_needs_flush, &job,
>                                     AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
>          if (r)
> @@ -2411,22 +2414,18 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
>          return r;
>  }
>
> -int amdgpu_fill_buffer(struct amdgpu_bo *bo,
> -                       uint32_t src_data,
> -                       struct dma_resv *resv,
> -                       struct dma_fence **f,
> -                       bool delayed,
> -                       u64 k_job_id)
> +int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
> +                       struct amdgpu_bo *bo,
> +                       uint32_t src_data,
> +                       struct dma_resv *resv,
> +                       struct dma_fence **f,
> +                       u64 k_job_id)
>  {
>          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> -        struct amdgpu_ttm_buffer_entity *entity;
>          struct dma_fence *fence = NULL;
>          struct amdgpu_res_cursor dst;
>          int r;
>
> -        entity = delayed ? &adev->mman.clear_entity :
> -                           &adev->mman.move_entity;
> -
>          if (!adev->mman.buffer_funcs_enabled) {
>                  dev_err(adev->dev,
>                          "Trying to clear memory with ring turned off.\n");
> @@ -2443,13 +2442,14 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>                  /* Never fill more than 256MiB at once to avoid timeouts */
>                  cur_size = min(dst.size, 256ULL << 20);
>
> -                r = amdgpu_ttm_map_buffer(adev, &adev->mman.default_entity,
> +                r = amdgpu_ttm_map_buffer(adev, entity,
>                                            &bo->tbo, bo->tbo.resource, &dst,
>                                            1, false, &cur_size, &to);
>                  if (r)
>                          goto error;
>
> -                r = amdgpu_ttm_fill_mem(adev, entity, src_data, to, cur_size, resv,
> +                r = amdgpu_ttm_fill_mem(adev, entity,
> +                                        src_data, to, cur_size, resv,
>                                          &next, true, k_job_id);
>                  if (r)
>                          goto error;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index 41bbc25680a2..9288599c9c46 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -167,7 +167,9 @@ int amdgpu_ttm_init(struct amdgpu_device *adev);
>  void amdgpu_ttm_fini(struct amdgpu_device *adev);
>  void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
>                                          bool enable);
> -int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
> +int amdgpu_copy_buffer(struct amdgpu_device *adev,
> +                       struct amdgpu_ttm_buffer_entity *entity,
> +                       uint64_t src_offset,
>                         uint64_t dst_offset, uint32_t byte_count,
>                         struct dma_resv *resv,
>                         struct dma_fence **fence,
> @@ -175,12 +177,12 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
>  int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
>                              struct dma_resv *resv,
>                              struct dma_fence **fence);
> -int amdgpu_fill_buffer(struct amdgpu_bo *bo,
> -                       uint32_t src_data,
> -                       struct dma_resv *resv,
> -                       struct dma_fence **fence,
> -                       bool delayed,
> -                       u64 k_job_id);
> +int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
> +                       struct amdgpu_bo *bo,
> +                       uint32_t src_data,
> +                       struct dma_resv *resv,
> +                       struct dma_fence **f,
> +                       u64 k_job_id);
>
>  int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
>  void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index ade1d4068d29..9c76f1ba0e55 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -157,7 +157,8 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
>                          goto out_unlock;
>                  }
>
> -                r = amdgpu_copy_buffer(adev, gart_s, gart_d, size * PAGE_SIZE,
> +                r = amdgpu_copy_buffer(adev, entity,
> +                                       gart_s, gart_d, size * PAGE_SIZE,
>                                         NULL, &next, true, 0);
>                  if (r) {
>                          dev_err(adev->dev, "fail %d to copy memory\n", r);
