On 11/4/25 09:35, Pierre-Eric Pelloux-Prayer wrote:
> amdgpu_ttm_clear_buffer() does the same thing as amdgpu_fill_buffer(src_data=0), so drop it.
> 
> The only caveat is that the amdgpu_res_cleared() return value is only valid
> right after allocation.
> 
> Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  9 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 86 ++++------------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |  3 -
>  3 files changed, 18 insertions(+), 80 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 4a69324bb730..410e9b68ff81 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -723,15 +723,10 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
>  
>       if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
>           bo->tbo.resource->mem_type == TTM_PL_VRAM) {
> -             struct dma_fence *fence;
> -
> -             r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
> +             r = amdgpu_fill_buffer(NULL, bo, 0, NULL,
> +                                    NULL, AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
>               if (unlikely(r))
>                       goto fail_unreserve;
> -
> -             dma_resv_add_fence(bo->tbo.base.resv, fence,
> -                                DMA_RESV_USAGE_KERNEL);
> -             dma_fence_put(fence);
>       }
>       if (!bp->resv)
>               amdgpu_bo_unreserve(bo);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index d88bdb2ac083..1f553c56f31d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -2412,75 +2412,6 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring,
>       return 0;
>  }
>  
> -/**
> - * amdgpu_ttm_clear_buffer - clear memory buffers
> - * @bo: amdgpu buffer object
> - * @resv: reservation object
> - * @fence: dma_fence associated with the operation
> - *
> - * Clear the memory buffer resource.
> - *
> - * Returns:
> - * 0 for success or a negative error code on failure.
> - */
> -int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
> -                         struct dma_resv *resv,
> -                         struct dma_fence **fence)
> -{
> -     struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> -     struct amdgpu_ring *ring = adev->mman.buffer_funcs_rings[0];
> -     struct amdgpu_ttm_entity *entity;
> -     struct amdgpu_res_cursor cursor;
> -     u64 addr;
> -     int r = 0;
> -
> -     if (!adev->mman.buffer_funcs_enabled)
> -             return -EINVAL;
> -
> -     if (!fence)
> -             return -EINVAL;
> -     entity = &adev->mman.clear_entities[0];
> -     *fence = dma_fence_get_stub();
> -
> -     amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
> -
> -     mutex_lock(&entity->gart_window_lock);
> -     while (cursor.remaining) {
> -             struct dma_fence *next = NULL;
> -             u64 size;
> -
> -             if (amdgpu_res_cleared(&cursor)) {
> -                     amdgpu_res_next(&cursor, cursor.size);
> -                     continue;
> -             }
> -
> -             /* Never clear more than 256MiB at once to avoid timeouts */
> -             size = min(cursor.size, 256ULL << 20);
> -
> -             r = amdgpu_ttm_map_buffer(&entity->base,
> -                                       &bo->tbo, bo->tbo.resource, &cursor,
> -                                       entity->gart_window_id1, ring, false, &size, &addr,
> -                                       NULL, NULL);
> -             if (r)
> -                     goto err;
> -
> -             r = amdgpu_ttm_fill_mem(ring, &entity->base, 0, addr, size, resv,
> -                                     &next, true,
> -                                     AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
> -             if (r)
> -                     goto err;
> -
> -             dma_fence_put(*fence);
> -             *fence = next;
> -
> -             amdgpu_res_next(&cursor, size);
> -     }
> -err:
> -     mutex_unlock(&entity->gart_window_lock);
> -
> -     return r;
> -}
> -
>  /**
>   * amdgpu_fill_buffer - fill a buffer with a given value
>   * @entity: optional entity to use. If NULL, the clearing entities will be
> @@ -2508,6 +2439,9 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
>       struct amdgpu_res_cursor dst;
>       uint64_t cur_size, to;
>       int r, e, n_fences;

> +     /* The clear flag is only valid directly after allocation. */
> +     bool consider_clear_flag =
> +             src_data == 0 && k_job_id == AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER;

Absolutely clear NAK to that.

Christian.

>  
>       /* The fences will be either added to the resv object or the last fence
>        * will be returned to the caller. In the latter case, all fill jobs will
> @@ -2531,6 +2465,11 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
>               while (dst.remaining) {
>                       cur_size = min(dst.size, 256ULL << 20);
>  
> +                     if (consider_clear_flag && amdgpu_res_cleared(&dst)) {
> +                             amdgpu_res_next(&dst, dst.size);
> +                             continue;
> +                     }
> +
>                       n_fences += 1;
>                       amdgpu_res_next(&dst, cur_size);
>               }
> @@ -2550,6 +2489,11 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
>       amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
>  
>       while (dst.remaining) {
> +             if (consider_clear_flag && amdgpu_res_cleared(&dst)) {
> +                     amdgpu_res_next(&dst, dst.size);
> +                     continue;
> +             }
> +
>               /* Never fill more than 256MiB at once to avoid timeouts */
>               cur_size = min(dst.size, 256ULL << 20);
>  
> @@ -2574,8 +2518,10 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
>                       goto error;
>               }
>  
> +
>               r = amdgpu_ttm_fill_mem(ring, &entity->base,
> -                                     src_data, to, cur_size, resv,
> +                                     src_data, to, cur_size,
> +                                     resv,
>                                       &fence, true, k_job_id);
>               if (r) {
>                       mutex_unlock(&entity->gart_window_lock);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index c059a3d52b57..97e73919cb0c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -182,9 +182,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
>                      struct dma_resv *resv,
>                      struct dma_fence **fence,
>                      bool vm_needs_flush, uint32_t copy_flags);
> -int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
> -                         struct dma_resv *resv,
> -                         struct dma_fence **fence);
>  int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
>                      struct amdgpu_bo *bo,
>                      uint32_t src_data,
