This is the only use case for this function.

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  8 +++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 25 ++++++++++------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    | 11 +++++-----
 3 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 410e9b68ff81..9dc262cac39f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -723,8 +723,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
        if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.resource->mem_type == TTM_PL_VRAM) {
-               r = amdgpu_fill_buffer(NULL, bo, 0, NULL,
-                                      NULL, AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
+               r = amdgpu_clear_buffer(NULL, bo, NULL,
+                                       NULL, AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
                if (unlikely(r))
                        goto fail_unreserve;
        }
@@ -1311,8 +1311,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
            adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
                goto out;
 
-       r = amdgpu_fill_buffer(NULL, abo, 0, NULL,
-                              NULL, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+       r = amdgpu_clear_buffer(NULL, abo, NULL,
+                               NULL, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
        if (WARN_ON(r))
                goto out;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1f553c56f31d..ac2857314d68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -431,9 +431,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
-               r = amdgpu_fill_buffer(entity,
-                                      abo, 0, &wipe_fence, fence,
-                                      AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
+               r = amdgpu_clear_buffer(entity,
+                                       abo, &wipe_fence, fence,
+                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
@@ -2413,23 +2413,21 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_fill_buffer - fill a buffer with a given value
+ * amdgpu_clear_buffer - fill a buffer with 0
  * @entity: optional entity to use. If NULL, the clearing entities will be
  *          used to load-balance the partial clears
  * @bo: the bo to fill
- * @src_data: the value to set
  * @f: optional out fence. If @entity is NULL, this must be NULL and the
  *     fences from each partial clear will be added to the &dma_resv.
  * @dependency: optional input dependency fence.
  * @k_job_id: trace id
  *
  */
-int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
-                      struct amdgpu_bo *bo,
-                      uint32_t src_data,
-                      struct dma_fence **f,
-                      struct dma_fence *dependency,
-                      u64 k_job_id)
+int amdgpu_clear_buffer(struct amdgpu_ttm_entity *entity,
+                       struct amdgpu_bo *bo,
+                       struct dma_fence **f,
+                       struct dma_fence *dependency,
+                       u64 k_job_id)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_rings[0];
@@ -2440,8 +2438,7 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
        uint64_t cur_size, to;
        int r, e, n_fences;
        /* The clear flag is only valid directly after allocation. */
-       bool consider_clear_flag =
-               src_data == 0 && k_job_id == AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER;
+       bool consider_clear_flag = k_job_id == AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER;
 
        /* The fences will be either added to the resv object or the last fence
         * will be returned to the caller. In the latter case, all fill jobs will
@@ -2520,7 +2517,7 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
 
 
                r = amdgpu_ttm_fill_mem(ring, &entity->base,
-                                       src_data, to, cur_size,
+                                       0, to, cur_size,
                                        resv,
                                        &fence, true, k_job_id);
                if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 97e73919cb0c..b685bf207e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -182,12 +182,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                       struct dma_resv *resv,
                       struct dma_fence **fence,
                       bool vm_needs_flush, uint32_t copy_flags);
-int amdgpu_fill_buffer(struct amdgpu_ttm_entity *entity,
-                      struct amdgpu_bo *bo,
-                      uint32_t src_data,
-                      struct dma_fence **f,
-                      struct dma_fence *dependency,
-                      u64 k_job_id);
+int amdgpu_clear_buffer(struct amdgpu_ttm_entity *entity,
+                       struct amdgpu_bo *bo,
+                       struct dma_fence **f,
+                       struct dma_fence *dependency,
+                       u64 k_job_id);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
-- 
2.43.0

Reply via email to