Thanks to "drm/ttm: rework pipelined eviction fence handling", ttm
can deal correctly with moves and evictions being executed from
different contexts.

Create several entities and use them in a round-robin fashion.

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
Reviewed-by: Christian König <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 ++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |  1 +
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8d70bea66dd0..575a4d4a1747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -427,6 +427,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+       struct amdgpu_ttm_buffer_entity *entity;
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
@@ -438,8 +439,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        src.offset = 0;
        dst.offset = 0;
 
+       int e = atomic_inc_return(&adev->mman.next_move_entity) %
+                                 adev->mman.num_move_entities;
+       entity = &adev->mman.move_entities[e];
+
        r = amdgpu_ttm_copy_mem_to_mem(adev,
-                                      &adev->mman.move_entities[0],
+                                      entity,
                                       &src, &dst,
                                       new_mem->size,
                                       amdgpu_bo_encrypted(abo),
@@ -452,7 +457,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
-               r = amdgpu_fill_buffer(adev, &adev->mman.move_entities[0],
+               r = amdgpu_fill_buffer(adev, entity,
                                       abo, 0, NULL, &wipe_fence,
                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
@@ -2258,6 +2263,7 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                }
 
                adev->mman.num_move_entities = num_move_entities;
+               atomic_set(&adev->mman.next_move_entity, 0);
                for (i = 0; i < num_move_entities; i++) {
                        r = drm_sched_entity_init(&adev->mman.move_entities[i].base,
                                                  DRM_SCHED_PRIORITY_NORMAL, &sched,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 887531126d9d..0785a2c594f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -76,6 +76,7 @@ struct amdgpu_mman {
        atomic_t next_clear_entity;
        u32 num_clear_entities;
        struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
+       atomic_t next_move_entity;
        u32 num_move_entities;
 
        struct amdgpu_vram_mgr vram_mgr;
-- 
2.43.0

Reply via email to