No functional change for now, as we always use entity 0.

Signed-off-by: Pierre-Eric Pelloux-Prayer <[email protected]>
Acked-by: Felix Kuehling <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  | 45 +++++++++++++++---------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h  |  3 +-
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  2 +-
 3 files changed, 31 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d7f041e43eca..438e8a3b7a06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -439,7 +439,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        dst.offset = 0;
 
        r = amdgpu_ttm_copy_mem_to_mem(adev,
-                                      &adev->mman.move_entity,
+                                      &adev->mman.move_entities[0],
                                       &src, &dst,
                                       new_mem->size,
                                       amdgpu_bo_encrypted(abo),
@@ -452,7 +452,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
-               r = amdgpu_fill_buffer(adev, &adev->mman.move_entity,
+               r = amdgpu_fill_buffer(adev, &adev->mman.move_entities[0],
                                       abo, 0, NULL, &wipe_fence,
                                       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
                if (r) {
@@ -2224,12 +2224,13 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        u32 used_windows, reserved_windows;
-       u32 num_clear_entities;
+       u32 num_clear_entities, num_move_entities;
        uint64_t size;
        int r, i, j;
 
        num_clear_entities = adev->sdma.num_instances;
-       reserved_windows = 2 + num_clear_entities;
+       num_move_entities = MIN(adev->sdma.num_instances, TTM_NUM_MOVE_FENCES);
+       reserved_windows = 2 * num_move_entities + num_clear_entities;
 
        if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
            adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
@@ -2251,20 +2252,25 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
                                          1, NULL);
                if (r) {
-                       dev_err(adev->dev,
-                               "Failed setting up TTM BO eviction entity (%d)\n",
+                       dev_err(adev->dev, "Failed setting up entity (%d)\n",
                                r);
                        return 0;
                }
 
-               r = drm_sched_entity_init(&adev->mman.move_entity.base,
-                                         DRM_SCHED_PRIORITY_NORMAL, &sched,
-                                         1, NULL);
-               if (r) {
-                       dev_err(adev->dev,
-                               "Failed setting up TTM BO move entity (%d)\n",
-                               r);
-                       goto error_free_entity;
+               adev->mman.num_move_entities = num_move_entities;
+               for (i = 0; i < num_move_entities; i++) {
+                       r = drm_sched_entity_init(&adev->mman.move_entities[i].base,
+                                                 DRM_SCHED_PRIORITY_NORMAL, &sched,
+                                                 1, NULL);
+                       if (r) {
+                               dev_err(adev->dev,
+                                       "Failed setting up TTM BO move entities (%d)\n",
+                                       r);
+                               for (j = 0; j < i; j++)
+                                       drm_sched_entity_destroy(
+                                               &adev->mman.move_entities[j].base);
+                               goto error_free_entity;
+                       }
                }
 
                adev->mman.num_clear_entities = num_clear_entities;
@@ -2279,6 +2285,9 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                                                  DRM_SCHED_PRIORITY_NORMAL, &sched,
                                                  1, NULL);
                        if (r) {
+                               for (j = 0; j < num_move_entities; j++)
+                                       drm_sched_entity_destroy(
+                                               &adev->mman.move_entities[j].base);
                                for (j = 0; j < i; j++)
                                        drm_sched_entity_destroy(
                                                
&adev->mman.clear_entities[j].base);
@@ -2290,15 +2299,17 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                /* Statically assign GART windows to each entity. */
                used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.default_entity,
                                                             0, false, false);
-               used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.move_entity,
-                                                            used_windows, true, true);
+               for (i = 0; i < num_move_entities; i++)
+                       used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.move_entities[i],
+                                                                    used_windows, true, true);
                for (i = 0; i < num_clear_entities; i++)
                        used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.clear_entities[i],
                                                                     used_windows, false, true);
                WARN_ON(used_windows != reserved_windows);
        } else {
                drm_sched_entity_destroy(&adev->mman.default_entity.base);
-               drm_sched_entity_destroy(&adev->mman.move_entity.base);
+               for (i = 0; i < num_move_entities; i++)
+                       drm_sched_entity_destroy(&adev->mman.move_entities[i].base);
                for (i = 0; i < num_clear_entities; i++)
                        
drm_sched_entity_destroy(&adev->mman.clear_entities[i].base);
                /* Drop all the old fences since re-creating the scheduler entities
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 250ef54a5550..eabc5a1549e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -72,9 +72,10 @@ struct amdgpu_mman {
        struct mutex                            gtt_window_lock;
 
        struct amdgpu_ttm_buffer_entity default_entity; /* has no gart windows */
-       struct amdgpu_ttm_buffer_entity move_entity;
        struct amdgpu_ttm_buffer_entity *clear_entities;
        u32 num_clear_entities;
+       struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
+       u32 num_move_entities;
 
        struct amdgpu_vram_mgr vram_mgr;
        struct amdgpu_gtt_mgr gtt_mgr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 0cc1d2b35026..5dd65f05a1e0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -135,7 +135,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
        u64 size;
        int r;
 
-       entity = &adev->mman.move_entity;
+       entity = &adev->mman.move_entities[0];
 
        mutex_lock(&entity->lock);
 
-- 
2.43.0

Reply via email to