Instead of giving amdgpu the whole first pipe and leaving the rest to
kfd, give amdgpu the first 2 queues of each pipe.

Effectively, amdgpu and amdkfd still own the same number of queues. But
because those queues are now spread across multiple pipes, the hardware
can handle concurrent compute workloads better.

amdgpu goes from 1 pipe to 4 pipes, i.e. from 1 compute thread to 4.
amdkfd goes from 3 pipes to 4 pipes, i.e. from 3 compute threads to 4.
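
For illustration, here is a standalone sketch of the flat-index to
(mec, pipe, queue) mapping and the new ownership test. The MEC, pipe
and queue counts below are made-up example values; the real ones come
from adev->gfx.mec and vary per ASIC.

    #include <stdbool.h>
    #include <stdio.h>

    /* Example topology only; not real hardware counts. */
    #define NUM_MEC            2
    #define NUM_PIPE_PER_MEC   4
    #define NUM_QUEUE_PER_PIPE 8

    int main(void)
    {
            int i, queue, pipe, mec;

            for (i = 0; i < NUM_MEC * NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE; i++) {
                    /* same decomposition as gfx_v7_0_compute_queue_acquire() */
                    queue = i % NUM_QUEUE_PER_PIPE;
                    pipe = (i / NUM_QUEUE_PER_PIPE) % NUM_PIPE_PER_MEC;
                    mec = (i / NUM_QUEUE_PER_PIPE) / NUM_PIPE_PER_MEC;

                    /* new policy: amdgpu takes the first 2 queues of
                     * each pipe on the first MEC, amdkfd gets the rest */
                    bool amdgpu_owns = (mec == 0 && queue < 2);

                    printf("i=%2d -> mec %d pipe %d queue %d : %s\n",
                           i, mec, pipe, queue,
                           amdgpu_owns ? "amdgpu" : "amdkfd");
            }
            return 0;
    }

With the example counts above this marks 8 queues (2 per pipe, 4 pipes)
for amdgpu, the same total as the old first-pipe policy, but spread
across 4 pipes instead of 1.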

Reviewed-by: Edward O'Callaghan <funfunc...@folklore1984.net>
Acked-by: Christian König <christian.koe...@amd.com>
Signed-off-by: Andres Rodriguez <andre...@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 4 ++--
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 5a8ebae..c0cfcb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2833,21 +2833,21 @@ static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
                pipe = (i / adev->gfx.mec.num_queue_per_pipe)
                        % adev->gfx.mec.num_pipe_per_mec;
                mec = (i / adev->gfx.mec.num_queue_per_pipe)
                        / adev->gfx.mec.num_pipe_per_mec;
 
                /* we've run out of HW */
                if (mec > adev->gfx.mec.num_mec)
                        break;
 
-               /* policy: amdgpu owns all queues in the first pipe */
-               if (mec == 0 && pipe == 0)
+               /* policy: amdgpu owns the first two queues of the first MEC */
+               if (mec == 0 && queue < 2)
                        set_bit(i, adev->gfx.mec.queue_bitmap);
        }
 
        /* update the number of active compute rings */
        adev->gfx.num_compute_rings =
                bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
        /* If you hit this case and edited the policy, you probably just
         * need to increase AMDGPU_MAX_COMPUTE_RINGS */
        WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 70119c5..f0c1a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1453,21 +1453,21 @@ static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
                pipe = (i / adev->gfx.mec.num_queue_per_pipe)
                        % adev->gfx.mec.num_pipe_per_mec;
                mec = (i / adev->gfx.mec.num_queue_per_pipe)
                        / adev->gfx.mec.num_pipe_per_mec;
 
                /* we've run out of HW */
                if (mec > adev->gfx.mec.num_mec)
                        break;
 
-               /* policy: amdgpu owns all queues in the first pipe */
-               if (mec == 0 && pipe == 0)
+               /* policy: amdgpu owns the first two queues of the first MEC */
+               if (mec == 0 && queue < 2)
                        set_bit(i, adev->gfx.mec.queue_bitmap);
        }
 
        /* update the number of active compute rings */
        adev->gfx.num_compute_rings =
                bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
        /* If you hit this case and edited the policy, you probably just
         * need to increase AMDGPU_MAX_COMPUTE_RINGS */
        if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-- 
2.9.3
