Only set up the scheduling entities if the rings are
valid.

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 83 ++++++++++++++++++++++++---------
 1 file changed, 61 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 95f4c4139fc6..d12eb863285b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           struct amdgpu_ctx *ctx)
 {
        unsigned num_entities = amdgput_ctx_total_num_entities();
-       unsigned i, j;
+       unsigned i, j, k;
        int r;
 
        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -123,46 +123,85 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
                struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
-               unsigned num_rings;
+               unsigned num_rings = 0;
 
                switch (i) {
                case AMDGPU_HW_IP_GFX:
-                       rings[0] = &adev->gfx.gfx_ring[0];
-                       num_rings = 1;
+                       for (j = 0; j < adev->gfx.num_gfx_rings; j++) {
+                               if (adev->gfx.gfx_ring[j].sched.ready)
+                               rings[num_rings++] = &adev->gfx.gfx_ring[j];
+                       }
                        break;
                case AMDGPU_HW_IP_COMPUTE:
-                       for (j = 0; j < adev->gfx.num_compute_rings; ++j)
-                               rings[j] = &adev->gfx.compute_ring[j];
-                       num_rings = adev->gfx.num_compute_rings;
+                       for (j = 0; j < adev->gfx.num_compute_rings; ++j) {
+                               if (adev->gfx.compute_ring[j].sched.ready)
+                               rings[num_rings++] = &adev->gfx.compute_ring[j];
+                       }
                        break;
                case AMDGPU_HW_IP_DMA:
-                       for (j = 0; j < adev->sdma.num_instances; ++j)
-                               rings[j] = &adev->sdma.instance[j].ring;
-                       num_rings = adev->sdma.num_instances;
+                       for (j = 0; j < adev->sdma.num_instances; ++j) {
+                               if (adev->sdma.instance[j].ring.sched.ready)
+                               rings[num_rings++] = &adev->sdma.instance[j].ring;
+                       }
                        break;
                case AMDGPU_HW_IP_UVD:
-                       rings[0] = &adev->uvd.inst[0].ring;
-                       num_rings = 1;
+                       for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+                               if (adev->uvd.harvest_config & (1 << j))
+                                       continue;
+
+                               if (adev->uvd.inst[j].ring.sched.ready)
+                                       rings[num_rings++] = &adev->uvd.inst[j].ring;
+                       }
                        break;
                case AMDGPU_HW_IP_VCE:
-                       rings[0] = &adev->vce.ring[0];
-                       num_rings = 1;
+                       for (j = 0; j < adev->vce.num_rings; j++) {
+                               if (adev->vce.ring[j].sched.ready)
+                                       rings[num_rings++] = &adev->vce.ring[j];
+                       }
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
-                       rings[0] = &adev->uvd.inst[0].ring_enc[0];
-                       num_rings = 1;
+                       for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+                               if (adev->uvd.harvest_config & (1 << j))
+                                       continue;
+                               for (k = 0; k < adev->uvd.num_enc_rings; k++) {
+                                       if (adev->uvd.inst[j].ring_enc[k].sched.ready)
+                                               rings[num_rings++] = &adev->uvd.inst[j].ring_enc[k];
+                               }
+                       }
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
-                       rings[0] = &adev->vcn.ring_dec;
-                       num_rings = 1;
+                       if (adev->vcn.ring_dec.sched.ready)
+                               rings[num_rings++] = &adev->vcn.ring_dec;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
-                       rings[0] = &adev->vcn.ring_enc[0];
-                       num_rings = 1;
+                       for (j = 0; j < adev->vcn.num_enc_rings; j++) {
+                               if (adev->vcn.ring_enc[j].sched.ready)
+                                       rings[num_rings++] = &adev->vcn.ring_enc[j];
+                       }
+                       break;
+               case AMDGPU_HW_IP_VCN_JPEG:
+                       if (adev->vcn.ring_jpeg.sched.ready)
+                               rings[num_rings++] = &adev->vcn.ring_jpeg;
                        break;
+               }
+
+               /* if there are no rings, then the IP doesn't exist or is disabled */
+               if (num_rings == 0)
+                       continue;
+
+               switch (i) {
+               case AMDGPU_HW_IP_GFX:
+               case AMDGPU_HW_IP_UVD:
+               case AMDGPU_HW_IP_VCE:
+               case AMDGPU_HW_IP_UVD_ENC:
+               case AMDGPU_HW_IP_VCN_DEC:
+               case AMDGPU_HW_IP_VCN_ENC:
                case AMDGPU_HW_IP_VCN_JPEG:
-                       rings[0] = &adev->vcn.ring_jpeg;
-                       num_rings = 1;
+                       /* limit rings to 1 for scheduling */
+                       if (num_rings > 1)
+                               num_rings = 1;
+                       break;
+               default:
                        break;
                }
 
-- 
2.13.6

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to