Re: [PATCH 2/2] drm/amdgpu: rework ctx entity creation

2018-08-16 Thread zhoucm1



On 2018年08月16日 16:11, Christian König wrote:

Am 16.08.2018 um 04:07 schrieb zhoucm1:



On 2018年08月15日 18:59, Christian König wrote:

Use a fixed number of entities for each hardware IP.

The number of compute entities is reduced to four, SDMA keeps it two
entities and all other engines just expose one entity.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 291 


  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h |  30 ++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  36 ++--
  3 files changed, 190 insertions(+), 167 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index 0a6cd1202ee5..987b7f256463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -27,8 +27,29 @@
  #include "amdgpu.h"
  #include "amdgpu_sched.h"
  -#define to_amdgpu_ctx_ring(e)    \
-    container_of((e), struct amdgpu_ctx_ring, entity)
+#define to_amdgpu_ctx_entity(e)    \
+    container_of((e), struct amdgpu_ctx_entity, entity)
+
+const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+    [AMDGPU_HW_IP_GFX]    =    1,
+    [AMDGPU_HW_IP_COMPUTE]    =    4,

Could you explain why it is reduced to four? Otherwise it looks good to me.


Currently we change the priority of the compute queues on the fly, but 
the idea is that we will have fixed high priority and low priority 
compute queues in the future.
Yeah, I see that, feel free to add my RB: Reviewed-by: Chunming Zhou 



Regards,
David Zhou


We could as well say we have only 2 or 3 if the closed stack is fine 
with that.


Regards,
Christian.



Thanks,
David Zhou

+    [AMDGPU_HW_IP_DMA]    =    2,
+    [AMDGPU_HW_IP_UVD]    =    1,
+    [AMDGPU_HW_IP_VCE]    =    1,
+    [AMDGPU_HW_IP_UVD_ENC]    =    1,
+    [AMDGPU_HW_IP_VCN_DEC]    =    1,
+    [AMDGPU_HW_IP_VCN_ENC]    =    1,
+};
+
+static int amdgput_ctx_total_num_entities(void)
+{
+    unsigned i, num_entities = 0;
+
+    for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+    num_entities += amdgpu_ctx_num_entities[i];
+
+    return num_entities;
+}
    static int amdgpu_ctx_priority_permit(struct drm_file *filp,
    enum drm_sched_priority priority)
@@ -51,9 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device 
*adev,

 struct drm_file *filp,
 struct amdgpu_ctx *ctx)
  {
-    struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
-    struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
-    unsigned i, j, num_sdma_rqs, num_comp_rqs;
+    unsigned num_entities = amdgput_ctx_total_num_entities();
+    unsigned i, j;
  int r;
    if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -65,19 +85,33 @@ static int amdgpu_ctx_init(struct amdgpu_device 
*adev,

    memset(ctx, 0, sizeof(*ctx));
  ctx->adev = adev;
-    kref_init(&ctx->refcount);
-    spin_lock_init(&ctx->ring_lock);
-    ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
+
+    ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
    sizeof(struct dma_fence*), GFP_KERNEL);
  if (!ctx->fences)
  return -ENOMEM;
  -    mutex_init(&ctx->lock);
+    ctx->entities[0] = kcalloc(num_entities,
+   sizeof(struct amdgpu_ctx_entity),
+   GFP_KERNEL);
+    if (!ctx->entities[0]) {
+    r = -ENOMEM;
+    goto error_free_fences;
+    }
  -    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-    ctx->rings[i].sequence = 1;
-    ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
+    for (i = 0; i < num_entities; ++i) {
+    struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+    entity->sequence = 1;
+    entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
  }
+    for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
+    ctx->entities[i] = ctx->entities[i - 1] +
+    amdgpu_ctx_num_entities[i - 1];
+
+    kref_init(&ctx->refcount);
+    spin_lock_init(&ctx->ring_lock);
+    mutex_init(>lock);
    ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
  ctx->reset_counter_query = ctx->reset_counter;
@@ -85,50 +119,70 @@ static int amdgpu_ctx_init(struct amdgpu_device 
*adev,

  ctx->init_priority = priority;
  ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
  -    num_sdma_rqs = 0;
-    num_comp_rqs = 0;
-    for (i = 0; i < adev->num_rings; i++) {
-    struct amdgpu_ring *ring = adev->rings[i];
-    struct drm_sched_rq *rq;
-
-    rq = &ring->sched.sched_rq[priority];
-    if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
-    sdma_rqs[num_sdma_rqs++] = rq;
-    else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-    comp_rqs[num_comp_rqs++] = rq;
-    }
-
-    /* create context entity for each ring */
-    for (i = 0; i < adev->num_rings; i++) {
-    struct amdgpu_ring *ring = adev->rings[i];
+    for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+    struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+    struct drm_sched_rq 

Re: [PATCH 2/2] drm/amdgpu: rework ctx entity creation

2018-08-16 Thread Christian König

Am 16.08.2018 um 04:07 schrieb zhoucm1:



On 2018年08月15日 18:59, Christian König wrote:

Use a fixed number of entities for each hardware IP.

The number of compute entities is reduced to four, SDMA keeps it two
entities and all other engines just expose one entity.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 291 


  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h |  30 ++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  36 ++--
  3 files changed, 190 insertions(+), 167 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index 0a6cd1202ee5..987b7f256463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -27,8 +27,29 @@
  #include "amdgpu.h"
  #include "amdgpu_sched.h"
  -#define to_amdgpu_ctx_ring(e)    \
-    container_of((e), struct amdgpu_ctx_ring, entity)
+#define to_amdgpu_ctx_entity(e)    \
+    container_of((e), struct amdgpu_ctx_entity, entity)
+
+const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+    [AMDGPU_HW_IP_GFX]    =    1,
+    [AMDGPU_HW_IP_COMPUTE]    =    4,

Could you explain why it is reduced to four? Otherwise it looks good to me.


Currently we change the priority of the compute queues on the fly, but 
the idea is that we will have fixed high priority and low priority 
compute queues in the future.


We could as well say we have only 2 or 3 if the closed stack is fine 
with that.


Regards,
Christian.



Thanks,
David Zhou

+    [AMDGPU_HW_IP_DMA]    =    2,
+    [AMDGPU_HW_IP_UVD]    =    1,
+    [AMDGPU_HW_IP_VCE]    =    1,
+    [AMDGPU_HW_IP_UVD_ENC]    =    1,
+    [AMDGPU_HW_IP_VCN_DEC]    =    1,
+    [AMDGPU_HW_IP_VCN_ENC]    =    1,
+};
+
+static int amdgput_ctx_total_num_entities(void)
+{
+    unsigned i, num_entities = 0;
+
+    for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+    num_entities += amdgpu_ctx_num_entities[i];
+
+    return num_entities;
+}
    static int amdgpu_ctx_priority_permit(struct drm_file *filp,
    enum drm_sched_priority priority)
@@ -51,9 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 struct drm_file *filp,
 struct amdgpu_ctx *ctx)
  {
-    struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
-    struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
-    unsigned i, j, num_sdma_rqs, num_comp_rqs;
+    unsigned num_entities = amdgput_ctx_total_num_entities();
+    unsigned i, j;
  int r;
    if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -65,19 +85,33 @@ static int amdgpu_ctx_init(struct amdgpu_device 
*adev,

    memset(ctx, 0, sizeof(*ctx));
  ctx->adev = adev;
-    kref_init(&ctx->refcount);
-    spin_lock_init(&ctx->ring_lock);
-    ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
+
+    ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
    sizeof(struct dma_fence*), GFP_KERNEL);
  if (!ctx->fences)
  return -ENOMEM;
  -    mutex_init(&ctx->lock);
+    ctx->entities[0] = kcalloc(num_entities,
+   sizeof(struct amdgpu_ctx_entity),
+   GFP_KERNEL);
+    if (!ctx->entities[0]) {
+    r = -ENOMEM;
+    goto error_free_fences;
+    }
  -    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-    ctx->rings[i].sequence = 1;
-    ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
+    for (i = 0; i < num_entities; ++i) {
+    struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+    entity->sequence = 1;
+    entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
  }
+    for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
+    ctx->entities[i] = ctx->entities[i - 1] +
+    amdgpu_ctx_num_entities[i - 1];
+
+    kref_init(&ctx->refcount);
+    spin_lock_init(&ctx->ring_lock);
+    mutex_init(>lock);
    ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
  ctx->reset_counter_query = ctx->reset_counter;
@@ -85,50 +119,70 @@ static int amdgpu_ctx_init(struct amdgpu_device 
*adev,

  ctx->init_priority = priority;
  ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
  -    num_sdma_rqs = 0;
-    num_comp_rqs = 0;
-    for (i = 0; i < adev->num_rings; i++) {
-    struct amdgpu_ring *ring = adev->rings[i];
-    struct drm_sched_rq *rq;
-
-    rq = &ring->sched.sched_rq[priority];
-    if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
-    sdma_rqs[num_sdma_rqs++] = rq;
-    else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-    comp_rqs[num_comp_rqs++] = rq;
-    }
-
-    /* create context entity for each ring */
-    for (i = 0; i < adev->num_rings; i++) {
-    struct amdgpu_ring *ring = adev->rings[i];
+    for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+    struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+    struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+    unsigned num_rings;
+
+    switch (i) {
+    case AMDGPU_HW_IP_GFX:
+    rings[0] = 

Re: [PATCH 2/2] drm/amdgpu: rework ctx entity creation

2018-08-15 Thread zhoucm1



On 2018年08月15日 18:59, Christian König wrote:

Use a fixed number of entities for each hardware IP.

The number of compute entities is reduced to four, SDMA keeps it two
entities and all other engines just expose one entity.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 291 
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h |  30 ++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  36 ++--
  3 files changed, 190 insertions(+), 167 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0a6cd1202ee5..987b7f256463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -27,8 +27,29 @@
  #include "amdgpu.h"
  #include "amdgpu_sched.h"
  
-#define to_amdgpu_ctx_ring(e)	\

-   container_of((e), struct amdgpu_ctx_ring, entity)
+#define to_amdgpu_ctx_entity(e)\
+   container_of((e), struct amdgpu_ctx_entity, entity)
+
+const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+   [AMDGPU_HW_IP_GFX]  =   1,
+   [AMDGPU_HW_IP_COMPUTE]  =   4,

Could you explain why it is reduced to four? Otherwise it looks good to me.

Thanks,
David Zhou

+   [AMDGPU_HW_IP_DMA]  =   2,
+   [AMDGPU_HW_IP_UVD]  =   1,
+   [AMDGPU_HW_IP_VCE]  =   1,
+   [AMDGPU_HW_IP_UVD_ENC]  =   1,
+   [AMDGPU_HW_IP_VCN_DEC]  =   1,
+   [AMDGPU_HW_IP_VCN_ENC]  =   1,
+};
+
+static int amdgput_ctx_total_num_entities(void)
+{
+   unsigned i, num_entities = 0;
+
+   for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+   num_entities += amdgpu_ctx_num_entities[i];
+
+   return num_entities;
+}
  
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,

  enum drm_sched_priority priority)
@@ -51,9 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
   struct drm_file *filp,
   struct amdgpu_ctx *ctx)
  {
-   struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
-   struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
-   unsigned i, j, num_sdma_rqs, num_comp_rqs;
+   unsigned num_entities = amdgput_ctx_total_num_entities();
+   unsigned i, j;
int r;
  
  	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)

@@ -65,19 +85,33 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
  
  	memset(ctx, 0, sizeof(*ctx));

ctx->adev = adev;
-   kref_init(&ctx->refcount);
-   spin_lock_init(&ctx->ring_lock);
-   ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
+
+   ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
  sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
  
-	mutex_init(&ctx->lock);

+   ctx->entities[0] = kcalloc(num_entities,
+  sizeof(struct amdgpu_ctx_entity),
+  GFP_KERNEL);
+   if (!ctx->entities[0]) {
+   r = -ENOMEM;
+   goto error_free_fences;
+   }
  
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {

-   ctx->rings[i].sequence = 1;
-   ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
+   for (i = 0; i < num_entities; ++i) {
+   struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+   entity->sequence = 1;
+   entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+   for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
+   ctx->entities[i] = ctx->entities[i - 1] +
+   amdgpu_ctx_num_entities[i - 1];
+
+   kref_init(&ctx->refcount);
+   spin_lock_init(&ctx->ring_lock);
+   mutex_init(>lock);
  
  	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

ctx->reset_counter_query = ctx->reset_counter;
@@ -85,50 +119,70 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
  
-	num_sdma_rqs = 0;

-   num_comp_rqs = 0;
-   for (i = 0; i < adev->num_rings; i++) {
-   struct amdgpu_ring *ring = adev->rings[i];
-   struct drm_sched_rq *rq;
-
-   rq = &ring->sched.sched_rq[priority];
-   if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
-   sdma_rqs[num_sdma_rqs++] = rq;
-   else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-   comp_rqs[num_comp_rqs++] = rq;
-   }
-
-   /* create context entity for each ring */
-   for (i = 0; i < adev->num_rings; i++) {
-   struct amdgpu_ring *ring = adev->rings[i];
+   for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+   struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+   struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+   unsigned num_rings;
+
+   switch (i) {
+ 

[PATCH 2/2] drm/amdgpu: rework ctx entity creation

2018-08-15 Thread Christian König
Use a fixed number of entities for each hardware IP.

The number of compute entities is reduced to four, SDMA keeps it two
entities and all other engines just expose one entity.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 291 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h |  30 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  36 ++--
 3 files changed, 190 insertions(+), 167 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0a6cd1202ee5..987b7f256463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -27,8 +27,29 @@
 #include "amdgpu.h"
 #include "amdgpu_sched.h"
 
-#define to_amdgpu_ctx_ring(e)  \
-   container_of((e), struct amdgpu_ctx_ring, entity)
+#define to_amdgpu_ctx_entity(e)\
+   container_of((e), struct amdgpu_ctx_entity, entity)
+
+const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+   [AMDGPU_HW_IP_GFX]  =   1,
+   [AMDGPU_HW_IP_COMPUTE]  =   4,
+   [AMDGPU_HW_IP_DMA]  =   2,
+   [AMDGPU_HW_IP_UVD]  =   1,
+   [AMDGPU_HW_IP_VCE]  =   1,
+   [AMDGPU_HW_IP_UVD_ENC]  =   1,
+   [AMDGPU_HW_IP_VCN_DEC]  =   1,
+   [AMDGPU_HW_IP_VCN_ENC]  =   1,
+};
+
+static int amdgput_ctx_total_num_entities(void)
+{
+   unsigned i, num_entities = 0;
+
+   for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+   num_entities += amdgpu_ctx_num_entities[i];
+
+   return num_entities;
+}
 
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
  enum drm_sched_priority priority)
@@ -51,9 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
   struct drm_file *filp,
   struct amdgpu_ctx *ctx)
 {
-   struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
-   struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS];
-   unsigned i, j, num_sdma_rqs, num_comp_rqs;
+   unsigned num_entities = amdgput_ctx_total_num_entities();
+   unsigned i, j;
int r;
 
if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -65,19 +85,33 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
-   kref_init(&ctx->refcount);
-   spin_lock_init(&ctx->ring_lock);
-   ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
+
+   ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
  sizeof(struct dma_fence*), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
 
-   mutex_init(&ctx->lock);
+   ctx->entities[0] = kcalloc(num_entities,
+  sizeof(struct amdgpu_ctx_entity),
+  GFP_KERNEL);
+   if (!ctx->entities[0]) {
+   r = -ENOMEM;
+   goto error_free_fences;
+   }
 
-   for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-   ctx->rings[i].sequence = 1;
-   ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
+   for (i = 0; i < num_entities; ++i) {
+   struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+   entity->sequence = 1;
+   entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+   for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
+   ctx->entities[i] = ctx->entities[i - 1] +
+   amdgpu_ctx_num_entities[i - 1];
+
+   kref_init(&ctx->refcount);
+   spin_lock_init(&ctx->ring_lock);
+   mutex_init(>lock);
 
	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@@ -85,50 +119,70 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->init_priority = priority;
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
-   num_sdma_rqs = 0;
-   num_comp_rqs = 0;
-   for (i = 0; i < adev->num_rings; i++) {
-   struct amdgpu_ring *ring = adev->rings[i];
-   struct drm_sched_rq *rq;
-
-   rq = &ring->sched.sched_rq[priority];
-   if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
-   sdma_rqs[num_sdma_rqs++] = rq;
-   else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
-   comp_rqs[num_comp_rqs++] = rq;
-   }
-
-   /* create context entity for each ring */
-   for (i = 0; i < adev->num_rings; i++) {
-   struct amdgpu_ring *ring = adev->rings[i];
+   for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+   struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+   struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+   unsigned num_rings;
+
+   switch (i) {
+   case AMDGPU_HW_IP_GFX:
+   rings[0] = &adev->gfx.gfx_ring[0];
+   num_rings = 1;