Re: [PATCH v2 1/4] drm/amdgpu: set compute queue priority at mqd_init

2020-02-28 Thread Alex Deucher

Re: [PATCH v2 1/4] drm/amdgpu: set compute queue priority at mqd_init

2020-02-28 Thread Nirmoy

Thanks Christian, I will send an updated one soon.

Re: [PATCH v2 1/4] drm/amdgpu: set compute queue priority at mqd_init

2020-02-28 Thread Christian König

[PATCH v2 1/4] drm/amdgpu: set compute queue priority at mqd_init

2020-02-28 Thread Nirmoy Das
We were changing the compute ring priority while the rings were in use,
before every job submission, which is not recommended. This patch
instead sets the compute queue priority once, at MQD initialization, for
gfx8, gfx9 and gfx10.

Policy: make queue 0 of each pipe a high-priority compute queue

High- and normal-priority compute sched lists are generated from the sets of
high- and normal-priority compute queues. At context creation, a compute
entity gets its sched list from the high- or normal-priority list depending
on ctx->priority.
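
For illustration, the policy boils down to a single check. The amdgpu_gfx.c
hunk that actually implements it is truncated from this archive, so the
helper name and signature below are assumptions, not the patch's code:

/* Sketch only (assumed name/signature; the real helper lives in the
 * truncated amdgpu_gfx.c hunk): queue 0 of every compute pipe is the
 * high-priority queue, every other queue stays normal priority. */
static bool is_high_priority_compute_queue(int queue)
{
	return queue == 0;
}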

Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   |  4 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  | 40 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  |  8 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h  | 13 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  6 
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h |  1 +
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c   | 19 +++
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c    | 23 --
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    | 20 
 9 files changed, 113 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f397ff97b4e4..8304d0c87899 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1205,7 +1205,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
enum drm_sched_priority priority;
-   struct amdgpu_ring *ring;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1258,9 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);

-   ring = to_amdgpu_ring(entity->rq->sched);
-   amdgpu_ring_priority_get(ring, priority);
-
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..b21771b37300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -85,8 +85,8 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
num_scheds = 1;
break;
case AMDGPU_HW_IP_COMPUTE:
-   scheds = adev->gfx.compute_sched;
-   num_scheds = adev->gfx.num_compute_sched;
+   scheds = adev->gfx.compute_prio_sched[priority];
+   num_scheds = adev->gfx.num_compute_sched[priority];
break;
case AMDGPU_HW_IP_DMA:
scheds = adev->sdma.sdma_sched;
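
The amdgpu_gfx.h hunk declaring the new members is not visible in this
archive; a sketch of their assumed shape, which is what lets
compute_prio_sched[priority] hand back a whole list of scheduler pointers:

/* Assumed declarations in struct amdgpu_gfx (hedged -- the amdgpu_gfx.h
 * hunk is truncated here): one slice into compute_sched[] plus a count,
 * per drm_sched_priority level. */
struct drm_gpu_scheduler **compute_prio_sched[DRM_SCHED_PRIORITY_MAX];
struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
uint32_t num_compute_sched[DRM_SCHED_PRIORITY_MAX];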
@@ -628,20 +628,46 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
 }

+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+   int num_compute_sched_normal = 0;
+   int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+   int i;
+
+
+   for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+   if (adev->gfx.compute_ring[i].high_priority)
+   adev->gfx.compute_sched[num_compute_sched_normal++] =
+   &adev->gfx.compute_ring[i].sched;
+   else
+   adev->gfx.compute_sched[num_compute_sched_high--] =
+   &adev->gfx.compute_ring[i].sched;
+   }
+
+   for (i = DRM_SCHED_PRIORITY_MIN; i <= DRM_SCHED_PRIORITY_NORMAL; i++) {
+   adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+   adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+   }
+
+   for (i = DRM_SCHED_PRIORITY_NORMAL + 1; i < DRM_SCHED_PRIORITY_MAX; i++) {
+   adev->gfx.compute_prio_sched[i] =
+   &adev->gfx.compute_sched[num_compute_sched_high - 1];
+   adev->gfx.num_compute_sched[i] =
+   adev->gfx.num_compute_rings - num_compute_sched_normal;
+   }
+}
+
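
A distilled, standalone model of the two-ended fill above: rings flagged
high_priority pack from index 0 upward while the rest fill from the end of
the array downward, so each group ends up as one contiguous slice of
compute_sched. This is illustrative C, not driver code:

#include <stdio.h>

#define MAX_RINGS 8

int main(void)
{
	/* Say queue 0 of two pipes maps to rings 0 and 4 (illustrative). */
	int high_priority[MAX_RINGS] = { 1, 0, 0, 0, 1, 0, 0, 0 };
	int slot[MAX_RINGS];
	int lo = 0, hi = MAX_RINGS - 1;

	for (int i = 0; i < MAX_RINGS; i++) {
		if (high_priority[i])
			slot[i] = lo++;		/* front segment */
		else
			slot[i] = hi--;		/* back segment, reversed */
	}

	/* Rings 0 and 4 land in slots 0 and 1; rings 1,2,3,5,6,7 fill 7..2. */
	for (int i = 0; i < MAX_RINGS; i++)
		printf("ring %d -> sched slot %d\n", i, slot[i]);
	return 0;
}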
 void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
 {
int i, j;

+   amdgpu_ctx_init_compute_sched(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
adev->gfx.num_gfx_sched++;
}

-   for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-   adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
-   adev->gfx.num_compute_sched++;
-   }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
adev->sdma.num_sdma_sched++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c