[PATCH 1/4] drm/scheduler: rework entity creation

From: Nirmoy Das
Date: 2019-12-11
Currently each entity keeps a copy of the run_queue list and modifies
it in drm_sched_entity_set_priority(). Entities shouldn't modify the
run_queue list. Use a drm_gpu_scheduler list instead of a drm_sched_rq
list in the drm_sched_entity struct. This way we can select a run queue
for a drm scheduler based on the entity/ctx's priority.

Signed-off-by: Nirmoy Das 
Reviewed-by: Christian König 
---
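A note on the call-site change (below the ---, so not part of the
commit message): for drivers, the conversion boils down to passing the
priority and the scheduler itself instead of a pre-resolved run queue.
A minimal before/after sketch of the two signatures; "ring" and
"entity" stand in for whatever the driver already has at hand:

	int r;

	/* Before: the caller resolved the run queue itself. */
	struct drm_sched_rq *rq =
		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);

	/* After: hand over the priority plus a list of schedulers;
	 * the entity resolves the matching run queue internally. */
	struct drm_gpu_scheduler *sched = &ring->sched;
	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);

With the entity storing (sched_list, priority) instead of rq pointers,
drm_sched_entity_set_priority() no longer needs to rewrite a copied rq
list; it only has to update entity->priority, and the run queue can be
looked up as &sched->sched_rq[entity->priority] when the next job is
pushed (a sketch of the idea, not the exact sched_entity.c hunk).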
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  |  8 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   | 14 +++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c    |  7 ++-
 drivers/gpu/drm/lima/lima_sched.c        |  5 +-
 drivers/gpu/drm/panfrost/panfrost_job.c  |  8 ++-
 drivers/gpu/drm/scheduler/sched_entity.c | 74 ++--
 drivers/gpu/drm/v3d/v3d_drv.c            |  8 ++-
 include/drm/gpu_scheduler.h              |  8 ++-
 11 files changed, 78 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a0d3d7b756eb..1d6850af9908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -122,7 +122,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,

for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-   struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
unsigned num_rings = 0;
unsigned num_rqs = 0;

@@ -181,12 +181,13 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (!rings[j]->adev)
continue;

-   rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+   sched_list[num_rqs++] = &rings[j]->sched;
}

for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rqs, &ctx->guilty);
+ priority, sched_list,
+ num_rqs, &ctx->guilty);
if (r)
goto error_cleanup_entities;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81f6764f1ba6..2ff63d0414c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1954,11 +1954,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)

if (enable) {
struct amdgpu_ring *ring;
-   struct drm_sched_rq *rq;
+   struct drm_gpu_scheduler *sched;

ring = adev->mman.buffer_funcs_ring;
-   rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-   r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+   sched = &ring->sched;
+   r = drm_sched_entity_init(&adev->mman.entity,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
  r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d587ffe2af8e..a92f3b18e657 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
 {
struct amdgpu_ring *ring;
-   struct drm_sched_rq *rq;
+   struct drm_gpu_scheduler *sched;
int r;

ring = &adev->uvd.inst[0].ring;
-   rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-   r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+   sched = &ring->sched;
+   r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 46b590af2fd2..ceb0dbf685f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 {
struct amdgpu_ring *ring;
-   struct drm_sched_rq *rq;
+   struct drm_gpu_scheduler *sched;
int r;

ring = &adev->vce.ring[0];
-   rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-   r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+   sched = &ring->sched;
+   r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
