[RFC PATCH] drm/scheduler: rework entity creation

2019-12-05 Thread Nirmoy Das
The entity currently keeps a copy of the run_queue list and modifies it
in drm_sched_entity_set_priority(). Entities shouldn't modify the
run_queue list. Use a drm_gpu_scheduler list instead of a drm_sched_rq
list in the drm_sched_entity struct. This way we can select a run queue
based on the entity/ctx priority for a drm scheduler.

Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  |  7 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  |  7 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  |  7 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  |  7 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   | 14 +++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c    |  8 +--
 drivers/gpu/drm/lima/lima_sched.c        |  5 +-
 drivers/gpu/drm/panfrost/panfrost_job.c  |  7 +--
 drivers/gpu/drm/scheduler/sched_entity.c | 65 +---
 drivers/gpu/drm/v3d/v3d_drv.c            |  7 +--
 include/drm/gpu_scheduler.h              |  9 ++--
 11 files changed, 69 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a0d3d7b756eb..e8f46c13d073 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -122,7 +122,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-   struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
unsigned num_rings = 0;
unsigned num_rqs = 0;
 
@@ -181,12 +181,13 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (!rings[j]->adev)
continue;
 
-   rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+   sched_list[num_rqs++] = &rings[j]->sched;
}
 
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rqs, &ctx->guilty);
+ sched_list, num_rqs,
+ &ctx->guilty, priority);
if (r)
goto error_cleanup_entities;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 19ffe00d9072..a960dd7c0711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1957,11 +1957,12 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
if (enable) {
struct amdgpu_ring *ring;
-   struct drm_sched_rq *rq;
+   struct drm_gpu_scheduler *sched;
 
ring = adev->mman.buffer_funcs_ring;
-   rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-   r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+   sched = &ring->sched;
+   r = drm_sched_entity_init(&adev->mman.entity, &sched,
+ 1, NULL, DRM_SCHED_PRIORITY_KERNEL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
  r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e324bfe6c58f..b803a8882864 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
 {
struct amdgpu_ring *ring;
-   struct drm_sched_rq *rq;
+   struct drm_gpu_scheduler *sched;
int r;
 
ring = &adev->uvd.inst[0].ring;
-   rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-   r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+   sched = &ring->sched;
+   r = drm_sched_entity_init(&adev->uvd.entity, &sched,
+ 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
if (r) {
DRM_ERROR("Failed setting up UVD kernel entity.\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 46b590af2fd2..b44f28d44fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 {
struct amdgpu_ring *ring;
-   struct drm_sched_rq *rq;
+   struct drm_gpu_scheduler *sched;
int r;
 
ring = &adev->vce.ring[0];
-   rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-   r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+   sched = &ring->sched;
+   r = drm_sched_entity_init(&adev->vce.entity, &sched,
+ 1, NULL, DRM_SCHED_PRIORITY_NORMAL);
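
The remaining hunks (amdgpu_vm.c through include/drm/gpu_scheduler.h) are cut
off in this archive. Going by the callers above, a sketch of where the rework
plausibly lands for the entity struct and the init signature; the field names
and layout are inferred, not quoted from the patch:

struct drm_sched_entity {
	/* ... existing members ... */
	struct drm_gpu_scheduler	**sched_list;	/* replaces the per-priority rq_list copy */
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;	/* selects the rq within a scheduler */
};

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty,
			  enum drm_sched_priority priority);

drm_sched_entity_select_rq() can then resolve the run queue as
&sched->sched_rq[entity->priority] at submission time, instead of the entity
holding rq pointers that have to be rewritten on a priority change.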

Re: [RFC PATCH] drm/scheduler: rework entity creation

2019-12-05 Thread Das, Nirmoy
[AMD Official Use Only - Internal Distribution Only]

Thanks Christian,


I will modify and resend.

Regards,
Nirmoy

From: Koenig, Christian 
Sent: Thursday, December 5, 2019 1:29:49 PM
To: Das, Nirmoy ; Nirmoy Das ; 
Deucher, Alexander ; Ho, Kenny 
Cc: amd-gfx@lists.freedesktop.org ; Das, Nirmoy 

Subject: Re: [RFC PATCH] drm/scheduler: rework entity creation

On 05.12.19 12:04, Nirmoy wrote:
> Hi Christian,
>
> I am not exactly sure about drm_sched_entity_set_priority(). I wonder
> if just changing entity->priority to ctx->override_priority should
> work. With this change drm_sched_entity_select_rq() will choose an rq
> based on entity->priority, which seems correct to me. But is this
> enough to fix the old bug you were talking about, which messes up
> already-scheduled jobs on a priority change?

Yes, that should perfectly do it.

>
> Okay, I just realized I need a lock to make sure
> drm_sched_entity_set_priority() and drm_sched_entity_select_rq()
> don't run at the same time.

Yeah, you probably need to grab the lock and make sure that you get the
priority to use while holding the lock as well.

Regards,
Christian.

>
>
> Regards,
>
> Nirmoy
>
>
> On 12/5/19 11:52 AM, Nirmoy Das wrote:
>> [snip]

Re: [RFC PATCH] drm/scheduler: rework entity creation

2019-12-05 Thread Christian König

On 05.12.19 12:04, Nirmoy wrote:

Hi Christian,

I am not exactly sure about drm_sched_entity_set_priority(). I wonder
if just changing entity->priority to ctx->override_priority should
work. With this change drm_sched_entity_select_rq() will choose an rq
based on entity->priority, which seems correct to me. But is this
enough to fix the old bug you were talking about, which messes up
already-scheduled jobs on a priority change?


Yes, that should perfectly do it.
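
Concretely, the amdgpu override path could then reduce to something like the
following sketch (assumed shape; amdgpu_ctx_priority_override() is not part of
the quoted hunks, and init_priority/override_priority handling is inferred
from the discussion):

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;
	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
			/* only update the priority; rq selection happens
			 * later, in drm_sched_entity_select_rq() */
			drm_sched_entity_set_priority(&ctx->entities[i][j].entity,
						      ctx_prio);
	}
}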



Okay, I just realized I need a lock to make sure
drm_sched_entity_set_priority() and drm_sched_entity_select_rq()
don't run at the same time.


Yeah, you probably need to grab the lock and make sure that you get the 
priority to use while holding the lock as well.
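
A minimal sketch of that locking, assuming the entity's existing rq_lock
spinlock is reused (the sched_entity.c hunk is truncated above, so this is an
assumption, not the posted code):

void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	/* serialize against drm_sched_entity_select_rq() */
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}

drm_sched_entity_select_rq() would then take the same rq_lock around reading
entity->priority and updating entity->rq, so a concurrent priority change can
never observe or produce a half-updated run-queue selection.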


Regards,
Christian.




Regards,

Nirmoy


On 12/5/19 11:52 AM, Nirmoy Das wrote:

[snip]

Re: [RFC PATCH] drm/scheduler: rework entity creation

2019-12-05 Thread Christian König

On 05.12.19 11:52, Nirmoy Das wrote:

The entity currently keeps a copy of the run_queue list and modifies it
in drm_sched_entity_set_priority(). Entities shouldn't modify the
run_queue list. Use a drm_gpu_scheduler list instead of a drm_sched_rq
list in the drm_sched_entity struct. This way we can select a run queue
based on the entity/ctx priority for a drm scheduler.


Looks good to me in general, only a few nit picks below.



Signed-off-by: Nirmoy Das 
---
[snip]

Re: [RFC PATCH] drm/scheduler: rework entity creation

2019-12-05 Thread Nirmoy

Hi Christian,

I am not exactly sure about drm_sched_entity_set_priority(). I wonder
if just changing entity->priority to ctx->override_priority should
work. With this change drm_sched_entity_select_rq() will choose an rq
based on entity->priority, which seems correct to me. But is this
enough to fix the old bug you were talking about, which messes up
already-scheduled jobs on a priority change?


Okay, I just realized I need a lock to make sure
drm_sched_entity_set_priority() and drm_sched_entity_select_rq()
don't run at the same time.



Regards,

Nirmoy


On 12/5/19 11:52 AM, Nirmoy Das wrote:

[snip]