Am 16.03.2017 um 10:00 schrieb Chunming Zhou:
If the high priority rq is full, then a process with low priority could be starved.
Add a policy for this problem: the high priority queue can run ahead of the next
priority queue, with a ratio of 2 : 1.
Change-Id: I58f4a6b9cdce8689b18dd8e83dd6e2cf5f99d5fb
Signed-off-by: Chunming Zhou <[email protected]>
Well, the idea behind the high priority queues is actually to starve the
low priority queues to a certain extent.
At least for the kernel queue that is really desired.
Christian.
---
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 26 +++++++++++++++++++++++---
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 2 ++
2 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 0f439dd..4637b6f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -35,11 +35,16 @@
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
/* Initialize a given run queue struct */
-static void amd_sched_rq_init(struct amd_sched_rq *rq)
+static void amd_sched_rq_init(struct amd_gpu_scheduler *sched, enum
+ amd_sched_priority pri)
{
+ struct amd_sched_rq *rq = &sched->sched_rq[pri];
+
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
+ rq->wait_base = pri * 2;
+ rq->wait = rq->wait_base;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
@@ -494,17 +499,32 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
struct amd_sched_entity *entity;
int i;
+ bool skip;
if (!amd_sched_ready(sched))
return NULL;
+retry:
+ skip = false;
/* Kernel run queue has higher priority than normal run queue*/
for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+ if ((i > AMD_SCHED_PRIORITY_MIN) &&
+		    (sched->sched_rq[i - 1].wait >= sched->sched_rq[i].wait_base)) {
+			sched->sched_rq[i - 1].wait = sched->sched_rq[i - 1].wait_base;
+ skip = true;
+ continue;
+ }
entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
- if (entity)
+ if (entity) {
+ if (i > AMD_SCHED_PRIORITY_MIN)
+ sched->sched_rq[i - 1].wait++;
break;
+ }
}
+ if (!entity && skip)
+ goto retry;
+
return entity;
}
@@ -608,7 +628,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
sched->name = name;
sched->timeout = timeout;
for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
- amd_sched_rq_init(&sched->sched_rq[i]);
+ amd_sched_rq_init(sched, i);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 99f0240..4caed30 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -64,6 +64,8 @@ struct amd_sched_rq {
spinlock_t lock;
struct list_head entities;
struct amd_sched_entity *current_entity;
+ int wait_base;
+ int wait;
};
struct amd_sched_fence {
_______________________________________________
amd-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/amd-gfx