Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Cc: Christian König <christian.koe...@amd.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Philipp Stanner <pha...@kernel.org>
---
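For reference, with the hunks below applied drm_sched_free_job_work() should read roughly as follows (sketch reconstructed from this diff; surrounding context lines assumed unchanged, comments added here for illustration only). The check previously done by drm_sched_run_free_queue() under a second job_list_lock acquisition is folded into drm_sched_get_finished_job(), which already holds that lock:

static void drm_sched_free_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_free_job);
	struct drm_sched_job *job;
	bool have_more;

	/* Pop one finished job and learn whether more signaled jobs remain. */
	job = drm_sched_get_finished_job(sched, &have_more);
	if (job) {
		sched->ops->free_job(job);
		/* Re-queue the free worker only when more work is known to exist. */
		if (have_more)
			__drm_sched_run_free_queue(sched);
	}

	drm_sched_run_job_queue(sched);
}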
 drivers/gpu/drm/scheduler/sched_main.c | 39 +++++++++++-----------------
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 86e40157b09b..a45b02fd2af3 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -365,22 +365,6 @@ static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
 	queue_work(sched->submit_wq, &sched->work_free_job);
 }
 
-/**
- * drm_sched_run_free_queue - enqueue free-job work if ready
- * @sched: scheduler instance
- */
-static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
-{
-	struct drm_sched_job *job;
-
-	spin_lock(&sched->job_list_lock);
-	job = list_first_entry_or_null(&sched->pending_list,
-				       struct drm_sched_job, list);
-	if (job && dma_fence_is_signaled(&job->s_fence->finished))
-		__drm_sched_run_free_queue(sched);
-	spin_unlock(&sched->job_list_lock);
-}
-
 /**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
@@ -1097,12 +1081,13 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
  *
  * @sched: scheduler instance
+ * @have_more: are there more finished jobs on the list
  *
  * Returns the next finished job from the pending list (if there is one)
  * ready for it to be destroyed.
  */
 static struct drm_sched_job *
-drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
 {
 	struct drm_sched_job *job, *next;
@@ -1110,22 +1095,27 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
 
 	job = list_first_entry_or_null(&sched->pending_list,
 				       struct drm_sched_job, list);
-
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from pending_list */
 		list_del_init(&job->list);
 
 		/* cancel this job's TO timer */
 		cancel_delayed_work(&sched->work_tdr);
-		/* make the scheduled timestamp more accurate */
+
+		*have_more = false;
 		next = list_first_entry_or_null(&sched->pending_list,
 						typeof(*next), list);
-
 		if (next) {
+			/* make the scheduled timestamp more accurate */
 			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
 				     &next->s_fence->scheduled.flags))
 				next->s_fence->scheduled.timestamp =
 					dma_fence_timestamp(&job->s_fence->finished);
+
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &next->s_fence->finished.flags))
+				*have_more = true;
+
 			/* start TO timer for next job */
 			drm_sched_start_timeout(sched);
 		}
@@ -1184,12 +1174,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
 	struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_free_job);
 	struct drm_sched_job *job;
+	bool have_more;
 
-	job = drm_sched_get_finished_job(sched);
-	if (job)
+	job = drm_sched_get_finished_job(sched, &have_more);
+	if (job) {
 		sched->ops->free_job(job);
+		if (have_more)
+			__drm_sched_run_free_queue(sched);
+	}
 
-	drm_sched_run_free_queue(sched);
 	drm_sched_run_job_queue(sched);
 }