---
 drivers/gpu/drm/scheduler/sched_main.c | 37 ++++++++++++++-----------------------
1 file changed, 14 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 1f077782ec12..1bce0b66f89c 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -366,22 +366,6 @@ static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
 	queue_work(sched->submit_wq, &sched->work_free_job);
 }
-/**
- * drm_sched_run_free_queue - enqueue free-job work if ready
- * @sched: scheduler instance
- */
-static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
-{
- struct drm_sched_job *job;
-
- spin_lock(&sched->job_list_lock);
-	job = list_first_entry_or_null(&sched->pending_list,
-				       struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(&job->s_fence->finished))
- __drm_sched_run_free_queue(sched);
- spin_unlock(&sched->job_list_lock);
-}
-
/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
@@ -1102,12 +1086,13 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
+ * @have_more: are there more finished jobs on the list
*
  * Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
{
struct drm_sched_job *job, *next;
@@ -1115,22 +1100,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
 	job = list_first_entry_or_null(&sched->pending_list,
 				       struct drm_sched_job, list);
-
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
/* cancel this job's TO timer */
cancel_delayed_work(&sched->work_tdr);
- /* make the scheduled timestamp more accurate */
+
+ *have_more = false;
 		next = list_first_entry_or_null(&sched->pending_list,
 						typeof(*next), list);
-
if (next) {
+			/* make the scheduled timestamp more accurate */
if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
 				     &next->s_fence->scheduled.flags))
next->s_fence->scheduled.timestamp =
 					dma_fence_timestamp(&job->s_fence->finished);
+
+			*have_more = dma_fence_is_signaled(&next->s_fence->finished);
+
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
@@ -1189,12 +1177,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
+ bool have_more;
- job = drm_sched_get_finished_job(sched);
- if (job)
+ job = drm_sched_get_finished_job(sched, &have_more);
+ if (job) {
sched->ops->free_job(job);
+ if (have_more)
+ __drm_sched_run_free_queue(sched);
+ }
- drm_sched_run_free_queue(sched);
drm_sched_run_job_queue(sched);
}