Re: [PATCH v8 3/5] drm/sched: Split free_job into own work item
On 2023-11-02 07:13, Tvrtko Ursulin wrote:
>
> On 31/10/2023 03:24, Matthew Brost wrote:
>> Rather than call free_job and run_job in same work item have a dedicated
>> work item for each. This aligns with the design and intended use of work
>> queues.
>>
>> [snip changelog and earlier hunks; full patch in the original posting below]
>>
>> +/**
>> + * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready
>> + * @sched: scheduler instance
>> + */
>> +static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
>> +{
>> +	if (drm_sched_select_entity(sched))
>> +		drm_sched_run_job_queue(sched);
>> +}
>> +
>> +/**
>> + * drm_sched_free_job_work - worker to call free_job
>> + *
>> + * @w: free job work
>> + */
>> +static void drm_sched_free_job_work(struct work_struct *w)
>> +{
>> +	struct drm_gpu_scheduler *sched =
>> +		container_of(w, struct drm_gpu_scheduler, work_free_job);
>> +	struct drm_sched_job *cleanup_job;
>> +
>> +	if (READ_ONCE(sched->pause_submit))
>> +		return;
>> +
>> +	cleanup_job = drm_sched_get_cleanup_job(sched);
>> +	if (cleanup_job) {
>> +		sched->ops->free_job(cleanup_job);
>> +
>> +		drm_sched_free_job_queue_if_done(sched);
>> +		drm_sched_run_job_queue_if_ready(sched);
>
> Are finished jobs now disturbing the round robin selection?
>
> Every time this cleans up a job we get:
>
> drm_sched_run_job_queue_if_ready
>   ->
Re: [PATCH v8 3/5] drm/sched: Split free_job into own work item
On 31/10/2023 03:24, Matthew Brost wrote:
> Rather than call free_job and run_job in same work item have a dedicated
> work item for each. This aligns with the design and intended use of work
> queues.
>
> [snip changelog and earlier hunks; full patch in the original posting below]
>
> +/**
> + * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready
> + * @sched: scheduler instance
> + */
> +static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
> +{
> +	if (drm_sched_select_entity(sched))
> +		drm_sched_run_job_queue(sched);
> +}
> +
> +/**
> + * drm_sched_free_job_work - worker to call free_job
> + *
> + * @w: free job work
> + */
> +static void drm_sched_free_job_work(struct work_struct *w)
> +{
> +	struct drm_gpu_scheduler *sched =
> +		container_of(w, struct drm_gpu_scheduler, work_free_job);
> +	struct drm_sched_job *cleanup_job;
> +
> +	if (READ_ONCE(sched->pause_submit))
> +		return;
> +
> +	cleanup_job = drm_sched_get_cleanup_job(sched);
> +	if (cleanup_job) {
> +		sched->ops->free_job(cleanup_job);
> +
> +		drm_sched_free_job_queue_if_done(sched);
> +		drm_sched_run_job_queue_if_ready(sched);

Are finished jobs now disturbing the round robin selection?

Every time this cleans up a job we get:

  drm_sched_run_job_queue_if_ready
    -> drm_sched_select_entity
      -> drm_sched_rq_select_entity_rr
        -> rq->current_entity bumped to next in list

So when the job run worker does:

  entity = drm_sched_select_entity(sched);

it does not pick the same entity as it would have before this patch?

If so, perhaps drm_sched_run_job_queue_if_ready needs a "peek" helper
which does not modify any state.
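
For reference, the kind of side-effect-free check suggested above could look
roughly like the sketch below. It is a hypothetical illustration, not code
from this series: drm_sched_rq_peek_entity_rr() is an invented name, and the
locking and readiness test would have to match the scheduler's actual
internals (drm_sched_entity_is_ready() is assumed usable here).

/*
 * Hypothetical sketch only -- not part of the posted patch.
 * A "peek" variant of round-robin selection: reports whether some
 * entity in @rq is ready to run, but unlike
 * drm_sched_rq_select_entity_rr() it never advances rq->current_entity,
 * so calling it from the free-job worker cannot perturb the RR order.
 */
static bool drm_sched_rq_peek_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;
	bool ready = false;

	spin_lock(&rq->lock);
	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			ready = true;
			break;
		}
	}
	spin_unlock(&rq->lock);

	return ready;
}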
Re: [PATCH v8 3/5] drm/sched: Split free_job into own work item
On 2023-10-30 23:24, Matthew Brost wrote:
> Rather than call free_job and run_job in same work item have a dedicated
> work item for each. This aligns with the design and intended use of work
> queues.
>
> v2:
>   - Test for DMA_FENCE_FLAG_TIMESTAMP_BIT before setting
>     timestamp in free_job() work item (Danilo)
> v3:
>   - Drop forward dec of drm_sched_select_entity (Boris)
>   - Return in drm_sched_run_job_work if entity NULL (Boris)
> v4:
>   - Replace dequeue with peek and invert logic (Luben)
>   - Wrap to 100 lines (Luben)
>   - Update comments for *_queue / *_queue_if_ready functions (Luben)
> v5:
>   - Drop peek argument, blindly reinit idle (Luben)
>   - s/drm_sched_free_job_queue_if_ready/drm_sched_free_job_queue_if_done
>     (Luben)
>   - Update work_run_job & work_free_job kernel doc (Luben)
> v6:
>   - Do not move drm_sched_select_entity in file (Luben)
>
> Signed-off-by: Matthew Brost

Reviewed-by: Luben Tuikov

Regards,
Luben

> [snip patch body; identical to the original posting below]
[PATCH v8 3/5] drm/sched: Split free_job into own work item
Rather than call free_job and run_job in same work item have a dedicated
work item for each. This aligns with the design and intended use of work
queues.

v2:
  - Test for DMA_FENCE_FLAG_TIMESTAMP_BIT before setting
    timestamp in free_job() work item (Danilo)
v3:
  - Drop forward dec of drm_sched_select_entity (Boris)
  - Return in drm_sched_run_job_work if entity NULL (Boris)
v4:
  - Replace dequeue with peek and invert logic (Luben)
  - Wrap to 100 lines (Luben)
  - Update comments for *_queue / *_queue_if_ready functions (Luben)
v5:
  - Drop peek argument, blindly reinit idle (Luben)
  - s/drm_sched_free_job_queue_if_ready/drm_sched_free_job_queue_if_done
    (Luben)
  - Update work_run_job & work_free_job kernel doc (Luben)
v6:
  - Do not move drm_sched_select_entity in file (Luben)

Signed-off-by: Matthew Brost
---
 drivers/gpu/drm/scheduler/sched_main.c | 146 ++++++++++++++++++--------
 include/drm/gpu_scheduler.h            |   4 +-
 2 files changed, 101 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index d1ae05bded15..3b1b2f8eafe8 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -265,6 +265,32 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
 	queue_work(sched->submit_wq, &sched->work_run_job);
 }
 
+/**
+ * drm_sched_free_job_queue - enqueue free-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched)
+{
+	if (!READ_ONCE(sched->pause_submit))
+		queue_work(sched->submit_wq, &sched->work_free_job);
+}
+
+/**
+ * drm_sched_free_job_queue_if_done - enqueue free-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_free_job_queue_if_done(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *job;
+
+	spin_lock(&sched->job_list_lock);
+	job = list_first_entry_or_null(&sched->pending_list,
+				       struct drm_sched_job, list);
+	if (job && dma_fence_is_signaled(&job->s_fence->finished))
+		drm_sched_free_job_queue(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
@@ -284,7 +310,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
-	drm_sched_run_job_queue(sched);
+	drm_sched_free_job_queue(sched);
 }
 
 /**
@@ -943,8 +969,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 						typeof(*next), list);
 
 		if (next) {
-			next->s_fence->scheduled.timestamp =
-				dma_fence_timestamp(&job->s_fence->finished);
+			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+				     &next->s_fence->scheduled.flags))
+				next->s_fence->scheduled.timestamp =
+					dma_fence_timestamp(&job->s_fence->finished);
 			/* start TO timer for next job */
 			drm_sched_start_timeout(sched);
 		}
@@ -994,7 +1022,40 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 EXPORT_SYMBOL(drm_sched_pick_best);
 
 /**
- * drm_sched_run_job_work - main scheduler thread
+ * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
+{
+	if (drm_sched_select_entity(sched))
+		drm_sched_run_job_queue(sched);
+}
+
+/**
+ * drm_sched_free_job_work - worker to call free_job
+ *
+ * @w: free job work
+ */
+static void drm_sched_free_job_work(struct work_struct *w)
+{
+	struct drm_gpu_scheduler *sched =
+		container_of(w, struct drm_gpu_scheduler, work_free_job);
+	struct drm_sched_job *cleanup_job;
+
+	if (READ_ONCE(sched->pause_submit))
+		return;
+
+	cleanup_job = drm_sched_get_cleanup_job(sched);
+	if (cleanup_job) {
+		sched->ops->free_job(cleanup_job);
+
+		drm_sched_free_job_queue_if_done(sched);
+		drm_sched_run_job_queue_if_ready(sched);
+	}
+}
+
+/**
+ * drm_sched_run_job_work - worker to call run_job
  *
  * @w: run job work
  */
@@ -1003,65 +1064,51 @@ static void drm_sched_run_job_work(struct work_struct *w)
 	struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_run_job);
 	struct drm_sched_entity *entity;
-	struct drm_sched_job *cleanup_job;
+	struct dma_fence *fence;
+	struct drm_sched_fence *s_fence;
+	struct drm_sched_job *sched_job;
 	int r;
 
 	if
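
As a standalone illustration of the pattern the patch adopts -- one ordered
workqueue with a dedicated work item per concern, each work item re-arming
itself and its peer as needed -- here is a minimal kernel-module sketch.
Every name in it (demo_sched, demo_run_work, and so on) is invented for
illustration; none of this is drm/sched code, and the real scheduler tracks
jobs on a pending list rather than with counters.

// Minimal sketch of the two-work-item pattern: run-work and free-work
// are separate items on one ordered workqueue, and each queues the
// other when there is follow-up work. Illustration only.
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

struct demo_sched {
	struct workqueue_struct *submit_wq;
	struct work_struct work_run;
	struct work_struct work_free;
	atomic_t queued;	/* jobs waiting to run */
	atomic_t done;		/* jobs waiting to be freed */
};

static struct demo_sched ds;

static void demo_run_work(struct work_struct *w)
{
	struct demo_sched *s = container_of(w, struct demo_sched, work_run);

	/* nothing queued: bail without touching the counters */
	if (atomic_dec_if_positive(&s->queued) < 0)
		return;

	/* "run" one job; here it completes immediately */
	atomic_inc(&s->done);
	queue_work(s->submit_wq, &s->work_free);

	/* re-arm ourselves if more jobs are pending */
	if (atomic_read(&s->queued) > 0)
		queue_work(s->submit_wq, &s->work_run);
}

static void demo_free_work(struct work_struct *w)
{
	struct demo_sched *s = container_of(w, struct demo_sched, work_free);

	if (atomic_dec_if_positive(&s->done) < 0)
		return;

	pr_info("demo: freed one job\n");

	/* mirror of drm_sched_free_job_work(): re-check both queues */
	if (atomic_read(&s->done) > 0)
		queue_work(s->submit_wq, &s->work_free);
	if (atomic_read(&s->queued) > 0)
		queue_work(s->submit_wq, &s->work_run);
}

static int __init demo_init(void)
{
	/* ordered wq: at most one of the two work items runs at a time */
	ds.submit_wq = alloc_ordered_workqueue("demo_submit", 0);
	if (!ds.submit_wq)
		return -ENOMEM;

	INIT_WORK(&ds.work_run, demo_run_work);
	INIT_WORK(&ds.work_free, demo_free_work);

	atomic_set(&ds.queued, 3);	/* pretend three jobs were submitted */
	atomic_set(&ds.done, 0);
	queue_work(ds.submit_wq, &ds.work_run);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(ds.submit_wq);	/* flushes pending work */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");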