On 02.10.25 07:16, Matthew Brost wrote:
> Stop open coding the pending job list walk in drivers. Add a pending job
> list iterator which safely walks the DRM scheduler's pending list, either
> locklessly (asserting the DRM scheduler is stopped) or by taking the
> pending job list lock.

Taking the job list lock and walking the pending list while the scheduler is 
not stopped is most likely a NO-GO.

As far as I understand it, that is exactly what Philip rejected as looking into 
scheduler internals during the discussion at XDC.

So why is that actually needed? For debugging or something functional?
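
For context, my reading of the intended driver-side usage is roughly the
following (a sketch only; the per-job actions and the helper are made up
here for illustration):

	struct drm_sched_job *job;

	/* Scheduler fully stopped, e.g. during reset: lockless walk,
	 * only asserted via pause_submit */
	drm_sched_for_each_pending_job(job, sched, NULL, true)
		handle_job_on_reset(job);	/* hypothetical helper */

	/* Scheduler still running: the iterator takes job_list_lock */
	drm_sched_for_each_pending_job(job, sched, entity, false)
		seq_printf(m, "pending seqno %llu\n",
			   job->s_fence->finished.seqno);

And it is exactly the second variant which looks problematic to me.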

Regards,
Christian.

> 
> Signed-off-by: Matthew Brost <[email protected]>
> ---
>  include/drm/gpu_scheduler.h | 64 +++++++++++++++++++++++++++++++++++++
>  1 file changed, 64 insertions(+)
> 
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index fb88301b3c45..a2dcabab8284 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -698,4 +698,68 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
>                                  struct drm_gpu_scheduler **sched_list,
>                                  unsigned int num_sched_list);
>  
> +/* Inlines */
> +
> +/**
> + * struct drm_sched_iter_pending_job - DRM scheduler pending job iterator state
> + * @sched: DRM scheduler associated with pending job iterator
> + * @stopped: DRM scheduler stopped state associated with pending job iterator
> + */
> +struct drm_sched_iter_pending_job {
> +     struct drm_gpu_scheduler *sched;
> +     bool stopped;
> +};
> +
> +/* Drivers should never call this directly */
> +static inline struct drm_sched_iter_pending_job
> +__drm_sched_iter_pending_job_begin(struct drm_gpu_scheduler *sched, bool stopped)
> +{
> +     struct drm_sched_iter_pending_job iter = {
> +             .sched = sched,
> +             .stopped = stopped,
> +     };
> +
> +     if (stopped)
> +             WARN_ON(!READ_ONCE(sched->pause_submit));
> +     else
> +             spin_lock(&sched->job_list_lock);
> +
> +     return iter;
> +}
> +
> +/* Drivers should never call this directly */
> +static inline void
> +__drm_sched_iter_pending_job_end(const struct drm_sched_iter_pending_job iter)
> +{
> +     if (iter.stopped)
> +             WARN_ON(!READ_ONCE(iter.sched->pause_submit));
> +     else
> +             spin_unlock(&iter.sched->job_list_lock);
> +}
> +
> +DEFINE_CLASS(drm_sched_iter_pending_job, struct drm_sched_iter_pending_job,
> +          __drm_sched_iter_pending_job_end(_T),
> +          __drm_sched_iter_pending_job_begin(__sched, __stopped),
> +          struct drm_gpu_scheduler * __sched, bool __stopped);
> +static inline void
> +*class_drm_sched_iter_pending_job_lock_ptr(class_drm_sched_iter_pending_job_t *_T)
> +{ return _T; }
> +#define class_drm_sched_iter_pending_job_is_conditional false
> +
> +/**
> + * drm_sched_for_each_pending_job() - Iterator for each pending job in scheduler
> + * @__job: Current pending job being iterated over
> + * @__sched: DRM scheduler to iterate over pending jobs
> + * @__entity: DRM scheduler entity to filter jobs, NULL indicates no filter
> + * @__stopped: DRM scheduler stopped state
> + *
> + * Iterator for each pending job in scheduler, filtering on an entity, and
> + * enforcing locking rules (either scheduler fully stopped or correctly takes
> + * job_list_lock).
> + */
> +#define drm_sched_for_each_pending_job(__job, __sched, __entity, __stopped) \
> +     scoped_guard(drm_sched_iter_pending_job, __sched, __stopped)           \
> +     list_for_each_entry(__job, &(__sched)->pending_list, list)             \
> +     for_each_if(!(__entity) || (__job)->entity == (__entity))
> +
>  #endif
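
For comparison, the open coding the commit message wants to replace is
presumably a pattern like this in the drivers (sketch):

	struct drm_sched_job *job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(job, &sched->pending_list, list) {
		/* per-job handling */
	}
	spin_unlock(&sched->job_list_lock);

which at least makes it obvious at the call site that scheduler
internals are being poked at.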
