On Thu, Nov 13, 2025 at 7:32 AM Boris Brezillon
<[email protected]> wrote:
>
> If the group is already assigned a slot but was idle before this job
> submission, we need to make sure the priority rotation happens in the
> future. Extract the existing logic living in group_schedule_locked()
> and call this new sched_resume_tick() helper from the "group is
> assigned a slot" path.
>
> v2:
> - Add R-b
>
> Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
> Signed-off-by: Boris Brezillon <[email protected]>
> Reviewed-by: Steven Price <[email protected]>
> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 43 +++++++++++++++++++------
>  1 file changed, 34 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 0394f377c284..d1484f4a1c5b 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2543,14 +2543,33 @@ static void sync_upd_work(struct work_struct *work)
>                 sched_queue_delayed_work(sched, tick, 0);
>  }
>
> +static void sched_resume_tick(struct panthor_device *ptdev)
> +{
> +       struct panthor_scheduler *sched = ptdev->scheduler;
> +       u64 delay_jiffies, now;
> +
> +       drm_WARN_ON(&ptdev->base, sched->resched_target != U64_MAX);
> +
> +       /* Scheduler tick was off, recalculate the resched_target based on the
> +        * last tick event, and queue the scheduler work.
> +        */
> +       now = get_jiffies_64();
> +       sched->resched_target = sched->last_tick + sched->tick_period;
> +       if (sched->used_csg_slot_count == sched->csg_slot_count &&
> +           time_before64(now, sched->resched_target))
> +               delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
> +       else
> +               delay_jiffies = 0;
> +
> +       sched_queue_delayed_work(sched, tick, delay_jiffies);
> +}
> +
>  static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
>  {
>         struct panthor_device *ptdev = group->ptdev;
>         struct panthor_scheduler *sched = ptdev->scheduler;
>         struct list_head *queue = &sched->groups.runnable[group->priority];
> -       u64 delay_jiffies = 0;
>         bool was_idle;
> -       u64 now;
>
>         if (!group_can_run(group))
>                 return;
> @@ -2595,13 +2614,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
>         /* Scheduler tick was off, recalculate the resched_target based on the
>          * last tick event, and queue the scheduler work.
>          */
> -       now = get_jiffies_64();
> -       sched->resched_target = sched->last_tick + sched->tick_period;
> -       if (sched->used_csg_slot_count == sched->csg_slot_count &&
> -           time_before64(now, sched->resched_target))
> -               delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
> -
> -       sched_queue_delayed_work(sched, tick, delay_jiffies);
> +       sched_resume_tick(ptdev);
>  }
>
>  static void queue_stop(struct panthor_queue *queue,
> @@ -3202,6 +3215,18 @@ queue_run_job(struct drm_sched_job *sched_job)
>
>                 group_schedule_locked(group, BIT(job->queue_idx));
>         } else {
> +               u32 queue_mask = BIT(job->queue_idx);
> +               bool resume_tick = group_is_idle(group) &&
> +                                  (group->idle_queues & queue_mask) &&
> +                                  !(group->blocked_queues & queue_mask) &&
> +                                  sched->resched_target == U64_MAX;
The logic here should be the same as the first part of
group_schedule_locked(). I wonder if we can refactor that as well,
maybe with something like the sketch below.
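
Completely untested, and group_queues_going_busy() is just a name I
made up for the discussion, but both call sites could share a small
predicate along these lines:

	/* Hypothetical helper: returns true when clearing the idle bit(s)
	 * covered by @queue_mask would take the group out of the idle
	 * state while the scheduler tick is currently stopped.
	 */
	static bool group_queues_going_busy(struct panthor_group *group,
					    u32 queue_mask)
	{
		struct panthor_scheduler *sched = group->ptdev->scheduler;

		/* Tick is already running, nothing to resume. */
		if (sched->resched_target != U64_MAX)
			return false;

		/* Blocked queues don't make the group runnable. */
		if (group->blocked_queues & queue_mask)
			return false;

		/* Only an idle -> busy transition needs a tick resume. */
		return group_is_idle(group) &&
		       (group->idle_queues & queue_mask);
	}

with queue_run_job() then doing:

	bool resume_tick = group_queues_going_busy(group, queue_mask);

	group->idle_queues &= ~queue_mask;
	if (resume_tick)
		sched_resume_tick(ptdev);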

> +
> +               /* We just added something to the queue, so it's no longer idle. */
> +               group->idle_queues &= ~BIT(job->queue_idx);
You could reuse the queue_mask you computed above:

	group->idle_queues &= ~queue_mask;

> +
> +               if (resume_tick)
> +                       sched_resume_tick(ptdev);
> +
>                 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
>                 if (!sched->pm.has_ref &&
>                     !(group->blocked_queues & BIT(job->queue_idx))) {
> --
> 2.51.1
>
