On 06/11/2025 14:46, Boris Brezillon wrote:
> It's the group item that's supposed to be inserted before other_group,
> not the other way around.
> 
> Fixes: de8548813824 ("drm/panthor: Add the scheduler logical block")
> Signed-off-by: Boris Brezillon <[email protected]>

Looks correct to me (but then I think I thought that about the old
code... ;) ).

Reviewed-by: Steven Price <[email protected]>
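
For anyone else following along: the detail that matters here is that
list_add_tail(new, node) links "new" immediately *before* "node", so with
the fixed comparison the group being (re)inserted lands just ahead of the
first entry that has a lower CSG slot priority, keeping old_groups sorted
in descending slot-priority order as the new comment says. A throwaway
userspace sketch of that list behaviour, in case it helps (the fake_group
struct, the slot_prio field and the re-implemented list helpers below are
made up for illustration, they're not the panthor/kernel ones):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void init_list_head(struct list_head *h) { h->prev = h->next = h; }

/* Same linking as the kernel helper: insert "new" right before "node". */
static void list_add_tail(struct list_head *new, struct list_head *node)
{
	new->prev = node->prev;
	new->next = node;
	node->prev->next = new;
	node->prev = new;
}

struct fake_group { int slot_prio; struct list_head run_node; };

int main(void)
{
	struct list_head old_groups;
	struct fake_group a = { .slot_prio = 1 }, b = { .slot_prio = 3 };
	struct list_head *pos;

	init_list_head(&old_groups);
	list_add_tail(&a.run_node, &old_groups);	/* list: a (prio 1) */

	/* Insert b before a, as the fixed code does for a higher-prio group. */
	list_add_tail(&b.run_node, &a.run_node);

	for (pos = old_groups.next; pos != &old_groups; pos = pos->next) {
		struct fake_group *g = (struct fake_group *)
			((char *)pos - offsetof(struct fake_group, run_node));
		printf("prio %d\n", g->slot_prio);
	}
	return 0;
}

That prints "prio 3" before "prio 1", i.e. the higher-priority entry ends
up first in iteration order.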

> ---
>  drivers/gpu/drm/panthor/panthor_sched.c | 46 +++++++++++++++----------
>  1 file changed, 28 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 94514d464e64..69cc1b4c23f2 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -1960,31 +1960,22 @@ tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
>  static void
>  tick_ctx_insert_old_group(struct panthor_scheduler *sched,
>                         struct panthor_sched_tick_ctx *ctx,
> -                       struct panthor_group *group,
> -                       bool full_tick)
> +                       struct panthor_group *group)
>  {
>       struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
>       struct panthor_group *other_group;
>  
> -     if (!full_tick) {
> -             list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
> -             return;
> -     }
> -
> -     /* Rotate to make sure groups with lower CSG slot
> -      * priorities have a chance to get a higher CSG slot
> -      * priority next time they get picked. This priority
> -      * has an impact on resource request ordering, so it's
> -      * important to make sure we don't let one group starve
> -      * all other groups with the same group priority.
> -      */
> +     /* Class groups in descending priority order so we can easily rotate. */
>       list_for_each_entry(other_group,
>                           &ctx->old_groups[csg_slot->group->priority],
>                           run_node) {
>               struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
>  
> -             if (other_csg_slot->priority > csg_slot->priority) {
> -                     list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
> +             /* Our group has a higher prio than the one we're testing against,
> +              * place it just before.
> +              */
> +             if (csg_slot->priority > other_csg_slot->priority) {
> +                     list_add_tail(&group->run_node, &other_group->run_node);
>                       return;
>               }
>       }
> @@ -2033,7 +2024,7 @@ tick_ctx_init(struct panthor_scheduler *sched,
>                               group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
>               }
>  
> -             tick_ctx_insert_old_group(sched, ctx, group, full_tick);
> +             tick_ctx_insert_old_group(sched, ctx, group);
>               csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
>                                       csg_iface->output->ack ^ CSG_STATUS_UPDATE,
>                                       CSG_STATUS_UPDATE);
> @@ -2399,9 +2390,28 @@ static void tick_work(struct work_struct *work)
>       for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
>            prio >= 0 && !tick_ctx_is_full(sched, &ctx);
>            prio--) {
> +             struct panthor_group *old_highest_prio_group =
> +                     list_first_entry_or_null(&ctx.old_groups[prio],
> +                                              struct panthor_group, run_node);
> +
> +             /* Pull out the group with the highest prio for rotation. */
> +             if (old_highest_prio_group)
> +                     list_del(&old_highest_prio_group->run_node);
> +
> +             /* Re-insert old active groups so they get a chance to run with higher prio. */
> +             tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
> +
> +             /* Fill the remaining slots with runnable groups. */
>               tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
>                                              true, false);
> -             tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
> +
> +             /* Re-insert the old group with the highest prio, and give it a chance to be
> +              * scheduled again (but with a lower prio) if there's room left.
> +              */
> +             if (old_highest_prio_group) {
> +                     list_add_tail(&old_highest_prio_group->run_node, &ctx.old_groups[prio]);
> +                     tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
> +             }
>       }
>  
>       /* If we have free CSG slots left, pick idle groups */
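
One extra note, mostly to check my own reading of the new rotation: for a
given priority level the pick order now becomes "remaining old groups,
then runnable groups, then the previous top group last if there's still
room", which as far as I can tell is what gives the other groups a shot
at a higher CSG slot priority on the next tick. A toy sketch of that
ordering with plain arrays (group names, array sizes and the missing
"stop when the tick context is full" handling are all simplifications,
not the real scheduler state):

#include <stdio.h>

int main(void)
{
	/* Old groups sorted by descending CSG slot prio; "A" had the top slot. */
	const char *old_groups[] = { "A", "B", "C" };
	const char *runnable[] = { "D" };
	const char *picked[8];
	int n = 0, i;

	/* 1. Pull the previous top group out of the old list. */
	const char *prev_top = old_groups[0];

	/* 2. Re-pick the remaining old groups first, so they move up. */
	for (i = 1; i < 3; i++)
		picked[n++] = old_groups[i];

	/* 3. Fill the remaining slots with runnable groups. */
	for (i = 0; i < 1; i++)
		picked[n++] = runnable[i];

	/* 4. The previous top group goes last, if there's still room. */
	picked[n++] = prev_top;

	for (i = 0; i < n; i++)
		printf("%s ", picked[i]);	/* prints: B C D A */
	printf("\n");
	return 0;
}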
