On Fri, Aug 28, 2020 at 03:51:13PM -0400, Julien Desfossez wrote:
>  /*
>   * The static-key + stop-machine variable are needed such that:
>   *
> @@ -4641,7 +4656,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
>       struct task_struct *next, *max = NULL;
>       const struct sched_class *class;
>       const struct cpumask *smt_mask;
> -     int i, j, cpu;
> +     int i, j, cpu, occ = 0;
>       int smt_weight;
>       bool need_sync;
>  
> @@ -4750,6 +4765,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
>                               goto done;
>                       }
>  
> +                     if (!is_idle_task(p))
> +                             occ++;
> +
>                       rq_i->core_pick = p;
>  
>                       /*
> @@ -4775,6 +4793,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
>  
>                                               cpu_rq(j)->core_pick = NULL;
>                                       }
> +                                     occ = 1;
>                                       goto again;
>                               } else {
>                                       /*
> @@ -4820,6 +4839,8 @@ next_class:;
>               if (is_idle_task(rq_i->core_pick) && rq_i->nr_running)
>                       rq_i->core_forceidle = true;
>  
> +             rq_i->core_pick->core_occupation = occ;
> +
>               if (i == cpu)
>                       continue;
>  
> @@ -4837,6 +4858,113 @@ next_class:;
>       return next;
>  }
>  
> +static bool try_steal_cookie(int this, int that)
> +{
> +     struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
> +     struct task_struct *p;
> +     unsigned long cookie;
> +     bool success = false;
> +
> +     local_irq_disable();
> +     double_rq_lock(dst, src);
> +
> +     cookie = dst->core->core_cookie;
> +     if (!cookie)
> +             goto unlock;
> +
> +     if (dst->curr != dst->idle)
> +             goto unlock;
> +
> +     p = sched_core_find(src, cookie);
> +     if (p == src->idle)
> +             goto unlock;
> +
> +     do {
> +             if (p == src->core_pick || p == src->curr)
> +                     goto next;
> +
> +             if (!cpumask_test_cpu(this, &p->cpus_mask))
> +                     goto next;
> +
> +             if (p->core_occupation > dst->idle->core_occupation)
> +                     goto next;
> +
Can you please explain the rationale behind this check? If I understand
correctly, p->core_occupation is set in pick_next_task() to the number
of matching-cookie (i.e. non-idle) tasks picked on this core, and it is
not reset anywhere afterwards.
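
For my own understanding, here is a small userspace sketch (not kernel
code; struct pick and stamp_core_occupation() are made-up names) of how
I read the accounting in the selection loop quoted above. Please correct
me if this model is wrong:

#include <stdbool.h>
#include <stdio.h>

#define NR_SIBLINGS 2

struct pick {
	bool idle;		/* was the idle task picked on this sibling? */
	int core_occupation;	/* stamp written after the selection loop */
};

/* Model of the counting loop: occ is bumped for every non-idle pick,
 * and the final count is stamped on every sibling's pick. */
static void stamp_core_occupation(struct pick picks[], int nr)
{
	int i, occ = 0;

	for (i = 0; i < nr; i++)
		if (!picks[i].idle)
			occ++;

	for (i = 0; i < nr; i++)
		picks[i].core_occupation = occ;
}

int main(void)
{
	/* Sibling 0 runs a cookie task, sibling 1 is (force-)idle. */
	struct pick picks[NR_SIBLINGS] = { { .idle = false }, { .idle = true } };

	stamp_core_occupation(picks, NR_SIBLINGS);

	/* Both siblings end up stamped with core_occupation == 1. */
	for (int i = 0; i < NR_SIBLINGS; i++)
		printf("sibling %d: core_occupation=%d\n",
		       i, picks[i].core_occupation);
	return 0;
}

If that reading is right, an idle sibling keeps whatever occupation
value was stamped on it the last time it was picked, which is what
prompted my question about the comparison in try_steal_cookie().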

> +             p->on_rq = TASK_ON_RQ_MIGRATING;
> +             deactivate_task(src, p, 0);
> +             set_task_cpu(p, this);
> +             activate_task(dst, p, 0);
> +             p->on_rq = TASK_ON_RQ_QUEUED;
> +
> +             resched_curr(dst);
> +
> +             success = true;
> +             break;
> +
> +next:
> +             p = sched_core_next(p, cookie);
> +     } while (p);
> +
> +unlock:
> +     double_rq_unlock(dst, src);
> +     local_irq_enable();
> +
> +     return success;
> +}

Thanks,
Pavan
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux 
Foundation Collaborative Project.
