Hi, Aubrey,

> On Jul 1, 2020, at 5:32 AM, Vineeth Remanan Pillai <[email protected]> wrote:
> 
> From: Aubrey Li <[email protected]>
> 
> - Don't migrate if there is a cookie mismatch
>     Load balancing tries to move a task from the busiest CPU to
>     the destination CPU. When core scheduling is enabled, if the
>     task's cookie does not match the destination CPU's core
>     cookie, the task will be skipped by this CPU. This mitigates
>     the forced idle time on the destination CPU.
> 
> - Select cookie-matched idle CPU
>     In the fast path of task wakeup, select the first cookie-matched
>     idle CPU instead of the first idle CPU.
> 
> - Find cookie-matched idlest CPU
>     In the slow path of task wakeup, find the idlest CPU whose core
>     cookie matches the task's cookie.
> 
> - Don't migrate task if cookie doesn't match
>     For NUMA load balancing, don't migrate a task to a CPU whose
>     core cookie does not match the task's cookie.
> 
> Signed-off-by: Aubrey Li <[email protected]>
> Signed-off-by: Tim Chen <[email protected]>
> Signed-off-by: Vineeth Remanan Pillai <[email protected]>
> ---
> kernel/sched/fair.c  | 64 ++++++++++++++++++++++++++++++++++++++++----
> kernel/sched/sched.h | 29 ++++++++++++++++++++
> 2 files changed, 88 insertions(+), 5 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index d16939766361..33dc4bf01817 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2051,6 +2051,15 @@ static void task_numa_find_cpu(struct task_numa_env *env,
>               if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
>                       continue;
> 
> +#ifdef CONFIG_SCHED_CORE
> +             /*
> +              * Skip this CPU if the source task's cookie does not
> +              * match the CPU's core cookie.
> +              */
> +             if (!sched_core_cookie_match(cpu_rq(cpu), env->p))
> +                     continue;
> +#endif
> +
>               env->dst_cpu = cpu;
>               if (task_numa_compare(env, taskimp, groupimp, maymove))
>                       break;
> @@ -5963,11 +5972,17 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
> 
>       /* Traverse only the allowed CPUs */
>       for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
> +             struct rq *rq = cpu_rq(i);
> +
> +#ifdef CONFIG_SCHED_CORE
> +             if (!sched_core_cookie_match(rq, p))
> +                     continue;
> +#endif
> +
>               if (sched_idle_cpu(i))
>                       return i;
> 
>               if (available_idle_cpu(i)) {
> -                     struct rq *rq = cpu_rq(i);
>                       struct cpuidle_state *idle = idle_get_state(rq);
>                       if (idle && idle->exit_latency < min_exit_latency) {
>                               /*
> @@ -6224,8 +6239,18 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
>       for_each_cpu_wrap(cpu, cpus, target) {
>               if (!--nr)
>                       return -1;
> -             if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
> -                     break;
> +
> +             if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) {
> +#ifdef CONFIG_SCHED_CORE
> +                     /*
> +                      * If core scheduling is enabled, select this CPU
> +                      * only if the task's cookie matches the core cookie.
> +                      */
> +                     if (sched_core_enabled(cpu_rq(cpu)) &&
> +                         p->core_cookie == cpu_rq(cpu)->core->core_cookie)
Why not also add similar logic in select_idle_smt() to reduce forced idle? :)
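Something like the below, perhaps? (Untested sketch: it assumes
select_idle_smt() keeps its current mainline shape and reuses the
sched_core_cookie_match() helper this patch adds.)

static int select_idle_smt(struct task_struct *p, int target)
{
	int cpu;

	if (!static_branch_likely(&sched_smt_present))
		return -1;

	for_each_cpu(cpu, cpu_smt_mask(target)) {
		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
			continue;
#ifdef CONFIG_SCHED_CORE
		/* Skip this sibling if its core cookie does not match. */
		if (!sched_core_cookie_match(cpu_rq(cpu), p))
			continue;
#endif
		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
			return cpu;
	}

	return -1;
}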


> +#endif
> +                             break;
> +             }
>       }
> 
>       time = cpu_clock(this) - time;
> @@ -7609,8 +7634,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
>        * We do not migrate tasks that are:
>        * 1) throttled_lb_pair, or
>        * 2) cannot be migrated to this CPU due to cpus_ptr, or
> -      * 3) running (obviously), or
> -      * 4) are cache-hot on their current CPU.
> +      * 3) task's cookie does not match this CPU's core cookie, or
> +      * 4) running (obviously), or
> +      * 5) are cache-hot on their current CPU.
>        */
>       if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
>               return 0;
> @@ -7645,6 +7671,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
>               return 0;
>       }
> 
> +#ifdef CONFIG_SCHED_CORE
> +     /*
> +      * Don't migrate the task if its cookie does not match
> +      * the destination CPU's core cookie.
> +      */
> +     if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
> +             return 0;
> +#endif
> +
>       /* Record that we found at least one task that could run on dst_cpu */
>       env->flags &= ~LBF_ALL_PINNED;
> 
> @@ -8857,6 +8892,25 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
>                                       p->cpus_ptr))
>                       continue;
> 
> +#ifdef CONFIG_SCHED_CORE
> +             if (sched_core_enabled(cpu_rq(this_cpu))) {
> +                     int i = 0;
> +                     bool cookie_match = false;
> +
> +                     for_each_cpu(i, sched_group_span(group)) {
Should we also consider p->cpus_ptr here, so that cookie_match can only
be set by a CPU that p is allowed to run on? See the sketch below.
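Something like this, keeping the loop body from your patch (untested):

			for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
				struct rq *rq = cpu_rq(i);

				if (sched_core_cookie_match(rq, p)) {
					cookie_match = true;
					break;
				}
			}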
Thx.
Regards,
Jiang

> +                             struct rq *rq = cpu_rq(i);
> +
> +                             if (sched_core_cookie_match(rq, p)) {
> +                                     cookie_match = true;
> +                                     break;
> +                             }
> +                     }
> +                     /* Skip over this group if no cookie matched */
> +                     if (!cookie_match)
> +                             continue;
> +             }
> +#endif
> +
>               local_group = cpumask_test_cpu(this_cpu,
>                                              sched_group_span(group));
> 
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 464559676fd2..875796d43fca 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1089,6 +1089,35 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
> bool cfs_prio_less(struct task_struct *a, struct task_struct *b);
> void sched_core_adjust_sibling_vruntime(int cpu, bool coresched_enabled);
> 
> +/*
> + * Helper to check if the CPU's core cookie matches the task's cookie
> + * when core scheduling is enabled.
> + * A special case is that the task's cookie always matches the CPU's
> + * core cookie if the CPU is in an idle core.
> + */
> +static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
> +{
> +     bool idle_core = true;
> +     int cpu;
> +
> +     /* Ignore cookie match if core scheduler is not enabled on the CPU. */
> +     if (!sched_core_enabled(rq))
> +             return true;
> +
> +     for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
> +             if (!available_idle_cpu(cpu)) {
> +                     idle_core = false;
> +                     break;
> +             }
> +     }
> +
> +     /*
> +      * A CPU in an idle core is always the best choice for tasks with
> +      * cookies.
> +      */
> +     return idle_core || rq->core->core_cookie == p->core_cookie;
> +}
> +
> extern void queue_core_balance(struct rq *rq);
> 
> #else /* !CONFIG_SCHED_CORE */
> -- 
> 2.17.1
> 
> 
