On Thu, Mar 15, 2018 at 02:46:59PM +0000, Morten Rasmussen wrote:

> +static inline unsigned long task_util(struct task_struct *p);
> +static inline int task_fits_capacity(struct task_struct *p, long capacity)
> +{
> +     return capacity * 1024 > task_util(p) * capacity_margin;
> +}
> +
> +static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
> +{
> +     if (!static_branch_unlikely(&sched_asym_cpucapacity))
> +             return;
> +
> +     if (!p) {
> +             rq->misfit_task_load = 0;
> +             return;
> +     }
> +
> +     if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
> +             rq->misfit_task_load = 0;
> +             return;
> +     }
> +
> +     rq->misfit_task_load = task_h_load(p);
> +}

So RT/IRQ pressure can also cause misfit...


> @@ -7972,6 +8005,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>                */
>               if (!nr_running && idle_cpu(i))
>                       sgs->idle_cpus++;
> +
> +             if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
> +                 !sgs->group_misfit_task_load && rq->misfit_task_load)
> +                     sgs->group_misfit_task_load = rq->misfit_task_load;
>       }

Should we not look for the biggest misfit instead of the first?

>  
>       /* Adjust by relative CPU capacity of the group */

Reply via email to