Re: [PATCH 4/8] sched/fair: Use dst_cpu's capacity rather than group {min, max} capacity

2021-02-03 Thread Qais Yousef
On 01/28/21 18:31, Valentin Schneider wrote:
> Comparing the capacity extrema of the local and source sched_groups doesn't
> make much sense when, at the end of the day, the imbalance will be pulled by
> a known env->dst_cpu, whose capacity can be anywhere within the local
> group's capacity extrema.
> 
> Replace group_smaller_{min, max}_cpu_capacity() with comparisons of the
> source group's min/max capacity and the destination CPU's capacity.
> 
> Signed-off-by: Valentin Schneider 
> ---

Reviewed-by: Qais Yousef 

Thanks

--
Qais Yousef

>  kernel/sched/fair.c | 31 +++
>  1 file changed, 3 insertions(+), 28 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 58ce0b22fcb0..0959a770ecc0 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8352,26 +8352,6 @@ group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
>   return false;
>  }
>  
> -/*
> - * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
> - * per-CPU capacity than sched_group ref.
> - */
> -static inline bool
> -group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
> -{
> - return capacity_greater(ref->sgc->min_capacity, sg->sgc->min_capacity);
> -}
> -
> -/*
> - * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
> - * per-CPU capacity_orig than sched_group ref.
> - */
> -static inline bool
> -group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
> -{
> - return capacity_greater(ref->sgc->max_capacity, sg->sgc->max_capacity);
> -}
> -
>  static inline enum
>  group_type group_classify(unsigned int imbalance_pct,
> struct sched_group *group,
> @@ -8523,15 +8503,10 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>   if (!sgs->sum_h_nr_running)
>   return false;
>  
> - /*
> -  * Don't try to pull misfit tasks we can't help.
> -  * We can use max_capacity here as reduction in capacity on some
> -  * CPUs in the group should either be possible to resolve
> -  * internally or be covered by avg_load imbalance (eventually).
> -  */
> + /* Don't try to pull misfit tasks we can't help */
>   if (static_branch_unlikely(&sched_asym_cpucapacity) &&
>   sgs->group_type == group_misfit_task &&
> - (!group_smaller_max_cpu_capacity(sg, sds->local) ||
> + (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
>    sds->local_stat.group_type != group_has_spare))
>   return false;
>  
> @@ -8615,7 +8590,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>    */
>   if (sd_has_asym_cpucapacity(env->sd) &&
>   (sgs->group_type <= group_fully_busy) &&
> - (group_smaller_min_cpu_capacity(sds->local, sg)))
> + (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
>   return false;
>  
>   return true;
> -- 
> 2.27.0
> 


[PATCH 4/8] sched/fair: Use dst_cpu's capacity rather than group {min, max} capacity

2021-01-28 Thread Valentin Schneider
Comparing the capacity extrema of the local and source sched_groups doesn't
make much sense when, at the end of the day, the imbalance will be pulled by
a known env->dst_cpu, whose capacity can be anywhere within the local
group's capacity extrema.

Replace group_smaller_{min, max}_cpu_capacity() with comparisons of the
source group's min/max capacity and the destination CPU's capacity.

Signed-off-by: Valentin Schneider 
---
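(Illustration, not part of the change: the self-contained user-space sketch
below shows why a group capacity extremum can mislead for a specific
dst_cpu. The capacity_greater() re-implementation assumes the kernel's ~5%
margin, and the capacity values are made-up examples.)

  #include <stdio.h>
  #include <stdbool.h>

  /* Assumed ~5% margin, standing in for the kernel's capacity_greater(). */
  #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)

  int main(void)
  {
          /* Local (destination) group mixes a big and a little CPU. */
          unsigned long local_grp_max_cap = 1024; /* big CPU in local group   */
          unsigned long dst_cpu_cap       = 512;  /* little CPU doing the pull */
          unsigned long src_grp_max_cap   = 800;  /* busiest (source) group    */

          /* Old check: based on the local group's capacity extremum. */
          bool old_check = capacity_greater(local_grp_max_cap, src_grp_max_cap);
          /* New check: based on the CPU that will actually pull the task. */
          bool new_check = capacity_greater(dst_cpu_cap, src_grp_max_cap);

          printf("group-extremum check says local group can help: %d\n", old_check);
          printf("dst_cpu-based check says dst_cpu can help:      %d\n", new_check);
          return 0;
  }

(With these example numbers the group-extremum check claims the misfit task
can be helped, while the CPU that would actually pull it cannot; the patch
bases the decision on the latter.)
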
 kernel/sched/fair.c | 31 +++
 1 file changed, 3 insertions(+), 28 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 58ce0b22fcb0..0959a770ecc0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8352,26 +8352,6 @@ group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
return false;
 }
 
-/*
- * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
- * per-CPU capacity than sched_group ref.
- */
-static inline bool
-group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
-{
-   return capacity_greater(ref->sgc->min_capacity, sg->sgc->min_capacity);
-}
-
-/*
- * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
- * per-CPU capacity_orig than sched_group ref.
- */
-static inline bool
-group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
-{
-   return capacity_greater(ref->sgc->max_capacity, sg->sgc->max_capacity);
-}
-
 static inline enum
 group_type group_classify(unsigned int imbalance_pct,
  struct sched_group *group,
@@ -8523,15 +8503,10 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (!sgs->sum_h_nr_running)
return false;
 
-   /*
-* Don't try to pull misfit tasks we can't help.
-* We can use max_capacity here as reduction in capacity on some
-* CPUs in the group should either be possible to resolve
-* internally or be covered by avg_load imbalance (eventually).
-*/
+   /* Don't try to pull misfit tasks we can't help */
	if (static_branch_unlikely(&sched_asym_cpucapacity) &&
sgs->group_type == group_misfit_task &&
-   (!group_smaller_max_cpu_capacity(sg, sds->local) ||
+   (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
 sds->local_stat.group_type != group_has_spare))
return false;
 
@@ -8615,7 +8590,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 */
if (sd_has_asym_cpucapacity(env->sd) &&
(sgs->group_type <= group_fully_busy) &&
-   (group_smaller_min_cpu_capacity(sds->local, sg)))
+   (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
return false;
 
return true;
-- 
2.27.0