On Thursday 28 Apr 2016 at 11:19:19 (+0200), Peter Zijlstra wrote:
> On Tue, Apr 05, 2016 at 12:12:30PM +0800, Yuyang Du wrote:
> > Rename scale_load() and scale_load_down() to user_to_kernel_load()
> > and kernel_to_user_load() respectively, to allow the names to bear
> > what they are really about.
> 
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -189,7 +189,7 @@ static void __update_inv_weight(struct load_weight *lw)
> >     if (likely(lw->inv_weight))
> >             return;
> >  
> > -   w = scale_load_down(lw->weight);
> > +   w = kernel_to_user_load(lw->weight);
> >  
> >     if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
> >             lw->inv_weight = 1;
> > @@ -213,7 +213,7 @@ static void __update_inv_weight(struct load_weight *lw)
> >   */
> > >  static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
> >  {
> > -   u64 fact = scale_load_down(weight);
> > +   u64 fact = kernel_to_user_load(weight);
> >     int shift = WMULT_SHIFT;
> >  
> >     __update_inv_weight(lw);
> > @@ -6952,10 +6952,11 @@ static inline void calculate_imbalance(struct 
> > lb_env *env, struct sd_lb_stats *s
> >      */
> >     if (busiest->group_type == group_overloaded &&
> >         local->group_type   == group_overloaded) {
> > +           unsigned long min_cpu_load =
> > +                   kernel_to_user_load(NICE_0_LOAD) * 
> > busiest->group_capacity;
> >             load_above_capacity = busiest->sum_nr_running * NICE_0_LOAD;
> > -           if (load_above_capacity > scale_load(busiest->group_capacity))
> > -                   load_above_capacity -=
> > -                           scale_load(busiest->group_capacity);
> > +           if (load_above_capacity > min_cpu_load)
> > +                   load_above_capacity -= min_cpu_load;
> >             else
> >                     load_above_capacity = ~0UL;
> >     }
> 
> Except these 3 really are not about user/kernel visible fixed point
> ranges _at_all_... :/

While trying to optimize the calculation of min_cpu_load, I have broken
everything.

It should be:

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b6659d..3411eb7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6953,7 +6953,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
        if (busiest->group_type == group_overloaded &&
            local->group_type   == group_overloaded) {
                unsigned long min_cpu_load =
-                       kernel_to_user_load(NICE_0_LOAD) * busiest->group_capacity;
+                       busiest->group_capacity * NICE_0_LOAD / SCHED_CAPACITY_SCALE;
                load_above_capacity = busiest->sum_nr_running * NICE_0_LOAD;
                if (load_above_capacity > min_cpu_load)
                        load_above_capacity -= min_cpu_load;

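For the record, here is a minimal standalone sketch of the unit issue; it is
not kernel code, just illustrative: SCHED_CAPACITY_SCALE is assumed to be
1024, NICE_0_LOAD is 1024 without the increased load resolution and
1024 << 10 with it, and min_cpu_load() is a hypothetical helper mirroring the
corrected expression above.

/*
 * Illustrative sketch only, not kernel code.  Assumed values:
 * SCHED_CAPACITY_SCALE = 1024; NICE_0_LOAD = 1024 without the increased
 * load resolution, 1024 << 10 with it.  min_cpu_load() mirrors the
 * corrected expression from the diff above.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static unsigned long min_cpu_load(unsigned long nice_0_load,
				  unsigned long group_capacity)
{
	/* convert capacity units into NICE_0_LOAD units */
	return group_capacity * nice_0_load / SCHED_CAPACITY_SCALE;
}

int main(void)
{
	unsigned long cap = 2 * SCHED_CAPACITY_SCALE;	/* 2-CPU group */

	/* without the increased resolution: NICE_0_LOAD == 1024 */
	printf("low res:  %lu (expect %lu)\n",
	       min_cpu_load(1024UL, cap), 2 * 1024UL);

	/* with the increased resolution: NICE_0_LOAD == 1024 << 10 */
	printf("high res: %lu (expect %lu)\n",
	       min_cpu_load(1024UL << 10, cap), 2 * (1024UL << 10));

	return 0;
}

In both configurations min_cpu_load comes out as the group's capacity
expressed in NICE_0_LOAD units, so the comparison with
busiest->sum_nr_running * NICE_0_LOAD stays in a single fixed-point scale,
whereas multiplying kernel_to_user_load(NICE_0_LOAD) by the raw capacity only
gives the same number when the increased resolution happens to be enabled.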
