Since we already compute (but don't store) the sgs load_per_task value
in update_sg_lb_stats(), we might as well store it there and avoid
re-computing it later in calculate_imbalance().

Signed-off-by: Peter Zijlstra <pet...@infradead.org>
---
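For reference, a minimal sketch of the sg_lb_stats fields this change touches
(field names as used in kernel/sched/fair.c; other members omitted and exact
types may differ):

	struct sg_lb_stats {
		unsigned long sum_weighted_load;	/* weighted load over the group's CPUs */
		unsigned long sum_nr_running;		/* runnable tasks in the group */
		unsigned long load_per_task;		/* now filled in by update_sg_lb_stats() */
		int group_imb;				/* group is internally imbalanced */
		/* ... */
	};

With the value cached by update_sg_lb_stats(), calculate_imbalance() no longer
needs to redo the division, which is what the second hunk below removes.
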
 kernel/sched/fair.c |   13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4504,7 +4504,6 @@ static inline void update_sg_lb_stats(st
 {
        unsigned long nr_running, max_nr_running, min_nr_running;
        unsigned long load, max_cpu_load, min_cpu_load;
-       unsigned long avg_load_per_task = 0;
        int i;
 
        /* Tally up the load of all CPUs in the group */
@@ -4559,9 +4558,9 @@ static inline void update_sg_lb_stats(st
         *      the hierarchy?
         */
        if (sgs->sum_nr_running)
-               avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+               sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-       if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
+       if ((max_cpu_load - min_cpu_load) >= sgs->load_per_task &&
            (max_nr_running - min_nr_running) > 1)
                sgs->group_imb = 1;
 
@@ -4807,15 +4806,7 @@ static inline void calculate_imbalance(s
        struct sg_lb_stats *this, *busiest;
 
        this = &sds->this_stat;
-       if (this->sum_nr_running) {
-               this->load_per_task =
-                       this->sum_weighted_load / this->sum_nr_running;
-       }
-
        busiest = &sds->busiest_stat;
-       /* busiest must have some tasks */
-       busiest->load_per_task =
-               busiest->sum_weighted_load / busiest->sum_nr_running;
 
        if (busiest->group_imb) {
                busiest->load_per_task =

