Commit: 2582f0eba54066b5e98ff2b27ef0cfa833b59f54 upstream
Author: Nikhil Rao <[email protected]>
AuthorDate: Wed Oct 13 12:09:36 2010 -0700

When cycling through sched groups to determine the busiest group, set
group_imb only if the busiest cpu has more than one runnable task. This patch
fixes the case where two cpus in a group have one runnable task each, but there
is a large weight differential between these two tasks. The load balancer is
unable to migrate any task from such a group, and hence the group should not
be considered imbalanced. (A standalone sketch of the reworked check follows
the patch below.)

Signed-off-by: Nikhil Rao <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
LKML-Reference: <[email protected]>
[ small code readability edits ]
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Mike Galbraith <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
---
 kernel/sched.c |   12 +++++++-----
 1 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 0a1fb24..d985956 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3745,7 +3745,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                        int local_group, const struct cpumask *cpus,
                        int *balance, struct sg_lb_stats *sgs)
 {
-       unsigned long load, max_cpu_load, min_cpu_load;
+       unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
        int i;
        unsigned int balance_cpu = -1, first_idle_cpu = 0;
        unsigned long avg_load_per_task = 0;
@@ -3759,6 +3759,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        /* Tally up the load of all CPUs in the group */
        max_cpu_load = 0;
        min_cpu_load = ~0UL;
+       max_nr_running = 0;
 
        for_each_cpu_and(i, sched_group_cpus(group), cpus) {
                struct rq *rq = cpu_rq(i);
@@ -3776,8 +3777,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                        load = target_load(i, load_idx);
                } else {
                        load = source_load(i, load_idx);
-                       if (load > max_cpu_load)
+                       if (load > max_cpu_load) {
                                max_cpu_load = load;
+                               max_nr_running = rq->nr_running;
+                       }
                        if (min_cpu_load > load)
                                min_cpu_load = load;
                }
@@ -3815,11 +3818,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        if (sgs->sum_nr_running)
                avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
                sgs->group_imb = 1;
 
-       sgs->group_capacity =
-               DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+       sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 }
 
 /**
-- 
1.7.4
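
For anyone who wants to experiment with the logic outside the kernel, below is
a minimal userspace sketch of the reworked check. It is a hypothetical
reduction of update_sg_lb_stats(), not kernel code: struct cpu_sample,
group_is_imbalanced() and the sample loads are invented for illustration, and
a plain per-cpu load stands in for the kernel's weighted source_load().

#include <stdio.h>

/* Hypothetical per-cpu sample: weighted load and runnable task count. */
struct cpu_sample {
	unsigned long load;
	unsigned long nr_running;
};

/*
 * Sketch of the group_imb computation: remember nr_running of the most
 * loaded cpu, and only report imbalance when that cpu has more than one
 * runnable task, i.e. when a task could actually be pulled from it.
 */
static int group_is_imbalanced(const struct cpu_sample *cpus, int n)
{
	unsigned long max_cpu_load = 0, min_cpu_load = ~0UL;
	unsigned long max_nr_running = 0;
	unsigned long sum_load = 0, sum_nr_running = 0;
	unsigned long avg_load_per_task = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (cpus[i].load > max_cpu_load) {
			max_cpu_load = cpus[i].load;
			max_nr_running = cpus[i].nr_running;
		}
		if (min_cpu_load > cpus[i].load)
			min_cpu_load = cpus[i].load;
		sum_load += cpus[i].load;
		sum_nr_running += cpus[i].nr_running;
	}

	if (sum_nr_running)
		avg_load_per_task = sum_load / sum_nr_running;

	/* The "&& max_nr_running > 1" term is what this patch adds. */
	return (max_cpu_load - min_cpu_load) > 2 * avg_load_per_task &&
	       max_nr_running > 1;
}

int main(void)
{
	/*
	 * One heavy task and two light ones, one per cpu: the load spread
	 * (8192) exceeds twice the average load per task (2 * 3754), so the
	 * old check flagged the group, yet the busiest cpu holds only a
	 * single (running, unpullable) task.
	 */
	struct cpu_sample group[] = {
		{ 9216, 1 }, { 1024, 1 }, { 1024, 1 },
	};
	int n = sizeof(group) / sizeof(group[0]);

	printf("group_imb = %d\n", group_is_imbalanced(group, n));
	return 0;
}

With the added max_nr_running term this prints "group_imb = 0": the spread is
large, but no task can be migrated off the busiest cpu, so flagging the group
as imbalanced would only make the balancer churn.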

