Make wake-ups of new tasks (find_idlest_group) aware of any
differences in cpu compute capacity so that new tasks don't get handed
off to cpus with lower capacity. The local group is now only preferred
over the idlest group when it also offers at least as much per-cpu
compute capacity.
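
A rough standalone sketch of the new bail-out rule (the helper name
and simplified signature below are illustrative assumptions, not the
kernel code):

	#include <stdbool.h>

	/*
	 * Prefer the local group only when it is within the imbalance
	 * threshold on load AND its most capable cpu is at least as
	 * capable as the idlest remote group's most capable cpu. In the
	 * kernel, find_idlest_group() returns NULL to express the same
	 * decision.
	 */
	static bool stay_local(unsigned long this_load,
			       unsigned long min_load,
			       unsigned long this_cpu_cap,
			       unsigned long idlest_cpu_cap,
			       unsigned long imbalance)
	{
		return 100 * this_load < imbalance * min_load &&
		       this_cpu_cap >= idlest_cpu_cap;
	}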

cc: Ingo Molnar <mi...@redhat.com>
cc: Peter Zijlstra <pet...@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmus...@arm.com>
---
 kernel/sched/fair.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8713310..4251e75 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4950,6 +4950,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
+       unsigned long this_cpu_cap = 0, idlest_cpu_cap = 0;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -4957,7 +4958,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                load_idx = sd->wake_idx;
 
        do {
-               unsigned long load, avg_load;
+               unsigned long load, avg_load, cpu_capacity;
                int local_group;
                int i;
 
@@ -4971,6 +4972,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
+               cpu_capacity = 0;
 
                for_each_cpu(i, sched_group_cpus(group)) {
                        /* Bias balancing toward cpus of our domain */
@@ -4980,6 +4982,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                                load = target_load(i, load_idx);
 
                        avg_load += load;
+
+                       if (cpu_capacity < capacity_of(i))
+                               cpu_capacity = capacity_of(i);
                }
 
                /* Adjust by relative CPU capacity of the group */
@@ -4987,14 +4992,20 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                if (local_group) {
                        this_load = avg_load;
+                       this_cpu_cap = cpu_capacity;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
+                       idlest_cpu_cap = cpu_capacity;
                }
        } while (group = group->next, group != sd->groups);
 
-       if (!idlest || 100*this_load < imbalance*min_load)
+       if (!idlest)
+               return NULL;
+
+       if (100*this_load < imbalance*min_load && this_cpu_cap >= idlest_cpu_cap)
                return NULL;
+
        return idlest;
 }
 
-- 
1.9.1
