find_lowest_rq() currently only considers sched_domains that have the SD_WAKE_AFFINE flag set when selecting a cpu, but users may clear this flag through the sched_domain flags files exposed under proc. Handle domains without SD_WAKE_AFFINE as well: remember the first suitable cpu found in the lowest such domain as a likely cache-hot candidate, and fall back to it when no wake-affine domain yields a cpu. (A simplified user-space sketch of the resulting selection order is included after the patch.)
Signed-off-by: pang.xunlei <[email protected]>
---
 kernel/sched/rt.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index da6922e..49164f1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1549,6 +1549,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
+	int cachehot_cpu = nr_cpu_ids;
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))
@@ -1581,7 +1582,7 @@
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_AFFINE) {
-			int best_cpu;
+			int wakeaffine_cpu;
 
 			/*
 			 * "this_cpu" is cheaper to preempt than a
@@ -1593,16 +1594,25 @@
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(lowest_mask,
+			wakeaffine_cpu = cpumask_first_and(lowest_mask,
 					sched_domain_span(sd));
-			if (best_cpu < nr_cpu_ids) {
+			if (wakeaffine_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
-				return best_cpu;
+				return wakeaffine_cpu;
 			}
+		} else {
+			/* affine domain outweighs lower level non-affine domain? */
+			if (cachehot_cpu >= nr_cpu_ids)
+				cachehot_cpu = cpumask_first_and(lowest_mask,
+						sched_domain_span(sd));
 		}
 	}
 	rcu_read_unlock();
 
+	/* most likely cache-hot */
+	if (cachehot_cpu < nr_cpu_ids)
+		return cachehot_cpu;
+
 	/*
 	 * And finally, if there were no matches within the domains
 	 * just give the caller *something* to work with from the compatible
-- 
1.7.9.5
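
Not part of the patch proper, but for reviewers who want the resulting selection
order at a glance: below is a minimal, self-contained user-space sketch of the
logic find_lowest_rq() follows with this change applied. Everything in it
(TOY_NR_CPUS, struct toy_domain, first_and(), toy_find_lowest_rq()) is invented
for illustration; plain bitmasks stand in for struct cpumask and the
sched_domain spans.

/*
 * Illustrative user-space model only -- not kernel code.  Bitmasks stand
 * in for struct cpumask, TOY_NR_CPUS for nr_cpu_ids, and all the names
 * below (toy_domain, first_and, toy_find_lowest_rq) are invented here.
 */
#include <stdio.h>

#define TOY_NR_CPUS 8

struct toy_domain {
	int wake_affine;	/* models the SD_WAKE_AFFINE flag */
	unsigned int span;	/* bitmask of CPUs in the domain */
};

/* First CPU set in both masks, or TOY_NR_CPUS if none; models cpumask_first_and() */
static int first_and(unsigned int a, unsigned int b)
{
	for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++)
		if (a & b & (1u << cpu))
			return cpu;
	return TOY_NR_CPUS;
}

static int toy_find_lowest_rq(unsigned int lowest_mask, int this_cpu,
			      const struct toy_domain *domains, int nr_domains)
{
	int cachehot_cpu = TOY_NR_CPUS;	/* "none found yet", like nr_cpu_ids */

	/* Domains are walked lowest (most cache-affine) level first */
	for (int d = 0; d < nr_domains; d++) {
		const struct toy_domain *sd = &domains[d];

		if (sd->wake_affine) {
			/* "this_cpu" is cheapest to preempt, prefer it */
			if (lowest_mask & sd->span & (1u << this_cpu))
				return this_cpu;

			/* otherwise the first suitable CPU in this affine domain */
			int wakeaffine_cpu = first_and(lowest_mask, sd->span);
			if (wakeaffine_cpu < TOY_NR_CPUS)
				return wakeaffine_cpu;
		} else if (cachehot_cpu >= TOY_NR_CPUS) {
			/*
			 * SD_WAKE_AFFINE cleared: remember the first candidate
			 * from the lowest such level as the most likely
			 * cache-hot CPU, but let higher affine domains win.
			 */
			cachehot_cpu = first_and(lowest_mask, sd->span);
		}
	}

	/* New with this patch: fall back to the remembered cache-hot CPU */
	if (cachehot_cpu < TOY_NR_CPUS)
		return cachehot_cpu;

	/* Last resort; the real code prefers this_cpu, then cpumask_any(lowest_mask) */
	return first_and(lowest_mask, ~0u);
}

int main(void)
{
	/* Task runs on CPU 0; both of its domains have SD_WAKE_AFFINE cleared */
	const struct toy_domain domains[] = {
		{ .wake_affine = 0, .span = 0x03 },	/* core pair: CPUs 0-1 */
		{ .wake_affine = 0, .span = 0xff },	/* package:   CPUs 0-7 */
	};
	unsigned int lowest_mask = 0x06;	/* CPUs 1 and 2 run lower-prio tasks */

	/* Prints 1: CPU 1 shares the lowest-level domain with the task's CPU */
	printf("selected cpu %d\n",
	       toy_find_lowest_rq(lowest_mask, /* this_cpu */ 4, domains, 2));
	return 0;
}

The ordering point the sketch makes explicit: a wake-affine domain at any level
still beats the remembered cachehot_cpu, which is only returned once the whole
hierarchy has been scanned without an affine match, exactly as the in-diff
comment ("affine domain outweighs lower level non-affine domain?") describes.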

