On Thu, Mar 23, 2017 at 11:12:49AM +0900, Byungchul Park wrote: > It would be better to avoid pushing tasks to other cpu within > a SD_PREFER_SIBLING domain, instead, get more chances to check other > siblings.
+cc ghask...@novell.com +cc srost...@redhat.com > > Signed-off-by: Byungchul Park <byungchul.p...@lge.com> > --- > kernel/sched/rt.c | 17 +++++++++++++++++ > 1 file changed, 17 insertions(+) > > diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c > index 979b734..6332b2ad 100644 > --- a/kernel/sched/rt.c > +++ b/kernel/sched/rt.c > @@ -1624,6 +1624,7 @@ static int find_lowest_rq(struct task_struct *task) > struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); > int this_cpu = smp_processor_id(); > int cpu = task_cpu(task); > + int fallback_cpu = -1; > > /* Make sure the mask is initialized first */ > if (unlikely(!lowest_mask)) > @@ -1671,6 +1672,15 @@ static int find_lowest_rq(struct task_struct *task) > best_cpu = cpumask_first_and(lowest_mask, > sched_domain_span(sd)); > if (best_cpu < nr_cpu_ids) { > + /* > + * If current domain is SD_PREFER_SIBLING > + * flagged, we have to get more chances to > + * check other siblings. > + */ > + if (sd->flags & SD_PREFER_SIBLING) { > + fallback_cpu = best_cpu; > + continue; > + } > rcu_read_unlock(); > return best_cpu; > } > @@ -1679,6 +1689,13 @@ static int find_lowest_rq(struct task_struct *task) > rcu_read_unlock(); > > /* > + * If fallback_cpu is valid, all our guesses failed *except* for > + * SD_PREFER_SIBLING domain. Now, we can return the fallback cpu. > + */ > + if (fallback_cpu != -1) > + return fallback_cpu; > + > + /* > * And finally, if there were no matches within the domains > * just give the caller *something* to work with from the compatible > * locations. > -- > 1.9.1