From: Peter Zijlstra <pet...@infradead.org>

Because sched_class::pick_next_task() also implies
sched_class::set_next_task() (and possibly put_prev_task() and
newidle_balance()), it is not state invariant. This makes it unsuitable
for remote task selection: a CPU picking on behalf of another CPU must
not modify the remote runqueue's state. The new sched_class::pick_task()
method selects a task without any of those side effects.
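
To illustrate the intended use, a remote picker could iterate the
scheduling classes and call the new hook without touching the target
runqueue. The helper below is only a sketch and is not part of this
patch (the core-scheduling series later adds a loop of this shape); it
assumes the existing for_each_class() iterator from kernel/sched/sched.h
and that, as in this patch, the hook is only defined under CONFIG_SMP:

	/* Illustrative only: a state-invariant pick on a (possibly remote) rq. */
	static struct task_struct *pick_task(struct rq *rq)
	{
		const struct sched_class *class;
		struct task_struct *p;

		for_each_class(class) {
			p = class->pick_task(rq);
			if (p)
				return p;
		}

		BUG(); /* The idle class always returns a task. */
	}

Because nothing above calls set_next_task(), put_prev_task() or
newidle_balance(), the loop can run against another CPU's runqueue.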

Tested-by: Julien Desfossez <jdesfos...@digitalocean.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Vineeth Remanan Pillai <vpil...@digitalocean.com>
Signed-off-by: Julien Desfossez <jdesfos...@digitalocean.com>
Signed-off-by: Joel Fernandes (Google) <j...@joelfernandes.org>
---
 kernel/sched/deadline.c  | 16 ++++++++++++++--
 kernel/sched/fair.c      | 32 +++++++++++++++++++++++++++++++-
 kernel/sched/idle.c      |  8 ++++++++
 kernel/sched/rt.c        | 14 ++++++++++++--
 kernel/sched/sched.h     |  3 +++
 kernel/sched/stop_task.c | 13 +++++++++++--
 6 files changed, 79 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 814ec49502b1..0271a7848ab3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1848,7 +1848,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
        return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-static struct task_struct *pick_next_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq)
 {
        struct sched_dl_entity *dl_se;
        struct dl_rq *dl_rq = &rq->dl;
@@ -1860,7 +1860,18 @@ static struct task_struct *pick_next_task_dl(struct rq *rq)
        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);
        p = dl_task_of(dl_se);
-       set_next_task_dl(rq, p, true);
+
+       return p;
+}
+
+static struct task_struct *pick_next_task_dl(struct rq *rq)
+{
+       struct task_struct *p;
+
+       p = pick_task_dl(rq);
+       if (p)
+               set_next_task_dl(rq, p, true);
+
        return p;
 }
 
@@ -2517,6 +2528,7 @@ const struct sched_class dl_sched_class
 
 #ifdef CONFIG_SMP
        .balance                = balance_dl,
+       .pick_task              = pick_task_dl,
        .select_task_rq         = select_task_rq_dl,
        .migrate_task_rq        = migrate_task_rq_dl,
        .set_cpus_allowed       = set_cpus_allowed_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dbd9368a959d..bd6aed63f5e3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4450,7 +4450,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
         * Avoid running the skip buddy, if running something else can
         * be done without getting too unfair.
         */
-       if (cfs_rq->skip == se) {
+       if (cfs_rq->skip && cfs_rq->skip == se) {
                struct sched_entity *second;
 
                if (se == curr) {
@@ -6976,6 +6976,35 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
                set_last_buddy(se);
 }
 
+#ifdef CONFIG_SMP
+static struct task_struct *pick_task_fair(struct rq *rq)
+{
+       struct cfs_rq *cfs_rq = &rq->cfs;
+       struct sched_entity *se;
+
+       if (!cfs_rq->nr_running)
+               return NULL;
+
+       do {
+               struct sched_entity *curr = cfs_rq->curr;
+
+               se = pick_next_entity(cfs_rq, NULL);
+
+               if (curr) {
+                       if (se && curr->on_rq)
+                               update_curr(cfs_rq);
+
+                       if (!se || entity_before(curr, se))
+                               se = curr;
+               }
+
+               cfs_rq = group_cfs_rq(se);
+       } while (cfs_rq);
+
+       return task_of(se);
+}
+#endif
+
 struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -11173,6 +11202,7 @@ const struct sched_class fair_sched_class
 
 #ifdef CONFIG_SMP
        .balance                = balance_fair,
+       .pick_task              = pick_task_fair,
        .select_task_rq         = select_task_rq_fair,
        .migrate_task_rq        = migrate_task_rq_fair,
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8ce6e80352cf..ce7552c6bc65 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -405,6 +405,13 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
        schedstat_inc(rq->sched_goidle);
 }
 
+#ifdef CONFIG_SMP
+static struct task_struct *pick_task_idle(struct rq *rq)
+{
+       return rq->idle;
+}
+#endif
+
 struct task_struct *pick_next_task_idle(struct rq *rq)
 {
        struct task_struct *next = rq->idle;
@@ -472,6 +479,7 @@ const struct sched_class idle_sched_class
 
 #ifdef CONFIG_SMP
        .balance                = balance_idle,
+       .pick_task              = pick_task_idle,
        .select_task_rq         = select_task_rq_idle,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e57fca05b660..a5851c775270 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1624,7 +1624,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
        return rt_task_of(rt_se);
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *pick_task_rt(struct rq *rq)
 {
        struct task_struct *p;
 
@@ -1632,7 +1632,16 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
                return NULL;
 
        p = _pick_next_task_rt(rq);
-       set_next_task_rt(rq, p, true);
+
+       return p;
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+       struct task_struct *p = pick_task_rt(rq);
+       if (p)
+               set_next_task_rt(rq, p, true);
+
        return p;
 }
 
@@ -2443,6 +2452,7 @@ const struct sched_class rt_sched_class
 
 #ifdef CONFIG_SMP
        .balance                = balance_rt,
+       .pick_task              = pick_task_rt,
        .select_task_rq         = select_task_rq_rt,
        .set_cpus_allowed       = set_cpus_allowed_common,
        .rq_online              = rq_online_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 587ebabebaff..54bfac702805 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1800,6 +1800,9 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
        int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+
+       struct task_struct * (*pick_task)(struct rq *rq);
+
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
        void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 394bc8126a1e..8f92915dd95e 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -34,15 +34,23 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool fir
        stop->se.exec_start = rq_clock_task(rq);
 }
 
-static struct task_struct *pick_next_task_stop(struct rq *rq)
+static struct task_struct *pick_task_stop(struct rq *rq)
 {
        if (!sched_stop_runnable(rq))
                return NULL;
 
-       set_next_task_stop(rq, rq->stop, true);
        return rq->stop;
 }
 
+static struct task_struct *pick_next_task_stop(struct rq *rq)
+{
+       struct task_struct *p = pick_task_stop(rq);
+       if (p)
+               set_next_task_stop(rq, p, true);
+
+       return p;
+}
+
 static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -124,6 +132,7 @@ const struct sched_class stop_sched_class
 
 #ifdef CONFIG_SMP
        .balance                = balance_stop,
+       .pick_task              = pick_task_stop,
        .select_task_rq         = select_task_rq_stop,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
-- 
2.29.0.rc1.297.gfa9743e501-goog
