From: Peter Zijlstra <pet...@infradead.org>

Introduce task_struct::core_cookie as an opaque identifier for core
scheduling. When enabled, core scheduling will only allow matching
tasks to be on the core, where idle matches everything.

When task_struct::core_cookie is set (and core scheduling is enabled)
these tasks are indexed in a second RB-tree, first on cookie value
then on scheduling function, such that matching task selection always
finds the most eligible match.
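
As an illustration of how the tree is meant to be consulted (the actual
core-wide selection only arrives in a later patch; "max" and "smt_mask"
below are placeholders for that and not part of this patch), each
sibling looks up the best queued task matching the cookie of whatever
the core just picked, falling back to idle when nothing matches:

	/* sketch only -- the core-wide pick is introduced later */
	cookie = max->core_cookie;
	for_each_cpu(cpu, smt_mask) {
		struct rq *rq_i = cpu_rq(cpu);
		struct task_struct *p = sched_core_find(rq_i, cookie);

		/* sched_core_find() returns rq_i's idle task if no match */
	}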

NOTE: *shudder* at the overhead...

NOTE: *sigh*, a 3rd copy of the scheduling function; the alternative
is per class tracking of cookies and that just duplicates a lot of
stuff for no raisin (the 2nd copy lives in the rt-mutex PI code).
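
For reference, the single cross-class priority that __task_prio() below
collapses everything into (less is more eligible):

	stop     : -2
	deadline : -1
	rt       : p->prio (0..99)
	fair     : MAX_RT_PRIO + MAX_NICE (nice levels squashed)
	idle     : MAX_RT_PRIO + NICE_WIDTH

Ties within deadline are broken on the earlier deadline, ties within
fair on the smaller (min_vruntime-normalized) vruntime.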

Tested-by: Julien Desfossez <jdesfos...@digitalocean.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Vineeth Remanan Pillai <vpil...@digitalocean.com>
Signed-off-by: Julien Desfossez <jdesfos...@digitalocean.com>
---
 include/linux/sched.h |   8 ++-
 kernel/sched/core.c   | 146 ++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c   |  46 -------------
 kernel/sched/sched.h  |  55 ++++++++++++++++
 4 files changed, 208 insertions(+), 47 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 393db0690101..c3563d7cab7f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -683,10 +683,16 @@ struct task_struct {
        const struct sched_class        *sched_class;
        struct sched_entity             se;
        struct sched_rt_entity          rt;
+       struct sched_dl_entity          dl;
+
+#ifdef CONFIG_SCHED_CORE
+       struct rb_node                  core_node;
+       unsigned long                   core_cookie;
+#endif
+
 #ifdef CONFIG_CGROUP_SCHED
        struct task_group               *sched_task_group;
 #endif
-       struct sched_dl_entity          dl;
 
 #ifdef CONFIG_UCLAMP_TASK
        /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cecbf91cb477..a032f481c6e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -78,6 +78,141 @@ __read_mostly int scheduler_running;
 
 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
 
+/* kernel prio, less is more */
+static inline int __task_prio(struct task_struct *p)
+{
+       if (p->sched_class == &stop_sched_class) /* trumps deadline */
+               return -2;
+
+       if (rt_prio(p->prio)) /* includes deadline */
+               return p->prio; /* [-1, 99] */
+
+       if (p->sched_class == &idle_sched_class)
+               return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
+
+       return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
+}
+
+/*
+ * l(a,b)
+ * le(a,b) := !l(b,a)
+ * g(a,b)  := l(b,a)
+ * ge(a,b) := !l(a,b)
+ */
+
+/* real prio, less is less */
+static inline bool prio_less(struct task_struct *a, struct task_struct *b)
+{
+
+       int pa = __task_prio(a), pb = __task_prio(b);
+
+       if (-pa < -pb)
+               return true;
+
+       if (-pb < -pa)
+               return false;
+
+       if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
+               return !dl_time_before(a->dl.deadline, b->dl.deadline);
+
+       if (pa == MAX_RT_PRIO + MAX_NICE)  { /* fair */
+               u64 vruntime = b->se.vruntime;
+
+               /*
+                * Normalize the vruntime if tasks are on different CPUs.
+                */
+               if (task_cpu(a) != task_cpu(b)) {
+                       vruntime -= task_cfs_rq(b)->min_vruntime;
+                       vruntime += task_cfs_rq(a)->min_vruntime;
+               }
+
+               return !((s64)(a->se.vruntime - vruntime) <= 0);
+       }
+
+       return false;
+}
+
+static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
+{
+       if (a->core_cookie < b->core_cookie)
+               return true;
+
+       if (a->core_cookie > b->core_cookie)
+               return false;
+
+       /* flip prio, so high prio is leftmost */
+       if (prio_less(b, a))
+               return true;
+
+       return false;
+}
+
+static void sched_core_enqueue(struct rq *rq, struct task_struct *p)
+{
+       struct rb_node *parent, **node;
+       struct task_struct *node_task;
+
+       rq->core->core_task_seq++;
+
+       if (!p->core_cookie)
+               return;
+
+       node = &rq->core_tree.rb_node;
+       parent = *node;
+
+       while (*node) {
+               node_task = container_of(*node, struct task_struct, core_node);
+               parent = *node;
+
+               if (__sched_core_less(p, node_task))
+                       node = &parent->rb_left;
+               else
+                       node = &parent->rb_right;
+       }
+
+       rb_link_node(&p->core_node, parent, node);
+       rb_insert_color(&p->core_node, &rq->core_tree);
+}
+
+static void sched_core_dequeue(struct rq *rq, struct task_struct *p)
+{
+       rq->core->core_task_seq++;
+
+       if (!p->core_cookie)
+               return;
+
+       rb_erase(&p->core_node, &rq->core_tree);
+}
+
+/*
+ * Find left-most (aka, highest priority) task matching @cookie.
+ */
+static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
+{
+       struct rb_node *node = rq->core_tree.rb_node;
+       struct task_struct *node_task, *match;
+
+       /*
+        * The idle task always matches any cookie!
+        */
+       match = idle_sched_class.pick_task(rq);
+
+       while (node) {
+               node_task = container_of(node, struct task_struct, core_node);
+
+               if (cookie < node_task->core_cookie) {
+                       node = node->rb_left;
+               } else if (cookie > node_task->core_cookie) {
+                       node = node->rb_right;
+               } else {
+                       match = node_task;
+                       node = node->rb_left;
+               }
+       }
+
+       return match;
+}
+
 /*
  * The static-key + stop-machine variable are needed such that:
  *
@@ -136,6 +271,11 @@ void sched_core_put(void)
        mutex_unlock(&sched_core_mutex);
 }
 
+#else /* !CONFIG_SCHED_CORE */
+
+static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
+static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
+
 #endif /* CONFIG_SCHED_CORE */
 
 /*
@@ -1624,6 +1764,9 @@ static inline void init_uclamp(void) { }
 
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       if (sched_core_enabled(rq))
+               sched_core_enqueue(rq, p);
+
        if (!(flags & ENQUEUE_NOCLOCK))
                update_rq_clock(rq);
 
@@ -1638,6 +1781,9 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       if (sched_core_enabled(rq))
+               sched_core_dequeue(rq, p);
+
        if (!(flags & DEQUEUE_NOCLOCK))
                update_rq_clock(rq);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b4bc82f46fe7..58f670e5704d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -258,33 +258,11 @@ const struct sched_class fair_sched_class;
  */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-       SCHED_WARN_ON(!entity_is_task(se));
-       return container_of(se, struct task_struct, se);
-}
 
 /* Walk up scheduling entities hierarchy */
 #define for_each_sched_entity(se) \
                for (; se; se = se->parent)
 
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-       return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-       return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-       return grp->my_q;
-}
-
 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
 {
        if (!path)
@@ -445,33 +423,9 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #else  /* !CONFIG_FAIR_GROUP_SCHED */
 
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
-       return container_of(se, struct task_struct, se);
-}
-
 #define for_each_sched_entity(se) \
                for (; se; se = NULL)
 
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-       return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-       struct task_struct *p = task_of(se);
-       struct rq *rq = task_rq(p);
-
-       return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-       return NULL;
-}
-
 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
 {
        if (path)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 85c8472b5d00..4964453591c3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1053,6 +1053,10 @@ struct rq {
        /* per rq */
        struct rq               *core;
        unsigned int            core_enabled;
+       struct rb_root          core_tree;
+
+       /* shared state */
+       unsigned int            core_task_seq;
 #endif
 };
 
@@ -1132,6 +1136,57 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 #define raw_rq()               raw_cpu_ptr(&runqueues)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+       SCHED_WARN_ON(!entity_is_task(se));
+       return container_of(se, struct task_struct, se);
+}
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+       return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+       return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+       return grp->my_q;
+}
+
+#else
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+       return container_of(se, struct task_struct, se);
+}
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+       return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+       struct task_struct *p = task_of(se);
+       struct rq *rq = task_rq(p);
+
+       return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+       return NULL;
+}
+#endif
+
 extern void update_rq_clock(struct rq *rq);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
-- 
2.29.0.rc1.297.gfa9743e501-goog
