At a later point (load balancing and throttling at non-CPU levels), we
will have to iterate through parts of the task group hierarchy, visiting
all SD-RQs at the same position within the SD-hierarchy.

Keep track of the task group hierarchy within each SD-RQ to make that
use case efficient.
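
A minimal sketch of the intended traversal (the helper below is
illustrative only and not part of this patch; it assumes the tg_children
and tg_siblings fields of struct sdrq that the hunks below operate on,
and its name is made up). The RCU list walk pairs with the
list_add_tail_rcu()/list_del_rcu() calls added in this patch:

	/*
	 * Visit all SD-RQs of child task groups that sit at the same
	 * position within the SD hierarchy as the given SD-RQ.
	 * Illustrative sketch only.
	 */
	static void visit_tg_child_sdrqs(struct sdrq *sdrq)
	{
		struct sdrq *child;

		rcu_read_lock();
		list_for_each_entry_rcu(child, &sdrq->tg_children,
					tg_siblings) {
			/* load balancing / throttling work would go here */
		}
		rcu_read_unlock();
	}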

Signed-off-by: Jan H. Schönherr <[email protected]>
---
 kernel/sched/core.c    |  2 ++
 kernel/sched/cosched.c | 19 +++++++++++++++++++
 kernel/sched/sched.h   |  4 ++++
 3 files changed, 25 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9f5339d58cb..b3ff885a88d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6310,6 +6310,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
        tg->parent = parent;
        INIT_LIST_HEAD(&tg->children);
        list_add_rcu(&tg->siblings, &parent->children);
+       cosched_online_group(tg);
        spin_unlock_irqrestore(&task_group_lock, flags);
 
        online_fair_sched_group(tg);
@@ -6338,6 +6339,7 @@ void sched_offline_group(struct task_group *tg)
        spin_lock_irqsave(&task_group_lock, flags);
        list_del_rcu(&tg->list);
        list_del_rcu(&tg->siblings);
+       cosched_offline_group(tg);
        spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
diff --git a/kernel/sched/cosched.c b/kernel/sched/cosched.c
index b897319d046c..1b442e20faad 100644
--- a/kernel/sched/cosched.c
+++ b/kernel/sched/cosched.c
@@ -495,3 +495,22 @@ void cosched_init_sdrq(struct task_group *tg, struct cfs_rq *cfs_rq,
        init_sdrq(tg, &cfs_rq->sdrq, sd_parent ? &sd_parent->sdrq : NULL,
                  &tg_parent->sdrq, tg_parent->sdrq.data);
 }
+
+void cosched_online_group(struct task_group *tg)
+{
+       struct cfs_rq *cfs;
+
+       /* Add each SD-RQ to the children list of its TG parent's SD-RQ */
+       taskgroup_for_each_cfsrq(tg, cfs)
+               list_add_tail_rcu(&cfs->sdrq.tg_siblings,
+                                 &cfs->sdrq.tg_parent->tg_children);
+}
+
+void cosched_offline_group(struct task_group *tg)
+{
+       struct cfs_rq *cfs;
+
+       /* Remove each SD-RQ from the children list in its TG parent */
+       taskgroup_for_each_cfsrq(tg, cfs)
+               list_del_rcu(&cfs->sdrq.tg_siblings);
+}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 38b4500095ca..0dfefa31704e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1195,6 +1195,8 @@ void cosched_init_topology(void);
 void cosched_init_hierarchy(void);
 void cosched_init_sdrq(struct task_group *tg, struct cfs_rq *cfs,
                       struct cfs_rq *sd_parent, struct cfs_rq *tg_parent);
+void cosched_online_group(struct task_group *tg);
+void cosched_offline_group(struct task_group *tg);
 #else /* !CONFIG_COSCHEDULING */
 static inline void cosched_init_bottom(void) { }
 static inline void cosched_init_topology(void) { }
@@ -1202,6 +1204,8 @@ static inline void cosched_init_hierarchy(void) { }
 static inline void cosched_init_sdrq(struct task_group *tg, struct cfs_rq *cfs,
                                     struct cfs_rq *sd_parent,
                                     struct cfs_rq *tg_parent) { }
+static inline void cosched_online_group(struct task_group *tg) { }
+static inline void cosched_offline_group(struct task_group *tg) { }
 #endif /* !CONFIG_COSCHEDULING */
 
 #ifdef CONFIG_SCHED_SMT
-- 
2.9.3.1.gcba166c.dirty
