Re: [PATCH v8 -tip 24/26] sched: Move core-scheduler interfacing code to a new file
On Mon, Oct 26, 2020 at 09:05:52AM +0800, Li, Aubrey wrote:
[..]
> > +int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2)
> > +{
> > +	struct sched_core_task_write_tag wr = {}; /* for stop machine. */
> > +	bool sched_core_put_after_stopper = false;
> > +	unsigned long cookie;
> > +	int ret = -ENOMEM;
> > +
> > +	mutex_lock(&sched_core_tasks_mutex);
> > +
> > +	/*
> > +	 * NOTE: sched_core_get() is done by sched_core_alloc_task_cookie() or
> > +	 * sched_core_put_task_cookie(). However, sched_core_put() is done
> > +	 * by this function *after* the stopper removes the tasks from the
> > +	 * core queue, and not before. This is just to play it safe.
> > +	 */
> > +	if (t2 == NULL) {
> > +		if (t1->core_task_cookie) {
> > +			sched_core_put_task_cookie(t1->core_task_cookie);
> > +			sched_core_put_after_stopper = true;
> > +			wr.tasks[0] = t1; /* Keep wr.cookies[0] reset for t1. */
> > +		}
> > +	} else if (t1 == t2) {
> > +		/* Assign a unique per-task cookie solely for t1. */
> > +
> > +		cookie = sched_core_alloc_task_cookie();
> > +		if (!cookie)
> > +			goto out_unlock;
> > +
> > +		if (t1->core_task_cookie) {
> > +			sched_core_put_task_cookie(t1->core_task_cookie);
> > +			sched_core_put_after_stopper = true;
> > +		}
> > +		wr.tasks[0] = t1;
> > +		wr.cookies[0] = cookie;
> > +	} else {
> > +		/*
> > +		 * 		t1		t2 (t1 joining t2)
> > +		 * CASE 1:
> > +		 * before	0		0
> > +		 * after	new cookie	new cookie
> > +		 *
> > +		 * CASE 2:
> > +		 * before	X (non-zero)	0
> > +		 * after	0		0
> > +		 *
> > +		 * CASE 3:
> > +		 * before	0		X (non-zero)
> > +		 * after	X		X
> > +		 *
> > +		 * CASE 4:
> > +		 * before	Y (non-zero)	X (non-zero)
> > +		 * after	X		X
> > +		 */
> > +		if (!t1->core_task_cookie && !t2->core_task_cookie) {
> > +			/* CASE 1. */
> > +			cookie = sched_core_alloc_task_cookie();
> > +			if (!cookie)
> > +				goto out_unlock;
> > +
> > +			/* Add another reference for the other task. */
> > +			if (!sched_core_get_task_cookie(cookie)) {
> > +				return -EINVAL;
>
> ret = -EINVAL; mutex is not released otherwise...

Good find and will fix.
Minor suggestion: Could you truncate your emails when replying?

thanks,

- Joel
Re: [PATCH v8 -tip 24/26] sched: Move core-scheduler interfacing code to a new file
On 2020/10/20 9:43, Joel Fernandes (Google) wrote: > core.c is already huge. The core-tagging interface code is largely > independent of it. Move it to its own file to make both files easier to > maintain. > > Tested-by: Julien Desfossez > Signed-off-by: Joel Fernandes (Google) > --- > kernel/sched/Makefile | 1 + > kernel/sched/core.c| 481 + > kernel/sched/coretag.c | 468 +++ > kernel/sched/sched.h | 56 - > 4 files changed, 523 insertions(+), 483 deletions(-) > create mode 100644 kernel/sched/coretag.c > > diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile > index 5fc9c9b70862..c526c20adf9d 100644 > --- a/kernel/sched/Makefile > +++ b/kernel/sched/Makefile > @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o > obj-$(CONFIG_MEMBARRIER) += membarrier.o > obj-$(CONFIG_CPU_ISOLATION) += isolation.o > obj-$(CONFIG_PSI) += psi.o > +obj-$(CONFIG_SCHED_CORE) += coretag.o > diff --git a/kernel/sched/core.c b/kernel/sched/core.c > index b3afbba5abe1..211e0784675f 100644 > --- a/kernel/sched/core.c > +++ b/kernel/sched/core.c > @@ -162,11 +162,6 @@ static bool sched_core_empty(struct rq *rq) > return RB_EMPTY_ROOT(>core_tree); > } > > -static bool sched_core_enqueued(struct task_struct *task) > -{ > - return !RB_EMPTY_NODE(>core_node); > -} > - > static struct task_struct *sched_core_first(struct rq *rq) > { > struct task_struct *task; > @@ -188,7 +183,7 @@ static void sched_core_flush(int cpu) > rq->core->core_task_seq++; > } > > -static void sched_core_enqueue(struct rq *rq, struct task_struct *p) > +void sched_core_enqueue(struct rq *rq, struct task_struct *p) > { > struct rb_node *parent, **node; > struct task_struct *node_task; > @@ -215,7 +210,7 @@ static void sched_core_enqueue(struct rq *rq, struct > task_struct *p) > rb_insert_color(>core_node, >core_tree); > } > > -static void sched_core_dequeue(struct rq *rq, struct task_struct *p) > +void sched_core_dequeue(struct rq *rq, struct task_struct *p) > { > rq->core->core_task_seq++; > 
> @@ -310,7 +305,6 @@ static int __sched_core_stopper(void *data) > } > > static DEFINE_MUTEX(sched_core_mutex); > -static DEFINE_MUTEX(sched_core_tasks_mutex); > static int sched_core_count; > > static void __sched_core_enable(void) > @@ -346,16 +340,6 @@ void sched_core_put(void) > __sched_core_disable(); > mutex_unlock(_core_mutex); > } > - > -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct > *t2); > - > -#else /* !CONFIG_SCHED_CORE */ > - > -static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) > { } > -static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) > { } > -static bool sched_core_enqueued(struct task_struct *task) { return false; } > -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct > *t2) { } > - > #endif /* CONFIG_SCHED_CORE */ > > /* > @@ -8505,9 +8489,6 @@ void sched_offline_group(struct task_group *tg) > spin_unlock_irqrestore(_group_lock, flags); > } > > -#define SCHED_CORE_GROUP_COOKIE_MASK ((1UL << (sizeof(unsigned long) * 4)) - > 1) > -static unsigned long cpu_core_get_group_cookie(struct task_group *tg); > - > static void sched_change_group(struct task_struct *tsk, int type) > { > struct task_group *tg; > @@ -8583,11 +8564,6 @@ void sched_move_task(struct task_struct *tsk) > task_rq_unlock(rq, tsk, ); > } > > -static inline struct task_group *css_tg(struct cgroup_subsys_state *css) > -{ > - return css ? container_of(css, struct task_group, css) : NULL; > -} > - > static struct cgroup_subsys_state * > cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) > { > @@ -9200,459 +9176,6 @@ static u64 cpu_rt_period_read_uint(struct > cgroup_subsys_state *css, > } > #endif /* CONFIG_RT_GROUP_SCHED */ > > -#ifdef CONFIG_SCHED_CORE > -/* > - * A simple wrapper around refcount. An allocated sched_core_cookie's > - * address is used to compute the cookie of the task. 
> - */ > -struct sched_core_cookie { > - refcount_t refcnt; > -}; > - > -/* > - * sched_core_tag_requeue - Common helper for all interfaces to set a cookie. > - * @p: The task to assign a cookie to. > - * @cookie: The cookie to assign. > - * @group: is it a group interface or a per-task interface. > - * > - * This function is typically called from a stop-machine handler. > - */ > -void sched_core_tag_requeue(struct task_struct *p, unsigned long cookie, > bool group) > -{ > - if (!p) > - return; > - > - if (group) > - p->core_group_cookie = cookie; > - else > - p->core_task_cookie = cookie; > - > - /* Use up half of the cookie's bits for task cookie and remaining for > group cookie. */ > - p->core_cookie = (p->core_task_cookie << > -
[PATCH v8 -tip 24/26] sched: Move core-scheduler interfacing code to a new file
core.c is already huge. The core-tagging interface code is largely independent of it. Move it to its own file to make both files easier to maintain. Tested-by: Julien Desfossez Signed-off-by: Joel Fernandes (Google) --- kernel/sched/Makefile | 1 + kernel/sched/core.c| 481 + kernel/sched/coretag.c | 468 +++ kernel/sched/sched.h | 56 - 4 files changed, 523 insertions(+), 483 deletions(-) create mode 100644 kernel/sched/coretag.c diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 5fc9c9b70862..c526c20adf9d 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_MEMBARRIER) += membarrier.o obj-$(CONFIG_CPU_ISOLATION) += isolation.o obj-$(CONFIG_PSI) += psi.o +obj-$(CONFIG_SCHED_CORE) += coretag.o diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b3afbba5abe1..211e0784675f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -162,11 +162,6 @@ static bool sched_core_empty(struct rq *rq) return RB_EMPTY_ROOT(>core_tree); } -static bool sched_core_enqueued(struct task_struct *task) -{ - return !RB_EMPTY_NODE(>core_node); -} - static struct task_struct *sched_core_first(struct rq *rq) { struct task_struct *task; @@ -188,7 +183,7 @@ static void sched_core_flush(int cpu) rq->core->core_task_seq++; } -static void sched_core_enqueue(struct rq *rq, struct task_struct *p) +void sched_core_enqueue(struct rq *rq, struct task_struct *p) { struct rb_node *parent, **node; struct task_struct *node_task; @@ -215,7 +210,7 @@ static void sched_core_enqueue(struct rq *rq, struct task_struct *p) rb_insert_color(>core_node, >core_tree); } -static void sched_core_dequeue(struct rq *rq, struct task_struct *p) +void sched_core_dequeue(struct rq *rq, struct task_struct *p) { rq->core->core_task_seq++; @@ -310,7 +305,6 @@ static int __sched_core_stopper(void *data) } static DEFINE_MUTEX(sched_core_mutex); -static DEFINE_MUTEX(sched_core_tasks_mutex); static int 
sched_core_count; static void __sched_core_enable(void) @@ -346,16 +340,6 @@ void sched_core_put(void) __sched_core_disable(); mutex_unlock(_core_mutex); } - -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2); - -#else /* !CONFIG_SCHED_CORE */ - -static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } -static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { } -static bool sched_core_enqueued(struct task_struct *task) { return false; } -static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2) { } - #endif /* CONFIG_SCHED_CORE */ /* @@ -8505,9 +8489,6 @@ void sched_offline_group(struct task_group *tg) spin_unlock_irqrestore(_group_lock, flags); } -#define SCHED_CORE_GROUP_COOKIE_MASK ((1UL << (sizeof(unsigned long) * 4)) - 1) -static unsigned long cpu_core_get_group_cookie(struct task_group *tg); - static void sched_change_group(struct task_struct *tsk, int type) { struct task_group *tg; @@ -8583,11 +8564,6 @@ void sched_move_task(struct task_struct *tsk) task_rq_unlock(rq, tsk, ); } -static inline struct task_group *css_tg(struct cgroup_subsys_state *css) -{ - return css ? container_of(css, struct task_group, css) : NULL; -} - static struct cgroup_subsys_state * cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -9200,459 +9176,6 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, } #endif /* CONFIG_RT_GROUP_SCHED */ -#ifdef CONFIG_SCHED_CORE -/* - * A simple wrapper around refcount. An allocated sched_core_cookie's - * address is used to compute the cookie of the task. - */ -struct sched_core_cookie { - refcount_t refcnt; -}; - -/* - * sched_core_tag_requeue - Common helper for all interfaces to set a cookie. - * @p: The task to assign a cookie to. - * @cookie: The cookie to assign. - * @group: is it a group interface or a per-task interface. - * - * This function is typically called from a stop-machine handler. 
- */ -void sched_core_tag_requeue(struct task_struct *p, unsigned long cookie, bool group) -{ - if (!p) - return; - - if (group) - p->core_group_cookie = cookie; - else - p->core_task_cookie = cookie; - - /* Use up half of the cookie's bits for task cookie and remaining for group cookie. */ - p->core_cookie = (p->core_task_cookie << - (sizeof(unsigned long) * 4)) + p->core_group_cookie; - - if (sched_core_enqueued(p)) { - sched_core_dequeue(task_rq(p), p); - if (!p->core_cookie) - return; - } - - if (sched_core_enabled(task_rq(p)) && - p->core_cookie