This allows kernel threads created by modules to run under SCHED_BATCH. This is typically a good option when the kernel thread's work is not sensitive to scheduling latency, for example rebalancing or garbage-collection (gc) tasks.
Signed-off-by: Florian Schmaus <[email protected]> --- include/linux/sched.h | 1 + kernel/sched/syscalls.c | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 64934e0830af..80d46ed1dfa8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1874,6 +1874,7 @@ extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sc extern void sched_set_fifo(struct task_struct *p); extern void sched_set_fifo_low(struct task_struct *p); extern void sched_set_normal(struct task_struct *p, int nice); +extern void sched_set_batch(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index ff0e5ab4e37c..e0e0be4223df 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -880,6 +880,16 @@ void sched_set_normal(struct task_struct *p, int nice) } EXPORT_SYMBOL_GPL(sched_set_normal); +void sched_set_batch(struct task_struct *p, int nice) +{ + struct sched_attr attr = { + .sched_policy = SCHED_BATCH, + .sched_nice = nice, + }; + WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); +} +EXPORT_SYMBOL_GPL(sched_set_batch); + static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { -- 2.45.2
