The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=d14e018024bb094705dd218ceeeea124706f6ec0
commit d14e018024bb094705dd218ceeeea124706f6ec0
Author:     Konstantin Belousov <[email protected]>
AuthorDate: 2026-01-22 04:36:15 +0000
Commit:     Konstantin Belousov <[email protected]>
CommitDate: 2026-01-29 18:11:55 +0000

    sys: Make sched_ule a sched instance

    Reviewed by:    olce
    Tested by:      pho
    Sponsored by:   The FreeBSD Foundation
    MFC after:      1 week
    Differential revision: https://reviews.freebsd.org/D54831
---
 sys/kern/sched_ule.c | 243 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 144 insertions(+), 99 deletions(-)

diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 59b59a30acab..116a1d6105d0 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -409,13 +409,6 @@ static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
     struct cpu_group *cg, int indent);
 #endif
 
-static void sched_setup(void *dummy);
-SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
-
-static void sched_initticks(void *dummy);
-SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
-    NULL);
-
 SDT_PROVIDER_DEFINE(sched);
 
 SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
@@ -1640,7 +1633,7 @@ sched_setup_smp(void)
  * information.
  */
 static void
-sched_setup(void *dummy)
+sched_ule_setup(void)
 {
 	struct tdq *tdq;
 
@@ -1665,7 +1658,7 @@ sched_setup(void *dummy)
  */
 /* ARGSUSED */
 static void
-sched_initticks(void *dummy)
+sched_ule_initticks(void)
 {
 	int incr;
 
@@ -1889,8 +1882,8 @@ sched_interact_fork(struct thread *td)
 /*
  * Called from proc0_init() to setup the scheduler fields.
  */
-void
-schedinit(void)
+static void
+sched_ule_init(void)
 {
 	struct td_sched *ts0;
 
@@ -1914,8 +1907,8 @@ schedinit(void)
  * TDQ_SELF() relies on the below sched pcpu setting; it may be used only
  * after schedinit_ap().
  */
-void
-schedinit_ap(void)
+static void
+sched_ule_init_ap(void)
 {
 
 #ifdef SMP
@@ -1929,8 +1922,8 @@ schedinit_ap(void)
  * priority they will switch when their slices run out, which will be
  * at most sched_slice stathz ticks.
  */
-int
-sched_rr_interval(void)
+static int
+sched_ule_rr_interval(void)
 {
 
 	/* Convert sched_slice from stathz to hz. */
@@ -2049,8 +2042,8 @@ sched_thread_priority(struct thread *td, u_char prio)
  * Update a thread's priority when it is lent another thread's
  * priority.
  */
-void
-sched_lend_prio(struct thread *td, u_char prio)
+static void
+sched_ule_lend_prio(struct thread *td, u_char prio)
 {
 
 	td->td_flags |= TDF_BORROWING;
@@ -2065,8 +2058,8 @@ sched_lend_prio(struct thread *td, u_char prio)
  * important than prio, the thread will keep a priority boost
  * of prio.
  */
-void
-sched_unlend_prio(struct thread *td, u_char prio)
+static void
+sched_ule_unlend_prio(struct thread *td, u_char prio)
 {
 	u_char base_pri;
 
@@ -2085,8 +2078,8 @@ sched_unlend_prio(struct thread *td, u_char prio)
 /*
  * Standard entry for setting the priority to an absolute value.
  */
-void
-sched_prio(struct thread *td, u_char prio)
+static void
+sched_ule_prio(struct thread *td, u_char prio)
 {
 	u_char oldprio;
 
@@ -2115,8 +2108,8 @@ sched_prio(struct thread *td, u_char prio)
 /*
  * Set the base interrupt thread priority.
  */
-void
-sched_ithread_prio(struct thread *td, u_char prio)
+static void
+sched_ule_ithread_prio(struct thread *td, u_char prio)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	MPASS(td->td_pri_class == PRI_ITHD);
@@ -2127,8 +2120,8 @@ sched_ithread_prio(struct thread *td, u_char prio)
 /*
  * Set the base user priority, does not effect current running priority.
  */
-void
-sched_user_prio(struct thread *td, u_char prio)
+static void
+sched_ule_user_prio(struct thread *td, u_char prio)
 {
 
 	td->td_base_user_pri = prio;
@@ -2137,8 +2130,8 @@ sched_user_prio(struct thread *td, u_char prio)
 		td->td_user_pri = prio;
 }
 
-void
-sched_lend_user_prio(struct thread *td, u_char prio)
+static void
+sched_ule_lend_user_prio(struct thread *td, u_char prio)
 {
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -2153,8 +2146,8 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 /*
  * Like the above but first check if there is anything to do.
  */
-void
-sched_lend_user_prio_cond(struct thread *td, u_char prio)
+static void
+sched_ule_lend_user_prio_cond(struct thread *td, u_char prio)
 {
 
 	if (td->td_lend_user_pri == prio)
@@ -2325,8 +2318,8 @@ thread_unblock_switch(struct thread *td, struct mtx *mtx)
  * migrating a thread from one queue to another as running threads may
  * be assigned elsewhere via binding.
  */
-void
-sched_switch(struct thread *td, int flags)
+static void
+sched_ule_sswitch(struct thread *td, int flags)
 {
 	struct thread *newtd;
 	struct tdq *tdq;
@@ -2464,8 +2457,8 @@ sched_switch(struct thread *td, int flags)
 /*
  * Adjust thread priorities as a result of a nice request.
  */
-void
-sched_nice(struct proc *p, int nice)
+static void
+sched_ule_nice(struct proc *p, int nice)
 {
 	struct thread *td;
 
@@ -2483,8 +2476,8 @@ sched_nice(struct proc *p, int nice)
 /*
  * Record the sleep time for the interactivity scorer.
  */
-void
-sched_sleep(struct thread *td, int prio)
+static void
+sched_ule_sleep(struct thread *td, int prio)
 {
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -2504,8 +2497,8 @@ sched_sleep(struct thread *td, int prio)
  *
  * Requires the thread lock on entry, drops on exit.
  */
-void
-sched_wakeup(struct thread *td, int srqflags)
+static void
+sched_ule_wakeup(struct thread *td, int srqflags)
 {
 	struct td_sched *ts;
 	int slptick;
@@ -2544,8 +2537,8 @@ sched_wakeup(struct thread *td, int srqflags)
  * Penalize the parent for creating a new child and initialize the child's
  * priority.
  */
-void
-sched_fork(struct thread *td, struct thread *child)
+static void
+sched_ule_fork(struct thread *td, struct thread *child)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	sched_pctcpu_update(td_get_sched(td), 1);
@@ -2563,8 +2556,8 @@ sched_fork(struct thread *td, struct thread *child)
 /*
  * Fork a new thread, may be within the same process.
  */
-void
-sched_fork_thread(struct thread *td, struct thread *child)
+static void
+sched_ule_fork_thread(struct thread *td, struct thread *child)
 {
 	struct td_sched *ts;
 	struct td_sched *ts2;
@@ -2609,8 +2602,8 @@ sched_fork_thread(struct thread *td, struct thread *child)
 /*
  * Adjust the priority class of a thread.
  */
-void
-sched_class(struct thread *td, int class)
+static void
+sched_ule_class(struct thread *td, int class)
 {
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -2622,8 +2615,8 @@ sched_class(struct thread *td, int class)
 /*
  * Return some of the child's priority and interactivity to the parent.
  */
-void
-sched_exit(struct proc *p, struct thread *child)
+static void
+sched_ule_exit(struct proc *p, struct thread *child)
 {
 	struct thread *td;
 
@@ -2640,8 +2633,8 @@ sched_exit(struct proc *p, struct thread *child)
  * jobs such as make. This has little effect on the make process itself but
  * causes new processes spawned by it to receive worse scores immediately.
  */
-void
-sched_exit_thread(struct thread *td, struct thread *child)
+static void
+sched_ule_exit_thread(struct thread *td, struct thread *child)
 {
 
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
@@ -2658,8 +2651,8 @@ sched_exit_thread(struct thread *td, struct thread *child)
 	thread_unlock(td);
 }
 
-void
-sched_preempt(struct thread *td)
+static void
+sched_ule_preempt(struct thread *td)
 {
 	struct tdq *tdq;
 	int flags;
@@ -2689,8 +2682,8 @@ sched_preempt(struct thread *td)
  * Fix priorities on return to user-space. Priorities may be elevated due
  * to static priorities in msleep() or similar.
  */
-void
-sched_userret_slowpath(struct thread *td)
+static void
+sched_ule_userret_slowpath(struct thread *td)
 {
 
 	thread_lock(td);
@@ -2720,8 +2713,8 @@ td_slice(struct thread *td, struct tdq *tdq)
  * Handle a stathz tick. This is really only relevant for timeshare
  * and interrupt threads.
  */
-void
-sched_clock(struct thread *td, int cnt)
+static void
+sched_ule_clock(struct thread *td, int cnt)
 {
 	struct tdq *tdq;
 	struct td_sched *ts;
@@ -2806,8 +2799,8 @@ sched_clock(struct thread *td, int cnt)
 	}
 }
 
-u_int
-sched_estcpu(struct thread *td __unused)
+static u_int
+sched_ule_estcpu(struct thread *td __unused)
 {
 
 	return (0);
@@ -2817,8 +2810,8 @@ sched_estcpu(struct thread *td __unused)
  * Return whether the current CPU has runnable tasks. Used for in-kernel
  * cooperative idle threads.
  */
-bool
-sched_runnable(void)
+static bool
+sched_ule_runnable(void)
 {
 	struct tdq *tdq;
 
@@ -2830,8 +2823,8 @@ sched_runnable(void)
  * Choose the highest priority thread to run. The thread is removed from
  * the run-queue while running however the load remains.
  */
-struct thread *
-sched_choose(void)
+static struct thread *
+sched_ule_choose(void)
 {
 	struct thread *td;
 	struct tdq *tdq;
@@ -2907,8 +2900,8 @@ tdq_add(struct tdq *tdq, struct thread *td, int flags)
  *
  * Requires the thread lock on entry, drops on exit.
  */
-void
-sched_add(struct thread *td, int flags)
+static void
+sched_ule_add(struct thread *td, int flags)
 {
 	struct tdq *tdq;
 #ifdef SMP
@@ -2967,8 +2960,8 @@ sched_add(struct thread *td, int flags)
  * when we're stealing a thread from a remote queue. Otherwise all threads
  * exit by calling sched_exit_thread() and sched_throw() themselves.
  */
-void
-sched_rem(struct thread *td)
+static void
+sched_ule_rem(struct thread *td)
 {
 	struct tdq *tdq;
 
@@ -2990,8 +2983,8 @@ sched_rem(struct thread *td)
 /*
  * Fetch cpu utilization information. Updates on demand.
  */
-fixpt_t
-sched_pctcpu(struct thread *td)
+static fixpt_t
+sched_ule_pctcpu(struct thread *td)
 {
 	struct td_sched *ts;
 	u_int len;
@@ -3012,8 +3005,8 @@ sched_pctcpu(struct thread *td)
  * Enforce affinity settings for a thread. Called after adjustments to
  * cpumask.
  */
-void
-sched_affinity(struct thread *td)
+static void
+sched_ule_affinity(struct thread *td)
 {
 #ifdef SMP
 	struct td_sched *ts;
@@ -3043,8 +3036,8 @@ sched_affinity(struct thread *td)
 /*
  * Bind a thread to a target cpu.
  */
-void
-sched_bind(struct thread *td, int cpu)
+static void
+sched_ule_bind(struct thread *td, int cpu)
 {
 	struct td_sched *ts;
 
@@ -3067,8 +3060,8 @@ sched_bind(struct thread *td, int cpu)
 /*
  * Release a bound thread.
  */
-void
-sched_unbind(struct thread *td)
+static void
+sched_ule_unbind(struct thread *td)
 {
 	struct td_sched *ts;
 
@@ -3081,8 +3074,8 @@ sched_unbind(struct thread *td)
 	sched_unpin();
 }
 
-int
-sched_is_bound(struct thread *td)
+static int
+sched_ule_is_bound(struct thread *td)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	return (td_get_sched(td)->ts_flags & TSF_BOUND);
@@ -3091,8 +3084,8 @@ sched_is_bound(struct thread *td)
 /*
  * Basic yield call.
  */
-void
-sched_relinquish(struct thread *td)
+static void
+sched_ule_relinquish(struct thread *td)
 {
 	thread_lock(td);
 	mi_switch(SW_VOL | SWT_RELINQUISH);
@@ -3101,8 +3094,8 @@ sched_relinquish(struct thread *td)
 /*
  * Return the total system load.
  */
-int
-sched_load(void)
+static int
+sched_ule_load(void)
 {
 #ifdef SMP
 	int total;
@@ -3117,14 +3110,14 @@ sched_load(void)
 #endif
 }
 
-int
-sched_sizeof_proc(void)
+static int
+sched_ule_sizeof_proc(void)
 {
 	return (sizeof(struct proc));
 }
 
-int
-sched_sizeof_thread(void)
+static int
+sched_ule_sizeof_thread(void)
 {
 	return (sizeof(struct thread) + sizeof(struct td_sched));
 }
@@ -3139,8 +3132,8 @@ sched_sizeof_thread(void)
 /*
  * The actual idle process.
  */
-void
-sched_idletd(void *dummy)
+static void
+sched_ule_idletd(void *dummy)
 {
 	struct thread *td;
 	struct tdq *tdq;
@@ -3242,8 +3235,8 @@ sched_throw_grab(struct tdq *tdq)
 /*
  * A CPU is entering for the first time.
  */
-void
-sched_ap_entry(void)
+static void
+sched_ule_ap_entry(void)
 {
 	struct thread *newtd;
 	struct tdq *tdq;
@@ -3272,8 +3265,8 @@ sched_ap_entry(void)
 /*
  * A thread is exiting.
  */
-void
-sched_throw(struct thread *td)
+static void
+sched_ule_throw(struct thread *td)
 {
 	struct thread *newtd;
 	struct tdq *tdq;
@@ -3303,8 +3296,8 @@ sched_throw(struct thread *td)
  * This is called from fork_exit(). Just acquire the correct locks and
  * let fork do the rest of the work.
  */
-void
-sched_fork_exit(struct thread *td)
+static void
+sched_ule_fork_exit(struct thread *td)
 {
 	struct tdq *tdq;
 	int cpuid;
@@ -3329,8 +3322,8 @@ sched_fork_exit(struct thread *td)
 /*
  * Create on first use to catch odd startup conditions.
  */
-char *
-sched_tdname(struct thread *td)
+static char *
+sched_ule_tdname(struct thread *td)
 {
 #ifdef KTR
 	struct td_sched *ts;
@@ -3345,16 +3338,72 @@ sched_tdname(struct thread *td)
 #endif
 }
 
-#ifdef KTR
-void
-sched_clear_tdname(struct thread *td)
+static void
+sched_ule_clear_tdname(struct thread *td)
 {
+#ifdef KTR
 	struct td_sched *ts;
 
 	ts = td_get_sched(td);
 	ts->ts_name[0] = '\0';
-}
 #endif
+}
+
+static void
+sched_ule_schedcpu(void)
+{
+}
+
+struct sched_instance sched_ule_instance = {
+#define	SLOT(name)	.name = sched_ule_##name
+	SLOT(load),
+	SLOT(rr_interval),
+	SLOT(runnable),
+	SLOT(exit),
+	SLOT(fork),
+	SLOT(fork_exit),
+	SLOT(class),
+	SLOT(nice),
+	SLOT(ap_entry),
+	SLOT(exit_thread),
+	SLOT(estcpu),
+	SLOT(fork_thread),
+	SLOT(ithread_prio),
+	SLOT(lend_prio),
+	SLOT(lend_user_prio),
+	SLOT(lend_user_prio_cond),
+	SLOT(pctcpu),
+	SLOT(prio),
+	SLOT(sleep),
+	SLOT(sswitch),
+	SLOT(throw),
+	SLOT(unlend_prio),
+	SLOT(user_prio),
+	SLOT(userret_slowpath),
+	SLOT(add),
+	SLOT(choose),
+	SLOT(clock),
+	SLOT(idletd),
+	SLOT(preempt),
+	SLOT(relinquish),
+	SLOT(rem),
+	SLOT(wakeup),
+	SLOT(bind),
+	SLOT(unbind),
+	SLOT(is_bound),
+	SLOT(affinity),
+	SLOT(sizeof_proc),
+	SLOT(sizeof_thread),
+	SLOT(tdname),
+	SLOT(clear_tdname),
+	SLOT(init),
+	SLOT(init_ap),
+	SLOT(setup),
+	SLOT(initticks),
+	SLOT(schedcpu),
+#undef SLOT
+};
+DECLARE_SCHEDULER(ule_sched_selector, "ULE", &sched_ule_instance);
 
 #ifdef SMP
@@ -3457,10 +3506,6 @@ sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
 	return (0);
 }
 
-SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
-    "Scheduler");
-SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
-    "Scheduler name");
 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_quantum, "I",
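
The net effect of the change is that the former global sched_*() entry points become
static sched_ule_*() functions that are published through a single table of function
pointers and registered with DECLARE_SCHEDULER(). Below is a minimal stand-alone sketch
of that ops-table pattern only; the names used here (demo_sched_ops, demo_load,
demo_rr_interval, active_sched) are hypothetical stand-ins, not the kernel's actual
sched_instance layout or registration macro.

/*
 * Hypothetical, self-contained illustration of an ops-table "instance":
 * static implementations, a SLOT() macro that keeps the designated
 * initializers in lock-step with the function names, and dispatch
 * through a pointer to the selected table.
 */
#include <stdio.h>

struct demo_sched_ops {
	int	(*load)(void);		/* total system load */
	int	(*rr_interval)(void);	/* round-robin slice, in ms */
};

static int
demo_load(void)
{
	return (0);
}

static int
demo_rr_interval(void)
{
	return (10);
}

static const struct demo_sched_ops demo_instance = {
#define	SLOT(name)	.name = demo_##name
	SLOT(load),
	SLOT(rr_interval),
#undef	SLOT
};

/* Stand-in for scheduler selection: callers only go through the table. */
static const struct demo_sched_ops *active_sched = &demo_instance;

int
main(void)
{
	printf("load=%d rr=%d\n", active_sched->load(),
	    active_sched->rr_interval());
	return (0);
}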
