The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=c384b35e42ee5712dda6360ffa287c0350055580
commit c384b35e42ee5712dda6360ffa287c0350055580
Author:     Konstantin Belousov <[email protected]>
AuthorDate: 2026-01-22 13:24:37 +0000
Commit:     Konstantin Belousov <[email protected]>
CommitDate: 2026-01-29 18:11:56 +0000

    x86/local_apic.c: remove direct SCHED_ULE use

    Move the code to decide on the timer accounting into the scheduler
    hook.  Since there was no inclusion of opt_sched.h, it was probably
    done unconditionally anyway.

    Reviewed by:	olce
    Tested by:	pho
    Sponsored by:	The FreeBSD Foundation
    MFC after:	1 week
    Differential revision:	https://reviews.freebsd.org/D54831
---
 sys/kern/sched_4bsd.c    | 22 ++++++++++++++++++++++
 sys/kern/sched_shim.c    |  1 +
 sys/kern/sched_ule.c     |  7 +++++++
 sys/sys/sched.h          |  3 +++
 sys/x86/x86/local_apic.c | 15 +--------------
 5 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index dae084aabcba..03e7b71d3fe6 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1833,6 +1833,27 @@ sched_4bsd_affinity(struct thread *td)
 #endif
 }
 
+static bool
+sched_4bsd_do_timer_accounting(void)
+{
+#ifdef SMP
+	/*
+	 * Don't do any accounting for the disabled HTT cores, since it
+	 * will provide misleading numbers for the userland.
+	 *
+	 * No locking is necessary here, since even if we lose the race
+	 * when hlt_cpus_mask changes it is not a big deal, really.
+	 *
+	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
+	 * and unlike other schedulers it actually schedules threads to
+	 * those CPUs.
+	 */
+	return (!CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask));
+#else
+	return (true);
+#endif
+}
+
 struct sched_instance sched_4bsd_instance = {
 #define	SLOT(name) .name = sched_4bsd_##name
 	SLOT(load),
@@ -1875,6 +1896,7 @@ struct sched_instance sched_4bsd_instance = {
 	SLOT(sizeof_thread),
 	SLOT(tdname),
 	SLOT(clear_tdname),
+	SLOT(do_timer_accounting),
 	SLOT(init),
 	SLOT(init_ap),
 	SLOT(setup),
diff --git a/sys/kern/sched_shim.c b/sys/kern/sched_shim.c
index f25b803bae82..d2f0b5749752 100644
--- a/sys/kern/sched_shim.c
+++ b/sys/kern/sched_shim.c
@@ -93,6 +93,7 @@ DEFINE_SHIM0(sizeof_proc, int, sched_sizeof_proc)
 DEFINE_SHIM0(sizeof_thread, int, sched_sizeof_thread)
 DEFINE_SHIM1(tdname, char *, sched_tdname, struct thread *, td)
 DEFINE_SHIM1(clear_tdname, void, sched_clear_tdname, struct thread *, td)
+DEFINE_SHIM0(do_timer_accounting, bool, sched_do_timer_accounting)
 DEFINE_SHIM0(init_ap, void, schedinit_ap)
 
 static char sched_name[32] = "ULE";
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a285c62a4525..22257b2c0d7a 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -3354,6 +3354,12 @@ sched_ule_schedcpu(void)
 {
 }
 
+static bool
+sched_ule_do_timer_accounting(void)
+{
+	return (true);
+}
+
 struct sched_instance sched_ule_instance = {
 #define	SLOT(name) .name = sched_ule_##name
 	SLOT(load),
@@ -3396,6 +3402,7 @@ struct sched_instance sched_ule_instance = {
 	SLOT(sizeof_thread),
 	SLOT(tdname),
 	SLOT(clear_tdname),
+	SLOT(do_timer_accounting),
 	SLOT(init),
 	SLOT(init_ap),
 	SLOT(setup),
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index c8491ede01a0..9c78452432b4 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -241,6 +241,8 @@ void schedinit(void);
  */
 void schedinit_ap(void);
 
+bool sched_do_timer_accounting(void);
+
 struct sched_instance {
 	int (*load)(void);
 	int (*rr_interval)(void);
@@ -282,6 +284,7 @@ struct sched_instance {
 	int (*sizeof_thread)(void);
 	char *(*tdname)(struct thread *td);
 	void (*clear_tdname)(struct thread *td);
+	bool (*do_timer_accounting)(void);
 	void (*init)(void);
 	void (*init_ap)(void);
 	void (*setup)(void);
diff --git a/sys/x86/x86/local_apic.c b/sys/x86/x86/local_apic.c
index c5399984c896..54026f83dc15 100644
--- a/sys/x86/x86/local_apic.c
+++ b/sys/x86/x86/local_apic.c
@@ -1443,21 +1443,8 @@ lapic_handle_timer(struct trapframe *frame)
 	kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
 	trap_check_kstack();
 
-#if defined(SMP) && !defined(SCHED_ULE)
-	/*
-	 * Don't do any accounting for the disabled HTT cores, since it
-	 * will provide misleading numbers for the userland.
-	 *
-	 * No locking is necessary here, since even if we lose the race
-	 * when hlt_cpus_mask changes it is not a big deal, really.
-	 *
-	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
-	 * and unlike other schedulers it actually schedules threads to
-	 * those CPUs.
-	 */
-	if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
+	if (!sched_do_timer_accounting())
 		return;
-#endif
 
 	/* Look up our local APIC structure for the tick counters. */
 	la = &lapics[PCPU_GET(apic_id)];
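
For readers tracing the new indirection, here is a minimal standalone userland sketch of the dispatch pattern the change introduces: each scheduler fills a do_timer_accounting slot in its instance structure, and machine-dependent code asks a single shim instead of testing SCHED_ULE at compile time.  Only sched_do_timer_accounting(), the do_timer_accounting slot and the two per-scheduler policies come from the actual change; the sketch_*/mock_* names, the plain bitmask standing in for hlt_cpus_mask, and mock_curcpu standing in for PCPU_GET(cpuid) are invented for illustration and are not kernel code.

/*
 * Userland sketch of the scheduler hook dispatch used above.
 * mock_hlt_cpus_mask and mock_curcpu imitate hlt_cpus_mask and
 * PCPU_GET(cpuid); they are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_instance_sketch {
	bool	(*do_timer_accounting)(void);
};

/* Pretend CPU 1 is a halted HTT sibling. */
static unsigned long	mock_hlt_cpus_mask = 1UL << 1;
static int		mock_curcpu;

/* 4BSD-style policy: skip accounting on halted CPUs. */
static bool
sketch_4bsd_do_timer_accounting(void)
{
	return ((mock_hlt_cpus_mask & (1UL << mock_curcpu)) == 0);
}

/* ULE-style policy: always account; ULE still uses those CPUs. */
static bool
sketch_ule_do_timer_accounting(void)
{
	return (true);
}

static struct sched_instance_sketch sketch_4bsd = {
	.do_timer_accounting = sketch_4bsd_do_timer_accounting,
};
static struct sched_instance_sketch sketch_ule = {
	.do_timer_accounting = sketch_ule_do_timer_accounting,
};

/* The active scheduler instance; the kernel shim selects this. */
static struct sched_instance_sketch *active_sched = &sketch_4bsd;

/* Shim playing the role of sched_do_timer_accounting(). */
static bool
sketch_do_timer_accounting(void)
{
	return (active_sched->do_timer_accounting());
}

/* Caller shaped like the tail of lapic_handle_timer(). */
static void
sketch_handle_timer(int cpu)
{
	mock_curcpu = cpu;
	if (!sketch_do_timer_accounting()) {
		printf("cpu%d: tick not accounted\n", cpu);
		return;
	}
	printf("cpu%d: tick accounted\n", cpu);
}

int
main(void)
{
	sketch_handle_timer(0);		/* accounted under 4BSD policy */
	sketch_handle_timer(1);		/* mocked halted sibling, skipped */
	active_sched = &sketch_ule;
	sketch_handle_timer(1);		/* ULE policy always accounts */
	return (0);
}

Running the sketch prints a skipped tick for the mocked halted CPU under the 4BSD-style policy and an accounted tick once the ULE-style instance is selected, which mirrors why the decision belongs in the scheduler rather than in local_apic.c.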
