The branch main has been updated by olce:

URL: https://cgit.FreeBSD.org/src/commit/?id=e5f8cbb86d58f25b5ff168506b78d09dca266fb6
commit e5f8cbb86d58f25b5ff168506b78d09dca266fb6
Author:     Olivier Certner <[email protected]>
AuthorDate: 2026-01-23 20:52:46 +0000
Commit:     Olivier Certner <[email protected]>
CommitDate: 2026-02-03 14:03:01 +0000

    x86: x86_msr_op(): MSR_OP_LOCAL: Disable interrupts on atomic ops

    On MSR_OP_LOCAL and non-naturally-atomic operations (MSR_OP_ANDNOT and
    MSR_OP_OR), there is no guarantee that we are not interrupted between
    reading and writing the MSR, and that interruption could actually
    perform some operation on that MSR, which would be lost.

    Prevent that problem by temporarily disabling interrupts around MSR
    manipulation.

    Reviewed by:            kib
    Discussed with:         markj
    MFC after:              2 weeks
    Sponsored by:           The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D54996
---
 sys/x86/include/x86_var.h | 2 ++
 sys/x86/x86/cpu_machdep.c | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/sys/x86/include/x86_var.h b/sys/x86/include/x86_var.h
index 701b982e6afb..215fe0562465 100644
--- a/sys/x86/include/x86_var.h
+++ b/sys/x86/include/x86_var.h
@@ -171,6 +171,8 @@ uint64_t rdtsc_ordered(void);
 
 /*
  * Where and which execution mode
+ *
+ * All modes cause execution on the target CPU(s) with interrupts disabled.
  */
 #define	MSR_OP_LOCAL		0x10000000
 #define	MSR_OP_SCHED_ALL	0x20000000
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
index 5cbcb40540bb..5f8965bd5614 100644
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -160,6 +160,7 @@ x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
 	struct thread *td;
 	struct msr_op_arg a;
 	cpuset_t set;
+	register_t flags;
 	u_int exmode;
 	int bound_cpu, cpu, i, is_bound;
 
@@ -171,7 +172,9 @@ x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
 
 	switch (exmode) {
 	case MSR_OP_LOCAL:
+		flags = intr_disable();
 		x86_msr_op_one(&a);
+		intr_restore(flags);
 		break;
 	case MSR_OP_SCHED_ALL:
 		td = curthread;
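For readers unfamiliar with the race being fixed, below is a minimal,
illustrative sketch of the lost-update window the commit describes. It is
not the actual x86_msr_op_one() body; the helper name msr_or_sketch() is
hypothetical, while rdmsr(), wrmsr(), intr_disable() and intr_restore()
are the standard FreeBSD <machine/cpufunc.h> primitives the commit relies
on.

#include <sys/param.h>
#include <machine/cpufunc.h>

/*
 * Illustrative only: an MSR "or" update is a separate read (rdmsr) and
 * write (wrmsr).  Without the intr_disable()/intr_restore() pair, an
 * interrupt arriving between the two could itself modify the same MSR,
 * and the wrmsr() below would then overwrite that change with a stale
 * value.  Disabling local interrupts around the pair closes the window.
 */
static void
msr_or_sketch(u_int msr, uint64_t bits)
{
	register_t flags;
	uint64_t v;

	flags = intr_disable();	/* block local interrupts */
	v = rdmsr(msr);		/* read current MSR value */
	wrmsr(msr, v | bits);	/* write back value with bits set */
	intr_restore(flags);	/* restore previous interrupt state */
}

The same pattern is what the committed change wraps around
x86_msr_op_one() in the MSR_OP_LOCAL case.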
