The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=377c053a43f347588ce6800627adb634f87f8cf9

commit 377c053a43f347588ce6800627adb634f87f8cf9
Author:     Konstantin Belousov <[email protected]>
AuthorDate: 2026-01-22 12:57:24 +0000
Commit:     Konstantin Belousov <[email protected]>
CommitDate: 2026-01-29 18:11:56 +0000

    cpu_switch(): unconditionally wait out the transient blocked_lock state
    
    It is a nop for 4BSD, which never parks td_lock on blocked_lock across
    a switch.
    
    Reviewed by:    olce
    Tested by:      pho
    Sponsored by:   The FreeBSD Foundation
    MFC after:      1 week
    Differential revision:  https://reviews.freebsd.org/D54831
---
 sys/amd64/amd64/cpu_switch.S  |  5 -----
 sys/arm/arm/swtch-v6.S        |  7 +------
 sys/arm64/arm64/swtch.S       |  3 +--
 sys/i386/i386/swtch.S         | 31 +++++++++++++------------------
 sys/powerpc/powerpc/swtch32.S |  3 +--
 sys/powerpc/powerpc/swtch64.S |  3 +--
 sys/riscv/riscv/swtch.S       |  3 +--
 7 files changed, 18 insertions(+), 37 deletions(-)

diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index d7e954f573b0..17ff8005e3cd 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -35,7 +35,6 @@
 #include <machine/specialreg.h>
 
 #include "assym.inc"
-#include "opt_sched.h"
 
 /*****************************************************************************/
 /* Scheduling                                                                */
@@ -136,13 +135,11 @@ ctx_switch_fpusave_done:
        movq    %r15,TD_LOCK(%r13)              /* Release the old thread */
 sw1:
        leaq    TD_MD_PCB(%r12),%r8
-#if defined(SCHED_ULE)
        movq    $blocked_lock, %rdx
        movq    TD_LOCK(%r12),%rcx
        cmpq    %rcx, %rdx
        je      sw1wait
 sw1cont:
-#endif
        /*
         * At this point, we've switched address spaces and are ready
         * to load up the rest of the next context.
@@ -492,7 +489,6 @@ ENTRY(resumectx)
 END(resumectx)
 
 /* Wait for the new thread to become unblocked */
-#if defined(SCHED_ULE)
 sw1wait:
 1:
        pause
@@ -500,4 +496,3 @@ sw1wait:
        cmpq    %rcx, %rdx
        je      1b
        jmp     sw1cont
-#endif
diff --git a/sys/arm/arm/swtch-v6.S b/sys/arm/arm/swtch-v6.S
index 97d863b6d4de..98c8e5c41ec5 100644
--- a/sys/arm/arm/swtch-v6.S
+++ b/sys/arm/arm/swtch-v6.S
@@ -79,7 +79,6 @@
  */
 
 #include "assym.inc"
-#include "opt_sched.h"
 
 #include <machine/asm.h>
 #include <machine/asmacros.h>
@@ -432,11 +431,7 @@ sw1:
         *   r11 = newtd
         */
 
-#if defined(SMP) && defined(SCHED_ULE)
-       /*
-        * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
-        * QQQ: What does it mean in reality and why is it done?
-        */
+#if defined(SMP)
        ldr     r6, =blocked_lock
 1:
        ldr     r3, [r11, #TD_LOCK]     /* atomic write regular read */
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
index a461fded929c..b3bf88135e57 100644
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -31,7 +31,6 @@
 
 #include "assym.inc"
 #include "opt_kstack_pages.h"
-#include "opt_sched.h"
 
 #include <sys/elf_common.h>
 
@@ -197,7 +196,7 @@ ENTRY(cpu_switch)
         * Release the old thread.
         */
        stlr    x2, [x0, #TD_LOCK]
-#if defined(SCHED_ULE) && defined(SMP)
+#if defined(SMP)
        /* Spin if TD_LOCK points to a blocked_lock */
        ldr     x2, =_C_LABEL(blocked_lock)
 1:
diff --git a/sys/i386/i386/swtch.S b/sys/i386/i386/swtch.S
index 5c2e078b5446..cb03c847fbc9 100644
--- a/sys/i386/i386/swtch.S
+++ b/sys/i386/i386/swtch.S
@@ -30,27 +30,11 @@
  * SUCH DAMAGE.
  */
 
-#include "opt_sched.h"
-
 #include <machine/asmacros.h>
 
 #include "assym.inc"
 
-#if defined(SMP) && defined(SCHED_ULE)
-#define        SETOP           xchgl
-#define        BLOCK_SPIN(reg)                                         \
-               movl            $blocked_lock,%eax ;                    \
-       100: ;                                                          \
-               lock ;                                                  \
-               cmpxchgl        %eax,TD_LOCK(reg) ;                     \
-               jne             101f ;                                  \
-               pause ;                                                 \
-               jmp             100b ;                                  \
-       101:
-#else
-#define        SETOP           movl
-#define        BLOCK_SPIN(reg)
-#endif
 
 /*****************************************************************************/
 /* Scheduling                                                                */
@@ -162,7 +146,7 @@ ENTRY(cpu_switch)
        /* Switchout td_lock */
        movl    %esi,%eax
        movl    PCPU(CPUID),%esi
-       SETOP   %eax,TD_LOCK(%edi)
+       xchgl   %eax,TD_LOCK(%edi)
 
        /* Release bit from old pmap->pm_active */
        movl    PCPU(CURPMAP), %ebx
@@ -181,7 +165,18 @@ ENTRY(cpu_switch)
 #endif
        btsl    %esi, PM_ACTIVE(%ebx)           /* set new */
 sw1:
-       BLOCK_SPIN(%ecx)
+#ifdef SMP
+       movl            $blocked_lock,%eax
+100:
+
+       lock
+       cmpxchgl        %eax,TD_LOCK(%ecx)
+       jne             101f
+       pause
+       jmp             100b
+101:   
+#endif
+
        /*
         * At this point, we have managed thread locks and are ready
         * to load up the rest of the next context.
diff --git a/sys/powerpc/powerpc/swtch32.S b/sys/powerpc/powerpc/swtch32.S
index 7fc0641722aa..262e7035bb29 100644
--- a/sys/powerpc/powerpc/swtch32.S
+++ b/sys/powerpc/powerpc/swtch32.S
@@ -56,7 +56,6 @@
  */
 
 #include "assym.inc"
-#include "opt_sched.h"
 
 #include <sys/syscall.h>
 
@@ -125,7 +124,7 @@ ENTRY(cpu_switch)
        sync                            /* Make sure all of that finished */
 
 cpu_switchin:
-#if defined(SMP) && defined(SCHED_ULE)
+#if defined(SMP)
        /* Wait for the new thread to become unblocked */
        bl      1f
 1:
diff --git a/sys/powerpc/powerpc/swtch64.S b/sys/powerpc/powerpc/swtch64.S
index ba37274d32bb..61af10aabaee 100644
--- a/sys/powerpc/powerpc/swtch64.S
+++ b/sys/powerpc/powerpc/swtch64.S
@@ -56,7 +56,6 @@
  */
 
 #include "assym.inc"
-#include "opt_sched.h"
 
 #include <sys/syscall.h>
 
@@ -187,7 +186,7 @@ save_tar:
        sync                            /* Make sure all of that finished */
 
 cpu_switchin:
-#if defined(SMP) && defined(SCHED_ULE)
+#if defined(SMP)
        /* Wait for the new thread to become unblocked */
        addis   %r6,%r2,TOC_REF(blocked_lock)@ha
        ld      %r6,TOC_REF(blocked_lock)@l(%r6)
diff --git a/sys/riscv/riscv/swtch.S b/sys/riscv/riscv/swtch.S
index cfd19a2d76d6..fc9b493744b8 100644
--- a/sys/riscv/riscv/swtch.S
+++ b/sys/riscv/riscv/swtch.S
@@ -33,7 +33,6 @@
  */
 
 #include "assym.inc"
-#include "opt_sched.h"
 
 #include <machine/param.h>
 #include <machine/asm.h>
@@ -315,7 +314,7 @@ ENTRY(cpu_switch)
 
        /* Release the old thread */
        sd      s2, TD_LOCK(s0)
-#if defined(SCHED_ULE) && defined(SMP)
+#if defined(SMP)
        /* Spin if TD_LOCK points to a blocked_lock */
        la      s2, _C_LABEL(blocked_lock)
 1:
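
For readers who do not want to decode the per-architecture assembly, here is
a rough C sketch of the td_lock handoff that every cpu_switch() now performs
unconditionally. blocked_lock and td_lock are the kernel's names; the C11
atomics and the function cpu_switch_sketch() are illustrative assumptions,
not the actual machine-dependent code.

#include <stdatomic.h>

struct mtx;
extern struct mtx blocked_lock;         /* sentinel: thread is in transit */

struct thread {
        _Atomic(struct mtx *) td_lock;  /* lock currently protecting the thread */
        /* ... machine-dependent context ... */
};

/* Hypothetical sketch of what the assembly cpu_switch() does. */
static void
cpu_switch_sketch(struct thread *old, struct thread *new, struct mtx *mtx)
{
        /* ... save old's register state ... */

        /* Release the old thread: publish the lock that protects it now. */
        atomic_store_explicit(&old->td_lock, mtx, memory_order_release);

        /*
         * Wait out the transient blocked_lock state: a migrating thread's
         * td_lock is parked on &blocked_lock until it may run again.  The
         * spin used to be compiled in only for SCHED_ULE; it is now always
         * present and simply never triggers under 4BSD.
         */
        while (atomic_load_explicit(&new->td_lock, memory_order_acquire) ==
            &blocked_lock)
                ;       /* cpu_spinwait() in the real kernel */

        /* ... load new's register state and return on its stack ... */
}

Most architectures poll with plain loads plus a pause/yield hint; the i386
version keeps its historical lock cmpxchgl based spin, now open-coded at the
sw1 label instead of hidden behind the BLOCK_SPIN() macro.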
