curr_spl is actually 4 bytes, but the CX() macro
was expanding to an 8-byte stride on x86_64.
Add a new macro specifically for 8-byte widths and
use the correct stride macro for every asm instruction.

Reported by Brent Baccala
---
 i386/i386/cpu_number.h |  7 ++-----
 x86_64/cswitch.S       |  6 +++---
 x86_64/locore.S        | 28 ++++++++++++++--------------
 3 files changed, 19 insertions(+), 22 deletions(-)

diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
index a549bb56..4e894a00 100644
--- a/i386/i386/cpu_number.h
+++ b/i386/i386/cpu_number.h
@@ -34,12 +34,8 @@
 
 #define MY(stm)                %gs:PERCPU_##stm
 
-#ifdef __i386__
 #define        CX(addr, reg)   addr(,reg,4)
-#endif
-#ifdef __x86_64__
-#define        CX(addr, reg)   addr(,reg,8)
-#endif
+#define        CX8(addr, reg)  addr(,reg,8)
 
 /* Fastest version, requires gs being set up */
 #define CPU_NUMBER(reg)        \
@@ -70,6 +66,7 @@ static inline int cpu_number(void)
 #define        CPU_NUMBER(reg) \
        xor     reg, reg
 #define        CX(addr,reg)    addr
+#define        CX8(addr,reg)   addr
 
 #endif /* NCPUS == 1 */
 
diff --git a/x86_64/cswitch.S b/x86_64/cswitch.S
index a6b390e8..d91fd65c 100644
--- a/x86_64/cswitch.S
+++ b/x86_64/cswitch.S
@@ -41,7 +41,7 @@ ENTRY(Load_context)
                                                /* point to stack top */
        CPU_NUMBER(%eax)
        movq    %rcx,MY(ACTIVE_STACK)           /* store stack address */
-       movq    %rdx,CX(EXT(kernel_stack),%rax) /* store stack top */
+       movq    %rdx,CX8(EXT(kernel_stack),%rax) /* store stack top */
 
 /* XXX complete */
 
@@ -86,7 +86,7 @@ ENTRY(Switch_context)
        CPU_NUMBER(%edx)                        /* Don't overwrite returned value %rax */
        movq    %rsi,MY(ACTIVE_THREAD)          /* new thread is active */
        movq    %rcx,MY(ACTIVE_STACK)           /* set current stack */
-       movq    %rbx,CX(EXT(kernel_stack),%rdx) /* set stack top */
+       movq    %rbx,CX8(EXT(kernel_stack),%rdx) /* set stack top */
 
        movq    KSS_ESP(%rcx),%rsp              /* switch stacks */
        movq    KSS_EBP(%rcx),%rbp              /* restore registers */
@@ -135,7 +135,7 @@ ud2
        movq    S_ARG2,%rsi                     /* get its argument */
 
        CPU_NUMBER(%ecx)
-       movq    CX(EXT(int_stack_base),%rcx),%rcx       /* point to its interrupt stack */
+       movq    CX8(EXT(int_stack_base),%rcx),%rcx      /* point to its interrupt stack */
        lea     INTSTACK_SIZE(%rcx),%rsp        /* switch to it (top) */
 
        movq    %rax,%rdi                       /* push thread */
diff --git a/x86_64/locore.S b/x86_64/locore.S
index b1ec6197..085b7467 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -669,7 +669,7 @@ trap_from_user:
        CPU_NUMBER(%edx)
        TIME_TRAP_UENTRY
 
-       movq    CX(EXT(kernel_stack),%rdx),%rbx
+       movq    CX8(EXT(kernel_stack),%rdx),%rbx
        xchgq   %rbx,%rsp               /* switch to kernel stack */
                                        /* user regs pointer already set */
 _take_trap:
@@ -691,10 +691,10 @@ _take_trap:
 
 _return_from_trap:
        CPU_NUMBER(%edx)
-       cmpl    $0,CX(EXT(need_ast),%rdx)
+       cmpq    $0,CX8(EXT(need_ast),%rdx)
        jz      _return_to_user         /* if we need an AST: */
 
-       movq    CX(EXT(kernel_stack),%rdx),%rsp
+       movq    CX8(EXT(kernel_stack),%rdx),%rsp
                                        /* switch to kernel stack */
        call    EXT(i386_astintr)       /* take the AST */
        popq    %rsp                    /* switch back to PCB stack */
@@ -739,17 +739,17 @@ trap_from_kernel:
 
        CPU_NUMBER(%ecx)
        and     $(~(INTSTACK_SIZE-1)),%rdx
-       cmpq    CX(EXT(int_stack_base),%rcx),%rdx
+       cmpq    CX8(EXT(int_stack_base),%rcx),%rdx
        je      1f                      /* OK if so */
 
        movq    %rcx,%rdx
-       cmpq    CX(EXT(kernel_stack),%rdx),%rsp
+       cmpq    CX8(EXT(kernel_stack),%rdx),%rsp
                                        /* already on kernel stack? */
        ja      0f
        cmpq    MY(ACTIVE_STACK),%rsp
        ja      1f                      /* switch if not */
 0:
-       movq    CX(EXT(kernel_stack),%rdx),%rsp
+       movq    CX8(EXT(kernel_stack),%rdx),%rsp
 1:
        pushq   %rbx                    /* save old stack */
        movq    %rbx,%rdi               /* pass as parameter */
@@ -919,12 +919,12 @@ ENTRY(all_intrs)
        CPU_NUMBER(%ecx)
        movq    %rsp,%rdx               /* on an interrupt stack? */
        and     $(~(INTSTACK_SIZE-1)),%rdx
-       cmpq    %ss:CX(EXT(int_stack_base),%rcx),%rdx
+       cmpq    %ss:CX8(EXT(int_stack_base),%rcx),%rdx
        je      int_from_intstack       /* if not: */
 
        CPU_NUMBER(%edx)
 
-       movq    CX(EXT(int_stack_top),%rdx),%rcx
+       movq    CX8(EXT(int_stack_top),%rdx),%rcx
 
        xchgq   %rcx,%rsp               /* switch to interrupt stack */
 
@@ -966,7 +966,7 @@ LEXT(return_to_iret)                        /* to find the return from calling interrupt) */
        testb   $2,I_CS(%rsp)           /* user mode, */
        jz      1f                      /* check for ASTs */
 0:
-       cmpq    $0,CX(EXT(need_ast),%rdx)
+       cmpq    $0,CX8(EXT(need_ast),%rdx)
        jnz     ast_from_interrupt      /* take it if so */
 1:
        SWAPGS_EXIT_IF_NEEDED_R12
@@ -977,7 +977,7 @@ LEXT(return_to_iret)                        /* to find the return from calling interrupt) */
 
 int_from_intstack:
        CPU_NUMBER(%edx)
-       cmpq    CX(EXT(int_stack_base),%rdx),%rsp /* seemingly looping? */
+       cmpq    CX8(EXT(int_stack_base),%rdx),%rsp /* seemingly looping? */
        jb      stack_overflowed        /* if not: */
        call    EXT(interrupt)          /* call interrupt routine */
 _return_to_iret_i:                     /* ( label for kdb_kintr) */
@@ -1017,7 +1017,7 @@ ast_from_interrupt:
        CPU_NUMBER(%edx)
        TIME_TRAP_UENTRY
 
-       movq    CX(EXT(kernel_stack),%rdx),%rsp
+       movq    CX8(EXT(kernel_stack),%rdx),%rsp
                                        /* switch to kernel stack */
        call    EXT(i386_astintr)       /* take the AST */
        popq    %rsp                    /* back to PCB stack */
@@ -1271,7 +1271,7 @@ syscall_entry_2:
        CPU_NUMBER(%edx)
        TIME_TRAP_SENTRY
 
-       movq    CX(EXT(kernel_stack),%rdx),%rbx
+       movq    CX8(EXT(kernel_stack),%rdx),%rbx
                                        /* get current kernel stack */
        xchgq   %rbx,%rsp               /* switch stacks - %ebx points to */
                                        /* user registers. */
@@ -1516,7 +1516,7 @@ ENTRY(syscall64)
 
        /* switch to kernel stack then enable interrupts */
        CPU_NUMBER(%r11d)               /* we can call the fast version here */
-       movq    CX(EXT(kernel_stack),%r11),%rsp
+       movq    CX8(EXT(kernel_stack),%r11),%rsp
        sti
 
        /* Now we have saved state and args 1-6 are in place.
@@ -1560,7 +1560,7 @@ _syscall64_check_for_ast:
        /* Check for ast. */
        CPU_NUMBER(%r11d)
 
-       cmpl    $0,CX(EXT(need_ast),%r11)
+       cmpq    $0,CX8(EXT(need_ast),%r11)
        jz      _syscall64_restore_state
 
        /* Save the syscall return value, both on our stack, for the case
-- 
2.51.0



Reply via email to