Module Name:    src
Committed By:   maxv
Date:           Fri Jul  1 13:11:21 UTC 2016

Modified Files:
        src/sys/arch/amd64/amd64: locore.S
        src/sys/arch/i386/i386: locore.S

Log Message:
Try to make this part more readable. No functional change.


To generate a diff of this commit:
cvs rdiff -u -r1.102 -r1.103 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.131 -r1.132 src/sys/arch/i386/i386/locore.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.102 src/sys/arch/amd64/amd64/locore.S:1.103
--- src/sys/arch/amd64/amd64/locore.S:1.102	Sat Jun  4 10:48:11 2016
+++ src/sys/arch/amd64/amd64/locore.S	Fri Jul  1 13:11:21 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.102 2016/06/04 10:48:11 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.103 2016/07/01 13:11:21 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -1072,7 +1072,7 @@ END(dumpsys)
 
 /*
  * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp,
- *			    bool returning)
+ *     bool returning)
  *
  *	1. if (oldlwp != NULL), save its context.
  *	2. then, restore context of newlwp.
@@ -1091,18 +1091,19 @@ ENTRY(cpu_switchto)
 	movq	%rdi,%r13	/* oldlwp */
 	movq	%rsi,%r12	/* newlwp */
 
-	testq	%r13,%r13
-	jz	1f
+	testq	%r13,%r13	/* oldlwp = NULL ? */
+	jz	skip_save
 
 	/* Save old context. */
 	movq	L_PCB(%r13),%rax
 	movq	%rsp,PCB_RSP(%rax)
 	movq	%rbp,PCB_RBP(%rax)
+skip_save:
 
 	/* Switch to newlwp's stack. */
-1:	movq	L_PCB(%r12),%r14
+	movq	L_PCB(%r12),%r14
 #ifdef XEN /* XXX debug code */
-	cmpq	$0, PCB_RSP(%r14)
+	cmpq	$0,PCB_RSP(%r14)
 	jne 999f
 	callq _C_LABEL(cpu_Debugger);
 999:
@@ -1118,21 +1119,21 @@ ENTRY(cpu_switchto)
 	xchgq	%rcx,CPUVAR(CURLWP)
 
 	/* Skip the rest if returning to a pinned LWP. */
-	testb	%dl,%dl
-	jnz	4f
+	testb	%dl,%dl		/* returning = true ? */
+	jnz	switch_return
 
 	/* Switch ring0 stack */
 #ifndef XEN
 	movq	PCB_RSP0(%r14),%rax
 	movq	%rax,CPUVAR(RSP0)
 #else
-	movq	%r14, %rdi
+	movq	%r14,%rdi
 	callq	_C_LABEL(x86_64_switch_context);
 #endif
 
 	/* Don't bother with the rest if switching to a system process. */
 	testl	$LW_SYSTEM,L_FLAG(%r12)
-	jnz	4f
+	jnz	switch_return
 
 	/* Is this process using RAS (restartable atomic sequences)? */
 	movq	L_PROC(%r12),%rdi
@@ -1154,65 +1155,68 @@ ENTRY(cpu_switchto)
 	 * set CR0_TS so we'll trap rather than reuse bogus state.
 	 */
 	cmpq	CPUVAR(FPCURLWP),%r12
-	je	3f
+	je	skip_TS
 	orq	$CR0_TS,%rcx
+skip_TS:
 
 	/* Reloading CR0 is very expensive - avoid if possible. */
-3:	cmpq	%rdx,%rcx
-	je	6f
+	cmpq	%rdx,%rcx
+	je	skip_CR0
 	movq	%rcx,%cr0
+skip_CR0:
 
-6:	testl	$PCB_COMPAT32, PCB_FLAGS(%r14)
+	/* The 32bit LWPs are handled differently. */
+	testl	$PCB_COMPAT32,PCB_FLAGS(%r14)
 	jne	32f
 
 	/* Zero out %fs/%gs registers and GDT descriptors. */
-	xorq	%rax, %rax
-	movw	%ax, %fs
+	xorq	%rax,%rax
+	movw	%ax,%fs
 	CLI(cx)
 	SWAPGS
-	movw	%ax, %gs
+	movw	%ax,%gs
 	SWAPGS
 	STI(cx)
 
 	movq	CPUVAR(GDT),%rcx
-	movq	%rax, (GUFS_SEL*8)(%rcx)
-	movq	%rax, (GUGS_SEL*8)(%rcx)
+	movq	%rax,(GUFS_SEL*8)(%rcx)
+	movq	%rax,(GUGS_SEL*8)(%rcx)
 
 	/* Reload 64-bit %fs/%gs MSRs. */
-	movl	$MSR_FSBASE, %ecx
-	movl	PCB_FS(%r14), %eax
-	movl	4+PCB_FS(%r14), %edx
+	movl	$MSR_FSBASE,%ecx
+	movl	PCB_FS(%r14),%eax
+	movl	4+PCB_FS(%r14),%edx
 	wrmsr
-	movl	$MSR_KERNELGSBASE, %ecx
-	movl	PCB_GS(%r14), %eax
-	movl	4+PCB_GS(%r14), %edx
+	movl	$MSR_KERNELGSBASE,%ecx
+	movl	PCB_GS(%r14),%eax
+	movl	4+PCB_GS(%r14),%edx
 	wrmsr
-	jmp	4f
+	jmp	switch_return
 
 32:
 	/* Reload %fs/%gs GDT descriptors. */
 	movq	CPUVAR(GDT),%rcx
-	movq	PCB_FS(%r14), %rax
-	movq	%rax, (GUFS_SEL*8)(%rcx)
-	movq	PCB_GS(%r14), %rax
-	movq	%rax, (GUGS_SEL*8)(%rcx)
+	movq	PCB_FS(%r14),%rax
+	movq	%rax,(GUFS_SEL*8)(%rcx)
+	movq	PCB_GS(%r14),%rax
+	movq	%rax,(GUGS_SEL*8)(%rcx)
 
 	/* Reload %fs and %gs */
-	movq	L_MD_REGS(%r12), %rbx
-	movw	TF_FS(%rbx), %fs
+	movq	L_MD_REGS(%r12),%rbx
+	movw	TF_FS(%rbx),%fs
 	CLI(ax)
 	SWAPGS
-	movw	TF_GS(%rbx), %gs
+	movw	TF_GS(%rbx),%gs
 	SWAPGS
 	STI(ax)
-
 #else
 	movq	%r12,%rdi
 	callq	_C_LABEL(x86_64_tls_switch)
 #endif
 
+switch_return:
 	/* Return to the new LWP, returning 'oldlwp' in %rax. */
-4:	movq	%r13,%rax
+	movq	%r13,%rax
 	popq	%r15
 	popq	%r14
 	popq	%r13

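[The block comment above spells out cpu_switchto()'s contract: save the old LWP's context if there is one, restore the new LWP's context, and hand 'oldlwp' back to the caller. A rough, compile-only C sketch of that contract follows; the pcb fields and helper functions here are hypothetical placeholders for illustration only - the real routine is the assembly in the diff above.]

#include <stdbool.h>
#include <stddef.h>

struct lwp;				/* opaque for this sketch */

struct pcb {				/* hypothetical minimal layout */
	void	*pcb_sp;		/* saved stack pointer (PCB_RSP/PCB_ESP) */
	void	*pcb_fp;		/* saved frame pointer (PCB_RBP/PCB_EBP) */
};

/* Hypothetical helpers standing in for the assembly in locore.S. */
struct pcb	*lwp_getpcb(struct lwp *);
void		 cpu_save_context(struct pcb *);
void		 cpu_load_context(struct pcb *);

struct lwp *
cpu_switchto_sketch(struct lwp *oldlwp, struct lwp *newlwp, bool returning)
{
	/* 1. if (oldlwp != NULL), save its context. */
	if (oldlwp != NULL)
		cpu_save_context(lwp_getpcb(oldlwp));	/* else: skip_save */

	/* 2. then, restore context of newlwp (switch to its stack). */
	cpu_load_context(lwp_getpcb(newlwp));

	/*
	 * If 'returning' is true (coming back to a pinned LWP), the
	 * assembly jumps straight to switch_return and skips the ring0
	 * stack, segment-register and FPU bookkeeping.
	 */
	(void)returning;

	/* Return to the new LWP, returning 'oldlwp' in %rax/%eax. */
	return oldlwp;
}
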
Index: src/sys/arch/i386/i386/locore.S
diff -u src/sys/arch/i386/i386/locore.S:1.131 src/sys/arch/i386/i386/locore.S:1.132
--- src/sys/arch/i386/i386/locore.S:1.131	Sat Jun  4 10:48:11 2016
+++ src/sys/arch/i386/i386/locore.S	Fri Jul  1 13:11:21 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.131 2016/06/04 10:48:11 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.132 2016/07/01 13:11:21 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -128,7 +128,7 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.131 2016/06/04 10:48:11 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.132 2016/07/01 13:11:21 maxv Exp $");
 
 #include "opt_compat_oldboot.h"
 #include "opt_copy_symtab.h"
@@ -1100,7 +1100,7 @@ END(dumpsys)
 
 /*
  * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp,
- *			    bool returning)
+ *     bool returning)
  *
  *	1. if (oldlwp != NULL), save its context.
  *	2. then, restore context of newlwp.
@@ -1128,16 +1128,18 @@ ENTRY(cpu_switchto)
 	movl	16(%esp),%esi		/* oldlwp */
 	movl	20(%esp),%edi		/* newlwp */
 	movl	24(%esp),%edx		/* returning */
-	testl	%esi,%esi
-	jz	1f
+
+	testl	%esi,%esi		/* oldlwp = NULL ? */
+	jz	skip_save
 
 	/* Save old context. */
 	movl	L_PCB(%esi),%eax
 	movl	%esp,PCB_ESP(%eax)
 	movl	%ebp,PCB_EBP(%eax)
+skip_save:
 
 	/* Switch to newlwp's stack. */
-1:	movl	L_PCB(%edi),%ebx
+	movl	L_PCB(%edi),%ebx
 	movl	PCB_EBP(%ebx),%ebp
 	movl	PCB_ESP(%ebx),%esp
 
@@ -1150,33 +1152,33 @@ ENTRY(cpu_switchto)
 
 	/* Skip the rest if returning to a pinned LWP. */
 	testl	%edx,%edx
-	jnz	4f
+	jnz	switch_return
 
+	/* Switch ring0 stack */
 #ifdef XEN
 	pushl	%edi
 	call	_C_LABEL(i386_switch_context)
 	addl	$4,%esp
-#else /* !XEN */
-	/* Switch ring0 esp */
+#else
 	movl	PCB_ESP0(%ebx),%eax
 	movl	%eax,CPUVAR(ESP0)
-#endif /* !XEN */
+#endif
 
 	/* Don't bother with the rest if switching to a system process. */
 	testl	$LW_SYSTEM,L_FLAG(%edi)
-	jnz	4f
+	jnz	switch_return
 
 #ifndef XEN
 	/* Restore thread-private %fs/%gs descriptors. */
 	movl	CPUVAR(GDT),%ecx
-	movl	PCB_FSD(%ebx), %eax
-	movl	PCB_FSD+4(%ebx), %edx
-	movl	%eax, (GUFS_SEL*8)(%ecx)
-	movl	%edx, (GUFS_SEL*8+4)(%ecx)
-	movl	PCB_GSD(%ebx), %eax
-	movl	PCB_GSD+4(%ebx), %edx
-	movl	%eax, (GUGS_SEL*8)(%ecx)
-	movl	%edx, (GUGS_SEL*8+4)(%ecx)
+	movl	PCB_FSD(%ebx),%eax
+	movl	PCB_FSD+4(%ebx),%edx
+	movl	%eax,(GUFS_SEL*8)(%ecx)
+	movl	%edx,(GUFS_SEL*8+4)(%ecx)
+	movl	PCB_GSD(%ebx),%eax
+	movl	PCB_GSD+4(%ebx),%edx
+	movl	%eax,(GUGS_SEL*8)(%ecx)
+	movl	%edx,(GUGS_SEL*8+4)(%ecx)
 #endif /* !XEN */
 
 	/* Switch I/O bitmap */
@@ -1211,17 +1213,19 @@ ENTRY(cpu_switchto)
 	 * set CR0_TS so we'll trap rather than reuse bogus state.
 	 */
 	cmpl	CPUVAR(FPCURLWP),%edi
-	je	3f
+	je	skip_TS
 	orl	$CR0_TS,%ecx
+skip_TS:
 
 	/* Reloading CR0 is very expensive - avoid if possible. */
-3:	cmpl	%edx,%ecx
-	je	4f
+	cmpl	%edx,%ecx
+	je	switch_return
 	movl	%ecx,%cr0
 #endif /* !XEN */
 
+switch_return:
 	/* Return to the new LWP, returning 'oldlwp' in %eax. */
-4:	movl	%esi,%eax
+	movl	%esi,%eax
 	popl	%edi
 	popl	%esi
 	popl	%ebx

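[Both files keep the comment about saved %cr0 values and CR0_TS. As a hedged C sketch of that lazy-FPU idea - every helper and variable name below is invented for illustration; the actual logic is the cmp/or sequence around skip_TS and skip_CR0 in the diffs above:]

#include <stdint.h>

#define CR0_TS	0x00000008	/* Task Switched: next FPU insn will trap */

struct lwp;

/* Hypothetical stand-ins for the per-CPU state used by locore.S. */
extern struct lwp *fpcurlwp;		/* LWP that owns the FPU state */
uint64_t cr0_current(void);		/* %cr0 as currently loaded */
uint64_t cr0_wanted(struct lwp *);	/* %cr0 saved in the new LWP's pcb */
void	 cr0_load(uint64_t);		/* reload %cr0 - expensive */

void
fpu_lazy_switch_sketch(struct lwp *newlwp)
{
	uint64_t cur = cr0_current();
	uint64_t want = cr0_wanted(newlwp);

	/*
	 * If newlwp does not own the FPU, set CR0_TS so its first FPU
	 * instruction traps instead of reusing bogus state (skip_TS).
	 */
	if (newlwp != fpcurlwp)
		want |= CR0_TS;

	/* Reloading CR0 is very expensive - avoid if possible (skip_CR0). */
	if (want != cur)
		cr0_load(want);
}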