Module Name:    src
Committed By:   maxv
Date:           Wed Nov  1 09:17:28 UTC 2017

Modified Files:
        src/sys/arch/amd64/amd64: copy.S cpufunc.S

Log Message:
Don't fall through functions; jump explicitly instead. While here, don't
call smap_enable twice (harmless), and add END() markers.
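
For illustration, a minimal standalone GNU as sketch of the pattern (hypothetical
labels, not taken from the tree): the error stub loads EFAULT and jumps
explicitly to the shared return path instead of falling through into the next
function, and each function is closed with a .size directive, which is roughly
what the END() marker expands to on amd64.

	.text
	.globl	str_efault_sketch
	.type	str_efault_sketch,@function
str_efault_sketch:
	movl	$14,%eax		/* EFAULT (14 on NetBSD) */
	jmp	str_return_sketch	/* explicit jump, no fall-through */
	.size	str_efault_sketch,.-str_efault_sketch

	.globl	str_fault_sketch
	.type	str_fault_sketch,@function
str_fault_sketch:
	/* error-path cleanup (e.g. the smap_enable call) would go here */
str_return_sketch:
	ret
	.size	str_fault_sketch,.-str_fault_sketch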


To generate a diff of this commit:
cvs rdiff -u -r1.27 -r1.28 src/sys/arch/amd64/amd64/copy.S
cvs rdiff -u -r1.30 -r1.31 src/sys/arch/amd64/amd64/cpufunc.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/copy.S
diff -u src/sys/arch/amd64/amd64/copy.S:1.27 src/sys/arch/amd64/amd64/copy.S:1.28
--- src/sys/arch/amd64/amd64/copy.S:1.27	Mon Oct 30 17:06:42 2017
+++ src/sys/arch/amd64/amd64/copy.S	Wed Nov  1 09:17:28 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: copy.S,v 1.27 2017/10/30 17:06:42 maxv Exp $	*/
+/*	$NetBSD: copy.S,v 1.28 2017/11/01 09:17:28 maxv Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -258,19 +258,17 @@ END(copyin)
 
 NENTRY(copy_efault)
 	movq	$EFAULT,%rax
-
-/*
- * kcopy_fault is used by kcopy and copy_fault is used by copyin/out.
- *
- * they're distinguished for lazy pmap switching.  see trap().
- */
+	ret
+END(copy_efault)
 
 NENTRY(kcopy_fault)
 	ret
+END(kcopy_fault)
 
 NENTRY(copy_fault)
 	callq	smap_enable
 	ret
+END(copy_fault)
 
 ENTRY(copyoutstr)
 	DEFERRED_SWITCH_CHECK
@@ -362,6 +360,8 @@ END(copyinstr)
 
 ENTRY(copystr_efault)
 	movl	$EFAULT,%eax
+	jmp	copystr_return
+END(copystr_efault)
 
 ENTRY(copystr_fault)
 	callq	smap_enable
@@ -371,8 +371,8 @@ copystr_return:
 	jz	8f
 	subq	%rdx,%r8
 	movq	%r8,(%r9)
-
 8:	ret
+END(copystr_fault)
 
 ENTRY(copystr)
 	xchgq	%rdi,%rsi
@@ -564,10 +564,13 @@ END(ucas_32)
 
 ENTRY(ucas_efault)
 	movq	$EFAULT,%rax
+	ret
+END(ucas_efault)
 
 NENTRY(ucas_fault)
 	callq	smap_enable
 	ret
+END(ucas_fault)
 
 /*
  * int	ucas_ptr(volatile void **uptr, void *old, void *new, void **ret);

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.30 src/sys/arch/amd64/amd64/cpufunc.S:1.31
--- src/sys/arch/amd64/amd64/cpufunc.S:1.30	Mon Oct 30 17:06:42 2017
+++ src/sys/arch/amd64/amd64/cpufunc.S	Wed Nov  1 09:17:28 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.30 2017/10/30 17:06:42 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.31 2017/11/01 09:17:28 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -424,8 +424,7 @@ END(__byte_swap_u16_variable)
  * Load a new GDT pointer (and do any necessary cleanup).
  * XXX It's somewhat questionable whether reloading all the segment registers
  * is necessary, since the actual descriptor data is not changed except by
- * process creation and exit, both of which clean up via task switches.  OTOH,
- * this only happens at run time when the GDT is resized.
+ * process creation and exit, both of which clean up via task switches.
  */
 #ifndef XEN
 ENTRY(lgdt)
@@ -435,19 +434,21 @@ ENTRY(lgdt)
 	/* Flush the prefetch q. */
 	jmp	1f
 	nop
-1:	/* Reload "stale" selectors. */
-#else /* XEN */
+1:	jmp	_C_LABEL(lgdt_finish)
+END(lgdt)
+#endif
+
 /*
  * void lgdt_finish(void);
  * Reload segments after a GDT change
  */
 ENTRY(lgdt_finish)
-#endif /* XEN */
 	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
 	movl	%eax,%ds
 	movl	%eax,%es
 	movl	%eax,%ss
-	/* FALLTHROUGH */
+	jmp	_C_LABEL(x86_flush)
+END(lgdt_finish)
 
 /*
  * void x86_flush()
