CVS commit: [netbsd-8] src/sys/arch/amd64/amd64

2018-11-29 Thread Martin Husemann
Module Name:    src
Committed By:   martin
Date:   Thu Nov 29 08:51:01 UTC 2018

Modified Files:
src/sys/arch/amd64/amd64 [netbsd-8]: machdep.c

Log Message:
Pull up following revision(s) (requested by maxv in ticket #):

sys/arch/amd64/amd64/machdep.c: revision 1.321

Fix stack info leak. There is a big padding in struct sigframe_siginfo.

[  224.006287] kleak: Possible leak in copyout: [len=920, leaked=92]
[  224.016977] #0 0x80224d0a in kleak_note 
[  224.026268] #1 0x80224d8a in kleak_copyout 
[  224.026268] #2 0x802224b5 in sendsig_siginfo 
[  224.036261] #3 0x80b51564 in sendsig 
[  224.046475] #4 0x80b51282 in postsig 
[  224.046475] #5 0x80b2fc5d in lwp_userret 
[  224.056273] #6 0x8025a951 in mi_userret 
[  224.066277] #7 0x8025ab89 in syscall 


To generate a diff of this commit:
cvs rdiff -u -r1.255.6.7 -r1.255.6.8 src/sys/arch/amd64/amd64/machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.255.6.7 src/sys/arch/amd64/amd64/machdep.c:1.255.6.8
--- src/sys/arch/amd64/amd64/machdep.c:1.255.6.7	Sat Jun  9 15:12:21 2018
+++ src/sys/arch/amd64/amd64/machdep.c	Thu Nov 29 08:51:01 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.255.6.7 2018/06/09 15:12:21 martin Exp $	*/
+/*	$NetBSD: machdep.c,v 1.255.6.8 2018/11/29 08:51:01 martin Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -111,7 +111,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.255.6.7 2018/06/09 15:12:21 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.255.6.8 2018/11/29 08:51:01 martin Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -606,6 +606,7 @@ sendsig_siginfo(const ksiginfo_t *ksi, c
 	/* Round down the stackpointer to a multiple of 16 for the ABI. */
 	fp = (struct sigframe_siginfo *)(((unsigned long)sp & ~15) - 8);
 
+	memset(&frame, 0, sizeof(frame));
 	frame.sf_ra = (uint64_t)ps->sa_sigdesc[sig].sd_tramp;
 	frame.sf_si._info = ksi->ksi_info;
 	frame.sf_uc.uc_flags = _UC_SIGMASK;
@@ -613,7 +614,6 @@ sendsig_siginfo(const ksiginfo_t *ksi, c
 	frame.sf_uc.uc_link = l->l_ctxlink;
 	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
 	? _UC_SETSTACK : _UC_CLRSTACK;
-	memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
 	sendsig_reset(l, sig);
 
 	mutex_exit(p->p_lock);
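
A minimal userland sketch of the leak class this memset closes (hypothetical struct and values, not the kernel code): a structure with internal padding that is filled field by field and then copied out byte-for-byte still carries whatever stale stack bytes sit in the padding, which is what kleak flagged above; zeroing the whole object first clears them.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct padded {			/* stand-in for a padded sigframe */
	uint8_t  tag;		/* followed by 7 bytes of padding */
	uint64_t value;
};

static void
dump(const char *label, const struct padded *p)
{
	const unsigned char *b = (const unsigned char *)p;

	printf("%-8s:", label);
	for (size_t i = 0; i < sizeof(*p); i++)
		printf(" %02x", b[i]);
	printf("\n");
}

int
main(void)
{
	struct padded leak, clean;

	/* Field-by-field init: the padding keeps stale stack contents. */
	leak.tag = 1;
	leak.value = 42;

	/* Zero the whole object first, as the pullup does for 'frame'. */
	memset(&clean, 0, sizeof(clean));
	clean.tag = 1;
	clean.value = 42;

	dump("unzeroed", &leak);
	dump("zeroed", &clean);
	return 0;
}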



CVS commit: [netbsd-8] src/sys/arch/amd64/amd64

2018-05-05 Thread Martin Husemann
Module Name:    src
Committed By:   martin
Date:   Sat May  5 15:00:29 UTC 2018

Modified Files:
src/sys/arch/amd64/amd64 [netbsd-8]: locore.S

Log Message:
Pull up following revision(s) (requested by maxv in ticket #786):

sys/arch/amd64/amd64/locore.S: revision 1.164,1.165

Adjust Xsyscall_svs to not use movq for 64bit immediates either.

Do not use movq for loading arbitrary 64bit immediates. The ISA
restricts it to 32bit immediates.


To generate a diff of this commit:
cvs rdiff -u -r1.123.6.5 -r1.123.6.6 src/sys/arch/amd64/amd64/locore.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.123.6.5 src/sys/arch/amd64/amd64/locore.S:1.123.6.6
--- src/sys/arch/amd64/amd64/locore.S:1.123.6.5	Thu Mar 22 16:59:03 2018
+++ src/sys/arch/amd64/amd64/locore.S	Sat May  5 15:00:29 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.123.6.5 2018/03/22 16:59:03 martin Exp $	*/
+/*	$NetBSD: locore.S,v 1.123.6.6 2018/05/05 15:00:29 martin Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -1368,8 +1368,8 @@ IDTVEC(\name)
 
 	/* Get the LWP's kernel stack pointer in %rax */
 	.if	\is_svs
-		movq	%rax,SVS_UTLS+UTLS_SCRATCH
-		movq	SVS_UTLS+UTLS_RSP0,%rax
+		movabs	%rax,SVS_UTLS+UTLS_SCRATCH
+		movabs	SVS_UTLS+UTLS_RSP0,%rax
 	.else
 		movq	%rax,CPUVAR(SCRATCH)
 		movq	CPUVAR(CURLWP),%rax
@@ -1387,7 +1387,7 @@ IDTVEC(\name)
 
 	/* Restore %rax */
 	.if	\is_svs
-		movq	SVS_UTLS+UTLS_SCRATCH,%rax
+		movabs	SVS_UTLS+UTLS_SCRATCH,%rax
 	.else
 		movq	CPUVAR(SCRATCH),%rax
 	.endif
@@ -1579,7 +1579,7 @@ END(intrfastexit)
 	.globl	nosvs_leave_altstack, nosvs_leave_altstack_end
 
 LABEL(svs_enter)
-	movq	SVS_UTLS+UTLS_KPDIRPA,%rax
+	movabs	SVS_UTLS+UTLS_KPDIRPA,%rax
 	movq	%rax,%cr3
 	movq	CPUVAR(KRSP0),%rsp
 LABEL(svs_enter_end)
@@ -1587,7 +1587,7 @@ LABEL(svs_enter_end)
 LABEL(svs_enter_altstack)
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jz	1234f
-	movq	SVS_UTLS+UTLS_KPDIRPA,%rax
+	movabs	SVS_UTLS+UTLS_KPDIRPA,%rax
 	movq	%rax,%cr3
 1234:
 LABEL(svs_enter_altstack_end)
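
A minimal userland sketch of the movq/movabs distinction the log message refers to (GCC/Clang inline asm, illustrative constant): in AT&T syntax, movabsq is the form that carries a full 64-bit immediate or absolute address, while the plain movq immediate and displacement forms are limited to sign-extended 32-bit values.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t v;

	/*
	 * movabsq carries a full 64-bit immediate; the ordinary movq
	 * immediate form only takes a sign-extended 32-bit value.
	 */
	__asm__("movabsq $0x1122334455667788, %0" : "=r"(v));

	printf("%" PRIx64 "\n", v);
	return 0;
}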



CVS commit: [netbsd-8] src/sys/arch/amd64/amd64

2018-04-08 Thread Soren Jacobsen
Module Name:    src
Committed By:   snj
Date:   Sun Apr  8 06:14:18 UTC 2018

Modified Files:
src/sys/arch/amd64/amd64 [netbsd-8]: trap.c

Log Message:
Pull up following revision(s) (requested by maxv in ticket #705):
sys/arch/amd64/amd64/trap.c: 1.113
Mmh. We shouldn't read %cr2 here. %cr2 is initialized by the CPU only
during page faults (T_PAGEFLT), so here we're reading a value that comes
from a previous page fault.
That's a real problem; if you launch an unprivileged process, set up a
signal handler, make it sleep 10 seconds, and trigger a T_ALIGNFLT fault,
you get in si_addr the address of another LWP's page - and perhaps this
can be used to defeat userland ASLR.
This bug has been there since 2003.


To generate a diff of this commit:
cvs rdiff -u -r1.96.4.2 -r1.96.4.3 src/sys/arch/amd64/amd64/trap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/trap.c
diff -u src/sys/arch/amd64/amd64/trap.c:1.96.4.2 src/sys/arch/amd64/amd64/trap.c:1.96.4.3
--- src/sys/arch/amd64/amd64/trap.c:1.96.4.2	Thu Mar 22 16:59:03 2018
+++ src/sys/arch/amd64/amd64/trap.c	Sun Apr  8 06:14:18 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: trap.c,v 1.96.4.2 2018/03/22 16:59:03 martin Exp $	*/
+/*	$NetBSD: trap.c,v 1.96.4.3 2018/04/08 06:14:18 snj Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@@ -68,7 +68,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.96.4.2 2018/03/22 16:59:03 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.96.4.3 2018/04/08 06:14:18 snj Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -389,7 +389,7 @@ copyfault:
 #endif
 		KSI_INIT_TRAP(&ksi);
 		ksi.ksi_trap = type & ~T_USER;
-		ksi.ksi_addr = (void *)rcr2();
+		ksi.ksi_addr = (void *)frame->tf_rip;
 		switch (type) {
 		case T_SEGNPFLT|T_USER:
 		case T_STKFLT|T_USER:
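
A userland sketch of the scenario in the log message (an illustration, not the kernel fix): set EFLAGS.AC so a misaligned access raises an alignment fault, then print the si_addr the kernel reports; with the change above it is the trapping instruction pointer rather than a stale %cr2 value from an unrelated page fault. Assumes x86-64, GCC/Clang inline asm, and an OS that keeps CR0.AM set (the usual case); SIGSEGV is caught as well in case the trap is mapped differently.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
on_fault(int sig, siginfo_t *si, void *ctx)
{
	(void)ctx;
	/* printf is not async-signal-safe; fine for a one-shot demo. */
	printf("sig=%d si_addr=%p\n", sig, si->si_addr);
	_exit(0);
}

int
main(void)
{
	static uint64_t storage[2];	/* 8-byte aligned backing store */
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_fault;
	sa.sa_flags = SA_SIGINFO;
	(void)sigaction(SIGBUS, &sa, NULL);
	(void)sigaction(SIGSEGV, &sa, NULL);

	/* Set EFLAGS.AC; step past the red zone before pushing. */
	__asm__ volatile(
	    "add $-128, %%rsp\n\t"
	    "pushfq\n\t"
	    "orl $0x40000, (%%rsp)\n\t"
	    "popfq\n\t"
	    "sub $-128, %%rsp"
	    ::: "cc", "memory");

	/* Misaligned 32-bit store -> alignment-check trap. */
	*(volatile uint32_t *)((char *)storage + 1) = 1;

	printf("no alignment fault delivered\n");
	return 0;
}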



CVS commit: [netbsd-8] src/sys/arch/amd64/amd64

2018-02-25 Thread Soren Jacobsen
Module Name:    src
Committed By:   snj
Date:   Mon Feb 26 00:49:48 UTC 2018

Modified Files:
src/sys/arch/amd64/amd64 [netbsd-8]: copy.S cpufunc.S

Log Message:
Pull up following revision(s) (requested by maxv in ticket #575):
sys/arch/amd64/amd64/copy.S: 1.28 via patch
sys/arch/amd64/amd64/cpufunc.S: 1.31
Don't fall through functions, explicitly jump instead.


To generate a diff of this commit:
cvs rdiff -u -r1.20.10.1 -r1.20.10.2 src/sys/arch/amd64/amd64/copy.S
cvs rdiff -u -r1.27 -r1.27.8.1 src/sys/arch/amd64/amd64/cpufunc.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/copy.S
diff -u src/sys/arch/amd64/amd64/copy.S:1.20.10.1 src/sys/arch/amd64/amd64/copy.S:1.20.10.2
--- src/sys/arch/amd64/amd64/copy.S:1.20.10.1	Mon Sep  4 20:41:28 2017
+++ src/sys/arch/amd64/amd64/copy.S	Mon Feb 26 00:49:48 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: copy.S,v 1.20.10.1 2017/09/04 20:41:28 snj Exp $	*/
+/*	$NetBSD: copy.S,v 1.20.10.2 2018/02/26 00:49:48 snj Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -105,6 +105,7 @@ ENTRY(do_pmap_load)
 	popq	%rdi
 	leaveq
 	ret
+END(do_pmap_load)
 
 /*
  * Copy routines from and to userland, plus a few more. See the
@@ -172,6 +173,7 @@ ENTRY(kcopy)
 .Lkcopy_end:
 	xorq	%rax,%rax
 	ret
+END(kcopy)
 
 ENTRY(copyout)
 	DEFERRED_SWITCH_CHECK
@@ -199,6 +201,7 @@ ENTRY(copyout)
 	xorl	%eax,%eax
 	ret
 	DEFERRED_SWITCH_CALL
+END(copyout)
 
 ENTRY(copyin)
 	DEFERRED_SWITCH_CHECK
@@ -227,21 +230,20 @@ ENTRY(copyin)
 	xorl	%eax,%eax
 	ret
 	DEFERRED_SWITCH_CALL
+END(copyin)
 
 NENTRY(copy_efault)
 	movq	$EFAULT,%rax
-
-/*
- * kcopy_fault is used by kcopy and copy_fault is used by copyin/out.
- *
- * they're distinguished for lazy pmap switching.  see trap().
- */
+	ret
+END(copy_efault)
 
 NENTRY(kcopy_fault)
 	ret
+END(kcopy_fault)
 
 NENTRY(copy_fault)
 	ret
+END(copy_fault)
 
 ENTRY(copyoutstr)
 	DEFERRED_SWITCH_CHECK
@@ -282,6 +284,7 @@ ENTRY(copyoutstr)
 	movq	$ENAMETOOLONG,%rax
 	jmp	copystr_return
 	DEFERRED_SWITCH_CALL
+END(copyoutstr)
 
 ENTRY(copyinstr)
 	DEFERRED_SWITCH_CHECK
@@ -315,16 +318,19 @@ ENTRY(copyinstr)
 	xorq	%rax,%rax
 	jmp	copystr_return
 
-2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
+2:	/* rdx is zero -- return EFAULT or ENAMETOOLONG. */
 	movq	$VM_MAXUSER_ADDRESS,%r11
 	cmpq	%r11,%rsi
 	jae	_C_LABEL(copystr_efault)
 	movq	$ENAMETOOLONG,%rax
 	jmp	copystr_return
 	DEFERRED_SWITCH_CALL
+END(copyinstr)
 
 ENTRY(copystr_efault)
 	movl	$EFAULT,%eax
+	jmp	copystr_return
+END(copystr_efault)
 
 ENTRY(copystr_fault)
 copystr_return:
@@ -333,8 +339,8 @@ copystr_return:
 	jz	8f
 	subq	%rdx,%r8
 	movq	%r8,(%r9)
-
 8:	ret
+END(copystr_fault)
 
 ENTRY(copystr)
 	xchgq	%rdi,%rsi
@@ -354,7 +360,7 @@ ENTRY(copystr)
 	xorl	%eax,%eax
 	jmp	6f
 
-4:	/* edx is zero -- return ENAMETOOLONG. */
+4:	/* rdx is zero -- return ENAMETOOLONG. */
 	movl	$ENAMETOOLONG,%eax
 
 6:	/* Set *lencopied and return %eax. */
@@ -364,7 +370,7 @@ ENTRY(copystr)
 	movq	%r8,(%rcx)
 
 7:	ret
-
+END(copystr)
 
 ENTRY(fuswintr)
 	cmpl	$TLBSTATE_VALID,CPUVAR(TLBSTATE)
@@ -380,6 +386,7 @@ ENTRY(fuswintr)
 
 	movq	$0,PCB_ONFAULT(%rcx)
 	ret
+END(fuswintr)
 
 ENTRY(fubyte)
 	DEFERRED_SWITCH_CHECK
@@ -395,6 +402,7 @@ ENTRY(fubyte)
 	movq	$0,PCB_ONFAULT(%rcx)
 	ret
 	DEFERRED_SWITCH_CALL
+END(fubyte)
 
 ENTRY(suswintr)
 	cmpl	$TLBSTATE_VALID,CPUVAR(TLBSTATE)
@@ -411,6 +419,7 @@ ENTRY(suswintr)
 	xorq	%rax,%rax
 	movq	%rax,PCB_ONFAULT(%rcx)
 	ret
+END(suswintr)
 
 ENTRY(subyte)
 	DEFERRED_SWITCH_CHECK
@@ -428,6 +437,7 @@ ENTRY(subyte)
 	movq	%rax,PCB_ONFAULT(%rcx)
 	ret
 	DEFERRED_SWITCH_CALL
+END(subyte)
 
 /*
  * These are the same, but must reside at different addresses,
@@ -437,15 +447,18 @@ ENTRY(fusuintrfailure)
 	movq	$0,PCB_ONFAULT(%rcx)
 	movl	$-1,%eax
 	ret
+END(fusuintrfailure)
 
 ENTRY(fusufailure)
 	movq	$0,PCB_ONFAULT(%rcx)
 	movl	$-1,%eax
 	ret
+END(fusufailure)
 
 ENTRY(fusuaddrfault)
 	movl	$-1,%eax
 	ret
+END(fusuaddrfault)
 
 /*
  * Compare-and-swap the 64-bit integer in the user-space.
@@ -474,6 +487,7 @@ ENTRY(ucas_64)
 	xorq	%rax,%rax
 	ret
 	DEFERRED_SWITCH_CALL
+END(ucas_64)
 
 /*
  * int	ucas_32(volatile int32_t *uptr, int32_t old, int32_t new, int32_t *ret);
@@ -500,12 +514,16 @@ ENTRY(ucas_32)
 	xorq	%rax,%rax
 	ret
 	DEFERRED_SWITCH_CALL
+END(ucas_32)
 
 ENTRY(ucas_efault)
 	movq	$EFAULT,%rax
+	ret
+END(ucas_efault)
 
 NENTRY(ucas_fault)
 	ret
+END(ucas_fault)
 
 /*
  * int	ucas_ptr(volatile void **uptr, void *old, void *new, void **ret);
@@ -524,6 +542,7 @@ x86_copyfunc_end:	.globl	x86_copyfunc_en
  */
 	.section ".rodata"
 	.globl _C_LABEL(onfault_table)
+
 _C_LABEL(onfault_table):
 	.quad .Lcopyin_start
 	.quad .Lcopyin_end

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.27 src/sys/arch/amd64/amd64/cpufunc.S:1.27.8.1
--- src/sys/arch/amd64/amd64/cpufunc.S:1.27	Sun Nov 27 14:49:21 2016



CVS commit: [netbsd-8] src/sys/arch/amd64/amd64

2017-12-21 Thread Soren Jacobsen
Module Name:    src
Committed By:   snj
Date:   Thu Dec 21 19:53:29 UTC 2017

Modified Files:
src/sys/arch/amd64/amd64 [netbsd-8]: netbsd32_machdep.c

Log Message:
Pull up following revision(s) (requested by christos in ticket #444):
sys/arch/amd64/amd64/netbsd32_machdep.c: revision 1.114
Keep fs/gs the same for the signal context; otherwise calling things
like __lwp_getprivate_fast() from a signal handler (that uses %gs) die.
Merge context building code.


To generate a diff of this commit:
cvs rdiff -u -r1.105 -r1.105.2.1 src/sys/arch/amd64/amd64/netbsd32_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/netbsd32_machdep.c
diff -u src/sys/arch/amd64/amd64/netbsd32_machdep.c:1.105 src/sys/arch/amd64/amd64/netbsd32_machdep.c:1.105.2.1
--- src/sys/arch/amd64/amd64/netbsd32_machdep.c:1.105	Thu Jun  1 02:45:05 2017
+++ src/sys/arch/amd64/amd64/netbsd32_machdep.c	Thu Dec 21 19:53:28 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: netbsd32_machdep.c,v 1.105 2017/06/01 02:45:05 chs Exp $	*/
+/*	$NetBSD: netbsd32_machdep.c,v 1.105.2.1 2017/12/21 19:53:28 snj Exp $	*/
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: netbsd32_machdep.c,v 1.105 2017/06/01 02:45:05 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: netbsd32_machdep.c,v 1.105.2.1 2017/12/21 19:53:28 snj Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -164,6 +164,43 @@ netbsd32_setregs(struct lwp *l, struct e
 	tf->tf_ss = LSEL(LUDATA32_SEL, SEL_UPL);
 }
 
+static void
+netbsd32_buildcontext(struct lwp *l, struct trapframe *tf, void *fp,
+sig_t catcher, int onstack)
+{
+	/*
+	 * Build context to run handler in.
+	 */
+	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
+	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
+#if 0
+	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
+	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);
+#endif
+
+	/* Ensure FP state is sane. */
+	fpu_save_area_reset(l);
+
+	tf->tf_rip = (uint64_t)catcher;
+	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
+	tf->tf_rflags &= ~PSL_CLEARSIG;
+	tf->tf_rsp = (uint64_t)fp;
+	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);
+
+	/* Remember that we're now on the signal stack. */
+	if (onstack)
+		l->l_sigstk.ss_flags |= SS_ONSTACK;
+	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
+		/*
+		 * process has given an invalid address for the
+		 * handler. Stop it, but do not do it before so
+		 * we can return the right info to userland (or in core dump)
+		 */
+		sigexit(l, SIGILL);
+		/* NOTREACHED */
+	}
+}
+
 #ifdef COMPAT_16
 static void
 netbsd32_sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
@@ -249,35 +286,7 @@ netbsd32_sendsig_sigcontext(const ksigin
 		/* NOTREACHED */
 	}
 
-	/*
-	 * Build context to run handler in.
-	 */
-	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
-	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
-	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
-	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);
-
-	/* Ensure FP state is sane. */
-	fpu_save_area_reset(l);
-
-	tf->tf_rip = (uint64_t)catcher;
-	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
-	tf->tf_rflags &= ~PSL_CLEARSIG;
-	tf->tf_rsp = (uint64_t)fp;
-	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);
-
-	/* Remember that we're now on the signal stack. */
-	if (onstack)
-		l->l_sigstk.ss_flags |= SS_ONSTACK;
-	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
-		/*
-		 * process has given an invalid address for the
-		 * handler. Stop it, but do not do it before so
-		 * we can return the right info to userland (or in core dump)
-		 */
-		sigexit(l, SIGILL);
-		/* NOTREACHED */
-	}
+	netbsd32_buildcontext(l, tf, fp, catcher, onstack);
 }
 #endif
 
@@ -346,35 +355,7 @@ netbsd32_sendsig_siginfo(const ksiginfo_
 		/* NOTREACHED */
 	}
 
-	/*
-	 * Build context to run handler in.
-	 */
-	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
-	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
-	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
-	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);
-
-	tf->tf_rip = (uint64_t)catcher;
-	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
-	tf->tf_rflags &= ~PSL_CLEARSIG;
-	tf->tf_rsp = (uint64_t)fp;
-	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);
-
-	/* Ensure FP state is sane. */
-	fpu_save_area_reset(l);
-
-	/* Remember that we're now on the signal stack. */
-	if (onstack)
-		l->l_sigstk.ss_flags |= SS_ONSTACK;
-	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
-		/*
-		 * process has given an invalid address for the
-		 * handler. Stop it, but do not do it before so
-		 * we can return the right info to userland (or in core dump)
-		 */
-		sigexit(l, SIGILL);
-		/* NOTREACHED */
-	}
+	netbsd32_buildcontext(l, tf, fp, catcher, onstack);
 }
 
 void
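
A minimal, portable-C sketch of the failure mode described in the log: any signal handler that touches thread-local state (a __thread variable, errno, or __lwp_getprivate_fast() underneath on NetBSD) goes through the %fs/%gs base, so clobbering those selectors while building the signal context breaks exactly this kind of handler. Nothing below is NetBSD-specific.

#include <signal.h>
#include <stdio.h>
#include <string.h>

static __thread volatile int tls_hits;	/* reached via the TLS segment base */

static void
on_sigusr1(int sig)
{
	(void)sig;
	tls_hits++;		/* would fault if the TLS base were lost */
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigusr1;
	(void)sigaction(SIGUSR1, &sa, NULL);

	(void)raise(SIGUSR1);
	printf("tls_hits=%d\n", tls_hits);
	return 0;
}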



CVS commit: [netbsd-8] src/sys/arch/amd64/amd64

2017-11-30 Thread Martin Husemann
Module Name:    src
Committed By:   martin
Date:   Thu Nov 30 14:03:41 UTC 2017

Modified Files:
src/sys/arch/amd64/amd64 [netbsd-8]: machdep.c

Log Message:
Pull up following revision(s) (requested by maxv in ticket #401):
sys/arch/amd64/amd64/machdep.c: revision 1.267
Mmh, don't forget to clear the TLS gdt slots on Xen. Otherwise, when doing
a lwp32->lwp64 context switch, the new lwp can use the slots to reconstruct
the address of the previous lwp's TLS space (and defeat ASLR?).


To generate a diff of this commit:
cvs rdiff -u -r1.255.6.1 -r1.255.6.2 src/sys/arch/amd64/amd64/machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.255.6.1 src/sys/arch/amd64/amd64/machdep.c:1.255.6.2
--- src/sys/arch/amd64/amd64/machdep.c:1.255.6.1	Mon Sep  4 20:41:28 2017
+++ src/sys/arch/amd64/amd64/machdep.c	Thu Nov 30 14:03:41 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.255.6.1 2017/09/04 20:41:28 snj Exp $	*/
+/*	$NetBSD: machdep.c,v 1.255.6.2 2017/11/30 14:03:41 martin Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -111,7 +111,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.255.6.1 2017/09/04 20:41:28 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.255.6.2 2017/11/30 14:03:41 martin Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -427,6 +427,7 @@ x86_64_tls_switch(struct lwp *l)
 	struct cpu_info *ci = curcpu();
 	struct pcb *pcb = lwp_getpcb(l);
 	struct trapframe *tf = l->l_md.md_regs;
+	uint64_t zero = 0;
 
 	/*
 	 * Raise the IPL to IPL_HIGH.
@@ -449,6 +450,8 @@ x86_64_tls_switch(struct lwp *l)
 		setfs(tf->tf_fs);
 		HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, tf->tf_gs);
 	} else {
+		update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &zero);
+		update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &zero);
 		setfs(0);
 		HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, 0);
 		HYPERVISOR_set_segment_base(SEGBASE_FS, pcb->pcb_fs);
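
For background on why a stale slot matters: a legacy (32-bit) GDT descriptor carries the segment base split across bytes 2-4 and 7, so an lwp that can still reach the old GUFS/GUGS slot can reassemble the previous lwp's TLS base. The sketch below shows that standard x86 descriptor layout with illustrative values; it is not NetBSD code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pull the 32-bit base out of a legacy segment descriptor. */
static uint32_t
descriptor_base(uint64_t desc)
{
	uint32_t lo = (uint32_t)((desc >> 16) & 0xffffff);	/* base 0..23  */
	uint32_t hi = (uint32_t)((desc >> 56) & 0xff);		/* base 24..31 */

	return (hi << 24) | lo;
}

int
main(void)
{
	uint32_t tls_base = 0x12345678;		/* illustrative TLS base */
	uint64_t desc = 0;

	/* Pack the base the way a GUFS/GUGS descriptor would carry it. */
	desc |= (uint64_t)(tls_base & 0xffffff) << 16;
	desc |= (uint64_t)(tls_base >> 24) << 56;

	printf("recovered base: %#" PRIx32 "\n", descriptor_base(desc));
	return 0;
}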


