Module Name:    src
Committed By:   maxv
Date:           Sun Jan 21 11:21:40 UTC 2018

Modified Files:
        src/sys/arch/amd64/amd64: amd64_trap.S locore.S machdep.c vector.S
        src/sys/arch/amd64/conf: kern.ldscript kern.ldscript.kaslr
        src/sys/arch/amd64/include: frameasm.h

Log Message:
Unmap the kernel from userland in SVS, and leave only the needed
trampolines. As explained below, SVS should now completely mitigate
Meltdown on GENERIC kernels, even though it needs some more tweaking
for GENERIC_KASLR.

Until now the kernel entry points looked like:

        FUNC(intr)
                pushq   $ERR
                pushq   $TRAPNO
                INTRENTRY
                ... handle interrupt ...
                INTRFASTEXIT
        END(intr)

With this change they are split and become:

        FUNC(handle)
                ... handle interrupt ...
                INTRFASTEXIT
        END(handle)

                TEXT_USER_BEGIN
        FUNC(intr)
                pushq   $ERR
                pushq   $TRAPNO
                INTRENTRY
                jmp     handle
        END(intr)
                TEXT_USER_END

A new section, .text.user, is introduced; it contains the minimal kernel
entry/exit points. Two macros, TEXT_USER_BEGIN and TEXT_USER_END, are
introduced to choose what goes into this section.
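
Concretely, the two macros just push and pop the section (see the
frameasm.h hunk below):

        #define TEXT_USER_BEGIN .pushsection    .text.user, "ax"
        #define TEXT_USER_END   .popsection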

The section is mapped in userland with normal 4K pages.
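
At SVS initialization time each CPU adds that range to its user page
tables, using the __text_user_start/__text_user_end symbols provided by
the linker scripts (see the machdep.c hunk below):

        svs_range_add(ci, (vaddr_t)&__text_user_start,
            (vaddr_t)&__text_user_end - (vaddr_t)&__text_user_start);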

In GENERIC, the section is 4K-page-aligned and embedded in .text, which
is mapped with large pages. That is to say, when an interrupt comes in,
the CPU still has the user page tables loaded and executes the 'intr'
functions on 4K pages; once SVS_ENTER (in INTRENTRY) has switched to the
kernel page tables, the same code is covered by 2MB large pages, and
remains so while executing in kernel mode.
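
In the linker script this is just a page-aligned region placed at the
head of the .text output section (see the kern.ldscript hunk below):

        . = ALIGN(__PAGE_SIZE);
        __text_user_start = . ;
        *(.text.user)
        . = ALIGN(__PAGE_SIZE);
        __text_user_end = . ;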

In GENERIC_KASLR, the section is 4K-page-aligned and independent from the
other kernel texts. The prekern just picks it up and maps it at a random
address.
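
Here .text.user becomes its own page-aligned output section, padded
with 0xCC, which the prekern can then relocate independently of .text
(see the kern.ldscript.kaslr hunk below):

        .text.user : SUBALIGN(PAGE_SIZE)
        {
                __text_user_start = . ;
                *(.text.user)
                . = ALIGN(PAGE_SIZE);
                __text_user_end = . ;
        } =0xCC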

In GENERIC, SVS should now completely mitigate Meltdown: what we put in
.text.user is not secret.

In GENERIC_KASLR, SVS would have to be improved a bit more: the
'jmp handle' instruction itself is secret, since its target leaks the
address of the section being jumped into. By exploiting Meltdown on
Intel, this theoretically allows a local user to reconstruct the address
of the first text section. But given that our KASLR produces several
text sections, and that the sections are not correlated with one
another, the level of protection KASLR provides is still good.


To generate a diff of this commit:
cvs rdiff -u -r1.22 -r1.23 src/sys/arch/amd64/amd64/amd64_trap.S
cvs rdiff -u -r1.147 -r1.148 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.294 -r1.295 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.55 -r1.56 src/sys/arch/amd64/amd64/vector.S
cvs rdiff -u -r1.25 -r1.26 src/sys/arch/amd64/conf/kern.ldscript
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/amd64/conf/kern.ldscript.kaslr
cvs rdiff -u -r1.30 -r1.31 src/sys/arch/amd64/include/frameasm.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/amd64_trap.S
diff -u src/sys/arch/amd64/amd64/amd64_trap.S:1.22 src/sys/arch/amd64/amd64/amd64_trap.S:1.23
--- src/sys/arch/amd64/amd64/amd64_trap.S:1.22	Sat Jan 20 14:27:15 2018
+++ src/sys/arch/amd64/amd64/amd64_trap.S	Sun Jan 21 11:21:40 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: amd64_trap.S,v 1.22 2018/01/20 14:27:15 maxv Exp $	*/
+/*	$NetBSD: amd64_trap.S,v 1.23 2018/01/21 11:21:40 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@@ -95,13 +95,19 @@
 #define	PRE_TRAP
 #endif
 
+#define TRAPENTRY			\
+	INTRENTRY			; \
+	jmp	.Lalltraps_noentry
+
 #define	TRAP_NJ(a)	PRE_TRAP ; pushq $(a)
 #define	ZTRAP_NJ(a)	PRE_TRAP ; pushq $0 ; pushq $(a)
-#define	TRAP(a)		TRAP_NJ(a) ; jmp _C_LABEL(alltraps)
-#define	ZTRAP(a)	ZTRAP_NJ(a) ; jmp _C_LABEL(alltraps)
+#define	TRAP(a)		TRAP_NJ(a) ; TRAPENTRY
+#define	ZTRAP(a)	ZTRAP_NJ(a) ; TRAPENTRY
 
 	.text
 
+	TEXT_USER_BEGIN
+
 IDTVEC(trap00)
 	ZTRAP(T_DIVIDE)
 IDTVEC_END(trap00)
@@ -361,24 +367,6 @@ IDTVEC(intrspurious)
 	jmp	.Lalltraps_checkusr
 IDTVEC_END(intrspurious)
 
-/*
- * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
- * segment registers or during the iret itself). The address of the (possibly
- * reconstructed) user trap frame is passed as an argument.
- *
- * Typically the code will have raised a SIGSEGV which will be actioned
- * by the code below.
- */
-	.type	_C_LABEL(trap_return_fault_return), @function
-LABEL(trap_return_fault_return)
-	mov	%rdi,%rsp		/* frame for user return */
-#ifdef DIAGNOSTIC
-	/* We can't recover the saved %rbx, so suppress warning */
-	movl	CPUVAR(ILEVEL),%ebx
-#endif
-	jmp	.Lalltraps_checkusr
-END(trap_return_fault_return)
-
 #ifndef check_swapgs
 /*
  * We need to worry about traps in kernel mode while the kernel %gs isn't
@@ -423,12 +411,33 @@ NENTRY(check_swapgs)
 END(check_swapgs)
 #endif
 
+	TEXT_USER_END
+
+/*
+ * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
+ * segment registers or during the iret itself). The address of the (possibly
+ * reconstructed) user trap frame is passed as an argument.
+ *
+ * Typically the code will have raised a SIGSEGV which will be actioned
+ * by the code below.
+ */
+	.type	_C_LABEL(trap_return_fault_return), @function
+LABEL(trap_return_fault_return)
+	mov	%rdi,%rsp		/* frame for user return */
+#ifdef DIAGNOSTIC
+	/* We can't recover the saved %rbx, so suppress warning */
+	movl	CPUVAR(ILEVEL),%ebx
+#endif
+	jmp	.Lalltraps_checkusr
+END(trap_return_fault_return)
+
 /*
  * All traps go through here. Call the generic trap handler, and
  * check for ASTs afterwards.
  */
 NENTRY(alltraps)
 	INTRENTRY
+.Lalltraps_noentry:
 	STI(si)
 
 calltrap:

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.147 src/sys/arch/amd64/amd64/locore.S:1.148
--- src/sys/arch/amd64/amd64/locore.S:1.147	Thu Jan 18 07:25:34 2018
+++ src/sys/arch/amd64/amd64/locore.S	Sun Jan 21 11:21:40 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.147 2018/01/18 07:25:34 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.148 2018/01/21 11:21:40 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -1248,76 +1248,12 @@ ENTRY(savectx)
 	ret
 END(savectx)
 
-IDTVEC(syscall32)
-	sysret		/* go away please */
-IDTVEC_END(syscall32)
-
 /*
- * syscall()
- *
- * syscall insn entry.
+ * Syscall handler.
  */
-IDTVEC(syscall)
-#ifndef XEN
-	/*
-	 * The user %rip is in %rcx and the user %rflags in %r11. The kernel %cs
-	 * and %ss are loaded, but nothing else is.
-	 *
-	 * The 'swapgs' instruction gives us access to cpu-specific memory where
-	 * we can save a user register and then read the LWP's kernel stack
-	 * pointer.
-	 *
-	 * This code doesn't seem to set %ds, this may not matter since it is
-	 * ignored in 64bit mode, OTOH the syscall instruction sets %ss and that
-	 * is ignored as well.
-	 */
-	swapgs
-
-#ifdef SVS
-	movq	%rax,SVS_UTLS+UTLS_SCRATCH
-	movq	SVS_UTLS+UTLS_RSP0,%rax
-#define SP(x)	(x)-(TF_SS+8)(%rax)
-#else
-	movq	%r15,CPUVAR(SCRATCH)
-	movq	CPUVAR(CURLWP),%r15
-	movq	L_PCB(%r15),%r15
-	movq	PCB_RSP0(%r15),%r15	/* LWP's kernel stack pointer */
-#define SP(x)	(x)-(TF_SS+8)(%r15)
-#endif
-
-	/* Make stack look like an 'int nn' frame */
-	movq	$(LSEL(LUDATA_SEL, SEL_UPL)),SP(TF_SS)	/* user %ss */
-	movq	%rsp,SP(TF_RSP)				/* user %rsp */
-	movq	%r11,SP(TF_RFLAGS)			/* user %rflags */
-	movq	$(LSEL(LUCODE_SEL, SEL_UPL)),SP(TF_CS)	/* user %cs */
-	movq	%rcx,SP(TF_RIP)				/* user %rip */
-
-	leaq	SP(0),%rsp		/* %rsp now valid after frame */
-#ifdef SVS
-	movq	SVS_UTLS+UTLS_SCRATCH,%rax
-#else
-	movq	CPUVAR(SCRATCH),%r15
-#endif
-
-	movq	$2,TF_ERR(%rsp)		/* syscall instruction size */
-	movq	$T_ASTFLT,TF_TRAPNO(%rsp)
-#else
-	/* Xen already switched to kernel stack */
-	addq	$0x10,%rsp	/* gap to match cs:rip */
-	pushq	$2		/* error code */
-	pushq	$T_ASTFLT
-	subq	$TF_REGSIZE,%rsp
-	cld
-#endif
-	INTR_SAVE_GPRS
-	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_DS(%rsp)
-	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_ES(%rsp)
-	movw	$0,TF_FS(%rsp)
-	movw	$0,TF_GS(%rsp)
-	SVS_ENTER
+NENTRY(handle_syscall)
 	STI(si)
 
-.Ldo_syscall:
 	movq	CPUVAR(CURLWP),%r14
 	incq	CPUVAR(NSYSCALL)	/* count it atomically */
 	movq	%rsp,L_MD_REGS(%r14)	/* save pointer to frame */
@@ -1353,20 +1289,7 @@ IDTVEC(syscall)
 	testl	$(MDL_IRET|MDL_COMPAT32),L_MD_FLAGS(%r14)
 	jnz	intrfastexit
 
-	SVS_LEAVE
-	INTR_RESTORE_GPRS
-	SWAPGS
-#ifndef XEN
-	movq	TF_RIP(%rsp),%rcx	/* %rip for sysret */
-	movq	TF_RFLAGS(%rsp),%r11	/* %flags for sysret */
-	movq	TF_RSP(%rsp),%rsp
-do_sysret:
-	sysretq
-#else
-	addq	$TF_RIP,%rsp
-	pushq	$256	/* VGCF_IN_SYSCALL */
-	jmp	HYPERVISOR_iret
-#endif
+	jmp	syscall_sysret
 
 #ifdef DIAGNOSTIC
 	/* Report SPL error */
@@ -1398,7 +1321,7 @@ do_sysret:
 	movq	%rsp,%rdi
 	call	_C_LABEL(trap)
 	jmp	.Lsyscall_checkast	/* re-check ASTs */
-IDTVEC_END(syscall)
+END(handle_syscall)
 
 /*
  * void lwp_trampoline(void);
@@ -1418,10 +1341,83 @@ NENTRY(lwp_trampoline)
 END(lwp_trampoline)
 
 /*
+ * Entry points of the 'syscall' instruction, 64bit and 32bit mode.
+ */
+	TEXT_USER_BEGIN
+
+IDTVEC(syscall)
+#ifndef XEN
+	/*
+	 * The user %rip is in %rcx and the user %rflags in %r11. The kernel %cs
+	 * and %ss are loaded, but nothing else is.
+	 *
+	 * The 'swapgs' instruction gives us access to cpu-specific memory where
+	 * we can save a user register and then read the LWP's kernel stack
+	 * pointer.
+	 *
+	 * This code doesn't seem to set %ds, this may not matter since it is
+	 * ignored in 64bit mode, OTOH the syscall instruction sets %ss and that
+	 * is ignored as well.
+	 */
+	swapgs
+
+#ifdef SVS
+	movq	%rax,SVS_UTLS+UTLS_SCRATCH
+	movq	SVS_UTLS+UTLS_RSP0,%rax
+#define SP(x)	(x)-(TF_SS+8)(%rax)
+#else
+	movq	%r15,CPUVAR(SCRATCH)
+	movq	CPUVAR(CURLWP),%r15
+	movq	L_PCB(%r15),%r15
+	movq	PCB_RSP0(%r15),%r15	/* LWP's kernel stack pointer */
+#define SP(x)	(x)-(TF_SS+8)(%r15)
+#endif
+
+	/* Make stack look like an 'int nn' frame */
+	movq	$(LSEL(LUDATA_SEL, SEL_UPL)),SP(TF_SS)	/* user %ss */
+	movq	%rsp,SP(TF_RSP)				/* user %rsp */
+	movq	%r11,SP(TF_RFLAGS)			/* user %rflags */
+	movq	$(LSEL(LUCODE_SEL, SEL_UPL)),SP(TF_CS)	/* user %cs */
+	movq	%rcx,SP(TF_RIP)				/* user %rip */
+
+	leaq	SP(0),%rsp		/* %rsp now valid after frame */
+#ifdef SVS
+	movq	SVS_UTLS+UTLS_SCRATCH,%rax
+#else
+	movq	CPUVAR(SCRATCH),%r15
+#endif
+
+	movq	$2,TF_ERR(%rsp)		/* syscall instruction size */
+	movq	$T_ASTFLT,TF_TRAPNO(%rsp)
+#else
+	/* Xen already switched to kernel stack */
+	addq	$0x10,%rsp	/* gap to match cs:rip */
+	pushq	$2		/* error code */
+	pushq	$T_ASTFLT
+	subq	$TF_REGSIZE,%rsp
+	cld
+#endif
+	INTR_SAVE_GPRS
+	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_DS(%rsp)
+	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_ES(%rsp)
+	movw	$0,TF_FS(%rsp)
+	movw	$0,TF_GS(%rsp)
+	SVS_ENTER
+	jmp	handle_syscall
+IDTVEC_END(syscall)
+
+IDTVEC(syscall32)
+	sysret		/* go away please */
+IDTVEC_END(syscall32)
+
+	TEXT_USER_END
+
+/*
  * osyscall()
  *
  * Trap gate entry for int $80 syscall, also used by sigreturn.
  */
+	TEXT_USER_BEGIN
 IDTVEC(osyscall)
 #ifdef XEN
 	movq (%rsp),%rcx
@@ -1431,9 +1427,32 @@ IDTVEC(osyscall)
 	pushq	$2		/* size of instruction for restart */
 	pushq	$T_ASTFLT	/* trap # for doing ASTs */
 	INTRENTRY
-	STI(si)
-	jmp	.Ldo_syscall
+	jmp	handle_syscall
 IDTVEC_END(osyscall)
+	TEXT_USER_END
+
+/*
+ * Return to userland via 'sysret'.
+ */
+	TEXT_USER_BEGIN
+	_ALIGN_TEXT
+LABEL(syscall_sysret)
+	SVS_LEAVE
+	INTR_RESTORE_GPRS
+	SWAPGS
+#ifndef XEN
+	movq	TF_RIP(%rsp),%rcx	/* %rip for sysret */
+	movq	TF_RFLAGS(%rsp),%r11	/* %flags for sysret */
+	movq	TF_RSP(%rsp),%rsp
+do_sysret:
+	sysretq
+#else
+	addq	$TF_RIP,%rsp
+	pushq	$256	/* VGCF_IN_SYSCALL */
+	jmp	HYPERVISOR_iret
+#endif
+END(syscall_sysret)
+	TEXT_USER_END
 
 /*
  * bool sse2_idlezero_page(void *pg)
@@ -1496,7 +1515,10 @@ ENTRY(pagezero)
 	ret
 END(pagezero)
 
-ENTRY(intrfastexit)
+	TEXT_USER_BEGIN
+
+	_ALIGN_TEXT
+LABEL(intrfastexit)
 	NOT_XEN(cli;)
 	SVS_LEAVE
 	INTR_RESTORE_GPRS

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.294 src/sys/arch/amd64/amd64/machdep.c:1.295
--- src/sys/arch/amd64/amd64/machdep.c:1.294	Sun Jan 21 08:20:30 2018
+++ src/sys/arch/amd64/amd64/machdep.c	Sun Jan 21 11:21:40 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.294 2018/01/21 08:20:30 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.295 2018/01/21 11:21:40 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.294 2018/01/21 08:20:30 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.295 2018/01/21 11:21:40 maxv Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -2266,7 +2266,7 @@ mm_md_direct_mapped_phys(paddr_t paddr, 
  *     Direct Map        [OK]
  *     Remote PCPU Areas [OK]
  *     Kernel Heap       [OK]
- *     Kernel Image      [TODO]
+ *     Kernel Image      [OK]
  */
 
 struct svs_utls {
@@ -2450,6 +2450,8 @@ svs_range_add(struct cpu_info *ci, vaddr
 void
 cpu_svs_init(struct cpu_info *ci)
 {
+	extern char __text_user_start;
+	extern char __text_user_end;
 	const cpuid_t cid = cpu_index(ci);
 	struct vm_page *pg;
 
@@ -2480,6 +2482,8 @@ cpu_svs_init(struct cpu_info *ci)
 	svs_page_add(ci, (vaddr_t)&pcpuarea->ldt);
 	svs_range_add(ci, (vaddr_t)&pcpuarea->ent[cid],
 	    offsetof(struct pcpu_entry, rsp0));
+	svs_range_add(ci, (vaddr_t)&__text_user_start,
+	    (vaddr_t)&__text_user_end - (vaddr_t)&__text_user_start);
 
 	svs_rsp0_init(ci);
 	svs_utls_init(ci);
@@ -2595,10 +2599,6 @@ svs_pdir_switch(struct pmap *pmap)
 		ci->ci_svs_updir[i] = pte;
 	}
 
-	/* Kernel image. */
-	pte = svs_pte_atomic_read(pmap, 511);
-	ci->ci_svs_updir[511] = pte;
-
 	mutex_exit(&ci->ci_svs_mtx);
 }
 #endif

Index: src/sys/arch/amd64/amd64/vector.S
diff -u src/sys/arch/amd64/amd64/vector.S:1.55 src/sys/arch/amd64/amd64/vector.S:1.56
--- src/sys/arch/amd64/amd64/vector.S:1.55	Sat Jan 20 14:27:15 2018
+++ src/sys/arch/amd64/amd64/vector.S	Sun Jan 21 11:21:40 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: vector.S,v 1.55 2018/01/20 14:27:15 maxv Exp $	*/
+/*	$NetBSD: vector.S,v 1.56 2018/01/21 11:21:40 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -116,10 +116,7 @@ IDTVEC(recurse_lapic_ipi)
 	INTRENTRY
 	jmp	1f
 IDTVEC_END(recurse_lapic_ipi)
-IDTVEC(intr_x2apic_ipi)
-	pushq	$0
-	pushq	$T_ASTFLT
-	INTRENTRY
+NENTRY(handle_x2apic_ipi)
 	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
 	xorl	%eax,%eax
 	xorl	%edx,%edx
@@ -128,18 +125,15 @@ IDTVEC(intr_x2apic_ipi)
 	cmpl	$IPL_HIGH,%ebx
 	jae	2f
 	jmp	1f
-IDTVEC_END(intr_x2apic_ipi)
-IDTVEC(intr_lapic_ipi)
-	pushq	$0
-	pushq	$T_ASTFLT
-	INTRENTRY
+END(handle_x2apic_ipi)
+NENTRY(handle_lapic_ipi)
 	movq	_C_LABEL(local_apic_va),%rbx
 	movl	$0,LAPIC_EOI(%rbx)
 	movl	CPUVAR(ILEVEL),%ebx
 	cmpl	$IPL_HIGH,%ebx
 	jae	2f
 	jmp	1f
-IDTVEC_END(intr_lapic_ipi)
+END(handle_lapic_ipi)
 IDTVEC(resume_lapic_ipi)
 1:
 	incl	CPUVAR(IDEPTH)
@@ -153,12 +147,23 @@ IDTVEC(resume_lapic_ipi)
 	INTRFASTEXIT
 IDTVEC_END(resume_lapic_ipi)
 
-#if defined(DDB)
-IDTVEC(intrddbipi)
-1:
+	TEXT_USER_BEGIN
+IDTVEC(intr_x2apic_ipi)
 	pushq	$0
-	pushq	$T_BPTFLT
+	pushq	$T_ASTFLT
 	INTRENTRY
+	jmp	_C_LABEL(handle_x2apic_ipi)
+IDTVEC_END(intr_x2apic_ipi)
+IDTVEC(intr_lapic_ipi)
+	pushq	$0
+	pushq	$T_ASTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_lapic_ipi)
+IDTVEC_END(intr_lapic_ipi)
+	TEXT_USER_END
+
+#if defined(DDB)
+NENTRY(handle_intrddbipi)
 	movl	$0xf,%eax
 	movq	%rax,%cr8
 	movq	_C_LABEL(local_apic_va),%rbx
@@ -168,13 +173,8 @@ IDTVEC(intrddbipi)
 	xorl	%eax,%eax
 	movq	%rax,%cr8
 	INTRFASTEXIT
-IDTVEC_END(intrddbipi)
-
-IDTVEC(x2apic_intrddbipi)
-1:
-	pushq	$0
-	pushq	$T_BPTFLT
-	INTRENTRY
+END(handle_intrddbipi)
+NENTRY(handle_x2apic_intrddbipi)
 	movl	$0xf,%eax
 	movq	%rax,%cr8
 	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
@@ -186,7 +186,23 @@ IDTVEC(x2apic_intrddbipi)
 	xorl	%eax,%eax
 	movq	%rax,%cr8
 	INTRFASTEXIT
+END(handle_x2apic_intrddbipi)
+
+	TEXT_USER_BEGIN
+IDTVEC(intrddbipi)
+	pushq	$0
+	pushq	$T_BPTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_intrddbipi)
+IDTVEC_END(intrddbipi)
+IDTVEC(x2apic_intrddbipi)
+	pushq	$0
+	pushq	$T_BPTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_x2apic_intrddbipi)
 IDTVEC_END(x2apic_intrddbipi)
+	TEXT_USER_END
+
 #endif /* DDB */
 #endif /* MULTIPROCESSOR */
 
@@ -200,10 +216,7 @@ IDTVEC(recurse_lapic_ltimer)
 	INTRENTRY
 	jmp	1f
 IDTVEC_END(recurse_lapic_ltimer)
-IDTVEC(intr_x2apic_ltimer)
-	pushq	$0
-	pushq	$T_ASTFLT
-	INTRENTRY
+NENTRY(handle_x2apic_ltimer)
 	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
 	xorl	%eax,%eax
 	xorl	%edx,%edx
@@ -212,18 +225,15 @@ IDTVEC(intr_x2apic_ltimer)
 	cmpl	$IPL_CLOCK,%ebx
 	jae	2f
 	jmp	1f
-IDTVEC_END(intr_x2apic_ltimer)
-IDTVEC(intr_lapic_ltimer)
-	pushq	$0
-	pushq	$T_ASTFLT
-	INTRENTRY
+END(handle_x2apic_ltimer)
+NENTRY(handle_lapic_ltimer)
 	movq	_C_LABEL(local_apic_va),%rbx
 	movl	$0,LAPIC_EOI(%rbx)
 	movl	CPUVAR(ILEVEL),%ebx
 	cmpl	$IPL_CLOCK,%ebx
 	jae	2f
 	jmp	1f
-IDTVEC_END(intr_lapic_ltimer)
+END(handle_lapic_ltimer)
 IDTVEC(resume_lapic_ltimer)
 1:
 	incl	CPUVAR(IDEPTH)
@@ -238,33 +248,57 @@ IDTVEC(resume_lapic_ltimer)
 	orl	$(1 << LIR_TIMER),CPUVAR(IPENDING)
 	INTRFASTEXIT
 IDTVEC_END(resume_lapic_ltimer)
+
+	TEXT_USER_BEGIN
+IDTVEC(intr_x2apic_ltimer)
+	pushq	$0
+	pushq	$T_ASTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_x2apic_ltimer)
+IDTVEC_END(intr_x2apic_ltimer)
+IDTVEC(intr_lapic_ltimer)
+	pushq	$0
+	pushq	$T_ASTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_lapic_ltimer)
+IDTVEC_END(intr_lapic_ltimer)
+	TEXT_USER_END
+
 #endif /* NLAPIC > 0 */
 
 #ifndef XEN
 /*
  * TLB shootdown handler.
  */
-IDTVEC(intr_lapic_tlb)
-	pushq	$0
-	pushq	$T_ASTFLT
-	INTRENTRY
+NENTRY(handle_lapic_tlb)
 	movq	_C_LABEL(local_apic_va),%rax
 	movl	$0,LAPIC_EOI(%rax)
 	callq	_C_LABEL(pmap_tlb_intr)
 	INTRFASTEXIT
-IDTVEC_END(intr_lapic_tlb)
-
-IDTVEC(intr_x2apic_tlb)
-	pushq	$0
-	pushq	$T_ASTFLT
-	INTRENTRY
+END(handle_lapic_tlb)
+NENTRY(handle_x2apic_tlb)
 	movl	$(MSR_X2APIC_BASE + MSR_X2APIC_EOI),%ecx
 	xorl	%eax,%eax
 	xorl	%edx,%edx
 	wrmsr
 	callq	_C_LABEL(pmap_tlb_intr)
 	INTRFASTEXIT
+END(handle_x2apic_tlb)
+
+	TEXT_USER_BEGIN
+IDTVEC(intr_lapic_tlb)
+	pushq	$0
+	pushq	$T_ASTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_lapic_tlb)
+IDTVEC_END(intr_lapic_tlb)
+IDTVEC(intr_x2apic_tlb)
+	pushq	$0
+	pushq	$T_ASTFLT
+	INTRENTRY
+	jmp	_C_LABEL(handle_x2apic_tlb)
 IDTVEC_END(intr_x2apic_tlb)
+	TEXT_USER_END
 
 #endif /* !XEN */
 
@@ -292,10 +326,7 @@ IDTVEC(resume_ ## name ## num)						\
 	movl	IS_MAXLEVEL(%r14),%ebx					;\
 	jmp	1f							;\
 IDTVEC_END(resume_ ## name ## num)					;\
-IDTVEC(intr_ ## name ## num)						;\
-	pushq	$0			/* dummy error code */		;\
-	pushq	$T_ASTFLT		/* trap # for doing ASTs */	;\
-	INTRENTRY							;\
+NENTRY(handle_ ## name ## num)						;\
 	movq	CPUVAR(ISOURCES) + (num) * 8,%r14			;\
 	mask(num)			/* mask it in hardware */	;\
 	early_ack(num)			/* and allow other intrs */	;\
@@ -347,7 +378,15 @@ IDTVEC(intr_ ## name ## num)						;\
 	unmask(num)							;\
 	late_ack(num)							;\
 	INTRFASTEXIT							;\
-IDTVEC_END(intr_ ## name ## num)
+END(handle_ ## name ## num)						;\
+	TEXT_USER_BEGIN							;\
+IDTVEC(intr_ ## name ## num)						;\
+	pushq	$0			/* dummy error code */		;\
+	pushq	$T_ASTFLT		/* trap # for doing ASTs */	;\
+	INTRENTRY							;\
+	jmp	_C_LABEL(handle_ ## name ## num)			;\
+IDTVEC_END(intr_ ## name ## num)					;\
+	TEXT_USER_END
 
 #define ICUADDR IO_ICU1
 

Index: src/sys/arch/amd64/conf/kern.ldscript
diff -u src/sys/arch/amd64/conf/kern.ldscript:1.25 src/sys/arch/amd64/conf/kern.ldscript:1.26
--- src/sys/arch/amd64/conf/kern.ldscript:1.25	Sun Jan  7 12:42:46 2018
+++ src/sys/arch/amd64/conf/kern.ldscript	Sun Jan 21 11:21:40 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern.ldscript,v 1.25 2018/01/07 12:42:46 maxv Exp $	*/
+/*	$NetBSD: kern.ldscript,v 1.26 2018/01/21 11:21:40 maxv Exp $	*/
 
 #include "assym.h"
 
@@ -15,6 +15,12 @@ SECTIONS
 {
 	.text : AT (ADDR(.text) & 0x0fffffff)
 	{
+		. = ALIGN(__PAGE_SIZE);
+		__text_user_start = . ;
+		*(.text.user)
+		. = ALIGN(__PAGE_SIZE);
+		__text_user_end = . ;
+
 		*(.text)
 		*(.text.*)
 		*(.stub)

Index: src/sys/arch/amd64/conf/kern.ldscript.kaslr
diff -u src/sys/arch/amd64/conf/kern.ldscript.kaslr:1.4 src/sys/arch/amd64/conf/kern.ldscript.kaslr:1.5
--- src/sys/arch/amd64/conf/kern.ldscript.kaslr:1.4	Sun Jan  7 12:42:46 2018
+++ src/sys/arch/amd64/conf/kern.ldscript.kaslr	Sun Jan 21 11:21:40 2018
@@ -1,10 +1,18 @@
-/*	$NetBSD: kern.ldscript.kaslr,v 1.4 2018/01/07 12:42:46 maxv Exp $	*/
+/*	$NetBSD: kern.ldscript.kaslr,v 1.5 2018/01/21 11:21:40 maxv Exp $	*/
 
 #include "assym.h"
 
 ENTRY(_start)
 SECTIONS
 {
+	.text.user : SUBALIGN(PAGE_SIZE)
+	{
+		__text_user_start = . ;
+		*(.text.user)
+		. = ALIGN(PAGE_SIZE);
+		__text_user_end = . ;
+	} =0xCC
+
 	.text :
 	{
 		*(.text)

Index: src/sys/arch/amd64/include/frameasm.h
diff -u src/sys/arch/amd64/include/frameasm.h:1.30 src/sys/arch/amd64/include/frameasm.h:1.31
--- src/sys/arch/amd64/include/frameasm.h:1.30	Sat Jan 20 14:39:21 2018
+++ src/sys/arch/amd64/include/frameasm.h	Sun Jan 21 11:21:40 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: frameasm.h,v 1.30 2018/01/20 14:39:21 maxv Exp $	*/
+/*	$NetBSD: frameasm.h,v 1.31 2018/01/21 11:21:40 maxv Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
@@ -96,6 +96,9 @@
 	movq	TF_RBX(%rsp),%rbx	; \
 	movq	TF_RAX(%rsp),%rax
 
+#define TEXT_USER_BEGIN	.pushsection	.text.user, "ax"
+#define TEXT_USER_END	.popsection
+
 #ifdef SVS
 
 /* XXX: put this somewhere else */
