Module Name:    src
Committed By:   maxv
Date:           Sun Jan  7 16:10:16 UTC 2018

Modified Files:
        src/sys/arch/amd64/amd64: amd64_trap.S genassym.cf locore.S machdep.c
        src/sys/arch/amd64/conf: GENERIC files.amd64
        src/sys/arch/amd64/include: frameasm.h pmap.h
        src/sys/arch/i386/conf: files.i386
        src/sys/arch/x86/include: cpu.h
        src/sys/arch/x86/x86: cpu.c pmap.c

Log Message:
Add a new option, SVS (for Separate Virtual Space), that unmaps kernel
pages when running in userland. For now, only the PTE area is unmapped.

Sent on tech-kern@.


To generate a diff of this commit:
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/amd64/amd64/amd64_trap.S
cvs rdiff -u -r1.64 -r1.65 src/sys/arch/amd64/amd64/genassym.cf
cvs rdiff -u -r1.144 -r1.145 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.284 -r1.285 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.476 -r1.477 src/sys/arch/amd64/conf/GENERIC
cvs rdiff -u -r1.97 -r1.98 src/sys/arch/amd64/conf/files.amd64
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/amd64/include/frameasm.h
cvs rdiff -u -r1.40 -r1.41 src/sys/arch/amd64/include/pmap.h
cvs rdiff -u -r1.388 -r1.389 src/sys/arch/i386/conf/files.i386
cvs rdiff -u -r1.87 -r1.88 src/sys/arch/x86/include/cpu.h
cvs rdiff -u -r1.143 -r1.144 src/sys/arch/x86/x86/cpu.c
cvs rdiff -u -r1.277 -r1.278 src/sys/arch/x86/x86/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/amd64_trap.S
diff -u src/sys/arch/amd64/amd64/amd64_trap.S:1.16 src/sys/arch/amd64/amd64/amd64_trap.S:1.17
--- src/sys/arch/amd64/amd64/amd64_trap.S:1.16	Sun Jan  7 12:42:46 2018
+++ src/sys/arch/amd64/amd64/amd64_trap.S	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $	*/
+/*	$NetBSD: amd64_trap.S,v 1.17 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@@ -66,7 +66,7 @@
 
 #if 0
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.17 2018/01/07 16:10:16 maxv Exp $");
 #endif
 
 /*
@@ -120,6 +120,7 @@ IDTVEC(trap02)
 #else
 	ZTRAP_NJ(T_NMI)
 	subq	$TF_REGSIZE,%rsp
+	SVS_ENTER
 	INTR_SAVE_GPRS
 	cld
 	SMAP_ENABLE
@@ -137,6 +138,7 @@ IDTVEC(trap02)
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(nmitrap)
+	SVS_LEAVE
 	swapgs
 	jmp	.Lnmileave
 
@@ -144,6 +146,7 @@ IDTVEC(trap02)
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(nmitrap)
+	SVS_LEAVE
 
 .Lnmileave:
 	movw	TF_ES(%rsp),%es

Index: src/sys/arch/amd64/amd64/genassym.cf
diff -u src/sys/arch/amd64/amd64/genassym.cf:1.64 src/sys/arch/amd64/amd64/genassym.cf:1.65
--- src/sys/arch/amd64/amd64/genassym.cf:1.64	Thu Jan  4 13:36:30 2018
+++ src/sys/arch/amd64/amd64/genassym.cf	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.64 2018/01/04 13:36:30 maxv Exp $
+#	$NetBSD: genassym.cf,v 1.65 2018/01/07 16:10:16 maxv Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -236,6 +236,8 @@ define	CPU_INFO_CURLDT		offsetof(struct 
 define	CPU_INFO_IDLELWP	offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define	CPU_INFO_PMAP		offsetof(struct cpu_info, ci_pmap)
 define	CPU_INFO_TSS		offsetof(struct cpu_info, ci_tss)
+define	CPU_INFO_UPDIRPA	offsetof(struct cpu_info, ci_svs_updirpa)
+define	CPU_INFO_KPDIRPA	offsetof(struct cpu_info, ci_svs_kpdirpa)
 define	CPU_INFO_NSYSCALL	offsetof(struct cpu_info, ci_data.cpu_nsyscall)
 define	CPU_INFO_NTRAP		offsetof(struct cpu_info, ci_data.cpu_ntrap)
 define	CPU_INFO_NINTR		offsetof(struct cpu_info, ci_data.cpu_nintr)

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.144 src/sys/arch/amd64/amd64/locore.S:1.145
--- src/sys/arch/amd64/amd64/locore.S:1.144	Thu Jan  4 13:36:30 2018
+++ src/sys/arch/amd64/amd64/locore.S	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.144 2018/01/04 13:36:30 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.145 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -159,6 +159,7 @@
 #include "opt_compat_netbsd.h"
 #include "opt_compat_netbsd32.h"
 #include "opt_xen.h"
+#include "opt_svs.h"
 
 #include "assym.h"
 #include "lapic.h"
@@ -1088,6 +1089,12 @@ ENTRY(cpu_switchto)
 	movq	%rbp,PCB_RBP(%rax)
 .Lskip_save:
 
+#ifdef SVS
+	pushq	%rdx
+	callq	_C_LABEL(svs_lwp_switch)
+	popq	%rdx
+#endif
+
 	/* Switch to newlwp's stack. */
 	movq	L_PCB(%r12),%r14
 	movq	PCB_RSP(%r14),%rsp
@@ -1288,6 +1295,7 @@ IDTVEC(syscall)
 	subq	$TF_REGSIZE,%rsp
 	cld
 #endif
+	SVS_ENTER
 	INTR_SAVE_GPRS
 	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_DS(%rsp)
 	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_ES(%rsp)
@@ -1332,6 +1340,7 @@ IDTVEC(syscall)
 	jnz	intrfastexit
 
 	INTR_RESTORE_GPRS
+	SVS_LEAVE
 	SWAPGS
 #ifndef XEN
 	movq	TF_RIP(%rsp),%rcx	/* %rip for sysret */
@@ -1494,6 +1503,7 @@ do_mov_ds:
 	movw	TF_DS(%rsp),%ds
 do_mov_fs:
 	movw	TF_FS(%rsp),%fs
+	SVS_LEAVE
 	SWAPGS
 #ifndef XEN
 do_mov_gs:
@@ -1503,6 +1513,7 @@ do_mov_gs:
 
 .Luexit64:
 	NOT_XEN(cli;)
+	SVS_LEAVE
 	SWAPGS
 
 .Lkexit:

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.284 src/sys/arch/amd64/amd64/machdep.c:1.285
--- src/sys/arch/amd64/amd64/machdep.c:1.284	Fri Jan  5 08:04:20 2018
+++ src/sys/arch/amd64/amd64/machdep.c	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.284 2018/01/05 08:04:20 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.285 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.284 2018/01/05 08:04:20 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.285 2018/01/07 16:10:16 maxv Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -123,6 +123,7 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 
 #include "opt_realmem.h"
 #include "opt_xen.h"
 #include "opt_kaslr.h"
+#include "opt_svs.h"
 #ifndef XEN
 #include "opt_physmem.h"
 #endif
@@ -2228,3 +2229,138 @@ mm_md_direct_mapped_phys(paddr_t paddr, 
 	return true;
 }
 #endif
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef SVS
+/*
+ * Separate Virtual Space
+ *
+ * A per-cpu L4 page is maintained in ci_svs_updirpa. During each context
+ * switch to a user pmap, updirpa is populated with the entries of the new
+ * pmap, minus what we don't want to have mapped in userland.
+ *
+ * Note on locking/synchronization here:
+ *
+ * (a) Touching ci_svs_updir without holding ci_svs_mtx first is *not*
+ *     allowed.
+ *
+ * (b) pm_kernel_cpus contains the set of CPUs that have the pmap loaded
+ *     in their CR3 register. It must *not* be replaced by pm_cpus.
+ *
+ * (c) When a context switch on the current CPU is made from a user LWP
+ *     towards a kernel LWP, CR3 is not updated. Therefore, the pmap's
+ *     pm_kernel_cpus still contains the current CPU. It implies that the
+ *     remote CPUs that execute other threads of the user process we just
+ *     left will keep synchronizing us against their changes.
+ *
+ * TODO: for now, only PMAP_SLOT_PTE is unmapped.
+ */
+
+void
+cpu_svs_init(struct cpu_info *ci)
+{
+	struct vm_page *pg;
+
+	KASSERT(ci != NULL);
+
+	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+	if (pg == 0)
+		panic("%s: failed to allocate L4 PA for CPU %d\n",
+			__func__, cpu_index(ci));
+	ci->ci_svs_updirpa = VM_PAGE_TO_PHYS(pg);
+
+	ci->ci_svs_updir = (pt_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
+		UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
+	if (ci->ci_svs_updir == NULL)
+		panic("%s: failed to allocate L4 VA for CPU %d\n",
+			__func__, cpu_index(ci));
+
+	pmap_kenter_pa((vaddr_t)ci->ci_svs_updir, ci->ci_svs_updirpa,
+		VM_PROT_READ | VM_PROT_WRITE, 0);
+
+	pmap_update(pmap_kernel());
+
+	ci->ci_svs_kpdirpa = pmap_pdirpa(pmap_kernel(), 0);
+
+	mutex_init(&ci->ci_svs_mtx, MUTEX_DEFAULT, IPL_VM);
+}
+
+void
+svs_pmap_sync(struct pmap *pmap, int index)
+{
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
+	cpuid_t cid;
+
+	KASSERT(pmap != NULL);
+	KASSERT(pmap != pmap_kernel());
+	KASSERT(mutex_owned(pmap->pm_lock));
+	KASSERT(kpreempt_disabled());
+	KASSERT(index <= 255);
+
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		cid = cpu_index(ci);
+
+		if (!kcpuset_isset(pmap->pm_kernel_cpus, cid)) {
+			continue;
+		}
+
+		/* take the lock and check again */
+		mutex_enter(&ci->ci_svs_mtx);
+		if (kcpuset_isset(pmap->pm_kernel_cpus, cid)) {
+			ci->ci_svs_updir[index] = pmap->pm_pdir[index];
+		}
+		mutex_exit(&ci->ci_svs_mtx);
+	}
+}
+
+void
+svs_lwp_switch(struct lwp *oldlwp, struct lwp *newlwp)
+{
+	/* Switch rsp0 */
+}
+
+static inline pt_entry_t
+svs_pte_atomic_read(struct pmap *pmap, size_t idx)
+{
+	/*
+	 * XXX: We don't have a basic atomic_fetch_64 function?
+	 */
+	return atomic_cas_64(&pmap->pm_pdir[idx], 666, 666);
+}
+
+/*
+ * We may come here with the pmap unlocked. So read its PTEs atomically. If
+ * a remote CPU is updating them at the same time, it's not that bad: the
+ * remote CPU will call svs_pmap_sync afterwards, and our updirpa will be
+ * synchronized properly.
+ */
+void
+svs_pdir_switch(struct pmap *pmap)
+{
+	struct cpu_info *ci = curcpu();
+	pt_entry_t pte;
+	size_t i;
+
+	KASSERT(kpreempt_disabled());
+	KASSERT(pmap != pmap_kernel());
+
+	ci->ci_svs_kpdirpa = pmap_pdirpa(pmap, 0);
+
+	mutex_enter(&ci->ci_svs_mtx);
+
+	for (i = 0; i < 512; i++) {
+		if (i == PDIR_SLOT_PTE) {
+			/* We don't want to have this mapped. */
+			ci->ci_svs_updir[i] = 0;
+		} else {
+			pte = svs_pte_atomic_read(pmap, i);
+			ci->ci_svs_updir[i] = pte;
+		}
+	}
+
+	mutex_exit(&ci->ci_svs_mtx);
+}
+#endif
+

Index: src/sys/arch/amd64/conf/GENERIC
diff -u src/sys/arch/amd64/conf/GENERIC:1.476 src/sys/arch/amd64/conf/GENERIC:1.477
--- src/sys/arch/amd64/conf/GENERIC:1.476	Sun Dec 31 03:38:06 2017
+++ src/sys/arch/amd64/conf/GENERIC	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-# $NetBSD: GENERIC,v 1.476 2017/12/31 03:38:06 christos Exp $
+# $NetBSD: GENERIC,v 1.477 2018/01/07 16:10:16 maxv Exp $
 #
 # GENERIC machine description file
 #
@@ -22,7 +22,7 @@ include 	"arch/amd64/conf/std.amd64"
 
 options 	INCLUDE_CONFIG_FILE	# embed config file in kernel binary
 
-#ident		"GENERIC-$Revision: 1.476 $"
+#ident		"GENERIC-$Revision: 1.477 $"
 
 maxusers	64		# estimated number of users
 
@@ -75,6 +75,7 @@ options 	SYSCTL_INCLUDE_DESCR	# Include 
 
 # CPU-related options
 #options 	USER_LDT	# user-settable LDT; used by WINE
+options 	SVS		# Separate Virtual Space
 
 # CPU features
 acpicpu*	at cpu?		# ACPI CPU (including frequency scaling)

Index: src/sys/arch/amd64/conf/files.amd64
diff -u src/sys/arch/amd64/conf/files.amd64:1.97 src/sys/arch/amd64/conf/files.amd64:1.98
--- src/sys/arch/amd64/conf/files.amd64:1.97	Mon Jan  1 08:14:13 2018
+++ src/sys/arch/amd64/conf/files.amd64	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: files.amd64,v 1.97 2018/01/01 08:14:13 maxv Exp $
+#	$NetBSD: files.amd64,v 1.98 2018/01/07 16:10:16 maxv Exp $
 #
 # new style config file for amd64 architecture
 #
@@ -27,6 +27,7 @@ defparam opt_physmem.h	PHYSMEM_MAX_ADDR 
 defflag			PMC
 defflag			USER_LDT
 defflag			KASLR
+defflag			SVS
 defflag eisa.h EISA
 
 # Start code

Index: src/sys/arch/amd64/include/frameasm.h
diff -u src/sys/arch/amd64/include/frameasm.h:1.26 src/sys/arch/amd64/include/frameasm.h:1.27
--- src/sys/arch/amd64/include/frameasm.h:1.26	Sun Jan  7 13:43:23 2018
+++ src/sys/arch/amd64/include/frameasm.h	Sun Jan  7 16:10:16 2018
@@ -1,10 +1,11 @@
-/*	$NetBSD: frameasm.h,v 1.26 2018/01/07 13:43:23 maxv Exp $	*/
+/*	$NetBSD: frameasm.h,v 1.27 2018/01/07 16:10:16 maxv Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
 
 #ifdef _KERNEL_OPT
 #include "opt_xen.h"
+#include "opt_svs.h"
 #endif
 
 /*
@@ -95,6 +96,22 @@
 	movq	TF_RBX(%rsp),%rbx	; \
 	movq	TF_RAX(%rsp),%rax
 
+#ifdef SVS
+#define SVS_ENTER \
+	pushq	%rax			; \
+	movq	CPUVAR(KPDIRPA),%rax	; \
+	movq	%rax,%cr3		; \
+	popq	%rax
+#define SVS_LEAVE \
+	pushq	%rax			; \
+	movq	CPUVAR(UPDIRPA),%rax	; \
+	movq	%rax,%cr3		; \
+	popq	%rax
+#else
+#define SVS_ENTER	/* nothing */
+#define SVS_LEAVE	/* nothing */
+#endif
+
 #define	INTRENTRY_L(kernel_trap, usertrap) \
 	subq	$TF_REGSIZE,%rsp	; \
 	INTR_SAVE_GPRS			; \
@@ -104,6 +121,7 @@
 	je	kernel_trap		; \
 usertrap				; \
 	SWAPGS				; \
+	SVS_ENTER			; \
 	movw	%gs,TF_GS(%rsp)		; \
 	movw	%fs,TF_FS(%rsp)		; \
 	movw	%es,TF_ES(%rsp)		; \

Index: src/sys/arch/amd64/include/pmap.h
diff -u src/sys/arch/amd64/include/pmap.h:1.40 src/sys/arch/amd64/include/pmap.h:1.41
--- src/sys/arch/amd64/include/pmap.h:1.40	Sat Jun 17 08:40:46 2017
+++ src/sys/arch/amd64/include/pmap.h	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.40 2017/06/17 08:40:46 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.41 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -218,6 +218,10 @@
  */
 #define NPTECL		8
 
+void svs_pmap_sync(struct pmap *, int);
+void svs_lwp_switch(struct lwp *, struct lwp *);
+void svs_pdir_switch(struct pmap *);
+
 #include <x86/pmap.h>
 
 #ifndef XEN

Index: src/sys/arch/i386/conf/files.i386
diff -u src/sys/arch/i386/conf/files.i386:1.388 src/sys/arch/i386/conf/files.i386:1.389
--- src/sys/arch/i386/conf/files.i386:1.388	Sun Oct  8 09:06:50 2017
+++ src/sys/arch/i386/conf/files.i386	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-#	$NetBSD: files.i386,v 1.388 2017/10/08 09:06:50 maxv Exp $
+#	$NetBSD: files.i386,v 1.389 2018/01/07 16:10:16 maxv Exp $
 #
 # new style config file for i386 architecture
 #
@@ -20,6 +20,7 @@ obsolete defflag	XBOX
 
 defflag			PMC
 defflag			KASLR
+defflag			SVS
 
 # User-settable LDT (used by WINE)
 defflag			USER_LDT

Index: src/sys/arch/x86/include/cpu.h
diff -u src/sys/arch/x86/include/cpu.h:1.87 src/sys/arch/x86/include/cpu.h:1.88
--- src/sys/arch/x86/include/cpu.h:1.87	Fri Jan  5 08:04:21 2018
+++ src/sys/arch/x86/include/cpu.h	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.87 2018/01/05 08:04:21 maxv Exp $	*/
+/*	$NetBSD: cpu.h,v 1.88 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1990 The Regents of the University of California.
@@ -47,6 +47,7 @@
 #if defined(_KERNEL) || defined(_KMEMUSER)
 #if defined(_KERNEL_OPT)
 #include "opt_xen.h"
+#include "opt_svs.h"
 #ifdef i386
 #include "opt_user_ldt.h"
 #endif
@@ -187,6 +188,13 @@ struct cpu_info {
 	pd_entry_t *	ci_pae_l3_pdir; /* VA pointer to L3 PD */
 #endif
 
+#ifdef SVS
+	pd_entry_t *	ci_svs_updir;
+	paddr_t		ci_svs_updirpa;
+	paddr_t		ci_svs_kpdirpa;
+	kmutex_t	ci_svs_mtx;
+#endif
+
 #if defined(XEN) && (defined(PAE) || defined(__x86_64__))
 	/* Currently active user PGD (can't use rcr3() with Xen) */
 	pd_entry_t *	ci_kpm_pdir;	/* per-cpu PMD (va) */
@@ -333,6 +341,7 @@ void cpu_broadcast_halt(void);
 void cpu_kick(struct cpu_info *);
 
 void cpu_pcpuarea_init(struct cpu_info *);
+void cpu_svs_init(struct cpu_info *);
 
 #define	curcpu()		x86_curcpu()
 #define	curlwp			x86_curlwp()

Index: src/sys/arch/x86/x86/cpu.c
diff -u src/sys/arch/x86/x86/cpu.c:1.143 src/sys/arch/x86/x86/cpu.c:1.144
--- src/sys/arch/x86/x86/cpu.c:1.143	Sun Jan  7 10:16:13 2018
+++ src/sys/arch/x86/x86/cpu.c	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.143 2018/01/07 10:16:13 maxv Exp $	*/
+/*	$NetBSD: cpu.c,v 1.144 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
@@ -62,12 +62,13 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.143 2018/01/07 10:16:13 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.144 2018/01/07 16:10:16 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_mpbios.h"		/* for MPDEBUG */
 #include "opt_mtrr.h"
 #include "opt_multiprocessor.h"
+#include "opt_svs.h"
 
 #include "lapic.h"
 #include "ioapic.h"
@@ -379,6 +380,10 @@ cpu_attach(device_t parent, device_t sel
 	/* Must be before mi_cpu_attach(). */
 	cpu_vm_init(ci);
 
+#ifdef SVS
+	cpu_svs_init(ci);
+#endif
+
 	if (caa->cpu_role == CPU_ROLE_AP) {
 		int error;
 
@@ -1248,6 +1253,10 @@ x86_cpu_idle_halt(void)
 void
 cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
 {
+#ifdef SVS
+	svs_pdir_switch(pmap);
+#endif
+
 #ifdef PAE
 	struct cpu_info *ci = curcpu();
 	bool interrupts_enabled;

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.277 src/sys/arch/x86/x86/pmap.c:1.278
--- src/sys/arch/x86/x86/pmap.c:1.277	Fri Jan  5 09:13:48 2018
+++ src/sys/arch/x86/x86/pmap.c	Sun Jan  7 16:10:16 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.277 2018/01/05 09:13:48 martin Exp $	*/
+/*	$NetBSD: pmap.c,v 1.278 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -170,12 +170,13 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.277 2018/01/05 09:13:48 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.278 2018/01/07 16:10:16 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
+#include "opt_svs.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -558,9 +559,11 @@ static void pmap_init_pcpu(void);
 #ifdef __HAVE_DIRECT_MAP
 static void pmap_init_directmap(struct pmap *);
 #endif
+#if !defined(XEN) && !defined(SVS)
+static void pmap_remap_global(void);
+#endif
 #ifndef XEN
 static void pmap_init_lapic(void);
-static void pmap_remap_global(void);
 static void pmap_remap_largepages(void);
 #endif
 
@@ -1286,7 +1289,7 @@ pmap_bootstrap(vaddr_t kva_start)
 	 * operation of the system.
 	 */
 
-#ifndef XEN
+#if !defined(XEN) && !defined(SVS)
 	/*
 	 * Begin to enable global TLB entries if they are supported.
 	 * The G bit has no effect until the CR4_PGE bit is set in CR4,
@@ -1299,7 +1302,9 @@ pmap_bootstrap(vaddr_t kva_start)
 		/* add PG_G attribute to already mapped kernel pages */
 		pmap_remap_global();
 	}
+#endif
 
+#ifndef XEN
 	/*
 	 * Enable large pages if they are supported.
 	 */
@@ -1648,7 +1653,7 @@ pmap_init_directmap(struct pmap *kpm)
 }
 #endif /* __HAVE_DIRECT_MAP */
 
-#ifndef XEN
+#if !defined(XEN) && !defined(SVS)
 /*
  * Remap all of the virtual pages created so far with the PG_G bit.
  */
@@ -1691,7 +1696,9 @@ pmap_remap_global(void)
 			PTE_BASE[p1i] |= PG_G;
 	}
 }
+#endif
 
+#ifndef XEN
 /*
  * Remap several kernel segments with large pages. We cover as many pages as we
  * can. Called only once at boot time, if the CPU supports large pages.
@@ -2113,13 +2120,17 @@ pmap_free_ptp(struct pmap *pmap, struct 
 		opde = pmap_pte_testset(&pdes[level - 1][index], 0);
 
 		/*
-		 * On Xen-amd64, we need to sync the top level page
+		 * On Xen-amd64 or SVS, we need to sync the top level page
 		 * directory on each CPU.
 		 */
 #if defined(XEN) && defined(__x86_64__)
 		if (level == PTP_LEVELS - 1) {
 			xen_kpm_sync(pmap, index);
 		}
+#elif defined(SVS)
+		if (level == PTP_LEVELS - 1) {
+			svs_pmap_sync(pmap, index);
+		}
 #endif
 
 		invaladdr = level == 1 ? (vaddr_t)ptes :
@@ -2214,13 +2225,17 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t 
 		    (pmap_pa2pte(pa) | PG_u | PG_RW | PG_V));
 
 		/*
-		 * On Xen-amd64, we need to sync the top level page
+		 * On Xen-amd64 or SVS, we need to sync the top level page
 		 * directory on each CPU.
 		 */
 #if defined(XEN) && defined(__x86_64__)
 		if (i == PTP_LEVELS) {
 			xen_kpm_sync(pmap, index);
 		}
+#elif defined(SVS)
+		if (i == PTP_LEVELS) {
+			svs_pmap_sync(pmap, index);
+		}
 #endif
 
 		pmap_pte_flush();
