Module Name:    src
Committed By:   cherry
Date:           Fri Dec 30 16:55:21 UTC 2011

Modified Files:
        src/sys/arch/x86/x86: pmap.c
        src/sys/arch/xen/include: xenpmap.h
        src/sys/arch/xen/x86: cpu.c xen_pmap.c

Log Message:
Per-CPU shadow directory pages should be updated locally via cross-calls.
Do this.
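
For context, the mechanism is roughly the following (a condensed sketch of the
change, not the literal committed code; see the xen_pmap.c hunk below for the
real pmap_kpm_sync_xcall/xen_kpm_sync): a cross-call handler now runs on each
CPU on which the pmap is loaded and rewrites that CPU's own shadow
page-directory entry, replacing the old scheme where one CPU walked every
cpu_info and poked the remote shadow directories directly. Identifier
kpm_sync_xcall below is an illustrative stand-in for the patch's handler.

	/* Sketch only: handler executed on each target CPU via xc_broadcast()/
	 * xc_unicast(); names simplified from the patch below. */
	static void
	kpm_sync_xcall(void *arg1, void *arg2)
	{
		struct pmap *pmap = arg1;
		int index = *(int *)arg2;
		struct cpu_info *ci = curcpu();	/* the CPU the xcall runs on */

		/* Copy the kernel PD entry into this CPU's shadow directory. */
		xpq_queue_pte_update(xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
		    pmap_kernel()->pm_pdir[index]);
		pmap_pte_flush();
	}

	/* Caller side: run the handler on all CPUs and wait for completion. */
	uint64_t where = xc_broadcast(XC_HIGHPRI, kpm_sync_xcall, pmap, &index);
	xc_wait(where);
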


To generate a diff of this commit:
cvs rdiff -u -r1.147 -r1.148 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/xen/include/xenpmap.h
cvs rdiff -u -r1.71 -r1.72 src/sys/arch/xen/x86/cpu.c
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/xen/x86/xen_pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.147 src/sys/arch/x86/x86/pmap.c:1.148
--- src/sys/arch/x86/x86/pmap.c:1.147	Fri Dec  9 17:32:51 2011
+++ src/sys/arch/x86/x86/pmap.c	Fri Dec 30 16:55:21 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.147 2011/12/09 17:32:51 chs Exp $	*/
+/*	$NetBSD: pmap.c,v 1.148 2011/12/30 16:55:21 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.147 2011/12/09 17:32:51 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.148 2011/12/30 16:55:21 cherry Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -1915,16 +1915,7 @@ pmap_free_ptp(struct pmap *pmap, struct 
 			 * Update the per-cpu PD on all cpus the current
 			 * pmap is active on 
 			 */ 
-			CPU_INFO_ITERATOR cii;
-			struct cpu_info *ci;
-			for (CPU_INFO_FOREACH(cii, ci)) {
-				if (ci == NULL) {
-					continue;
-				}
-				if (ci->ci_cpumask & pmap->pm_cpus) {
-					pmap_pte_set(&ci->ci_kpm_pdir[index], 0);
-				}
-			}
+			xen_kpm_sync(pmap, index);
 		}
 #  endif /*__x86_64__ */
 		invaladdr = level == 1 ? (vaddr_t)ptes :
@@ -2029,17 +2020,7 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t 
 			 * Update the per-cpu PD on all cpus the current
 			 * pmap is active on 
 			 */ 
-			CPU_INFO_ITERATOR cii;
-			struct cpu_info *ci;
-			for (CPU_INFO_FOREACH(cii, ci)) {
-				if (ci == NULL) {
-					continue;
-				}
-				if (ci->ci_cpumask & pmap->pm_cpus) {
-					pmap_pte_set(&ci->ci_kpm_pdir[index],
-						     (pd_entry_t) (pmap_pa2pte(pa) | PG_u | PG_RW | PG_V));
-				}
-			}
+			xen_kpm_sync(pmap, index);
 		}
 #endif /* XEN && __x86_64__ */
 		pmap_pte_flush();
@@ -4247,33 +4228,14 @@ pmap_alloc_level(pd_entry_t * const *pde
 			pmap_get_physpage(va, level - 1, &pa);
 			pte = pmap_pa2pte(pa) | PG_k | PG_V | PG_RW;
 #ifdef XEN
-			switch (level) {
-			case PTP_LEVELS: 
+			xpq_queue_pte_update(xpmap_ptetomach(&pdep[i]), pte);
+			if (level == PTP_LEVELS) {
 #if defined(PAE) || defined(__x86_64__)
 				if (i >= PDIR_SLOT_KERN) {
 					/* update per-cpu PMDs on all cpus */
-					CPU_INFO_ITERATOR cii;
-					struct cpu_info *ci;
-					for (CPU_INFO_FOREACH(cii, ci)) {
-						if (ci == NULL) {
-							continue;
-						}
-#ifdef PAE
-						xpq_queue_pte_update(
-							xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(i)]), pte);
-#elif defined(__x86_64__)
-						xpq_queue_pte_update(
-							xpmap_ptetomach(&ci->ci_kpm_pdir[i]), pte);
-#endif /* PAE */
-					}
+					xen_kpm_sync(pmap_kernel(), i);
 				}
 #endif /* PAE || __x86_64__ */
-				/* FALLTHROUGH */
-
-			default: /* All other levels */
-				xpq_queue_pte_update(
-					xpmap_ptetomach(&pdep[i]), 
-					pte);
 			}
 #else /* XEN */
 			pdep[i] = pte;

Index: src/sys/arch/xen/include/xenpmap.h
diff -u src/sys/arch/xen/include/xenpmap.h:1.32 src/sys/arch/xen/include/xenpmap.h:1.33
--- src/sys/arch/xen/include/xenpmap.h:1.32	Wed Nov 23 00:56:56 2011
+++ src/sys/arch/xen/include/xenpmap.h	Fri Dec 30 16:55:21 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenpmap.h,v 1.32 2011/11/23 00:56:56 jym Exp $	*/
+/*	$NetBSD: xenpmap.h,v 1.33 2011/12/30 16:55:21 cherry Exp $	*/
 
 /*
  *
@@ -61,6 +61,10 @@ void	pmap_map_recursive_entries(void);
 void	pmap_unmap_recursive_entries(void);
 #endif /* PAE */
 
+#if defined(PAE) || defined(__x86_64__)
+void xen_kpm_sync(struct pmap *, int);
+#endif /* PAE || __x86_64__ */
+
 #define xpq_queue_pin_l1_table(pa)	\
 	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
 #define xpq_queue_pin_l2_table(pa)	\

Index: src/sys/arch/xen/x86/cpu.c
diff -u src/sys/arch/xen/x86/cpu.c:1.71 src/sys/arch/xen/x86/cpu.c:1.72
--- src/sys/arch/xen/x86/cpu.c:1.71	Wed Dec  7 15:47:43 2011
+++ src/sys/arch/xen/x86/cpu.c	Fri Dec 30 16:55:21 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.71 2011/12/07 15:47:43 cegger Exp $	*/
+/*	$NetBSD: cpu.c,v 1.72 2011/12/30 16:55:21 cherry Exp $	*/
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.71 2011/12/07 15:47:43 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.72 2011/12/30 16:55:21 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -1102,6 +1102,7 @@ static struct cpu_info *cpu_primary(void
 {
 	return &cpu_info_primary;
 }
+/* XXX: rename to something more generic. users other than xpq exist */
 struct cpu_info	* (*xpq_cpu)(void) = cpu_primary;
 
 void

Index: src/sys/arch/xen/x86/xen_pmap.c
diff -u src/sys/arch/xen/x86/xen_pmap.c:1.11 src/sys/arch/xen/x86/xen_pmap.c:1.12
--- src/sys/arch/xen/x86/xen_pmap.c:1.11	Wed Dec  7 15:47:43 2011
+++ src/sys/arch/xen/x86/xen_pmap.c	Fri Dec 30 16:55:21 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_pmap.c,v 1.11 2011/12/07 15:47:43 cegger Exp $	*/
+/*	$NetBSD: xen_pmap.c,v 1.12 2011/12/30 16:55:21 cherry Exp $	*/
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.11 2011/12/07 15:47:43 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.12 2011/12/30 16:55:21 cherry Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -550,3 +550,123 @@ pmap_unmap_recursive_entries(void)
 
 }
 #endif /* PAE */
+
+#if defined(PAE) || defined(__x86_64__)
+
+extern struct cpu_info	* (*xpq_cpu)(void);
+static __inline void
+pmap_kpm_setpte(struct cpu_info *ci, int index)
+{
+#ifdef PAE
+		xpq_queue_pte_update(
+			xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
+			pmap_kernel()->pm_pdir[index]);
+#elif defined(__x86_64__)
+		xpq_queue_pte_update(
+			xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
+			pmap_kernel()->pm_pdir[index]);
+#endif /* PAE */
+}
+
+static void
+pmap_kpm_sync_xcall(void *arg1, void *arg2)
+{
+	KASSERT(arg1 != NULL);
+	KASSERT(arg2 != NULL);
+
+	struct pmap *pmap = arg1;
+	int index = *(int *)arg2;
+	struct cpu_info *ci = xpq_cpu();
+
+	if (pmap == pmap_kernel()) {
+		KASSERT(index >= PDIR_SLOT_KERN);
+		pmap_kpm_setpte(ci, index);
+		pmap_pte_flush();
+		return;
+	}
+
+#ifdef PAE
+	KASSERTMSG(false, "%s not allowed for PAE user pmaps", __func__);
+	return;
+#else /* __x86_64__ */
+	
+	if (ci->ci_pmap != pmap) {
+		/* pmap changed. Nothing to do. */
+		return;
+	}
+	
+	pmap_pte_set(&ci->ci_kpm_pdir[index],
+	    pmap_kernel()->pm_pdir[index]);
+	pmap_pte_flush();
+#endif /* PAE || __x86_64__ */
+}
+
+/*
+ * Synchronise shadow pdir with the pmap on all cpus on which it is
+ * loaded.
+ */
+void
+xen_kpm_sync(struct pmap *pmap, int index)
+{
+	uint64_t where;
+	
+	KASSERT(pmap != NULL);
+
+	pmap_pte_flush();
+
+	if (__predict_false(xpq_cpu != &x86_curcpu)) { /* Too early to xcall */
+		CPU_INFO_ITERATOR cii;
+		struct cpu_info *ci;
+		for (CPU_INFO_FOREACH(cii, ci)) {
+			if (ci == NULL) {
+				continue;
+			}
+			if (pmap == pmap_kernel() ||
+			    ci->ci_cpumask & pmap->pm_cpus) {
+				pmap_kpm_setpte(ci, index);
+			}
+		}
+		pmap_pte_flush();
+		return;
+	}
+
+	if (pmap == pmap_kernel()) {
+		where = xc_broadcast(XC_HIGHPRI,
+		    pmap_kpm_sync_xcall, pmap, &index);
+		xc_wait(where);
+	} else {
+		KASSERT(mutex_owned(pmap->pm_lock));
+		KASSERT(kpreempt_disabled());
+
+		CPU_INFO_ITERATOR cii;
+		struct cpu_info *ci;
+		for (CPU_INFO_FOREACH(cii, ci)) {
+			if (ci == NULL) {
+				continue;
+			}
+			while (ci->ci_cpumask & pmap->pm_cpus) {
+#ifdef MULTIPROCESSOR
+#define CPU_IS_CURCPU(ci) __predict_false((ci) == curcpu())
+#else /* MULTIPROCESSOR */
+#define CPU_IS_CURCPU(ci) __predict_true((ci) == curcpu())
+#endif /* MULTIPROCESSOR */
+				if (ci->ci_want_pmapload &&
+				    !CPU_IS_CURCPU(ci)) {
+					/*
+					 * XXX: make this more cpu
+					 *  cycle friendly/co-operate
+					 *  with pmap_load()
+					 */
+					continue;
+				    }
+
+				where = xc_unicast(XC_HIGHPRI, pmap_kpm_sync_xcall,
+				    pmap, &index, ci);
+				xc_wait(where);
+				break;
+			}
+		}
+	}
+}
+
+#endif /* PAE || __x86_64__ */
