Module Name:    src
Committed By:   nonaka
Date:           Mon Jan 26 04:47:53 UTC 2015

Modified Files:
        src/sys/arch/powerpc/booke: booke_pmap.c genassym.cf trap_subr.S
        src/sys/arch/powerpc/include/booke: pmap.h
        src/sys/uvm/pmap: pmap.c

Log Message:
Avoid a race condition between PTE updates and the TLB miss walk.

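In short, the MI pmap now brackets every PTE store and matching TLB update with
pmap_md_tlb_miss_lock_enter()/pmap_md_tlb_miss_lock_exit(); on MULTIPROCESSOR
booke these take a spin mutex that the assembly TLB miss handlers also acquire,
so a concurrent miss walk can no longer see a half-updated PTE/TLB state.
A minimal sketch of that pattern follows; pmap_pte_update_example() is a
hypothetical name used only for illustration, not code from the tree, while the
other functions and constants are the ones appearing in the diff below.

	/*
	 * Hypothetical sketch of the locking pattern this commit applies
	 * throughout sys/uvm/pmap/pmap.c; the function name is made up.
	 */
	#include <sys/mutex.h>

	#include <uvm/uvm.h>

	#include <machine/pmap.h>

	static void
	pmap_pte_update_example(pmap_t pmap, vaddr_t va, pt_entry_t *ptep,
	    pt_entry_t npte)
	{

		kpreempt_disable();
		pmap_md_tlb_miss_lock_enter(); /* mutex_spin_enter() on MP booke */
		*ptep = npte;		       /* PTE store and ...		*/
		pmap_tlb_update_addr(pmap, va, npte, PMAP_TLB_NEED_IPI);
		pmap_md_tlb_miss_lock_exit();  /* ... TLB update done under lock */
		kpreempt_enable();
	}

Without MULTIPROCESSOR (or without PMAP_MD_NEED_TLB_MISS_LOCK defined), the
enter/exit calls compile to empty do { } while (0) statements, as in the
pmap.c hunk further down.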

To generate a diff of this commit:
cvs rdiff -u -r1.21 -r1.22 src/sys/arch/powerpc/booke/booke_pmap.c
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/powerpc/booke/genassym.cf
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/powerpc/booke/trap_subr.S
cvs rdiff -u -r1.14 -r1.15 src/sys/arch/powerpc/include/booke/pmap.h
cvs rdiff -u -r1.9 -r1.10 src/sys/uvm/pmap/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/powerpc/booke/booke_pmap.c
diff -u src/sys/arch/powerpc/booke/booke_pmap.c:1.21 src/sys/arch/powerpc/booke/booke_pmap.c:1.22
--- src/sys/arch/powerpc/booke/booke_pmap.c:1.21	Fri Jan 23 06:39:41 2015
+++ src/sys/arch/powerpc/booke/booke_pmap.c	Mon Jan 26 04:47:53 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: booke_pmap.c,v 1.21 2015/01/23 06:39:41 nonaka Exp $	*/
+/*	$NetBSD: booke_pmap.c,v 1.22 2015/01/26 04:47:53 nonaka Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -38,16 +38,21 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.21 2015/01/23 06:39:41 nonaka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.22 2015/01/26 04:47:53 nonaka Exp $");
 
 #include <sys/param.h>
 #include <sys/kcore.h>
 #include <sys/buf.h>
+#include <sys/mutex.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/pmap.h>
 
+#if defined(MULTIPROCESSOR)
+kmutex_t pmap_tlb_miss_lock;
+#endif
+
 /*
  * Initialize the kernel pmap.
  */
@@ -166,6 +171,10 @@ pmap_bootstrap(vaddr_t startkernel, vadd
 	/* init the lock */
 	pmap_tlb_info_init(&pmap_tlb0_info);
 
+#if defined(MULTIPROCESSOR)
+	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
+#endif
+
 	/*
 	 * Compute the number of pages kmem_arena will have.
 	 */
@@ -427,4 +436,18 @@ pmap_md_tlb_info_attach(struct pmap_tlb_
 {
 	/* nothing */
 }
+
+void
+pmap_md_tlb_miss_lock_enter(void)
+{
+
+	mutex_spin_enter(&pmap_tlb_miss_lock);
+}
+
+void
+pmap_md_tlb_miss_lock_exit(void)
+{
+
+	mutex_spin_exit(&pmap_tlb_miss_lock);
+}
 #endif /* MULTIPROCESSOR */

Index: src/sys/arch/powerpc/booke/genassym.cf
diff -u src/sys/arch/powerpc/booke/genassym.cf:1.10 src/sys/arch/powerpc/booke/genassym.cf:1.11
--- src/sys/arch/powerpc/booke/genassym.cf:1.10	Tue Nov 27 19:24:46 2012
+++ src/sys/arch/powerpc/booke/genassym.cf	Mon Jan 26 04:47:53 2015
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.10 2012/11/27 19:24:46 matt Exp $
+#	$NetBSD: genassym.cf,v 1.11 2015/01/26 04:47:53 nonaka Exp $
 
 #-
 # Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
@@ -111,4 +111,7 @@ define	HATCH_SP		offsetof(struct cpu_hat
 define	HATCH_TBU		offsetof(struct cpu_hatch_data, hatch_tbu)
 define	HATCH_TBL		offsetof(struct cpu_hatch_data, hatch_tbl)
 define	HATCH_TLBIDX		offsetof(struct cpu_hatch_data, hatch_tlbidx)
+
+define	__SIMPLELOCK_LOCKED	__SIMPLELOCK_LOCKED
+define	__SIMPLELOCK_UNLOCKED	__SIMPLELOCK_UNLOCKED
 endif

Index: src/sys/arch/powerpc/booke/trap_subr.S
diff -u src/sys/arch/powerpc/booke/trap_subr.S:1.11 src/sys/arch/powerpc/booke/trap_subr.S:1.12
--- src/sys/arch/powerpc/booke/trap_subr.S:1.11	Thu Sep 18 23:37:51 2014
+++ src/sys/arch/powerpc/booke/trap_subr.S	Mon Jan 26 04:47:53 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: trap_subr.S,v 1.11 2014/09/18 23:37:51 joerg Exp $	*/
+/*	$NetBSD: trap_subr.S,v 1.12 2015/01/26 04:47:53 nonaka Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -34,7 +34,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-RCSID("$NetBSD: trap_subr.S,v 1.11 2014/09/18 23:37:51 joerg Exp $")
+RCSID("$NetBSD: trap_subr.S,v 1.12 2015/01/26 04:47:53 nonaka Exp $")
 
 	.globl	_C_LABEL(sctrapexit), _C_LABEL(trapexit), _C_LABEL(intrcall)
 
@@ -346,6 +346,50 @@ RCSID("$NetBSD: trap_subr.S,v 1.11 2014/
 	RESTORE_SPRG1(%r6);	\
 	FRAME_INTR_XEXIT(rfci, CSRR)
 
+#if defined(MULTIPROCESSOR)
+#define	FRAME_TLBMISSLOCK						\
+	GET_CPUINFO(%r23);						\
+	ldint	%r22, CI_MTX_COUNT(%r23);				\
+	subi	%r22, %r22, 1;						\
+	stint	%r22, CI_MTX_COUNT(%r23);				\
+	isync;								\
+	cmpwi	%r22, 0;						\
+	bne	1f;							\
+	ldint	%r22, CI_CPL(%r23);					\
+	stint	%r22, CI_MTX_OLDSPL(%r23);				\
+1:	lis	%r23, _C_LABEL(pmap_tlb_miss_lock)@h;			\
+	ori	%r23, %r23, _C_LABEL(pmap_tlb_miss_lock)@l;		\
+	li	%r20, MTX_LOCK;						\
+2:	lwarx	%r22, %r20, %r23;					\
+	cmpwi	%r22, __SIMPLELOCK_UNLOCKED;				\
+	beq+	4f;							\
+3:	lwzx	%r22, %r20, %r23;					\
+	cmpwi	%r22, __SIMPLELOCK_UNLOCKED;				\
+	beq+	2b;							\
+	b	3b;							\
+4:	li	%r21, __SIMPLELOCK_LOCKED;				\
+	stwcx.	%r21, %r20, %r23;					\
+	bne-	2b;							\
+	isync;								\
+	msync;
+#define	FRAME_TLBMISSUNLOCK						\
+	sync;								\
+	lis	%r23, _C_LABEL(pmap_tlb_miss_lock)@h;			\
+	ori	%r23, %r23, _C_LABEL(pmap_tlb_miss_lock)@l;		\
+	li	%r22, __SIMPLELOCK_UNLOCKED;				\
+	stw	%r22, MTX_LOCK(%r23);					\
+	isync;								\
+	msync;								\
+	GET_CPUINFO(%r23);						\
+	ldint	%r22, CI_MTX_COUNT(%r23);				\
+	addi	%r22, %r22, 1;						\
+	stint	%r22, CI_MTX_COUNT(%r23);				\
+	isync;
+#else	/* !MULTIPROCESSOR */
+#define	FRAME_TLBMISSLOCK
+#define	FRAME_TLBMISSUNLOCK
+#endif	/* MULTIPROCESSOR */
+
 	.text
 	.p2align 4
 _C_LABEL(critical_input_vector):
@@ -535,6 +579,7 @@ _C_LABEL(watchdog_timer_vector):
 _C_LABEL(data_tlb_error_vector):
 	/* MSR[CE], MSR[ME], MSR[DE] are unchanged, all others cleared */
 	FRAME_TLBPROLOGUE
+	FRAME_TLBMISSLOCK
 	/*
 	 * Registers as this point:
 	 *
@@ -577,6 +622,7 @@ _C_LABEL(data_tlb_error_vector):
 		31-PTR_SCALESHIFT, \
 		31-PTR_SCALESHIFT		/* move PSL_DS[27] to bit 29 */
 	bl	pte_load
+	FRAME_TLBMISSUNLOCK
 	mtlr	%r29				/* restore LR */
 	/*
 	 * If we returned, pte load failed so let trap deal with it but
@@ -590,6 +636,7 @@ _C_LABEL(data_tlb_error_vector):
 _C_LABEL(instruction_tlb_error_vector):
 	/* MSR[CE], MSR[ME], MSR[DE] are unchanged, all others cleared */
 	FRAME_TLBPROLOGUE
+	FRAME_TLBMISSLOCK
 	/*
 	 * Attempt to update the TLB from the page table.
 	 */
@@ -600,6 +647,7 @@ _C_LABEL(instruction_tlb_error_vector):
 		31-PTR_SCALESHIFT, \
 		31-PTR_SCALESHIFT		/* move PSL_IS[26] to bit 29 */
 	bl	pte_load
+	FRAME_TLBMISSUNLOCK
 	mtlr	%r29				/* restore LR */
 	/*
 	 * If we returned, pte load failed so let trap deal with it but
@@ -764,6 +812,9 @@ e500_pte_load:
 	addic	%r31, %r31, 1
 	addze	%r30, %r30
 	stmw	%r30, CI_EV_TLBMISS_SOFT(%r2)
+
+	FRAME_TLBMISSUNLOCK
+
 	/*
 	 * Cleanup and leave.  We know any higher priority exception will
 	 * save and restore SPRG1 and %r2 thereby preserving their values.

Index: src/sys/arch/powerpc/include/booke/pmap.h
diff -u src/sys/arch/powerpc/include/booke/pmap.h:1.14 src/sys/arch/powerpc/include/booke/pmap.h:1.15
--- src/sys/arch/powerpc/include/booke/pmap.h:1.14	Thu Apr  3 13:55:34 2014
+++ src/sys/arch/powerpc/include/booke/pmap.h	Mon Jan 26 04:47:53 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.14 2014/04/03 13:55:34 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.15 2015/01/26 04:47:53 nonaka Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -92,6 +92,12 @@ void	pmap_md_init(void);
 
 bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
 
+#ifdef MULTIPROCESSOR
+#define	PMAP_MD_NEED_TLB_MISS_LOCK
+void	pmap_md_tlb_miss_lock_enter(void);
+void	pmap_md_tlb_miss_lock_exit(void);
+#endif	/* MULTIPROCESSOR */
+
 #ifdef PMAP_MINIMALTLB
 vaddr_t	pmap_kvptefill(vaddr_t, vaddr_t, pt_entry_t);
 #endif

Index: src/sys/uvm/pmap/pmap.c
diff -u src/sys/uvm/pmap/pmap.c:1.9 src/sys/uvm/pmap/pmap.c:1.10
--- src/sys/uvm/pmap/pmap.c:1.9	Mon Jan  5 05:35:18 2015
+++ src/sys/uvm/pmap/pmap.c	Mon Jan 26 04:47:53 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.9 2015/01/05 05:35:18 nonaka Exp $	*/
+/*	$NetBSD: pmap.c,v 1.10 2015/01/26 04:47:53 nonaka Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.9 2015/01/05 05:35:18 nonaka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.10 2015/01/26 04:47:53 nonaka Exp $");
 
 /*
  *	Manages physical address maps.
@@ -263,6 +263,11 @@ struct pool_allocator pmap_pv_page_alloc
 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
 
+#if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
+#define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
+#define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
+#endif	/* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
+
 /*
  * Misc. functions.
  */
@@ -541,8 +546,10 @@ pmap_destroy(pmap_t pmap)
 	KASSERT(pmap->pm_count == 0);
 	PMAP_COUNT(destroy);
 	kpreempt_disable();
+	pmap_md_tlb_miss_lock_enter();
 	pmap_tlb_asid_release_all(pmap);
 	pmap_segtab_destroy(pmap, NULL, 0);
+	pmap_md_tlb_miss_lock_exit();
 
 #ifdef MULTIPROCESSOR
 	kcpuset_destroy(pmap->pm_active);
@@ -586,10 +593,12 @@ pmap_activate(struct lwp *l)
 	PMAP_COUNT(activate);
 
 	kpreempt_disable();
+	pmap_md_tlb_miss_lock_enter();
 	pmap_tlb_asid_acquire(pmap, l);
 	if (l == curlwp) {
 		pmap_segtab_activate(pmap, l);
 	}
+	pmap_md_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
@@ -608,8 +617,10 @@ pmap_deactivate(struct lwp *l)
 	PMAP_COUNT(deactivate);
 
 	kpreempt_disable();
+	pmap_md_tlb_miss_lock_enter();
 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
 	pmap_tlb_asid_deactivate(pmap);
+	pmap_md_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
@@ -629,6 +640,7 @@ pmap_update(struct pmap *pmap)
 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
 		PMAP_COUNT(shootdown_ipis);
 #endif
+	pmap_md_tlb_miss_lock_enter();
 #ifdef DEBUG
 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
 #endif /* DEBUG */
@@ -642,6 +654,7 @@ pmap_update(struct pmap *pmap)
 		pmap_tlb_asid_acquire(pmap, curlwp);
 		pmap_segtab_activate(pmap, curlwp);
 	}
+	pmap_md_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
@@ -685,11 +698,13 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva
 			pmap_remove_pv(pmap, sva, pg,
 			   pte_modified_p(pt_entry));
 		}
+		pmap_md_tlb_miss_lock_enter();
 		*ptep = npte;
 		/*
 		 * Flush the TLB for the given address.
 		 */
 		pmap_tlb_invalidate_addr(pmap, sva);
+		pmap_md_tlb_miss_lock_exit();
 	}
 	return false;
 }
@@ -843,12 +858,14 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sv
 		}
 		pt_entry = pte_prot_downgrade(pt_entry, prot);
 		if (*ptep != pt_entry) {
+			pmap_md_tlb_miss_lock_enter();
 			*ptep = pt_entry;
 			/*
 			 * Update the TLB if needed.
 			 */
 			pmap_tlb_update_addr(pmap, sva, pt_entry,
 			    PMAP_TLB_NEED_IPI);
+			pmap_md_tlb_miss_lock_exit();
 		}
 	}
 	return false;
@@ -937,9 +954,11 @@ pmap_page_cache(struct vm_page *pg, bool
 		pt_entry_t pt_entry = *ptep;
 		if (pte_valid_p(pt_entry)) {
 			pt_entry = pte_cached_change(pt_entry, cached);
+			pmap_md_tlb_miss_lock_enter();
 			*ptep = pt_entry;
 			pmap_tlb_update_addr(pmap, va, pt_entry,
 			    PMAP_TLB_NEED_IPI);
+			pmap_md_tlb_miss_lock_exit();
 		}
 	}
 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
@@ -1060,11 +1079,13 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	bool resident = pte_valid_p(opte);
 	if (!resident)
 		pmap->pm_stats.resident_count++;
+	pmap_md_tlb_miss_lock_enter();
 	*ptep = npte;
 
 	pmap_tlb_update_addr(pmap, va, npte,
 	    ((flags & VM_PROT_ALL) ? PMAP_TLB_INSERT : 0)
 	    | (resident ? PMAP_TLB_NEED_IPI : 0));
+	pmap_md_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
@@ -1138,12 +1159,14 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	pt_entry_t * const ptep = pmap_pte_reserve(pmap_kernel(), va, 0);
 	KASSERT(ptep != NULL);
 	KASSERT(!pte_valid_p(*ptep));
+	pmap_md_tlb_miss_lock_enter();
 	*ptep = npte;
 	/*
 	 * We have the option to force this mapping into the TLB but we
 	 * don't.  Instead let the next reference to the page do it.
 	 */
 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
+	pmap_md_tlb_miss_lock_exit();
 	kpreempt_enable();
 #if DEBUG > 1
 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
@@ -1179,8 +1202,10 @@ pmap_pte_kremove(pmap_t pmap, vaddr_t sv
 		if (pg != NULL)
 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
 
+		pmap_md_tlb_miss_lock_enter();
 		*ptep = new_pt_entry;
 		pmap_tlb_invalidate_addr(pmap_kernel(), sva);
+		pmap_md_tlb_miss_lock_exit();
 	}
 
 	return false;
@@ -1213,8 +1238,10 @@ pmap_remove_all(struct pmap *pmap)
 	 * Free all of our ASIDs which means we can skip doing all the
 	 * tlb_invalidate_addrs().
 	 */
+	pmap_md_tlb_miss_lock_enter();
 	pmap_tlb_asid_deactivate(pmap);
 	pmap_tlb_asid_release_all(pmap);
+	pmap_md_tlb_miss_lock_exit();
 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
 
 	kpreempt_enable();
@@ -1258,7 +1285,9 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 #endif
 
 	if (pte_wired_p(pt_entry)) {
+		pmap_md_tlb_miss_lock_enter();
 		*ptep = pte_unwire_entry(*ptep);
+		pmap_md_tlb_miss_lock_exit();
 		pmap->pm_stats.wired_count--;
 	}
 #ifdef DIAGNOSTIC
@@ -1421,9 +1450,11 @@ pmap_clear_modify(struct vm_page *pg)
 			continue;
 		}
 		pmap_md_vca_clean(pg, va, PMAP_WBINV);
+		pmap_md_tlb_miss_lock_enter();
 		*ptep = pt_entry;
 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 		pmap_tlb_invalidate_addr(pmap, va);
+		pmap_md_tlb_miss_lock_exit();
 		pmap_update(pmap);
 		if (__predict_false(gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))) {
 			/*
