Module Name:    src
Committed By:   snj
Date:           Wed Nov  8 21:22:48 UTC 2017

Modified Files:
        src/sys/arch/mips/include [netbsd-6-0]: pmap.h
        src/sys/arch/mips/mips [netbsd-6-0]: pmap.c pmap_segtab.c

Log Message:
Pull up following revision(s) (requested by skrll in ticket #1068):
        sys/arch/mips/include/pmap.h: revision 1.63
        sys/arch/mips/mips/pmap.c: revision 1.214
        sys/arch/mips/mips/pmap_segtab.c: revision 1.8
Deal with incompatible cache aliases. Specifically,
- always flush an ephemeral page on unmap
- track unmanaged mappings (mappings entered via pmap_kenter_pa) for
    aliases where required, and handle them appropriately (via pmap_enter_pv)
Hopefully this (finally) addresses the instability reported in the
following PRs:
PR/44900 - R5000/Rm5200 mips ports are broken
PR/46890 - upcoming NetBSD 6.0 release is very unstable / unusable on cobalt qube 2
PR/48628 - cobalt and hpcmips ports are dead
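
For background: on a virtually-indexed cache, two virtual mappings of the
same physical page are incompatible aliases when they select different
cache indices.  A minimal sketch of the test follows, assuming an example
cache geometry; the real check in the tree is mips_cache_badalias(), and
the mask value below is illustrative, not taken from the source.

/*
 * Sketch only: mappings va1 and va2 of the same physical page conflict
 * when they differ in the virtual-address bits that choose the cache
 * index above the page offset.  The 0x7000 mask assumes a 32 KB
 * direct-mapped cache with 4 KB pages (index bits 12-14).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define CACHE_ALIAS_MASK ((vaddr_t)0x7000)	/* assumed, not from the tree */

static bool
cache_badalias(vaddr_t va1, vaddr_t va2)
{
	return ((va1 ^ va2) & CACHE_ALIAS_MASK) != 0;
}

int
main(void)
{
	/* Different colour bits: the lines land in different cache sets. */
	printf("%d\n", cache_badalias(0x1000, 0x3000));	/* prints 1 */
	/* Colour bits agree: the mappings are compatible. */
	printf("%d\n", cache_badalias(0x1000, 0x9000));	/* prints 0 */
	return 0;
}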


To generate a diff of this commit:
cvs rdiff -u -r1.61.8.1 -r1.61.8.1.4.1 src/sys/arch/mips/include/pmap.h
cvs rdiff -u -r1.207.2.1.4.1 -r1.207.2.1.4.2 src/sys/arch/mips/mips/pmap.c
cvs rdiff -u -r1.4.2.1 -r1.4.2.1.4.1 src/sys/arch/mips/mips/pmap_segtab.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/include/pmap.h
diff -u src/sys/arch/mips/include/pmap.h:1.61.8.1 src/sys/arch/mips/include/pmap.h:1.61.8.1.4.1
--- src/sys/arch/mips/include/pmap.h:1.61.8.1	Thu Jul  5 18:39:42 2012
+++ src/sys/arch/mips/include/pmap.h	Wed Nov  8 21:22:48 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.61.8.1 2012/07/05 18:39:42 riz Exp $	*/
+/*	$NetBSD: pmap.h,v 1.61.8.1.4.1 2017/11/08 21:22:48 snj Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -283,6 +283,7 @@ void	pmap_prefer(vaddr_t, vaddr_t *, vsi
 #endif /* MIPS3_PLUS */
 
 #define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
+#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */
 
 /*
  * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
@@ -329,6 +330,7 @@ typedef struct pv_entry {
 	struct pv_entry	*pv_next;	/* next pv_entry */
 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
 	vaddr_t		pv_va;		/* virtual address for mapping */
+#define	PV_KENTER	0x001
 } *pv_entry_t;
 
 #define	PG_MD_UNCACHED		0x0001	/* page is mapped uncached */
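
A note on PV_KENTER above: the flag travels in the low bits of pv_va,
which are free because a mapping's virtual address is page-aligned.  That
is why the pmap.c hunks below wrap nearly every pv_va read in
trunc_page().  Here is a sketch of the encoding under that assumption;
the pack/unpack helpers are invented for illustration, as the committed
code simply ORs in the flag and masks it off with trunc_page().

/*
 * Illustrative encoding of flags in the low bits of pv_va.  PAGE_MASK
 * assumes 4 KB pages here; in the kernel the real trunc_page() does the
 * masking and no pack/unpack helpers exist.
 */
#include <assert.h>
#include <stdint.h>

typedef uintptr_t vaddr_t;

#define	PAGE_MASK	((vaddr_t)0xfff)	/* assumed 4 KB pages */
#define	PV_KENTER	0x001	/* mapping made by pmap_kenter_pa() */

static vaddr_t
pv_va_pack(vaddr_t va, int flags)
{
	assert((va & PAGE_MASK) == 0);	/* flags fit below the page size */
	return va | (vaddr_t)flags;
}

static vaddr_t
pv_va_unpack(vaddr_t pv_va)
{
	return pv_va & ~PAGE_MASK;	/* the equivalent of trunc_page(pv_va) */
}

int
main(void)
{
	vaddr_t packed = pv_va_pack(0x4000, PV_KENTER);

	assert(pv_va_unpack(packed) == 0x4000);
	assert(packed & PV_KENTER);	/* kenter-origin mapping is tagged */
	return 0;
}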

Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.207.2.1.4.1 src/sys/arch/mips/mips/pmap.c:1.207.2.1.4.2
--- src/sys/arch/mips/mips/pmap.c:1.207.2.1.4.1	Wed Nov  8 21:17:46 2017
+++ src/sys/arch/mips/mips/pmap.c	Wed Nov  8 21:22:48 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.207.2.1.4.1 2017/11/08 21:17:46 snj Exp $	*/
+/*	$NetBSD: pmap.c,v 1.207.2.1.4.2 2017/11/08 21:22:48 snj Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.207.2.1.4.1 2017/11/08 21:17:46 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.207.2.1.4.2 2017/11/08 21:22:48 snj Exp $");
 
 /*
  *	Manages physical address maps.
@@ -317,7 +317,7 @@ u_int		pmap_page_colormask;
 
 /* Forward function declarations */
 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
-void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
+void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *, int);
 pt_entry_t *pmap_pte(pmap_t, vaddr_t);
 
 /*
@@ -386,13 +386,13 @@ pmap_page_syncicache(struct vm_page *pg)
 	}
 	PG_MD_PVLIST_UNLOCK(md);
 	kpreempt_disable();
-	pmap_tlb_syncicache(md->pvh_first.pv_va, onproc);
+	pmap_tlb_syncicache(trunc_page(md->pvh_first.pv_va), onproc);
 	kpreempt_enable();
 #else
 	if (MIPS_HAS_R4K_MMU) {
 		if (PG_MD_CACHED_P(md)) {
 			mips_icache_sync_range_index(
-			    md->pvh_first.pv_va, PAGE_SIZE);
+			    trunc_page(md->pvh_first.pv_va), PAGE_SIZE);
 		}
 	} else {
 		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
@@ -436,10 +436,10 @@ pmap_map_ephemeral_page(struct vm_page *
 		 */
 		(void)PG_MD_PVLIST_LOCK(md, false);
 		if (PG_MD_CACHED_P(md)
-		    && mips_cache_badalias(pv->pv_va, va))
-			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
-		if (pv->pv_pmap == NULL)
-			pv->pv_va = va;
+		    && mips_cache_badalias(pv->pv_va, va)) {
+			mips_dcache_wbinv_range_index(trunc_page(pv->pv_va),
+			    PAGE_SIZE);
+		}
 		PG_MD_PVLIST_UNLOCK(md);
 	}
 
@@ -450,23 +450,13 @@ static void
 pmap_unmap_ephemeral_page(struct vm_page *pg, vaddr_t va,
 	pt_entry_t old_pt_entry)
 {
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	pv_entry_t pv = &md->pvh_first;
-	
-	if (MIPS_CACHE_VIRTUAL_ALIAS) {
-		(void)PG_MD_PVLIST_LOCK(md, false);
-		if (PG_MD_CACHED_P(md)
-		    || (pv->pv_pmap != NULL
-			&& mips_cache_badalias(pv->pv_va, va))) {
 
-			/*
-			 * If this page was previously cached or we had to use an
-			 * incompatible alias and it has a valid mapping, flush it
-			 * from the cache.
-			 */
-			mips_dcache_wbinv_range(va, PAGE_SIZE);
-		}
-		PG_MD_PVLIST_UNLOCK(md);
+	if (MIPS_CACHE_VIRTUAL_ALIAS) {
+		/*
+		 * Flush the page to avoid future incompatible aliases
+		 */
+		KASSERT((va & PAGE_MASK) == 0);
+		mips_dcache_wbinv_range(va, PAGE_SIZE);
 	}
 #ifndef _LP64
 	/*
@@ -1073,7 +1063,7 @@ pmap_page_protect(struct vm_page *pg, vm
 			while (pv != NULL) {
 				const pmap_t pmap = pv->pv_pmap;
 				const uint16_t gen = PG_MD_PVLIST_GEN(md);
-				va = pv->pv_va;
+				va = trunc_page(pv->pv_va);
 				PG_MD_PVLIST_UNLOCK(md);
 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
 				KASSERT(pv->pv_pmap == pmap);
@@ -1101,7 +1091,7 @@ pmap_page_protect(struct vm_page *pg, vm
 		pv = &md->pvh_first;
 		while (pv->pv_pmap != NULL) {
 			const pmap_t pmap = pv->pv_pmap;
-			va = pv->pv_va;
+			va = trunc_page(pv->pv_va);
 			PG_MD_PVLIST_UNLOCK(md);
 			pmap_remove(pmap, va, va + PAGE_SIZE);
 			pmap_update(pmap);
@@ -1118,6 +1108,9 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sv
 	const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit());
 	const uint32_t p = (flags & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit();
 	KASSERT(kpreempt_disabled());
+	KASSERT((sva & PAGE_MASK) == 0);
+	KASSERT((eva & PAGE_MASK) == 0);
+
 	/*
 	 * Change protection on every valid mapping within this segment.
 	 */
@@ -1162,6 +1155,8 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
 	pt_entry_t *pte;
 	u_int p;
 
+	KASSERT((sva & PAGE_MASK) == 0);
+	KASSERT((eva & PAGE_MASK) == 0);
 	PMAP_COUNT(protect);
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
@@ -1315,7 +1310,7 @@ pmap_page_cache(struct vm_page *pg, bool
 	     pv != NULL;
 	     pv = pv->pv_next) {
 		pmap_t pmap = pv->pv_pmap;
-		vaddr_t va = pv->pv_va;
+		vaddr_t va = trunc_page(pv->pv_va);
 		pt_entry_t *pte;
 		uint32_t pt_entry;
 
@@ -1493,6 +1488,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 		PMAP_COUNT(enter_exec_mapping);
 		if (!PG_MD_EXECPAGE_P(md)) {
+			KASSERT((pa & PAGE_MASK) == 0);
 			mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa),
 			    PAGE_SIZE);
 			pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE);
@@ -1505,7 +1501,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	kpreempt_disable();
 	if (pmap == pmap_kernel()) {
 		if (pg)
-			pmap_enter_pv(pmap, va, pg, &npte);
+			pmap_enter_pv(pmap, va, pg, &npte, 0);
 
 		/* enter entries into kernel pmap */
 		pte = kvtopte(va);
@@ -1546,7 +1542,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 
 	/* Done after case that may sleep/return. */
 	if (pg)
-		pmap_enter_pv(pmap, va, pg, &npte);
+		pmap_enter_pv(pmap, va, pg, &npte, 0);
 
 	/*
 	 * Now validate mapping with desired protection/wiring.
@@ -1663,6 +1659,17 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	kpreempt_disable();
 	pte = kvtopte(va);
 	KASSERT(!mips_pg_v(pte->pt_entry));
+
+	/*
+	 * No need to track non-managed pages or PMAP_KMPAGE pages for aliases
+	 */
+	if (managed && (flags & PMAP_KMPAGE) == 0) {
+		pmap_t pmap = pmap_kernel();
+		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+
+		pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER);
+	}
+
 	pte->pt_entry = npte;
 	pmap_tlb_update_addr(pmap_kernel(), va, npte, false);
 	kpreempt_enable();
@@ -1688,23 +1695,10 @@ pmap_kremove(vaddr_t va, vsize_t len)
 		}
 
 		PMAP_COUNT(kremove_pages);
-		if (MIPS_HAS_R4K_MMU && MIPS_CACHE_VIRTUAL_ALIAS) {
-			struct vm_page * const pg =
-			    PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
-			if (pg != NULL) {
-				struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-				(void)PG_MD_PVLIST_LOCK(md, false);
-				pv_entry_t pv = &md->pvh_first;
-				if (pv->pv_pmap == NULL) {
-					pv->pv_va = va;
-				} else if (PG_MD_CACHED_P(md)
-				    && mips_cache_badalias(pv->pv_va, va)) {
-					mips_dcache_wbinv_range(va, PAGE_SIZE);
-				}
-				PG_MD_PVLIST_UNLOCK(md);
-			}
-		}
-
+		struct vm_page * const pg =
+		    PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
+		if (pg)
+			pmap_remove_pv(pmap_kernel(), va, pg, false);
 		pte->pt_entry = new_pt_entry;
 		pmap_tlb_invalidate_addr(pmap_kernel(), va);
 	}
@@ -2001,10 +1995,13 @@ pmap_clear_modify(struct vm_page *pg)
 	gen = PG_MD_PVLIST_LOCK(md, false);
 	for (; pv != NULL; pv = pv_next) {
 		pmap_t pmap = pv->pv_pmap;
-		vaddr_t va = pv->pv_va;
+		vaddr_t va = trunc_page(pv->pv_va);
 		pt_entry_t *pte;
 		uint32_t pt_entry;
+
 		pv_next = pv->pv_next;
+		if (pv->pv_va & PV_KENTER)
+			continue;
 		if (pmap == pmap_kernel()) {
 			pte = kvtopte(va);
 		} else {
@@ -2083,8 +2080,13 @@ pmap_check_pvlist(struct vm_page_md *md)
 #ifdef _LP64
 			KASSERT(!MIPS_XKPHYS_P(pv->pv_va));
 #endif
+			pv_entry_t opv = &md->pvh_first;
+			for (; opv != NULL; opv = opv->pv_next) {
+				if (mips_cache_badalias(pv->pv_va, opv->pv_va)) {
+					KASSERT(PG_MD_UNCACHED_P(md));
+				}
+			}
 		}
-		pv = &md->pvh_first;
 	}
 #endif /* PARANOIADIAG */
 }
@@ -2094,7 +2096,8 @@ pmap_check_pvlist(struct vm_page_md *md)
  * physical to virtual map table.
  */
 void
-pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
+pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte,
+    int flags)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv, apv;
@@ -2133,7 +2136,7 @@ again:
 		PMAP_COUNT(mappings);
 		pmap_clear_mdpage_attributes(md, PG_MD_UNCACHED);
 		pv->pv_pmap = pmap;
-		pv->pv_va = va;
+		pv->pv_va = va | flags;
 	} else {
 #if defined(MIPS3_PLUS) && !defined(MULTIPROCESSOR) /* XXX mmu XXX */
 		if (MIPS_CACHE_VIRTUAL_ALIAS) {
@@ -2154,8 +2157,9 @@ again:
 
 			if (mips_cache_badalias(pv->pv_va, va)) {
 				for (npv = pv; npv; npv = npv->pv_next) {
-					pmap_remove(npv->pv_pmap, npv->pv_va,
-					    npv->pv_va + PAGE_SIZE);
+					vaddr_t nva = trunc_page(npv->pv_va);
+					pmap_remove(npv->pv_pmap, nva,
+					    nva + PAGE_SIZE);
 					pmap_update(npv->pv_pmap);
 					goto again;
 				}
@@ -2174,9 +2178,10 @@ again:
 				 * share the same cache index again.
 				 */
 				if (mips_cache_badalias(pv->pv_va, va)) {
+					vaddr_t nva = trunc_page(pv->pv_va);
 					pmap_page_cache(pg, false);
-					mips_dcache_wbinv_range_index(
-					    pv->pv_va, PAGE_SIZE);
+					mips_dcache_wbinv_range_index(nva,
+					    PAGE_SIZE);
 					*npte = (*npte &
 					    ~MIPS3_PG_CACHEMODE) |
 					    MIPS3_PG_UNCACHED;
@@ -2200,7 +2205,8 @@ again:
 		 */
 
 		for (npv = pv; npv; npv = npv->pv_next) {
-			if (pmap == npv->pv_pmap && va == npv->pv_va) {
+			if (pmap == npv->pv_pmap &&
+			    va == trunc_page(npv->pv_va)) {
 #ifdef PARANOIADIAG
 				pt_entry_t *pte;
 				uint32_t pt_entry;
@@ -2264,7 +2270,7 @@ again:
 		}
 		npv = apv;
 		apv = NULL;
-		npv->pv_va = va;
+		npv->pv_va = va | flags;
 		npv->pv_pmap = pmap;
 		npv->pv_next = pv->pv_next;
 		pv->pv_next = npv;
@@ -2296,6 +2302,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, 
 		    VM_PAGE_TO_PHYS(pg));
 #endif
 	KASSERT(kpreempt_disabled());
+	KASSERT((va & PAGE_MASK) == 0);
 	pv = &md->pvh_first;
 
 	(void)PG_MD_PVLIST_LOCK(md, true);
@@ -2309,13 +2316,12 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, 
 	 */
 
 	last = false;
-	if (pmap == pv->pv_pmap && va == pv->pv_va) {
+	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
 		npv = pv->pv_next;
 		if (npv) {
 			*pv = *npv;
 			KASSERT(pv->pv_pmap != NULL);
 		} else {
-			pmap_clear_mdpage_attributes(md, PG_MD_UNCACHED);
 			pv->pv_pmap = NULL;
 			last = true;	/* Last mapping removed */
 		}
@@ -2323,7 +2329,8 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, 
 	} else {
 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
 			PMAP_COUNT(remove_pvsearch);
-			if (pmap == npv->pv_pmap && va == npv->pv_va)
+			if (pmap == npv->pv_pmap &&
+			    va == trunc_page(npv->pv_va))
 				break;
 		}
 		if (npv) {
@@ -2338,11 +2345,13 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, 
 		 * removed.  If it was, then reenable caching.
 		 */
 		pv = &md->pvh_first;
-		for (pv_entry_t pv0 = pv->pv_next; pv0; pv0 = pv0->pv_next) {
+		pv_entry_t pv0 = pv->pv_next;
+
+		for (; pv0; pv0 = pv0->pv_next) {
 			if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
 				break;
 		}
-		if (npv == NULL)
+		if (pv0 == NULL)
 			pmap_page_cache(pg, true);
 	}
 #endif
@@ -2496,8 +2505,10 @@ pmap_pv_page_free(struct pool *pp, void 
 	pa = MIPS_KSEG0_TO_PHYS(va);
 #endif
 #ifdef MIPS3_PLUS
-	if (MIPS_CACHE_VIRTUAL_ALIAS)
+	if (MIPS_CACHE_VIRTUAL_ALIAS) {
+		KASSERT((va & PAGE_MASK) == 0);
 		mips_dcache_inv_range(va, PAGE_SIZE);
+	}
 #endif
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
 	KASSERT(pg != NULL);
@@ -2622,7 +2633,7 @@ mips_pmap_map_poolpage(paddr_t pa)
 		 */
 		(void)PG_MD_PVLIST_LOCK(md, false);
 		pv_entry_t pv = &md->pvh_first;
-		vaddr_t last_va = pv->pv_va;
+		vaddr_t last_va = trunc_page(pv->pv_va);
 		KASSERT(pv->pv_pmap == NULL);
 		pv->pv_va = va;
 		if (PG_MD_CACHED_P(md) && mips_cache_badalias(last_va, va))
@@ -2655,6 +2666,8 @@ mips_pmap_unmap_poolpage(vaddr_t va)
 		/*
 		 * We've unmapped a poolpage.  Its contents are irrelevant.
 		 */
+
+		KASSERT((va & PAGE_MASK) == 0);
 		mips_dcache_inv_range(va, PAGE_SIZE);
 	}
 #endif
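
The first bullet of the log message ("always flush an ephemeral page on
unmap") corresponds to the pmap_unmap_ephemeral_page() hunk above.
Reduced to its essentials it looks like the sketch below; kernel context
is assumed, and the TLB restore plus the !_LP64 KSEG2 handling of the
real function are elided.

/*
 * Reduced sketch of the new unmap path: on CPUs whose caches can hold
 * virtual aliases, write back and invalidate the page unconditionally
 * rather than consulting the pv list as the removed code did.  The
 * macros and cache routine are the kernel's own, as used in the diff.
 */
static void
unmap_ephemeral_page_sketch(vaddr_t va)
{
	if (MIPS_CACHE_VIRTUAL_ALIAS) {
		KASSERT((va & PAGE_MASK) == 0);	/* callers pass a page-aligned va */
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	}
}

The unconditional flush trades a possibly redundant write-back for never
leaving stale alias lines behind, which is the instability the PRs in the
log message point at.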

Index: src/sys/arch/mips/mips/pmap_segtab.c
diff -u src/sys/arch/mips/mips/pmap_segtab.c:1.4.2.1 src/sys/arch/mips/mips/pmap_segtab.c:1.4.2.1.4.1
--- src/sys/arch/mips/mips/pmap_segtab.c:1.4.2.1	Thu Jul  5 18:39:42 2012
+++ src/sys/arch/mips/mips/pmap_segtab.c	Wed Nov  8 21:22:48 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_segtab.c,v 1.4.2.1 2012/07/05 18:39:42 riz Exp $	*/
+/*	$NetBSD: pmap_segtab.c,v 1.4.2.1.4.1 2017/11/08 21:22:48 snj Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.4.2.1 2012/07/05 18:39:42 riz Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.4.2.1.4.1 2017/11/08 21:22:48 snj Exp $");
 
 /*
  *	Manages physical address maps.
@@ -217,18 +217,7 @@ pmap_segtab_release(union segtab *stp, u
 		}
 #endif
 
-#ifdef MIPS3_PLUS	/* XXX mmu XXX */
-		/*
-		 * The pica pmap.c flushed the segmap pages here.  I'm
-		 * not sure why, but I suspect it's because the page(s)
-		 * were being accessed by KSEG0 (cached) addresses and
-		 * may cause cache coherency problems when the page
-		 * is reused with KSEG2 (mapped) addresses.  This may
-		 * cause problems on machines without VCED/VCEI.
-		 */
-		if (MIPS_CACHE_VIRTUAL_ALIAS)
-			mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE);
-#endif	/* MIPS3_PLUS */
+		/* No need to flush page here as unmap poolpage does it */
 #ifdef _LP64
 		KASSERT(MIPS_XKPHYS_P(pte));
 #endif
