Module Name:    src
Committed By:   chs
Date:           Sat Jan 27 23:07:36 UTC 2018

Modified Files:
        src/sys/arch/alpha/alpha: pmap.c
        src/sys/arch/m68k/m68k: pmap_motorola.c
        src/sys/arch/powerpc/oea: pmap.c
        src/sys/arch/sparc64/sparc64: pmap.c

Log Message:
apply the change from arch/x86/x86/pmap.c rev. 1.266 commitid vZRjvmxG7YTHLOfA:

In pmap_enter_ma(), only try to allocate pv entries if we might need them,
and even if that allocation fails, only fail the operation if we later
discover that we really do need them.  If we are replacing an existing
mapping, reuse its pv structure where possible.
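
A minimal standalone sketch of this allocation strategy, using
hypothetical names (pv_alloc/pv_free/enter_mapping, not the actual
pmap interfaces): allocate eagerly but tolerate failure, prefer
reusing the entry recovered from the old mapping, and report ENOMEM
only when an entry is truly needed.

#include <errno.h>
#include <stdlib.h>

struct pv { struct pv *next; };

static struct pv *pv_alloc(void) { return malloc(sizeof(struct pv)); }
static void pv_free(struct pv *pv) { free(pv); }

static int
enter_mapping(int replaces_other_page, int needs_new_pv)
{
	struct pv *npv, *opv = NULL;
	int error = 0;

	/* Allocate eagerly but keep going on failure. */
	npv = pv_alloc();

	/* Replacing a mapping of another page recovers its pv entry. */
	if (replaces_other_page)
		opv = pv_alloc();	/* stand-in for the recovered entry */

	if (needs_new_pv) {
		if (npv == NULL) {
			/* Prefer reusing the old mapping's entry. */
			npv = opv;
			opv = NULL;
		}
		if (npv == NULL) {
			error = ENOMEM;	/* fail only when truly needed */
			goto out;
		}
		/* ... link npv into the page's pv list here ... */
		npv = NULL;		/* consumed */
	}
out:
	if (opv != NULL)
		pv_free(opv);
	if (npv != NULL)
		pv_free(npv);
	return error;
}

int
main(void)
{
	return enter_mapping(1, 1);
}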

This implements the requirement that pmap_enter(PMAP_CANFAIL) must not fail
when replacing an existing mapping with the first mapping of a new page,
a requirement that arose as an unintended consequence of the changes from
the rmind-uvmplock branch in 2011.

The problem arises when pmap_enter(PMAP_CANFAIL) is used to replace an
existing pmap mapping with a mapping of a different page (e.g. to resolve
a copy-on-write).
If that fails and leaves the old pmap entry in place, then UVM won't hold
the right locks when it eventually retries.  This entanglement of the UVM and
pmap locking was done in rmind-uvmplock in order to improve performance,
but it also means that the UVM state and pmap state need to be kept in sync
more than they did before.  It would be possible to handle this in the UVM code
instead of in the pmap code, but these pmap changes improve the handling of
low memory situations in general, and handling this in UVM would be clunky,
so this seemed like the better way to go.

This somewhat indirectly fixes PR 52706 on the remaining platforms where
this problem existed.


To generate a diff of this commit:
cvs rdiff -u -r1.261 -r1.262 src/sys/arch/alpha/alpha/pmap.c
cvs rdiff -u -r1.69 -r1.70 src/sys/arch/m68k/m68k/pmap_motorola.c
cvs rdiff -u -r1.94 -r1.95 src/sys/arch/powerpc/oea/pmap.c
cvs rdiff -u -r1.307 -r1.308 src/sys/arch/sparc64/sparc64/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/alpha/alpha/pmap.c
diff -u src/sys/arch/alpha/alpha/pmap.c:1.261 src/sys/arch/alpha/alpha/pmap.c:1.262
--- src/sys/arch/alpha/alpha/pmap.c:1.261	Fri Dec 23 07:15:27 2016
+++ src/sys/arch/alpha/alpha/pmap.c	Sat Jan 27 23:07:36 2018
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.261 2016/12/23 07:15:27 cherry Exp $ */
+/* $NetBSD: pmap.c,v 1.262 2018/01/27 23:07:36 chs Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -140,7 +140,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.261 2016/12/23 07:15:27 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.262 2018/01/27 23:07:36 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -439,7 +439,8 @@ static struct pool_cache pmap_tlb_shootd
  * Internal routines
  */
 static void	alpha_protection_init(void);
-static bool	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, long);
+static bool	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool, long,
+				    pv_entry_t *);
 static void	pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t, long);
 
 /*
@@ -466,8 +467,9 @@ static int	pmap_l1pt_ctor(void *, void *
  * PV table management functions.
  */
 static int	pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, pt_entry_t *,
-			      bool);
-static void	pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool);
+			      bool, pv_entry_t);
+static void	pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t, bool,
+			       pv_entry_t *);
 static void	*pmap_pv_page_alloc(struct pool *, int);
 static void	pmap_pv_page_free(struct pool *, void *);
 
@@ -1266,7 +1268,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 					    sva);
 #endif
 				needisync |= pmap_remove_mapping(pmap, sva,
-				    l3pte, true, cpu_id);
+				    l3pte, true, cpu_id, NULL);
 			}
 			sva += PAGE_SIZE;
 		}
@@ -1343,7 +1345,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 						    pmap_remove_mapping(
 							pmap, sva,
 							l3pte, true,
-							cpu_id);
+							cpu_id, NULL);
 					}
 
 					/*
@@ -1450,7 +1452,7 @@ pmap_page_protect(struct vm_page *pg, vm
 			panic("pmap_page_protect: bad mapping");
 #endif
 		if (pmap_remove_mapping(pmap, pv->pv_va, pv->pv_pte,
-		    false, cpu_id) == true) {
+		    false, cpu_id, NULL)) {
 			if (pmap == pmap_kernel())
 				needkisync |= true;
 			else
@@ -1558,6 +1560,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 {
 	struct vm_page *pg;			/* if != NULL, managed page */
 	pt_entry_t *pte, npte, opte;
+	pv_entry_t opv = NULL;
 	paddr_t opa;
 	bool tflush = true;
 	bool hadasm = false;	/* XXX gcc -Wuninitialized */
@@ -1750,14 +1753,15 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 		 */
 		pmap_physpage_addref(pte);
 	}
-	needisync |= pmap_remove_mapping(pmap, va, pte, true, cpu_id);
+	needisync |= pmap_remove_mapping(pmap, va, pte, true, cpu_id, &opv);
 
  validate_enterpv:
 	/*
 	 * Enter the mapping into the pv_table if appropriate.
 	 */
 	if (pg != NULL) {
-		error = pmap_pv_enter(pmap, pg, va, pte, true);
+		error = pmap_pv_enter(pmap, pg, va, pte, true, opv);
+		opv = NULL;
 		if (error) {
 			pmap_l3pt_delref(pmap, va, pte, cpu_id);
 			if (flags & PMAP_CANFAIL)
@@ -1845,6 +1849,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 out:
 	PMAP_UNLOCK(pmap);
 	PMAP_MAP_TO_HEAD_UNLOCK();
+	if (opv)
+		pmap_pv_free(opv);
 	
 	return error;
 }
@@ -2422,7 +2428,7 @@ alpha_protection_init(void)
  */
 static bool
 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte,
-    bool dolock, long cpu_id)
+    bool dolock, long cpu_id, pv_entry_t *opvp)
 {
 	paddr_t pa;
 	struct vm_page *pg;		/* if != NULL, page is managed */
@@ -2434,8 +2440,8 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
-		printf("pmap_remove_mapping(%p, %lx, %p, %d, %ld)\n",
-		       pmap, va, pte, dolock, cpu_id);
+		printf("pmap_remove_mapping(%p, %lx, %p, %d, %ld, %p)\n",
+		       pmap, va, pte, dolock, cpu_id, opvp);
 #endif
 
 	/*
@@ -2511,7 +2517,8 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 	 */
 	pg = PHYS_TO_VM_PAGE(pa);
 	KASSERT(pg != NULL);
-	pmap_pv_remove(pmap, pg, va, dolock);
+	pmap_pv_remove(pmap, pg, va, dolock, opvp);
+	KASSERT(opvp == NULL || *opvp != NULL);
 
 	return (needisync);
 }
@@ -2765,18 +2772,19 @@ vtophys(vaddr_t vaddr)
  */
 static int
 pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
-    bool dolock)
+    bool dolock, pv_entry_t newpv)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	pv_entry_t newpv;
 	kmutex_t *lock;
 
 	/*
 	 * Allocate and fill in the new pv_entry.
 	 */
-	newpv = pmap_pv_alloc();
-	if (newpv == NULL)
-		return ENOMEM;
+	if (newpv == NULL) {
+		newpv = pmap_pv_alloc();
+		if (newpv == NULL)
+			return ENOMEM;
+	}
 	newpv->pv_va = va;
 	newpv->pv_pmap = pmap;
 	newpv->pv_pte = pte;
@@ -2820,7 +2828,8 @@ pmap_pv_enter(pmap_t pmap, struct vm_pag
  *	Remove a physical->virtual entry from the pv_table.
  */
 static void
-pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock)
+pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock,
+	pv_entry_t *opvp)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, *pvp;
@@ -2852,7 +2861,10 @@ pmap_pv_remove(pmap_t pmap, struct vm_pa
 		mutex_exit(lock);
 	}
 
-	pmap_pv_free(pv);
+	if (opvp != NULL)
+		*opvp = pv;
+	else
+		pmap_pv_free(pv);
 }
 
 /*
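
A standalone sketch of the out-parameter handoff used in the alpha
change above, with hypothetical names rather than the kernel code:
the removal path unlinks the pv entry and either returns it to the
caller for reuse or frees it on the spot.

#include <stdlib.h>

struct pv_entry { struct pv_entry *pv_next; };

/*
 * Unlink pv (assumed to be on the list) and either hand it back
 * through *opvp or free it immediately.
 */
static void
pv_remove(struct pv_entry **listp, struct pv_entry *pv,
    struct pv_entry **opvp)
{
	while (*listp != pv)
		listp = &(*listp)->pv_next;
	*listp = pv->pv_next;

	if (opvp != NULL)
		*opvp = pv;		/* caller takes ownership */
	else
		free(pv);		/* no reuse requested */
}

int
main(void)
{
	struct pv_entry *head = NULL, *opv;
	struct pv_entry *pv = malloc(sizeof(*pv));

	if (pv == NULL)
		return 1;
	pv->pv_next = head;
	head = pv;

	pv_remove(&head, pv, &opv);	/* opv now owns the entry */
	free(opv);
	return 0;
}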

Index: src/sys/arch/m68k/m68k/pmap_motorola.c
diff -u src/sys/arch/m68k/m68k/pmap_motorola.c:1.69 src/sys/arch/m68k/m68k/pmap_motorola.c:1.70
--- src/sys/arch/m68k/m68k/pmap_motorola.c:1.69	Fri Dec 23 07:15:27 2016
+++ src/sys/arch/m68k/m68k/pmap_motorola.c	Sat Jan 27 23:07:36 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_motorola.c,v 1.69 2016/12/23 07:15:27 cherry Exp $        */
+/*	$NetBSD: pmap_motorola.c,v 1.70 2018/01/27 23:07:36 chs Exp $        */
 
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -119,7 +119,7 @@
 #include "opt_m68k_arch.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.69 2016/12/23 07:15:27 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.70 2018/01/27 23:07:36 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -306,7 +306,8 @@ pa_to_pvh(paddr_t pa)
 /*
  * Internal routines
  */
-void	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
+void	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
+			    struct pv_entry **);
 bool	pmap_testbit(paddr_t, int);
 bool	pmap_changebit(paddr_t, int, int);
 int	pmap_enter_ptpage(pmap_t, vaddr_t, bool);
@@ -843,7 +844,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 				}
 				firstpage = false;
 #endif
-				pmap_remove_mapping(pmap, sva, pte, flags);
+				pmap_remove_mapping(pmap, sva, pte, flags, NULL);
 			}
 			pte++;
 			sva += PAGE_SIZE;
@@ -929,7 +930,7 @@ pmap_page_protect(struct vm_page *pg, vm
 			panic("pmap_page_protect: bad mapping");
 #endif
 		pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
-		    pte, PRM_TFLUSH|PRM_CFLUSH);
+		    pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
 	}
 	splx(s);
 }
@@ -1048,6 +1049,7 @@ int
 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
 	pt_entry_t *pte;
+	struct pv_entry *opv = NULL;
 	int npte;
 	paddr_t opa;
 	bool cacheable = true;
@@ -1130,7 +1132,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 		PMAP_DPRINTF(PDB_ENTER,
 		    ("enter: removing old mapping %lx\n", va));
 		pmap_remove_mapping(pmap, va, pte,
-		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
+		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
 	}
 
 	/*
@@ -1179,7 +1181,12 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 				if (pmap == npv->pv_pmap && va == npv->pv_va)
 					panic("pmap_enter: already in pv_tab");
 #endif
-			npv = pmap_alloc_pv();
+			if (opv != NULL) {
+				npv = opv;
+				opv = NULL;
+			} else {
+				npv = pmap_alloc_pv();
+			}
 			KASSERT(npv != NULL);
 			npv->pv_va = va;
 			npv->pv_pmap = pmap;
@@ -1346,6 +1353,9 @@ validate:
 		pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
 #endif
 
+	if (opv != NULL)
+		pmap_free_pv(opv);
+
 	return 0;
 }
 
@@ -1659,7 +1669,7 @@ pmap_collect1(pmap_t pmap, paddr_t start
 
 		(void) pmap_extract(pmap, pv->pv_va, &kpa);
 		pmap_remove_mapping(pmap, pv->pv_va, NULL,
-		    PRM_TFLUSH|PRM_CFLUSH);
+		    PRM_TFLUSH|PRM_CFLUSH, NULL);
 
 		/*
 		 * Use the physical address to locate the original
@@ -1970,11 +1980,12 @@ pmap_prefer(vaddr_t foff, vaddr_t *vap)
  */
 /* static */
 void
-pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
+pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
+    struct pv_entry **opvp)
 {
 	paddr_t pa;
 	struct pv_header *pvh;
-	struct pv_entry *pv, *npv;
+	struct pv_entry *pv, *npv, *opv = NULL;
 	struct pmap *ptpmap;
 	st_entry_t *ste;
 	int s, bits;
@@ -1983,8 +1994,8 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 #endif
 
 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
-	    ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
-	    pmap, va, pte, flags));
+	    ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
+	    pmap, va, pte, flags, opvp));
 
 	/*
 	 * PTE not provided, compute it from pmap and va.
@@ -2093,7 +2104,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 				    ptppv->pv_next);
 #endif
 			pmap_remove_mapping(pmap_kernel(), ptpva,
-			    NULL, PRM_TFLUSH|PRM_CFLUSH);
+			    NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
 			mutex_enter(uvm_kernel_object->vmobjlock);
 			uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
 			mutex_exit(uvm_kernel_object->vmobjlock);
@@ -2133,7 +2144,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 		npv = pv->pv_next;
 		if (npv) {
 			*pv = *npv;
-			pmap_free_pv(npv);
+			opv = npv;
 		} else
 			pv->pv_pmap = NULL;
 	} else {
@@ -2149,7 +2160,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 		ste = npv->pv_ptste;
 		ptpmap = npv->pv_ptpmap;
 		pv->pv_next = npv->pv_next;
-		pmap_free_pv(npv);
+		opv = npv;
 		pvh = pa_to_pvh(pa);
 		pv = &pvh->pvh_first;
 	}
@@ -2255,6 +2266,11 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t
 
 	pvh->pvh_attrs |= bits;
 	splx(s);
+
+	if (opvp != NULL)
+		*opvp = opv;
+	else if (opv != NULL)
+		pmap_free_pv(opv);
 }
 
 /*

Index: src/sys/arch/powerpc/oea/pmap.c
diff -u src/sys/arch/powerpc/oea/pmap.c:1.94 src/sys/arch/powerpc/oea/pmap.c:1.95
--- src/sys/arch/powerpc/oea/pmap.c:1.94	Fri Dec 23 07:15:28 2016
+++ src/sys/arch/powerpc/oea/pmap.c	Sat Jan 27 23:07:36 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.94 2016/12/23 07:15:28 cherry Exp $	*/
+/*	$NetBSD: pmap.c,v 1.95 2018/01/27 23:07:36 chs Exp $	*/
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -63,7 +63,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.94 2016/12/23 07:15:28 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.95 2018/01/27 23:07:36 chs Exp $");
 
 #define	PMAP_NOOPNAMES
 
@@ -1551,6 +1551,13 @@ pmap_pvo_reclaim(struct pmap *pm)
 	return NULL;
 }
 
+static struct pool *
+pmap_pvo_pl(struct pvo_entry *pvo)
+{
+
+	return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
+}
+
 /*
  * This returns whether this is the first mapping of a page.
  */
@@ -1616,9 +1623,10 @@ pmap_pvo_enter(pmap_t pm, struct pool *p
 #endif
 	pmap_interrupts_restore(msr);
 	if (pvo) {
-		pmap_pvo_free(pvo);
+		KASSERT(pmap_pvo_pl(pvo) == pl);
+	} else {
+		pvo = pool_get(pl, poolflags);
 	}
-	pvo = pool_get(pl, poolflags);
 	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
 
 #ifdef DEBUG
@@ -1822,7 +1830,7 @@ void
 pmap_pvo_free(struct pvo_entry *pvo)
 {
 
-	pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
+	pool_put(pmap_pvo_pl(pvo), pvo);
 }
 
 void
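
The powerpc change factors the managed-vs-unmanaged pool choice into
pmap_pvo_pl() so the reuse path can assert that a recycled pvo belongs
to the pool the caller would have allocated from.  A toy standalone
sketch of that helper pattern, with hypothetical stand-ins for the
pool machinery:

#include <assert.h>
#include <stdbool.h>

struct pool { const char *pl_name; };

static struct pool managed_pool = { "managed" };
static struct pool unmanaged_pool = { "unmanaged" };

struct pvo { bool pvo_managed; };

/* Single source of truth for which pool an entry belongs to. */
static struct pool *
pvo_pool(const struct pvo *pvo)
{
	return pvo->pvo_managed ? &managed_pool : &unmanaged_pool;
}

int
main(void)
{
	struct pvo pvo = { .pvo_managed = true };

	/* Reuse is only legal if the entry came from the same pool. */
	assert(pvo_pool(&pvo) == &managed_pool);
	return 0;
}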

Index: src/sys/arch/sparc64/sparc64/pmap.c
diff -u src/sys/arch/sparc64/sparc64/pmap.c:1.307 src/sys/arch/sparc64/sparc64/pmap.c:1.308
--- src/sys/arch/sparc64/sparc64/pmap.c:1.307	Fri Feb 10 23:26:23 2017
+++ src/sys/arch/sparc64/sparc64/pmap.c	Sat Jan 27 23:07:36 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.307 2017/02/10 23:26:23 palle Exp $	*/
+/*	$NetBSD: pmap.c,v 1.308 2018/01/27 23:07:36 chs Exp $	*/
 /*
  *
  * Copyright (C) 1996-1999 Eduardo Horvath.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.307 2017/02/10 23:26:23 palle Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.308 2018/01/27 23:07:36 chs Exp $");
 
 #undef	NO_VCACHE /* Don't forget the locked TLB in dostart */
 #define	HWREF
@@ -135,7 +135,7 @@ struct pool_cache pmap_pv_cache;
 
 pv_entry_t	pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
 void	pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
-			   pv_entry_t);
+			   pv_entry_t *);
 void	pmap_page_cache(struct pmap *, paddr_t, int);
 
 /*
@@ -1783,13 +1783,13 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 	pte_t tte;
 	int64_t data;
 	paddr_t opa = 0, ptp; /* XXX: gcc */
-	pv_entry_t pvh, npv = NULL, freepv;
+	pv_entry_t pvh, opv = NULL, npv;
 	struct vm_page *pg, *opg, *ptpg;
 	int s, i, uncached = 0, error = 0;
 	int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */
 	bool wired = (flags & PMAP_WIRED) != 0;
-	bool wasmapped = FALSE;
-	bool dopv = TRUE;
+	bool wasmapped = false;
+	bool dopv = true;
 
 	/*
 	 * Is this part of the permanent mappings?
@@ -1797,14 +1797,12 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 	KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK);
 	KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata);
 
-	/* Grab a spare PV. */
-	freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
-	if (__predict_false(freepv == NULL)) {
-		if (flags & PMAP_CANFAIL)
-			return (ENOMEM);
-		panic("pmap_enter: no pv entries available");
-	}
-	freepv->pv_next = NULL;
+	/*
+	 * Grab a spare PV.  Keep going even if this fails since we don't
+	 * yet know if we will need it.
+	 */
+
+	npv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
 
 	/*
 	 * If a mapping at this address already exists, check if we're
@@ -1819,7 +1817,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 		if (opa != pa) {
 			opg = PHYS_TO_VM_PAGE(opa);
 			if (opg != NULL) {
-				npv = pmap_remove_pv(pm, va, opg);
+				opv = pmap_remove_pv(pm, va, opg);
 			}
 		}
 	}
@@ -1849,31 +1847,21 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 		/*
 		 * make sure we have a pv entry ready if we need one.
 		 */
-		if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
-			if (npv != NULL) {
-				/* free it */
-				npv->pv_next = freepv;
-				freepv = npv;
-				npv = NULL;
-			}
-			if (wasmapped && opa == pa) {
-				dopv = FALSE;
-			}
+		if (wasmapped && opa == pa) {
+			dopv = false;
 		} else if (npv == NULL) {
-			/* use the pre-allocated pv */
-			npv = freepv;
-			freepv = freepv->pv_next;
+			npv = opv;
+			opv = NULL;
+			if (npv == NULL) {
+				mutex_exit(&pmap_lock);
+				error = ENOMEM;
+				goto out;
+			}
 		}
 		ENTER_STAT(managed);
 	} else {
 		ENTER_STAT(unmanaged);
-		dopv = FALSE;
-		if (npv != NULL) {
-			/* free it */
-			npv->pv_next = freepv;
-			freepv = npv;
-			npv = NULL;
-		}
+		dopv = false;
 	}
 
 #ifndef NO_VCACHE
@@ -1945,11 +1933,6 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 		if (!pmap_get_page(&ptp)) {
 			mutex_exit(&pmap_lock);
 			if (flags & PMAP_CANFAIL) {
-				if (npv != NULL) {
-					/* free it */
-					npv->pv_next = freepv;
-					freepv = npv;
-				}
 				error = ENOMEM;
 				goto out;
 			} else {
@@ -1966,7 +1949,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 		pmap_free_page_noflush(ptp);
 	}
 	if (dopv) {
-		pmap_enter_pv(pm, va, pa, pg, npv);
+		pmap_enter_pv(pm, va, pa, pg, &npv);
 	}
 
 	mutex_exit(&pmap_lock);
@@ -2039,11 +2022,11 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 	/* We will let the fast mmu miss interrupt load the new translation */
 	pv_check();
  out:
-	/* Catch up on deferred frees. */
-	for (; freepv != NULL; freepv = npv) {
-		npv = freepv->pv_next;
-		pool_cache_put(&pmap_pv_cache, freepv);
-	}
+	if (opv)
+		pool_cache_put(&pmap_pv_cache, opv);
+	if (npv)
+		pool_cache_put(&pmap_pv_cache, npv);
+
 	return error;
 }
 
@@ -3302,14 +3285,16 @@ ctx_free(struct pmap *pm, struct cpu_inf
  * physical to virtual map table.
  *
  * We enter here with the pmap locked.
+ * The pv_entry_t in *npvp is replaced with NULL if this function
+ * uses it, otherwise the caller needs to free it.
  */
 
 void
 pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg,
-	      pv_entry_t npv)
+	      pv_entry_t *npvp)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	pv_entry_t pvh;
+	pv_entry_t pvh, npv;
 
 	KASSERT(mutex_owned(&pmap_lock));
 
@@ -3327,7 +3312,6 @@ pmap_enter_pv(struct pmap *pmap, vaddr_t
 		PV_SETVA(pvh, va);
 		pvh->pv_pmap = pmap;
 		pvh->pv_next = NULL;
-		KASSERT(npv == NULL);
 	} else {
 		if (pg->loan_count == 0 && !(pvh->pv_va & PV_ALIAS)) {
 
@@ -3352,6 +3336,8 @@ pmap_enter_pv(struct pmap *pmap, vaddr_t
 
 		DPRINTF(PDB_ENTER, ("pmap_enter: new pv: pmap %p va %lx\n",
 		    pmap, va));
+		npv = *npvp;
+		*npvp = NULL;
 		npv->pv_pmap = pmap;
 		npv->pv_va = va & PV_VAMASK;
 		npv->pv_next = pvh->pv_next;
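
The sparc64 pmap_enter_pv() now takes the spare entry by reference and
NULLs *npvp when it actually links the entry in, so the caller can
unconditionally free whatever survives.  A toy standalone sketch of
that consume-through-a-pointer contract, with hypothetical names:

#include <stdlib.h>

struct pv { struct pv *pv_next; };

static struct pv *page_pvs;		/* toy per-page pv list */

/*
 * On the first-mapping path the entry is not needed and *npvp is
 * left alone; otherwise it is linked in and NULLed out so the
 * caller knows it was consumed.
 */
static void
enter_pv(struct pv **npvp, int first_mapping)
{
	struct pv *npv;

	if (first_mapping)
		return;

	npv = *npvp;
	*npvp = NULL;			/* consumed */
	npv->pv_next = page_pvs;
	page_pvs = npv;
}

int
main(void)
{
	struct pv *npv = malloc(sizeof(*npv));

	if (npv == NULL)
		return 1;
	enter_pv(&npv, 1);
	if (npv != NULL)		/* not consumed: caller frees */
		free(npv);
	return 0;
}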
