Module Name:    src
Committed By:   skrll
Date:           Fri Mar 19 07:29:44 UTC 2010

Modified Files:
        src/sys/arch/hppa/hppa: pmap.c
        src/sys/arch/hppa/include: pmap.h

Log Message:
Not all PA CPUs have the U-bit (uncacheable) for non-IO memory.  In fact
most don't.  Deal with non-equivalent aliases by removing and flushing the
managed mappings, and flushing the unmanaged mappings.

When flushing caches/TLB flush the cache before purging the TLB just in
case the flush enters the mapping into the TLB.


To generate a diff of this commit:
cvs rdiff -u -r1.72 -r1.73 src/sys/arch/hppa/hppa/pmap.c
cvs rdiff -u -r1.25 -r1.26 src/sys/arch/hppa/include/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/hppa/hppa/pmap.c
diff -u src/sys/arch/hppa/hppa/pmap.c:1.72 src/sys/arch/hppa/hppa/pmap.c:1.73
--- src/sys/arch/hppa/hppa/pmap.c:1.72	Tue Mar 16 16:20:19 2010
+++ src/sys/arch/hppa/hppa/pmap.c	Fri Mar 19 07:29:44 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.72 2010/03/16 16:20:19 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.73 2010/03/19 07:29:44 skrll Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.72 2010/03/16 16:20:19 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.73 2010/03/19 07:29:44 skrll Exp $");
 
 #include "opt_cputype.h"
 
@@ -225,13 +225,12 @@
 void pmap_dump_pv(paddr_t);
 #endif
 
-void pmap_check_alias(struct vm_page *, struct pv_entry *, vaddr_t,
-    pt_entry_t *);
-static bool __changebit(struct vm_page *, u_int, u_int);
+void pmap_page_remove_locked(struct vm_page *);
+int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
 
 /* un-invert PVF_REF */
 #define pmap_pvh_attrs(a) \
-	(((a) & (PVF_MOD|PVF_REF|PVF_WRITE|PVF_UNCACHEABLE)) ^ PVF_REF)
+	(((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
 
 #define PMAP_LOCK(pm)					\
 	do {						\
@@ -426,11 +425,11 @@
 {
 
 	fdcache(pmap->pm_space, va, PAGE_SIZE);
-	pdtlb(pmap->pm_space, va);
 	if (pte & PTE_PROT(TLB_EXECUTE)) {
 		ficache(pmap->pm_space, va, PAGE_SIZE);
 		pitlb(pmap->pm_space, va);
 	}
+	pdtlb(pmap->pm_space, va);
 #ifdef USE_HPT
 	if (pmap_hpt) {
 		struct hpt_entry *hpt;
@@ -511,112 +510,28 @@
 }
 #endif
 
-/*
- * Check for non-equiv aliases for this page and the mapping being added or
- * removed. If, when adding, we find a new non-equiv alias then mark all PTEs
- * as uncacheable including the one we're checking. If, when removing, there
- * are no non-equiv aliases left then we mark PTEs as cacheable.
- *
- * - Shouldn't be called for pages that have been marked uncacheable by
- *   pmap_kenter_pa.
- * - Must be called with pg->mdpage.pvh_lock held.
- */
-void
-pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va,
-    pt_entry_t *ptep)
+int
+pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
 {
-	bool nonequiv = false;
-	struct pv_entry *tpve;
-	u_int attrs;
-
-	DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-	    ("%s(%p, %p, 0x%lx, %p)\n", __func__, pg, pve, va, ptep));
-
-	/* we should only be looking if we're not PVF_NC */
-	KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
-	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
-
-	if (ptep) {
-		attrs = pmap_pvh_attrs(*ptep);
-
-		DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-		    ("%s: va 0x%08lx attrs 0x%08x (new)\n", __func__, va,
-		    attrs));
-	} else {
-		attrs = 0;
-
-		DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-		    ("%s: va 0x%08lx (removed)\n", __func__, va));
-	}
-
-	/*
-	 * Add in flags for existing mappings and check if mapping we're
-	 * adding/removing is an non-equiv aliases of the other mappings.
-	 */
-	for (tpve = pve; tpve; tpve = tpve->pv_next) {
-		pt_entry_t pte;
-		vaddr_t tva = tpve->pv_va & PV_VAMASK;
-
-		/* XXX LOCK */
-		pte = pmap_vp_find(tpve->pv_pmap, tva);
-		attrs |= pmap_pvh_attrs(pte);
-
-		if (((va ^ tva) & HPPA_PGAOFF) != 0)
-			nonequiv = true;
-
-		DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-		    ("%s: va 0x%08x:0x%08lx attrs 0x%08x %s\n", __func__,
-		    tpve->pv_pmap->pm_space, tpve->pv_va & PV_VAMASK,
-		    pmap_pvh_attrs(pte), nonequiv ? "alias" : ""));
-	}
-
-	if (!nonequiv) {
-		/*
-		 * Inherit uncacheable attribute if set as it means we already
-		 * have non-equiv aliases.
-		 */
-		if (ptep && (attrs & PVF_UNCACHEABLE) != 0)
-			*ptep |= PTE_PROT(TLB_UNCACHEABLE);
-
-		/* No more to be done. */
-		return;
-	}
-
-	if (ptep) {
-		if ((attrs & (PVF_WRITE|PVF_MOD)) != 0) {
-			/*
-			 * We have non-equiv aliases and the new/some 
-			 * mapping(s) is/are writable (or modified). We must
-			 * mark all mappings as uncacheable (if they're not
-			 * already marked as such).
-			 */
-			pg->mdpage.pvh_aliases++;
+	struct pv_entry *pve;
+	int ret = 0;
 
-			if ((attrs & PVF_UNCACHEABLE) == 0)
-				__changebit(pg, PVF_UNCACHEABLE, 0);
+	/* check for non-equ aliased mappings */
+	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+		vaddr_t pva = pve->pv_va & PV_VAMASK;
 
-			*ptep |= PTE_PROT(TLB_UNCACHEABLE);
+		pte |= pmap_vp_find(pve->pv_pmap, pva);
+		if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
+		    (pte & PTE_PROT(TLB_WRITE))) {
 
 			DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-			    ("%s: page marked uncacheable\n", __func__));
-		}
-	} else {
-		if ((attrs & PVF_UNCACHEABLE) != 0) {
-			/*
-			 * We've removed a non-equiv aliases. We can now mark
-			 * it cacheable if all non-equiv aliases are gone.
-			 */
-
-			pg->mdpage.pvh_aliases--;
-			if (pg->mdpage.pvh_aliases == 0) {
-				__changebit(pg, 0, PVF_UNCACHEABLE);
-
-				DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-				    ("%s: page re-marked cacheable\n",
-				    __func__));
-			}
+                            ("%s: aliased writable mapping 0x%x:0x%x\n",
+                            __func__, pve->pv_pmap->pm_space, pve->pv_va));
+			ret++;
 		}
 	}
+
+        return (ret);
 }
 
 /*
@@ -1308,7 +1223,6 @@
 	}
 
 	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
-		mutex_enter(&pg->mdpage.pvh_lock);
 
 		if (!pve && !(pve = pmap_pv_alloc())) {
 			if (flags & PMAP_CANFAIL) {
@@ -1318,9 +1232,11 @@
 			}
 			panic("%s: no pv entries available", __func__);
 		}
+                pte |= PTE_PROT(pmap_prot(pmap, prot));
+		mutex_enter(&pg->mdpage.pvh_lock);
+		if (pmap_check_alias(pg, va, pte))
+			pmap_page_remove_locked(pg);
 		pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
-		pmap_check_alias(pg, pve, va, &pte);
-
 		mutex_exit(&pg->mdpage.pvh_lock);
 	} else if (pve) {
 		pmap_pv_free(pve);
@@ -1394,10 +1310,6 @@
 				mutex_enter(&pg->mdpage.pvh_lock);
 
 				pve = pmap_pv_remove(pg, pmap, sva);
-
-				pmap_check_alias(pg, pg->mdpage.pvh_list,
-				    sva, NULL);
-
 				pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
 
 				mutex_exit(&pg->mdpage.pvh_lock);
@@ -1467,6 +1379,15 @@
 void
 pmap_page_remove(struct vm_page *pg)
 {
+
+	mutex_enter(&pg->mdpage.pvh_lock);
+	pmap_page_remove_locked(pg);
+	mutex_exit(&pg->mdpage.pvh_lock);
+}
+
+void
+pmap_page_remove_locked(struct vm_page *pg)
+{
 	struct pv_entry *pve, *npve, **pvp;
 
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
@@ -1474,7 +1395,6 @@
 	if (pg->mdpage.pvh_list == NULL)
 		return;
 
-	mutex_enter(&pg->mdpage.pvh_lock);
 	pvp = &pg->mdpage.pvh_list;
 	for (pve = pg->mdpage.pvh_list; pve; pve = npve) {
 		pmap_t pmap = pve->pv_pmap;
@@ -1495,22 +1415,21 @@
 		if (pve->pv_va & PV_KENTER) {
 			*pvp = pve;
 			pvp = &pve->pv_next;
-			continue;
-		}
-
-		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+		} else
+			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
 
 		pmap_pte_flush(pmap, va, pte);
 		if (pte & PTE_PROT(TLB_WIRED))
 			pmap->pm_stats.wired_count--;
 		pmap->pm_stats.resident_count--;
 
-		pmap_pte_set(pde, va, 0);
-		pmap_pv_free(pve);
+		if (!(pve->pv_va & PV_KENTER)) {
+			pmap_pte_set(pde, va, 0);
+			pmap_pv_free(pve);
+		}
 		PMAP_UNLOCK(pmap);
 	}
 	*pvp = NULL;
-	mutex_exit(&pg->mdpage.pvh_lock);
 
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
 }
@@ -1553,30 +1472,16 @@
 bool
 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
 {
-	bool rv;
+	struct pv_entry *pve;
+	int res;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS, 
 	    ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
 
-	mutex_enter(&pg->mdpage.pvh_lock);
-	rv = __changebit(pg, set, clear);
-	mutex_exit(&pg->mdpage.pvh_lock);
-
-	return rv;
-}
+	KASSERT((set & ~(PVF_REF)) == 0);
+	KASSERT((clear & ~(PVF_MOD|PVF_WRITE)) == 0);
 
-/*
- * Must be called with pg->mdpage.pvh_lock held.
- */
-static bool
-__changebit(struct vm_page *pg, u_int set, u_int clear)
-{
-	struct pv_entry *pve;
-	int res;
-
-	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
-	KASSERT(((set | clear) &
-	    ~(PVF_MOD|PVF_REF|PVF_UNCACHEABLE|PVF_WRITE)) == 0);
+	mutex_enter(&pg->mdpage.pvh_lock);
 
 	/* preserve other bits */
 	res = pg->mdpage.pvh_attrs & (set | clear);
@@ -1612,6 +1517,7 @@
 			}
 		}
 	}
+	mutex_exit(&pg->mdpage.pvh_lock);
 
 	return ((res & (clear | set)) != 0);
 }
@@ -1701,8 +1607,6 @@
 
 	DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge));
 
-	KASSERT(!(pg->mdpage.pvh_attrs & PVF_NC));
-
 	/* purge cache for all possible mappings for the pa */
 	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
 		vaddr_t va = pve->pv_va & PV_VAMASK;
@@ -1714,8 +1618,8 @@
 			fdcache(sp, va, PAGE_SIZE);
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-		pdtlb(sp, va);
 		ficache(sp, va, PAGE_SIZE);
+		pdtlb(sp, va);
 		pitlb(sp, va);
 #endif
 	}
@@ -1736,10 +1640,11 @@
 
 	memset((void *)pa, 0, PAGE_SIZE);
 	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
+
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-	pdtlb(HPPA_SID_KERNEL, pa);
 	ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
+	pdtlb(HPPA_SID_KERNEL, pa);
 	pitlb(HPPA_SID_KERNEL, pa);
 #endif
 }
@@ -1766,10 +1671,10 @@
 	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-	pdtlb(HPPA_SID_KERNEL, spa);
-	pdtlb(HPPA_SID_KERNEL, dpa);
 	ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
 	ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
+	pdtlb(HPPA_SID_KERNEL, spa);
+	pdtlb(HPPA_SID_KERNEL, dpa);
 	pitlb(HPPA_SID_KERNEL, spa);
 	pitlb(HPPA_SID_KERNEL, dpa);
 #endif
@@ -1814,29 +1719,23 @@
 
 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
 		if (pg != NULL) {
-
 			KASSERT(pa < HPPA_IOBEGIN);
 
-			mutex_enter(&pg->mdpage.pvh_lock);
-
-			if (flags & PMAP_NOCACHE)
-				pg->mdpage.pvh_attrs |= PVF_NC;
-			else {
-				struct pv_entry *pve;
+			struct pv_entry *pve;
 				
-				pve = pmap_pv_alloc();
-				if (!pve)
-					panic("%s: no pv entries available",
-					    __func__);
-				DPRINTF(PDB_FOLLOW|PDB_ENTER,
-				    ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
-				    va, pa, pte));
-
-				pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
-				    PV_KENTER);
-				pmap_check_alias(pg, pve, va, &pte);
-			}
+			pve = pmap_pv_alloc();
+			if (!pve)
+				panic("%s: no pv entries available",
+				    __func__);
+			DPRINTF(PDB_FOLLOW|PDB_ENTER,
+			    ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
+			    va, pa, pte));
 
+			mutex_enter(&pg->mdpage.pvh_lock);
+			if (pmap_check_alias(pg, va, pte))
+				pmap_page_remove_locked(pg);
+			pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
+			    PV_KENTER);
 			mutex_exit(&pg->mdpage.pvh_lock);
 		}
 	}
@@ -1911,12 +1810,6 @@
 
 			pve = pmap_pv_remove(pg, pmap, va);
 
-			if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
-				pmap_check_alias(pg, pg->mdpage.pvh_list, va,
-				    NULL);
-
-			pg->mdpage.pvh_attrs &= ~PVF_NC;
-
 			mutex_exit(&pg->mdpage.pvh_lock);
 			if (pve != NULL)
 				pmap_pv_free(pve);

Index: src/sys/arch/hppa/include/pmap.h
diff -u src/sys/arch/hppa/include/pmap.h:1.25 src/sys/arch/hppa/include/pmap.h:1.26
--- src/sys/arch/hppa/include/pmap.h:1.25	Sat Mar  6 21:22:42 2010
+++ src/sys/arch/hppa/include/pmap.h	Fri Mar 19 07:29:44 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.25 2010/03/06 21:22:42 skrll Exp $	*/
+/*	$NetBSD: pmap.h,v 1.26 2010/03/19 07:29:44 skrll Exp $	*/
 
 /*	$OpenBSD: pmap.h,v 1.35 2007/12/14 18:32:23 deraadt Exp $	*/
 
@@ -60,35 +60,9 @@
 	struct pmap_statistics	pm_stats;
 };
 
-/*
- * Flags that indicate attributes of pages or mappings of pages.
- *
- * We need two flags for cacheability because pages/mappings can be marked
- * uncacheable for two reasons,
- *
- *	1) A page's contents may change under our feet and can never be
- *	   cacheable, e.g. I/O space, DMA buffers.
- *	2) A page has non-equivalent aliases and must be (temporarily)
- *	   marked uncachable.
- *
- * A page that is marked PVF_NC can *never* be marked cacheable and will have
- * all mappings marked PVF_UNCACHEABLE. A page marked PVF_UNCACHEABLE only
- * is done so due to non-equivalent aliases this maybe removed is the non-
- * equivalent aliases are removed. 
- *
- */
-
-#define	PVF_NC		0x2000			/* pg is never cacheable */
-
 #define	PVF_MOD		PTE_PROT(TLB_DIRTY)	/* pg/mp is modified */
 #define	PVF_REF		PTE_PROT(TLB_REFTRAP)	/* pg/mp (inv) is referenced */
 #define	PVF_WRITE	PTE_PROT(TLB_WRITE)	/* pg/mp is writable */
-#define	PVF_UNCACHEABLE	PTE_PROT(TLB_UNCACHEABLE)
-						/* pg/mp is uncacheable */
-
-#define	pmap_is_aliased(pg)	\
-	(((pg)->mdpage.pvh_attrs & PVF_NC) == 0 && \
-	 ((pg)->mdpage.pvh_attrs & PVF_UNCACHEABLE) != 0)
 
 #define	HPPA_MAX_PID	0xfffa
 #define	HPPA_SID_MAX	0x7ffd
@@ -123,10 +97,11 @@
 static inline paddr_t hppa_unmap_poolpage(vaddr_t va)
 {
 	pdcache(HPPA_SID_KERNEL, va, PAGE_SIZE);
+
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-	pdtlb(HPPA_SID_KERNEL, va);
 	ficache(HPPA_SID_KERNEL, va, PAGE_SIZE);
+	pdtlb(HPPA_SID_KERNEL, va);
 	pitlb(HPPA_SID_KERNEL, va);
 #endif
 

Reply via email to