Module Name:    src
Committed By:   matt
Date:           Thu Jul  5 16:55:11 UTC 2012

Modified Files:
        src/sys/common/pmap/tlb: pmap.c pmap.h pmap_segtab.c

Log Message:
Add 3-level page table support (from MIPS).  Rename a few routines.


To generate a diff of this commit:
cvs rdiff -u -r1.14 -r1.15 src/sys/common/pmap/tlb/pmap.c
cvs rdiff -u -r1.13 -r1.14 src/sys/common/pmap/tlb/pmap.h
cvs rdiff -u -r1.5 -r1.6 src/sys/common/pmap/tlb/pmap_segtab.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/common/pmap/tlb/pmap.c
diff -u src/sys/common/pmap/tlb/pmap.c:1.14 src/sys/common/pmap/tlb/pmap.c:1.15
--- src/sys/common/pmap/tlb/pmap.c:1.14	Wed Jul  4 11:39:42 2012
+++ src/sys/common/pmap/tlb/pmap.c	Thu Jul  5 16:55:11 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.14 2012/07/04 11:39:42 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.15 2012/07/05 16:55:11 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.14 2012/07/04 11:39:42 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.15 2012/07/05 16:55:11 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -450,8 +450,8 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 void
 pmap_init(void)
 {
-        UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
-        UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
+	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
+	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
 
@@ -504,7 +504,7 @@ pmap_create(void)
 	pmap->pm_minaddr = VM_MIN_ADDRESS;
 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
 
-	pmap_segtab_alloc(pmap);
+	pmap_segtab_init(pmap);
 
 	UVMHIST_LOG(pmaphist, "<- pmap %p", pmap,0,0,0);
 	return pmap;
@@ -530,7 +530,7 @@ pmap_destroy(pmap_t pmap)
 	PMAP_COUNT(destroy);
 	kpreempt_disable();
 	pmap_tlb_asid_release_all(pmap);
-	pmap_segtab_free(pmap);
+	pmap_segtab_destroy(pmap);
 
 	pool_put(&pmap_pmap_pool, pmap);
 	kpreempt_enable();
@@ -1482,7 +1482,7 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, s
 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
 	UVMHIST_LOG(pmaphist, "nptep=%p (%#x))", npte, *npte, 0, 0);
 
-        KASSERT(kpreempt_disabled());
+	KASSERT(kpreempt_disabled());
 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
 
 	apv = NULL;

Index: src/sys/common/pmap/tlb/pmap.h
diff -u src/sys/common/pmap/tlb/pmap.h:1.13 src/sys/common/pmap/tlb/pmap.h:1.14
--- src/sys/common/pmap/tlb/pmap.h:1.13	Wed Jul  4 11:39:42 2012
+++ src/sys/common/pmap/tlb/pmap.h	Thu Jul  5 16:55:11 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.13 2012/07/04 11:39:42 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.14 2012/07/05 16:55:11 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -107,8 +107,8 @@ pt_entry_t *pmap_pte_reserve(struct pmap
 void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
 	uintptr_t);
 void pmap_segtab_activate(struct pmap *, struct lwp *);
-void pmap_segtab_alloc(struct pmap *);
-void pmap_segtab_free(struct pmap *);
+void pmap_segtab_init(struct pmap *);
+void pmap_segtab_destroy(struct pmap *);
 extern kmutex_t pmap_segtab_lock;
 #endif /* _KERNEL */
 

Index: src/sys/common/pmap/tlb/pmap_segtab.c
diff -u src/sys/common/pmap/tlb/pmap_segtab.c:1.5 src/sys/common/pmap/tlb/pmap_segtab.c:1.6
--- src/sys/common/pmap/tlb/pmap_segtab.c:1.5	Wed Jul  4 11:39:42 2012
+++ src/sys/common/pmap/tlb/pmap_segtab.c	Thu Jul  5 16:55:11 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_segtab.c,v 1.5 2012/07/04 11:39:42 matt Exp $	*/
+/*	$NetBSD: pmap_segtab.c,v 1.6 2012/07/05 16:55:11 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.5 2012/07/04 11:39:42 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.6 2012/07/05 16:55:11 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -134,7 +134,12 @@ pmap_segmap(struct pmap *pmap, vaddr_t v
 {
 	struct pmap_segtab *stp = pmap->pm_segtab;
 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
-	return stp->seg_tab[va >> SEGSHIFT];
+#ifdef _LP64
+	stp = stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
+	if (stp == NULL)
+		return NULL;
+#endif
+	return stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
 }
 
 pt_entry_t *
@@ -147,6 +152,55 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va)
 	return pte + ((va >> PGSHIFT) & (NPTEPG - 1));
 }
 
+static void
+pmap_segtab_free(struct pmap_segtab *stp)
+{
+	/*
+	 * Insert the the segtab into the segtab freelist.
+	 */
+	mutex_spin_enter(&pmap_segtab_lock);
+	stp->seg_tab[0] = (void *) pmap_segtab_info.free_segtab;
+	pmap_segtab_info.free_segtab = stp;
+	SEGTAB_ADD(nput, 1);
+	mutex_spin_exit(&pmap_segtab_lock);
+}
+
+static void
+pmap_segtab_release(struct pmap_segtab *stp, u_int level)
+{
+
+	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
+		paddr_t pa;
+#ifdef _LP64
+		if (level > 0) {
+			if (stp->seg_seg[i] != NULL) {
+				pmap_segtab_release(stp->seg_seg[i], level - 1);
+				stp->seg_seg[i] = NULL;
+			}
+			continue;
+		}
+#endif
+
+		/* get pointer to segment map */
+		pt_entry_t *pte = stp->seg_tab[i];
+		if (pte == NULL)
+			continue;
+#ifdef PARANOIADIAG
+		for (size_t j = 0; j < NPTEPG; j++) {
+			if ((pte + j)->pt_entry)
+				panic("pmap_destroy: segmap not empty");
+		}
+#endif
+
+		pa = POOL_VTOPHYS(pte);
+		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+
+		stp->seg_tab[i] = NULL;
+	}
+
+	pmap_segtab_free(stp);
+}
+
 /*
  *	Create and return a physical map.
  *
@@ -159,10 +213,11 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va)
  *	the map will be used in software only, and
  *	is bounded by that size.
  */
-void
-pmap_segtab_alloc(pmap_t pmap)
+static struct pmap_segtab *
+pmap_segtab_alloc(void)
 {
 	struct pmap_segtab *stp;
+
  again:
 	mutex_spin_enter(&pmap_segtab_lock);
 	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
@@ -172,7 +227,7 @@ pmap_segtab_alloc(pmap_t pmap)
 		SEGTAB_ADD(nget, 1);
 	}
 	mutex_spin_exit(&pmap_segtab_lock);
-	
+
 	if (__predict_false(stp == NULL)) {
 		struct vm_page * const stp_pg = pmap_pte_pagealloc();
 
@@ -187,7 +242,7 @@ pmap_segtab_alloc(pmap_t pmap)
 		const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg);
 
 		stp = (struct pmap_segtab *)POOL_PHYSTOV(stp_pa);
-		const size_t n = NBPG / sizeof(struct pmap_segtab);
+		const size_t n = NBPG / sizeof(*stp);
 		if (n > 1) {
 			/*
 			 * link all the segtabs in this page together
@@ -212,8 +267,14 @@ pmap_segtab_alloc(pmap_t pmap)
 			panic("pmap_create: pm_segtab.seg_tab[%zu] != 0");
 	}
 #endif
+	return stp;
+}
 
-	pmap->pm_segtab = stp;
+void
+pmap_segtab_init(pmap_t pmap)
+{
+
+	pmap->pm_segtab = pmap_segtab_alloc();
 }
 
 /*
@@ -222,40 +283,18 @@ pmap_segtab_alloc(pmap_t pmap)
  *	no valid mappings.
  */
 void
-pmap_segtab_free(pmap_t pmap)
+pmap_segtab_destroy(pmap_t pmap)
 {
 	struct pmap_segtab *stp = pmap->pm_segtab;
 
 	if (stp == NULL)
 		return;
 
-	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
-		paddr_t pa;
-		/* get pointer to segment map */
-		pt_entry_t *pte = stp->seg_tab[i];
-		if (pte == NULL)
-			continue;
-#ifdef PARANOIADIAG
-		for (size_t j = 0; j < NPTEPG; j++) {
-			if ((pte + j)->pt_entry)
-				panic("pmap_destroy: segmap not empty");
-		}
+#ifdef _LP64
+	pmap_segtab_release(stp, 1);
+#else
+	pmap_segtab_release(stp, 0);
 #endif
-
-		pa = POOL_VTOPHYS(pte);
-		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
-
-		stp->seg_tab[i] = NULL;
-	}
-
-	/*
-	 * Insert the the segtab into the segtab freelist.
-	 */
-	mutex_spin_enter(&pmap_segtab_lock);
-	stp->seg_tab[0] = (void *) pmap_segtab_info.free_segtab;
-	pmap_segtab_info.free_segtab = stp;
-	SEGTAB_ADD(nput, 1);
-	mutex_spin_exit(&pmap_segtab_lock);
 }
 
 /*
@@ -265,11 +304,17 @@ void
 pmap_segtab_activate(struct pmap *pm, struct lwp *l)
 {
 	if (l == curlwp) {
+		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
 		if (pm == pmap_kernel()) {
 			l->l_cpu->ci_pmap_user_segtab = (void*)0xdeadbabe;
+#ifdef _LP64
+			l->l_cpu->ci_pmap_user_seg0tab = (void*)0xdeadbabe;
+#endif
 		} else {
-			KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
 			l->l_cpu->ci_pmap_user_segtab = pm->pm_segtab;
+#ifdef _LP64
+			l->l_cpu->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
+#endif
 		}
 	}
 }
@@ -324,6 +369,25 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va
 
 	pte = pmap_pte_lookup(pmap, va);
 	if (__predict_false(pte == NULL)) {
+#ifdef _LP64
+		struct pmap_segtab ** const stp_p =
+		    &stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];  
+		if (__predict_false((stp = *stp_p) == NULL)) {  
+ 			struct pmap_segtab *nstp = pmap_segtab_alloc();
+#ifdef MULTIPROCESSOR 
+			struct pmap_segtab *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
+			if (__predict_false(ostp != NULL)) {
+				pmap_segtab_free(nstp);
+				nstp = ostp;
+			}
+#else   
+			*stp_p = nstp;
+#endif /* MULTIPROCESSOR */
+			stp = nstp;
+		}
+		KASSERT(stp == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]);
+#endif /* _LP64 */
 		struct vm_page * const pg = pmap_pte_pagealloc();
 		if (pg == NULL) {
 			if (flags & PMAP_CANFAIL)

Reply via email to