Module Name:	src
Committed By:	matt
Date:		Mon Jul 9 17:45:23 UTC 2012
Modified Files:
	src/sys/arch/powerpc/booke: booke_pmap.c trap.c
	src/sys/arch/powerpc/include: cpu.h
	src/sys/common/pmap/tlb: pmap.c pmap.h pmap_segtab.c

Log Message:
More cleanup.  Use a union (pmap_segtab) and a typedef (pmap_segtab_t).
Add more functionality from the mips pmap and try to make it more common
to ease the transition for mips in the future.

To generate a diff of this commit:
cvs rdiff -u -r1.14 -r1.15 src/sys/arch/powerpc/booke/booke_pmap.c
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/powerpc/booke/trap.c
cvs rdiff -u -r1.91 -r1.92 src/sys/arch/powerpc/include/cpu.h
cvs rdiff -u -r1.15 -r1.16 src/sys/common/pmap/tlb/pmap.c \
    src/sys/common/pmap/tlb/pmap.h
cvs rdiff -u -r1.7 -r1.8 src/sys/common/pmap/tlb/pmap_segtab.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
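The heart of the change is the new segment-table type: the old struct wrapped a
single array of void pointers, while the union lets an upper level of the tree
be indexed through correctly typed pointers to further segment tables (seg_seg)
and the leaf level through pointers to pages of PTEs (seg_tab), with no casts.
A minimal stand-alone sketch of the idea follows; it is not code from the
commit, only the member names mirror the diff, and pt_entry_t, the table size,
and the demo walk are invented for illustration:

/*
 * Illustrative only.  One page-sized union serves as either level of
 * the two-level segment-table tree.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t pt_entry_t;	/* stand-in for the real PTE type */
#define PMAP_SEGTABSIZE	4	/* tiny; the real table fills a page */

typedef union pmap_segtab {
	union pmap_segtab *seg_seg[PMAP_SEGTABSIZE]; /* upper level */
	pt_entry_t *seg_tab[PMAP_SEGTABSIZE];	/* leaf level: PTE pages */
} pmap_segtab_t;

int
main(void)
{
	static pt_entry_t ptes[PMAP_SEGTABSIZE];	/* fake PTE page */
	static pmap_segtab_t leaf, root;

	leaf.seg_tab[0] = ptes;		/* leaf slot -> page of PTEs */
	root.seg_seg[0] = &leaf;	/* upper slot -> leaf segtab */

	/* Two-step walk, as pmap_segmap() does under _LP64. */
	pt_entry_t *pte = root.seg_seg[0]->seg_tab[0];
	printf("pte[0] = %#x\n", (unsigned)pte[0]);
	return 0;
}

The same union also gives the segtab freelist a typed link field: as the diff
shows, a free segtab now chains through seg_seg[0] instead of casting through
void *.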
Modified files:

Index: src/sys/arch/powerpc/booke/booke_pmap.c
diff -u src/sys/arch/powerpc/booke/booke_pmap.c:1.14 src/sys/arch/powerpc/booke/booke_pmap.c:1.15
--- src/sys/arch/powerpc/booke/booke_pmap.c:1.14	Thu Jul 5 17:25:36 2012
+++ src/sys/arch/powerpc/booke/booke_pmap.c	Mon Jul 9 17:45:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: booke_pmap.c,v 1.14 2012/07/05 17:25:36 matt Exp $	*/
+/*	$NetBSD: booke_pmap.c,v 1.15 2012/07/09 17:45:22 matt Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -38,7 +38,7 @@

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.14 2012/07/05 17:25:36 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.15 2012/07/09 17:45:22 matt Exp $");

 #include <sys/param.h>
 #include <sys/kcore.h>
@@ -57,9 +57,9 @@ __KERNEL_RCSID(0, "$NetBSD: booke_pmap.c
 #define PMAP_SIZE	sizeof(struct pmap)
 #endif

-CTASSERT(sizeof(struct pmap_segtab) == NBPG);
+CTASSERT(sizeof(pmap_segtab_t) == NBPG);

-struct pmap_segtab pmap_kernel_segtab;
+pmap_segtab_t pmap_kernel_segtab;

 void
 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
@@ -124,9 +124,9 @@ pmap_md_direct_mapped_vaddr_to_paddr(vad

 #ifdef PMAP_MINIMALTLB
 static pt_entry_t *
-kvtopte(const struct pmap_segtab *stp, vaddr_t va)
+kvtopte(const pmap_segtab_t *stp, vaddr_t va)
 {
-	pt_entry_t * const ptep = stp->seg_ptr[va >> SEGSHIFT];
+	pt_entry_t * const ptep = stp->seg_tab[va >> SEGSHIFT];
 	if (ptep == NULL)
 		return NULL;
 	return &ptep[(va & SEGOFSET) >> PAGE_SHIFT];
@@ -135,7 +135,7 @@ kvtopte(const struct pmap_segtab *stp, v
 vaddr_t
 pmap_kvptefill(vaddr_t sva, vaddr_t eva, pt_entry_t pt_entry)
 {
-	const struct pmap_segtab * const stp = pmap_kernel()->pm_segtab;
+	const pmap_segtab_t * const stp = pmap_kernel()->pm_segtab;
 	KASSERT(sva == trunc_page(sva));
 	pt_entry_t *ptep = kvtopte(stp, sva);
 	for (; sva < eva; sva += NBPG) {
@@ -153,7 +153,7 @@ vaddr_t
 pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	phys_ram_seg_t *avail, size_t cnt)
 {
-	struct pmap_segtab * const stp = &pmap_kernel_segtab;
+	pmap_segtab_t * const stp = &pmap_kernel_segtab;

	/*
	 * Initialize the kernel segment table.
@@ -226,7 +226,7 @@ pmap_bootstrap(vaddr_t startkernel, vadd
	 * an extra page for the segment table and allows the user/kernel
	 * access to be common.
	 */
-	void **ptp = &stp->seg_ptr[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
+	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	pt_entry_t *ptep = (void *)kv_segtabs;
	memset(ptep, 0, NBPG * kv_nsegtabs);
	for (size_t i = 0; i < kv_nsegtabs; i++, ptep += NPTEPG) {
@@ -244,7 +244,7 @@ pmap_bootstrap(vaddr_t startkernel, vadd
	avail[0].size -= NBPG * dm_nsegtabs;
	endkernel += NBPG * dm_nsegtabs;

-	ptp = stp->seg_ptr;
+	ptp = stp->seg_tab;
	ptep = (void *)dm_segtabs;
	memset(ptep, 0, NBPG * dm_nsegtabs);
	for (size_t i = 0; i < dm_nsegtabs; i++, ptp++, ptep += NPTEPG) {

Index: src/sys/arch/powerpc/booke/trap.c
diff -u src/sys/arch/powerpc/booke/trap.c:1.16 src/sys/arch/powerpc/booke/trap.c:1.17
--- src/sys/arch/powerpc/booke/trap.c:1.16	Thu Jul 5 17:25:36 2012
+++ src/sys/arch/powerpc/booke/trap.c	Mon Jul 9 17:45:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: trap.c,v 1.16 2012/07/05 17:25:36 matt Exp $	*/
+/*	$NetBSD: trap.c,v 1.17 2012/07/09 17:45:22 matt Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -38,7 +38,7 @@

 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.16 2012/07/05 17:25:36 matt Exp $");
+__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.17 2012/07/09 17:45:22 matt Exp $");

 #include <sys/param.h>
 #include <sys/systm.h>
@@ -138,17 +138,17 @@ get_faultmap(const struct trapframe * co
 }

 /*
- * We could use pmap_pte_lookip but this slightly faster since we already
+ * We could use pmap_pte_lookup but this slightly faster since we already
  * the segtab pointers in cpu_info.
  */
 static inline pt_entry_t *
 trap_pte_lookup(struct trapframe *tf, vaddr_t va, register_t psl_mask)
 {
-	struct pmap_segtab ** const stps = &curcpu()->ci_pmap_kern_segtab;
-	struct pmap_segtab * const stp = stps[(tf->tf_srr1 / psl_mask) & 1];
+	pmap_segtab_t ** const stps = &curcpu()->ci_pmap_kern_segtab;
+	pmap_segtab_t * const stp = stps[(tf->tf_srr1 / psl_mask) & 1];
	if (__predict_false(stp == NULL))
		return NULL;
-	pt_entry_t * const ptep = stp->seg_ptr[va >> SEGSHIFT];
+	pt_entry_t * const ptep = stp->seg_tab[va >> SEGSHIFT];
	if (__predict_false(ptep == NULL))
		return NULL;
	return ptep + ((va & SEGOFSET) >> PAGE_SHIFT);

Index: src/sys/arch/powerpc/include/cpu.h
diff -u src/sys/arch/powerpc/include/cpu.h:1.91 src/sys/arch/powerpc/include/cpu.h:1.92
--- src/sys/arch/powerpc/include/cpu.h:1.91	Tue Apr 10 16:57:50 2012
+++ src/sys/arch/powerpc/include/cpu.h	Mon Jul 9 17:45:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.91 2012/04/10 16:57:50 matt Exp $	*/
+/*	$NetBSD: cpu.h,v 1.92 2012/07/09 17:45:22 matt Exp $	*/
 /*
  * Copyright (C) 1999 Wolfgang Solfrank.
@@ -114,7 +114,7 @@ struct cpu_info {
	register_t ci_savearea[CPUSAVE_SIZE];
 #if defined(PPC_BOOKE) || defined(MODULAR) || defined(_MODULE)
	uint32_t ci_pmap_asid_cur;
-	struct pmap_segtab *ci_pmap_segtabs[2];
+	union pmap_segtab *ci_pmap_segtabs[2];
 #define	ci_pmap_kern_segtab	ci_pmap_segtabs[0]
 #define	ci_pmap_user_segtab	ci_pmap_segtabs[1]
	struct pmap_tlb_info *ci_tlb_info;

Index: src/sys/common/pmap/tlb/pmap.c
diff -u src/sys/common/pmap/tlb/pmap.c:1.15 src/sys/common/pmap/tlb/pmap.c:1.16
--- src/sys/common/pmap/tlb/pmap.c:1.15	Thu Jul 5 16:55:11 2012
+++ src/sys/common/pmap/tlb/pmap.c	Mon Jul 9 17:45:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.15 2012/07/05 16:55:11 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.16 2012/07/09 17:45:22 matt Exp $	*/

 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.15 2012/07/05 16:55:11 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.16 2012/07/09 17:45:22 matt Exp $");

 /*
  * Manages physical address maps.
@@ -530,7 +530,7 @@ pmap_destroy(pmap_t pmap)
	PMAP_COUNT(destroy);

	kpreempt_disable();
	pmap_tlb_asid_release_all(pmap);
-	pmap_segtab_destroy(pmap);
+	pmap_segtab_destroy(pmap, NULL, 0);
	pool_put(&pmap_pmap_pool, pmap);

	kpreempt_enable();

Index: src/sys/common/pmap/tlb/pmap.h
diff -u src/sys/common/pmap/tlb/pmap.h:1.15 src/sys/common/pmap/tlb/pmap.h:1.16
--- src/sys/common/pmap/tlb/pmap.h:1.15	Thu Jul 5 17:24:54 2012
+++ src/sys/common/pmap/tlb/pmap.h	Mon Jul 9 17:45:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.15 2012/07/05 17:24:54 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.16 2012/07/09 17:45:22 matt Exp $	*/

 /*
  * Copyright (c) 1992, 1993
@@ -94,9 +94,10 @@ UVMHIST_DECL(pmaphist);

 /*
  * Each seg_tab point an array of pt_entry [NPTEPG]
  */
-struct pmap_segtab {
-	void	*seg_ptr[PMAP_SEGTABSIZE];
-};
+typedef union pmap_segtab {
+	union pmap_segtab *	seg_seg[PMAP_SEGTABSIZE];
+	pt_entry_t *		seg_tab[PMAP_SEGTABSIZE];
+} pmap_segtab_t;

 #ifdef _KERNEL
 struct pmap;
@@ -108,7 +109,7 @@ void pmap_pte_process(struct pmap *, vad
	uintptr_t);
 void pmap_segtab_activate(struct pmap *, struct lwp *);
 void pmap_segtab_init(struct pmap *);
-void pmap_segtab_destroy(struct pmap *);
+void pmap_segtab_destroy(struct pmap *, pte_callback_t, uintptr_t);
 extern kmutex_t pmap_segtab_lock;
 #endif /* _KERNEL */
@@ -137,7 +138,7 @@ struct pmap {
	__cpuset_t pm_onproc;		/* pmap is active on ... */
	volatile u_int pm_shootdown_pending;
 #endif
-	struct pmap_segtab *pm_segtab;	/* pointers to pages of PTEs */
+	pmap_segtab_t *	pm_segtab;	/* pointers to pages of PTEs */
	u_int pm_count;			/* pmap reference count */
	u_int pm_flags;
 #define	PMAP_DEFERRED_ACTIVATE	0x0001

Index: src/sys/common/pmap/tlb/pmap_segtab.c
diff -u src/sys/common/pmap/tlb/pmap_segtab.c:1.7 src/sys/common/pmap/tlb/pmap_segtab.c:1.8
--- src/sys/common/pmap/tlb/pmap_segtab.c:1.7	Thu Jul 5 17:24:54 2012
+++ src/sys/common/pmap/tlb/pmap_segtab.c	Mon Jul 9 17:45:22 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_segtab.c,v 1.7 2012/07/05 17:24:54 matt Exp $	*/
+/*	$NetBSD: pmap_segtab.c,v 1.8 2012/07/09 17:45:22 matt Exp $	*/

 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.7 2012/07/05 17:24:54 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.8 2012/07/09 17:45:22 matt Exp $");

 /*
  * Manages physical address maps.
@@ -107,10 +107,10 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.

 #include <uvm/uvm.h>

-CTASSERT(NBPG >= sizeof(struct pmap_segtab));
+CTASSERT(NBPG >= sizeof(pmap_segtab_t));

 struct pmap_segtab_info {
-	struct pmap_segtab *free_segtab;	/* free list kept locally */
+	pmap_segtab_t *free_segtab;		/* free list kept locally */
 #ifdef DEBUG
	uint32_t nget_segtab;
	uint32_t nput_segtab;
@@ -119,27 +119,45 @@ struct pmap_segtab_info {
 #else
 #define	SEGTAB_ADD(n, v)	((void) 0)
 #endif
-} pmap_segtab_info;
+#ifdef PMAP_PTP_CACHE
+	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
+#endif
+} pmap_segtab_info = {
+#ifdef PMAP_PTP_CACHE
+	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
+#endif
+};

 kmutex_t pmap_segtab_lock __cacheline_aligned;

 static inline struct vm_page *
 pmap_pte_pagealloc(void)
 {
-	return pmap_md_alloc_poolpage(UVM_PGA_ZERO|UVM_PGA_USERESERVE);
+	struct vm_page *pg;
+
+	pg = pmap_md_alloc_poolpage(UVM_PGA_ZERO|UVM_PGA_USERESERVE);
+	if (pg) {
+#ifdef UVM_PAGE_TRKOWN
+		pg->owner_tag = NULL;
+#endif
+		UVM_PAGE_OWN(pg, "pmap-ptp");
+	}
+
+	return pg;
 }

-static inline pt_entry_t * 
+static inline pt_entry_t *
 pmap_segmap(struct pmap *pmap, vaddr_t va)
 {
-	struct pmap_segtab *stp = pmap->pm_segtab;
+	pmap_segtab_t *stp = pmap->pm_segtab;
	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
 #ifdef _LP64
-	stp = stp->seg_ptr[(va >> XSEGSHIFT) & (NSEGPG - 1)];
+	stp = stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
	if (stp == NULL)
		return NULL;
 #endif
-	return stp->seg_ptr[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
+
+	return stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
 }

 pt_entry_t *
@@ -153,52 +171,74 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va)
 }

 static void
-pmap_segtab_free(struct pmap_segtab *stp)
+pmap_segtab_free(pmap_segtab_t *stp)
 {
	/*
	 * Insert the the segtab into the segtab freelist.
	 */
	mutex_spin_enter(&pmap_segtab_lock);
-	stp->seg_ptr[0] = pmap_segtab_info.free_segtab;
+	stp->seg_seg[0] = pmap_segtab_info.free_segtab;
	pmap_segtab_info.free_segtab = stp;
	SEGTAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
 }

 static void
-pmap_segtab_release(struct pmap_segtab *stp, u_int level)
+pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp,
+	pte_callback_t callback, uintptr_t flags,
+	vaddr_t va, vsize_t vinc)
 {
+	pmap_segtab_t *stp = *stp_p;

-	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
-		paddr_t pa;
+	for (size_t i = va / vinc; i < PMAP_SEGTABSIZE; i++, va += vinc) {
 #ifdef _LP64
-		if (level > 0) {
-			if (stp->seg_ptr[i] != NULL) {
-				pmap_segtab_release(stp->seg_ptr[i], level - 1);
-				stp->seg_ptr[i] = NULL;
+		if (vinc > NBSEG) {
+			if (stp->seg_seg[i] != NULL) {
+				pmap_segtab_release(pmap, &stp->seg_seg[i],
+				    true, callback, flags, va, vinc / NSEGPG);
+				KASSERT(stp->seg_seg[i] == NULL);
			}
			continue;
		}
 #endif
+		KASSERT(vinc == NBSEG);

		/* get pointer to segment map */
-		pt_entry_t *pte = stp->seg_ptr[i];
+		pt_entry_t *pte = stp->seg_tab[i];
		if (pte == NULL)
			continue;
-#ifdef PARANOIADIAG
+
+		/*
+		 * If our caller want a callback, do so.
+		 */
+		if (callback != NULL) {
+			(*callback)(pmap, va, va + vinc, pte, flags);
+		}
+#ifdef DEBUG
		for (size_t j = 0; j < NPTEPG; j++) {
-			if ((pte + j)->pt_entry)
-				panic("pmap_destroy: segmap not empty");
+			if (pte[j])
+				panic("%s: pte entry %p not 0 (%#x)",
+				    __func__, &pte[j], pte[j]);
		}
 #endif
+		paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
+		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+		pmap_md_vca_clean(pg, (vaddr_t)pte, 0);
+#ifdef PMAP_PTP_CACHE
+		mutex_spin_enter(&pmap_segtab_lock);
+		LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, listq.list);
+		mutex_spin_exit(&pmap_segtab_lock);
+#else
+		uvm_pagefree(pg);
+#endif

-		pa = POOL_VTOPHYS(pte);
-		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
-
-		stp->seg_ptr[i] = NULL;
+		stp->seg_tab[i] = NULL;
	}

-	pmap_segtab_free(stp);
+	if (free_stp) {
+		pmap_segtab_free(stp);
+		*stp_p = NULL;
+	}
 }

 /*
@@ -213,16 +253,16 @@ pmap_segtab_release(struct pmap_segtab *
  *	the map will be used in software only, and
  *	is bounded by that size.
  */
-static struct pmap_segtab *
+static pmap_segtab_t *
 pmap_segtab_alloc(void)
 {
-	struct pmap_segtab *stp;
+	pmap_segtab_t *stp;

 again:
	mutex_spin_enter(&pmap_segtab_lock);
	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
-		pmap_segtab_info.free_segtab = stp->seg_ptr[0];
-		stp->seg_ptr[0] = NULL;
+		pmap_segtab_info.free_segtab = stp->seg_seg[0];
+		stp->seg_seg[0] = NULL;
		SEGTAB_ADD(nget, 1);
	}
	mutex_spin_exit(&pmap_segtab_lock);
@@ -240,20 +280,20 @@ pmap_segtab_alloc(void)
		SEGTAB_ADD(npage, 1);

		const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg);

-		stp = (struct pmap_segtab *)POOL_PHYSTOV(stp_pa);
+		stp = (pmap_segtab_t *)PMAP_MAP_POOLPAGE(stp_pa);
		const size_t n = NBPG / sizeof(*stp);
		if (n > 1) {
			/*
			 * link all the segtabs in this page together
			 */
			for (size_t i = 1; i < n - 1; i++) {
-				stp[i].seg_ptr[0] = (void *)&stp[i+1];
+				stp[i].seg_seg[0] = &stp[i+1];
			}
			/*
			 * Now link the new segtabs into the free segtab list.
			 */
			mutex_spin_enter(&pmap_segtab_lock);
-			stp[n-1].seg_ptr[0] = pmap_segtab_info.free_segtab;
+			stp[n-1].seg_seg[0] = pmap_segtab_info.free_segtab;
			pmap_segtab_info.free_segtab = stp + 1;
			SEGTAB_ADD(nput, n - 1);
			mutex_spin_exit(&pmap_segtab_lock);
@@ -262,13 +302,16 @@

 #ifdef PARANOIADIAG
	for (i = 0; i < PMAP_SEGTABSIZE; i++) {
-		if (stp->seg_ptr[i] != 0)
-			panic("pmap_create: pm_segtab.seg_ptr[%zu] != 0");
+		if (stp->seg_tab[i] != 0)
+			panic("pmap_create: pm_segtab.seg_tab[%zu] != 0");
	}
 #endif

	return stp;
 }

+/*
+ * Allocate the top segment table for the pmap.
+ */
 void
 pmap_segtab_init(pmap_t pmap)
 {
@@ -282,18 +325,18 @@ pmap_segtab_init(pmap_t pmap)
  * no valid mappings.
  */
 void
-pmap_segtab_destroy(pmap_t pmap)
+pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
 {
-	struct pmap_segtab *stp = pmap->pm_segtab;
-
-	if (stp == NULL)
+	if (pmap->pm_segtab == NULL)
		return;

 #ifdef _LP64
-	pmap_segtab_release(stp, 1);
+	const vsize_t vinc = NBXSEG;
 #else
-	pmap_segtab_release(stp, 0);
+	const vsize_t vinc = NBSEG;
 #endif
+	pmap_segtab_release(pmap, &pmap->pm_segtab,
+	    func == NULL, func, flags, pmap->pm_minaddr, vinc);
 }

 /*
@@ -312,7 +355,7 @@ pmap_segtab_activate(struct pmap *pm, st
	} else {
		l->l_cpu->ci_pmap_user_segtab = pm->pm_segtab;
 #ifdef _LP64
-		l->l_cpu->ci_pmap_user_seg0tab = pm->pm_segtab->seg_ptr[0];
+		l->l_cpu->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
 #endif
	}
 }
@@ -363,31 +406,40 @@ pmap_pte_process(pmap_t pmap, vaddr_t sv
 pt_entry_t *
 pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
 {
-	struct pmap_segtab *stp = pmap->pm_segtab;
+	pmap_segtab_t *stp = pmap->pm_segtab;
	pt_entry_t *pte;

	pte = pmap_pte_lookup(pmap, va);
	if (__predict_false(pte == NULL)) {
 #ifdef _LP64
-		void ** const stp_p =
-		    &stp->seg_ptr[(va >> XSEGSHIFT) & (NSEGPG - 1)];
-		if (__predict_false((stp = *stp_p) == NULL)) {
-			struct pmap_segtab *nstp = pmap_segtab_alloc();
-#ifdef MULTIPROCESSOR
-			struct pmap_segtab *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
+		pmap_segtab_t ** const stp_p =
+		    &stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
+		if (__predict_false((stp = *stp_p) == NULL)) {
+			pmap_segtab_t *nstp = pmap_segtab_alloc();
+#ifdef MULTIPROCESSOR
+			pmap_segtab_t *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
			if (__predict_false(ostp != NULL)) {
				pmap_segtab_free(nstp);
				nstp = ostp;
			}
-#else
+#else
			*stp_p = nstp;
 #endif /* MULTIPROCESSOR */
			stp = nstp;
		}
-		KASSERT(stp == pmap->pm_segtab->seg_ptr[(va >> XSEGSHIFT) & (NSE
-GPG - 1)]);
+		KASSERT(stp == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]);
 #endif /* _LP64 */

-		struct vm_page * const pg = pmap_pte_pagealloc();
+		struct vm_page *pg = NULL;
+#ifdef PMAP_PTP_CACHE
+		mutex_spin_enter(&pmap_segtab_lock);
+		if ((pg = LIST_FIRST(&pmap_segtab_info.ptp_pgflist)) != NULL) {
+			LIST_REMOVE(pg, listq.list);
+			KASSERT(LIST_FIRST(&pmap_segtab_info.ptp_pgflist) != pg);
+		}
+		mutex_spin_exit(&pmap_segtab_lock);
+#endif
+		if (pg == NULL)
+			pg = pmap_pte_pagealloc();
		if (pg == NULL) {
			if (flags & PMAP_CANFAIL)
				return NULL;
@@ -397,20 +449,29 @@ GPG - 1)]);
		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
		pte = (pt_entry_t *)POOL_PHYSTOV(pa);
+		pt_entry_t ** const pte_p =
+		    &stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
 #ifdef MULTIPROCESSOR
-		pt_entry_t *opte = atomic_cas_ptr(
-		    &stp->seg_ptr[va >> SEGSHIFT], NULL, pte);
+		pt_entry_t *opte = atomic_cas_ptr(pte_p, NULL, pte);
		/*
		 * If another thread allocated the segtab needed for this va
		 * free the page we just allocated.
		 */
		if (__predict_false(opte != NULL)) {
+#ifdef PMAP_PTP_CACHE
+			mutex_spin_enter(&pmap_segtab_lock);
+			LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist,
+			    pg, listq.list);
+			mutex_spin_exit(&pmap_segtab_lock);
+#else
			uvm_pagefree(pg);
+#endif
			pte = opte;
		}
 #else
-		stp->seg_ptr[va >> SEGSHIFT] = pte;
+		*pte_p = pte;
 #endif
+		KASSERT(pte == stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]);

		pte += (va >> PGSHIFT) & (NPTEPG - 1);
 #ifdef PARANOIADIAG
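For readers following the PMAP_PTP_CACHE paths in the diff above: freed pages
of PTEs are stashed on a locked freelist in pmap_segtab_release() and handed
back out by pmap_pte_reserve() before falling back to the page allocator.
A rough stand-alone sketch of that pattern follows; it is not code from the
commit, and struct ptp, ptp_lock, and calloc() are invented stand-ins for
struct vm_page, pmap_segtab_lock, and pmap_pte_pagealloc():

#include <stdlib.h>
#include <pthread.h>

struct ptp {				/* stand-in for struct vm_page */
	struct ptp *next;
	unsigned char page[4096];	/* the PTE page itself (cf. NBPG) */
};

static struct ptp *ptp_freelist;	/* cf. pmap_segtab_info.ptp_pgflist */
static pthread_mutex_t ptp_lock = PTHREAD_MUTEX_INITIALIZER;

/* cf. pmap_pte_reserve(): try the cache first, then allocate fresh. */
struct ptp *
ptp_get(void)
{
	pthread_mutex_lock(&ptp_lock);
	struct ptp *p = ptp_freelist;
	if (p != NULL)
		ptp_freelist = p->next;
	pthread_mutex_unlock(&ptp_lock);
	if (p == NULL)
		p = calloc(1, sizeof(*p));	/* zeroed, like UVM_PGA_ZERO */
	return p;
}

/* cf. pmap_segtab_release() with PMAP_PTP_CACHE: cache instead of freeing. */
void
ptp_put(struct ptp *p)
{
	/* Cached pages are expected to be empty (all PTEs zero). */
	pthread_mutex_lock(&ptp_lock);
	p->next = ptp_freelist;
	ptp_freelist = p;
	pthread_mutex_unlock(&ptp_lock);
}

Keeping already-zeroed page-table pages on a private list avoids a round trip
through the page allocator on the common map/unmap cycle; the DEBUG loop in
pmap_segtab_release() is what lets cached pages be reused without re-zeroing.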