Module Name:	src
Committed By:	skrll
Date:		Sat Sep 2 12:24:39 UTC 2017
Modified Files:
	src/sys/arch/arm/arm32: pmap.c

Log Message:
Perform tracking of unmanaged mappings for VIVT and call vac_me_harder
as appropriate.

PR/52102 shark: ffs_newvnode panic when unpacking sets installing -current

Thanks to Felix Deichmann for bisecting the problem and testing the fix.


To generate a diff of this commit:
cvs rdiff -u -r1.355 -r1.356 src/sys/arch/arm/arm32/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
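For context on the log message: on a virtually-indexed, virtually-tagged
(VIVT) data cache, two simultaneous virtual mappings of one physical page
can hold incoherent cache lines, so the pmap must record every mapping of
a page, including the unmanaged ones created by pmap_kenter_pa(), before
the alias resolver pmap_vac_me_harder() can see all aliases and react.
The sketch below is a minimal standalone model of that bookkeeping, not
NetBSD code; every structure and function name in it is hypothetical.

#include <stdbool.h>
#include <stddef.h>

struct pv_entry {			/* one virtual mapping of a page */
	struct pv_entry *pv_next;
	unsigned long	 pv_va;
	bool		 pv_writable;
};

struct page_md {			/* per-physical-page metadata */
	struct pv_entry *pvh_list;
	bool		 pvh_uncached;
};

/* Record a mapping; called for managed AND unmanaged entries. */
static void
pv_track(struct page_md *md, struct pv_entry *pv, unsigned long va,
    bool writable)
{
	pv->pv_va = va;
	pv->pv_writable = writable;
	pv->pv_next = md->pvh_list;
	md->pvh_list = pv;
}

/*
 * Alias check: with more than one mapping and at least one writable,
 * a VIVT cache can hold stale aliased lines, so disable caching.
 * (The real pmap_vac_me_harder() also handles write-backs and
 * re-enables caching once the aliases go away.)
 */
static void
vac_fixup(struct page_md *md)
{
	size_t nmap = 0, nwrite = 0;

	for (struct pv_entry *pv = md->pvh_list; pv != NULL; pv = pv->pv_next) {
		nmap++;
		if (pv->pv_writable)
			nwrite++;
	}
	md->pvh_uncached = (nmap > 1 && nwrite > 0);
}

int
main(void)
{
	struct page_md md = { NULL, false };
	struct pv_entry a, b;

	pv_track(&md, &a, 0xc0001000UL, true);
	vac_fixup(&md);		/* single mapping: page stays cacheable */

	pv_track(&md, &b, 0xc0800000UL, false);
	vac_fixup(&md);		/* writable + read alias: mark uncacheable */

	return md.pvh_uncached ? 0 : 1;
}

The bug fixed here is the converse case: if unmanaged mappings are never
entered into the PV list on VIVT (as before this commit), the resolver
cannot see them and stale aliases survive.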
Modified files:

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.355 src/sys/arch/arm/arm32/pmap.c:1.356
--- src/sys/arch/arm/arm32/pmap.c:1.355	Sat Sep 2 11:57:09 2017
+++ src/sys/arch/arm/arm32/pmap.c	Sat Sep 2 12:24:39 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.355 2017/09/02 11:57:09 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.356 2017/09/02 12:24:39 skrll Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -217,7 +217,7 @@
 
 #include <arm/locore.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.355 2017/09/02 11:57:09 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.356 2017/09/02 12:24:39 skrll Exp $");
 
 //#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
@@ -3586,7 +3586,7 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 	pmap_release_pmap_lock(pm);
 }
 
-#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+#if !defined(ARM_MMU_EXTENDED)
 static struct pv_entry *
 pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
 {
@@ -3594,7 +3594,9 @@ pmap_kremove_pg(struct vm_page *pg, vadd
 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	struct pv_entry *pv;
 
+#ifdef PMAP_CACHE_VIPT
 	KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
+#endif
 	KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
 	KASSERT(pmap_page_locked_p(md));
 
@@ -3612,16 +3614,18 @@ pmap_kremove_pg(struct vm_page *pg, vadd
 		if (SLIST_EMPTY(&md->pvh_list)) {
 			md->pvh_attrs &= ~PVF_EXEC;
 			PMAPCOUNT(exec_discarded_kremove);
+#ifdef PMAP_CACHE_VIPT
 		} else {
 			pmap_syncicache_page(md, pa);
 			PMAPCOUNT(exec_synced_kremove);
+#endif
 		}
 	}
 	pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
 
 	return pv;
 }
-#endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
+#endif /* !ARM_MMU_EXTENDED */
 
 /*
  * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
@@ -3633,16 +3637,11 @@ pmap_kremove_pg(struct vm_page *pg, vadd
 void
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
-#ifdef PMAP_CACHE_VIVT
-	struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
-#endif
-#ifdef PMAP_CACHE_VIPT
 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 	struct vm_page *opg;
 #ifndef ARM_MMU_EXTENDED
 	struct pv_entry *pv = NULL;
 #endif
-#endif
 	struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
 
 	UVMHIST_FUNC(__func__);
@@ -3676,12 +3675,13 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 		l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
 	} else {
 		PMAPCOUNT(kenter_remappings);
-#ifdef PMAP_CACHE_VIPT
 		opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
-#if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC)
 		struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg);
+		if (opg
+#ifdef PMAP_CACHE_VIPT
+		    && arm_cache_prefer_mask != 0
 #endif
-		if (opg && arm_cache_prefer_mask != 0) {
+		    && true) {
 			KASSERT(opg != pg);
 			KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
 			KASSERT((flags & PMAP_KMPAGE) == 0);
@@ -3691,7 +3691,6 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 			pmap_release_page_lock(omd);
 #endif
 		}
-#endif
 		if (l2pte_valid_p(opte)) {
 			l2pte_reset(ptep);
 			PTE_SYNC(ptep);
@@ -3750,8 +3749,14 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 			md->pvh_attrs |= PVF_KMPAGE;
 #endif
 		atomic_inc_32(&pmap_kmpages);
+	} else if (false
 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
-	} else if (arm_cache_prefer_mask != 0) {
+	    || arm_cache_prefer_mask != 0
+#elif defined(PMAP_CACHE_VIVT)
+	    || true
+#endif
+	    || false) {
+#if !defined(ARM_MMU_EXTENDED)
 		if (pv == NULL) {
 			pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
 			KASSERT(pv != NULL);
@@ -3768,13 +3773,13 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 			pmap_release_page_lock(md);
 #endif
 		}
-#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+#if !defined(ARM_MMU_EXTENDED)
 	} else {
 		if (pv != NULL)
 			pool_put(&pmap_pv_pool, pv);
 #endif
 	}
-#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
+#if !defined(ARM_MMU_EXTENDED)
 	KASSERT(md == NULL || !pmap_page_locked_p(md));
 #endif
 	if (pmap_initialized) {
@@ -3832,8 +3837,14 @@ pmap_kremove(vaddr_t va, vsize_t len)
 			}
 #endif
 			atomic_dec_32(&pmap_kmpages);
+		} else if (false
 #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
-		} else if (arm_cache_prefer_mask != 0) {
+		    || arm_cache_prefer_mask != 0
+#elif defined(PMAP_CACHE_VIVT)
+		    || true
+#endif
+		    || false) {
+#if !defined(ARM_MMU_EXTENDED)
 			pmap_acquire_page_lock(omd);
 			pool_put(&pmap_pv_pool, pmap_kremove_pg(opg, va));
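A note on the "} else if (false ... || false) {" construction in the last
two hunks: it is a preprocessor idiom that lets each cache configuration
contribute an optional "|| <condition>" term without unbalancing the
braces, and the constant "false" bookends are folded away by the compiler.
A minimal standalone demonstration follows; the configuration macros and
arm_cache_prefer_mask are taken from the diff, everything else (including
main) is hypothetical scaffolding.

#include <stdbool.h>
#include <stdio.h>

/* Choose one cache model, as a kernel configuration would. */
#define PMAP_CACHE_VIVT
/* #define PMAP_CACHE_VIPT */
/* #define ARM_MMU_EXTENDED */

static unsigned arm_cache_prefer_mask;	/* placeholder for the real global */

int
main(void)
{
	(void)arm_cache_prefer_mask;	/* silence unused warning on VIVT */

	if (0) {
		/* the PMAP_KMPAGE branch in the real code */
	} else if (false
#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
	    || arm_cache_prefer_mask != 0	/* VIPT: track only if aliasing possible */
#elif defined(PMAP_CACHE_VIVT)
	    || true				/* VIVT: always track the mapping */
#endif
	    || false) {
		printf("mapping would be tracked in the PV list\n");
	} else {
		printf("mapping would not be tracked\n");
	}
	return 0;
}

Under PMAP_CACHE_VIVT the condition reduces to "false || true || false",
which is how this commit makes pmap_kenter_pa() and pmap_kremove() feed
unmanaged mappings into the PV bookkeeping on VIVT as well.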