Module Name:    src
Committed By:   ryo
Date:           Mon Feb  3 13:35:44 UTC 2020

Modified Files:
        src/sys/arch/aarch64/aarch64: pmap.c
        src/sys/arch/aarch64/include: pmap.h

Log Message:
separate struct vm_page_md into vm_page_md and pmap_page,
in preparation for pmap_pv(9)
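
In essence, the pv-tracking state (the pv lock, the pv list, and the
referenced/modified flags) moves out of the machine-dependent struct
vm_page_md into a new struct pmap_page, which vm_page_md now embeds.
pmap_pv(9) tracks mappings of unmanaged pages that have no struct vm_page
(device memory, for example), so this state needs to live in a structure
that can exist without one. A condensed view of the resulting layout,
taken from the pmap.h hunk below:

	struct pmap_page {
		kmutex_t pp_pvlock;
		TAILQ_HEAD(, pv_entry) pp_pvhead;
		/* VM_PROT_READ means referenced, VM_PROT_WRITE modified */
		uint32_t pp_flags;
	};

	struct vm_page_md {
		TAILQ_ENTRY(vm_page) mdpg_vmlist; /* L[0123] table vm_page list */
		pd_entry_t *mdpg_ptep_parent;	/* page descriptor pages only */
		struct pmap_page mdpg_pp;	/* pv state now lives here */
	};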


To generate a diff of this commit:
cvs rdiff -u -r1.61 -r1.62 src/sys/arch/aarch64/aarch64/pmap.c
cvs rdiff -u -r1.31 -r1.32 src/sys/arch/aarch64/include/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/pmap.c
diff -u src/sys/arch/aarch64/aarch64/pmap.c:1.61 src/sys/arch/aarch64/aarch64/pmap.c:1.62
--- src/sys/arch/aarch64/aarch64/pmap.c:1.61	Thu Jan  9 01:38:34 2020
+++ src/sys/arch/aarch64/aarch64/pmap.c	Mon Feb  3 13:35:44 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.61 2020/01/09 01:38:34 ryo Exp $	*/
+/*	$NetBSD: pmap.c,v 1.62 2020/02/03 13:35:44 ryo Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <[email protected]>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.61 2020/01/09 01:38:34 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.62 2020/02/03 13:35:44 ryo Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
@@ -183,6 +183,8 @@ PMAP_COUNTER(unwire_failure, "pmap_unwir
 		cpu_icache_sync_range((va), PAGE_SIZE);			\
 	} while (0/*CONSTCOND*/)
 
+#define VM_PAGE_TO_PP(pg)	(&(pg)->mdpage.mdpg_pp)
+
 struct pv_entry {
 	TAILQ_ENTRY(pv_entry) pv_link;
 	struct pmap *pv_pmap;
@@ -217,17 +219,17 @@ static struct pool_cache _pmap_pv_pool;
 
 
 static inline void
-pmap_pv_lock(struct vm_page_md *md)
+pmap_pv_lock(struct pmap_page *pp)
 {
 
-	mutex_enter(&md->mdpg_pvlock);
+	mutex_enter(&pp->pp_pvlock);
 }
 
 static inline void
-pmap_pv_unlock(struct vm_page_md *md)
+pmap_pv_unlock(struct pmap_page *pp)
 {
 
-	mutex_exit(&md->mdpg_pvlock);
+	mutex_exit(&pp->pp_pvlock);
 }
 
 
@@ -243,6 +245,18 @@ pm_unlock(struct pmap *pm)
 	mutex_exit(&pm->pm_lock);
 }
 
+static inline struct pmap_page *
+phys_to_pp(paddr_t pa)
+{
+	struct vm_page *pg;
+
+	pg = PHYS_TO_VM_PAGE(pa);
+	if (pg != NULL)
+		return VM_PAGE_TO_PP(pg);
+
+	return NULL;
+}
+
 #define IN_RANGE(va,sta,end)	(((sta) <= (va)) && ((va) < (end)))
 
 #define IN_KSEG_ADDR(va)	\
@@ -512,7 +526,7 @@ pmap_init(void)
 	    0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL);
 
 	/*
-	 * initialize vm_page_md:mdpg_pvlock at this time.
+	 * initialize the mutex in vm_page_md at this time.
 	 * When LOCKDEBUG, mutex_init() calls km_alloc,
 	 * but VM_MDPAGE_INIT() is called before initialized kmem_vm_arena.
 	 */
@@ -524,7 +538,7 @@ pmap_init(void)
 		     pfn++) {
 			pg = PHYS_TO_VM_PAGE(ptoa(pfn));
 			md = VM_PAGE_TO_MD(pg);
-			mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM);
+			mutex_init(&md->mdpg_pp.pp_pvlock, MUTEX_SPIN, IPL_VM);
 		}
 	}
 }
@@ -994,22 +1008,19 @@ _pmap_pte_adjust_cacheflags(pt_entry_t p
 }
 
 static struct pv_entry *
-_pmap_remove_pv(struct vm_page *pg, struct pmap *pm, vaddr_t va, pt_entry_t pte)
+_pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va, pt_entry_t pte)
 {
-	struct vm_page_md *md;
 	struct pv_entry *pv;
 
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
-	UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pte=%llx",
-	    pg, pm, va, pte);
-
-	md = VM_PAGE_TO_MD(pg);
+	UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx",
+	    pp, pm, va, pte);
 
-	TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+	TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
 		if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
-			TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
+			TAILQ_REMOVE(&pp->pp_pvhead, pv, pv_link);
 			PMAP_COUNT(pv_remove);
 			break;
 		}
@@ -1061,18 +1072,18 @@ pg_dump(struct vm_page *pg, void (*pr)(c
 }
 
 static void
-pv_dump(struct vm_page_md *md, void (*pr)(const char *, ...) __printflike(1, 2))
+pv_dump(struct pmap_page *pp, void (*pr)(const char *, ...) __printflike(1, 2))
 {
 	struct pv_entry *pv;
 	int i;
 
 	i = 0;
 
-	pr("md=%p\n", md);
-	pr(" md->mdpg_flags=%08x %s\n", md->mdpg_flags,
-	    str_vmflags(md->mdpg_flags));
+	pr("pp=%p\n", pp);
+	pr(" pp->pp_flags=%08x %s\n", pp->pp_flags,
+	    str_vmflags(pp->pp_flags));
 
-	TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+	TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
 		pr("  pv[%d] pv=%p\n",
 		    i, pv);
 		pr("    pv[%d].pv_pmap = %p (asid=%d)\n",
@@ -1089,22 +1100,19 @@ pv_dump(struct vm_page_md *md, void (*pr
 #endif /* PMAP_PV_DEBUG & DDB */
 
 static int
-_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp,
+_pmap_enter_pv(struct pmap_page *pp, struct pmap *pm, struct pv_entry **pvp,
     vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags)
 {
-	struct vm_page_md *md;
 	struct pv_entry *pv;
 
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
-	UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pa=%llx", pg, pm, va, pa);
+	UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pa=%llx", pp, pm, va, pa);
 	UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);
 
-	md = VM_PAGE_TO_MD(pg);
-
 	/* pv is already registered? */
-	TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+	TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
 		if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
 			break;
 		}
@@ -1125,14 +1133,14 @@ _pmap_enter_pv(struct vm_page *pg, struc
 		pv->pv_pa = pa;
 		pv->pv_ptep = ptep;
 
-		TAILQ_INSERT_HEAD(&md->mdpg_pvhead, pv, pv_link);
+		TAILQ_INSERT_HEAD(&pp->pp_pvhead, pv, pv_link);
 		PMAP_COUNT(pv_enter);
 
 #ifdef PMAP_PV_DEBUG
-		if (!TAILQ_EMPTY(&md->mdpg_pvhead)){
+		if (!TAILQ_EMPTY(&pp->pp_pvhead)){
 			printf("pv %p alias added va=%016lx -> pa=%016lx\n",
 			    pv, va, pa);
-			pv_dump(md, printf);
+			pv_dump(pp, printf);
 		}
 #endif
 	}
@@ -1175,7 +1183,7 @@ pmap_kremove(vaddr_t va, vsize_t size)
 }
 
 static void
-_pmap_protect_pv(struct vm_page *pg, struct pv_entry *pv, vm_prot_t prot)
+_pmap_protect_pv(struct pmap_page *pp, struct pv_entry *pv, vm_prot_t prot)
 {
 	pt_entry_t *ptep, pte;
 	vm_prot_t pteprot;
@@ -1185,10 +1193,10 @@ _pmap_protect_pv(struct vm_page *pg, str
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
-	UVMHIST_LOG(pmaphist, "pg=%p, pv=%p, prot=%08x", pg, pv, prot, 0);
+	UVMHIST_LOG(pmaphist, "pp=%p, pv=%p, prot=%08x", pp, pv, prot, 0);
 
 	/* get prot mask from referenced/modified */
-	mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
+	mdattr = pp->pp_flags &
 	    (VM_PROT_READ | VM_PROT_WRITE);
 
 	pm_lock(pv->pv_pmap);
@@ -1253,6 +1261,7 @@ pmap_protect(struct pmap *pm, vaddr_t sv
 		pt_entry_t opte;
 #endif
 		struct vm_page *pg;
+		struct pmap_page *pp;
 		paddr_t pa;
 		uint32_t mdattr;
 		bool executable;
@@ -1271,16 +1280,20 @@ pmap_protect(struct pmap *pm, vaddr_t sv
 
 		pa = lxpde_pa(pte);
 		pg = PHYS_TO_VM_PAGE(pa);
-
 		if (pg != NULL) {
-			/* get prot mask from referenced/modified */
-			mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
-			    (VM_PROT_READ | VM_PROT_WRITE);
+			pp = VM_PAGE_TO_PP(pg);
 			PMAP_COUNT(protect_managed);
 		} else {
+			pp = NULL;
+			PMAP_COUNT(protect_unmanaged);
+		}
+
+		if (pp != NULL) {
+			/* get prot mask from referenced/modified */
+			mdattr = pp->pp_flags & (VM_PROT_READ | VM_PROT_WRITE);
+		} else {
 			/* unmanaged page */
 			mdattr = VM_PROT_ALL;
-			PMAP_COUNT(protect_unmanaged);
 		}
 
 #ifdef UVMHIST
@@ -1553,7 +1566,8 @@ static int
 _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
     u_int flags, bool kenter)
 {
-	struct vm_page *pg, *pgs[2], *pdppg, *pdppg0;
+	struct vm_page *pdppg, *pdppg0;
+	struct pmap_page *pp, *opp, *pps[2];
 	struct pv_entry *spv, *opv = NULL;
 	pd_entry_t pde;
 	pt_entry_t attr, pte, opte, *ptep;
@@ -1599,13 +1613,20 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 	}
 #endif
 
-	if (kenter)
-		pg = NULL;
-	else
-		pg = PHYS_TO_VM_PAGE(pa);
+	if (kenter) {
+		pp = NULL;
+	} else {
+		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+		if (pg != NULL) {
+			pp = VM_PAGE_TO_PP(pg);
+			PMAP_COUNT(managed_mappings);
+		} else {
+			pp = NULL;
+			PMAP_COUNT(unmanaged_mappings);
+		}
+	}
 
-	if (pg != NULL) {
-		PMAP_COUNT(managed_mappings);
+	if (pp != NULL) {
 		/*
 		 * allocate pv in advance of pm_lock() to avoid locking myself.
 		 * pool_cache_get() may call pmap_kenter() internally.
@@ -1613,7 +1634,6 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 		spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
 		need_update_pv = true;
 	} else {
-		PMAP_COUNT(unmanaged_mappings);
 		spv = NULL;
 		need_update_pv = false;
 	}
@@ -1708,13 +1728,12 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 	opte = atomic_swap_64(ptep, 0);
 	need_sync_icache = (prot & VM_PROT_EXECUTE);
 
-	/* for lock ordering for pg and opg */
-	pgs[0] = pg;
-	pgs[1] = NULL;
+	/* lock ordering between the old and the new page */
+	pps[0] = pp;
+	pps[1] = NULL;
 
 	/* remap? */
 	if (l3pte_valid(opte)) {
-		struct vm_page *opg;
 		bool need_remove_pv;
 
 		KASSERT(!kenter);	/* pmap_kenter_pa() cannot override */
@@ -1737,7 +1756,7 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 
 		if (pa == l3pte_pa(opte)) {
 			/* old and new pte have same pa, no need to update pv */
-			need_remove_pv = (pg == NULL);
+			need_remove_pv = (pp == NULL);
 			need_update_pv = false;
 			if (need_sync_icache && l3pte_executable(opte, user))
 				need_sync_icache = false;
@@ -1746,25 +1765,30 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 		}
 
 		if (need_remove_pv &&
-		    ((opg = PHYS_TO_VM_PAGE(l3pte_pa(opte))) != NULL)) {
-			/* need to lock both pg and opg against deadlock */
-			if (pg < opg) {
-				pgs[0] = pg;
-				pgs[1] = opg;
+		    ((opp = phys_to_pp(l3pte_pa(opte))) != NULL)) {
+			/*
+			 * need to lock both pp and opp (the old pp)
+			 * against deadlock; 'pp' may be NULL.
+			 */
+			if (pp < opp) {
+				pps[0] = pp;
+				pps[1] = opp;
 			} else {
-				pgs[0] = opg;
-				pgs[1] = pg;
+				pps[0] = opp;
+				pps[1] = pp;
 			}
-			pmap_pv_lock(VM_PAGE_TO_MD(pgs[0]));
-			pmap_pv_lock(VM_PAGE_TO_MD(pgs[1]));
-			opv = _pmap_remove_pv(opg, pm, va, opte);
+			if (pps[0] != NULL)
+				pmap_pv_lock(pps[0]);
+			if (pps[1] != NULL)
+				pmap_pv_lock(pps[1]);
+			opv = _pmap_remove_pv(opp, pm, va, opte);
 		} else {
-			if (pg != NULL)
-				pmap_pv_lock(VM_PAGE_TO_MD(pg));
+			if (pp != NULL)
+				pmap_pv_lock(pp);
 		}
 	} else {
-		if (pg != NULL)
-			pmap_pv_lock(VM_PAGE_TO_MD(pg));
+		if (pp != NULL)
+			pmap_pv_lock(pp);
 	}
 
 	if (!l3pte_valid(opte))
@@ -1781,7 +1805,7 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 
 	mdattr = VM_PROT_READ | VM_PROT_WRITE;
 	if (need_update_pv) {
-		error = _pmap_enter_pv(pg, pm, &spv, va, ptep, pa, flags);
+		error = _pmap_enter_pv(pp, pm, &spv, va, ptep, pa, flags);
 		if (error != 0) {
 			/*
 			 * If pmap_enter() fails,
@@ -1801,11 +1825,11 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 		}
 	}
 
-	if (pg != NULL) {
+	if (pp != NULL) {
 		/* update referenced/modified flags */
-		VM_PAGE_TO_MD(pg)->mdpg_flags |=
+		pp->pp_flags |=
 		    (flags & (VM_PROT_READ | VM_PROT_WRITE));
-		mdattr &= VM_PAGE_TO_MD(pg)->mdpg_flags;
+		mdattr &= pp->pp_flags;
 	}
 
 #ifdef PMAPCOUNTERS
@@ -1854,10 +1878,10 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 	PMSTAT_INC_RESIDENT_COUNT(pm);
 
  fail1:
-	if (pgs[1] != NULL)
-		pmap_pv_unlock(VM_PAGE_TO_MD(pgs[1]));
-	if (pgs[0] != NULL)
-		pmap_pv_unlock(VM_PAGE_TO_MD(pgs[0]));
+	if (pps[1] != NULL)
+		pmap_pv_unlock(pps[1]);
+	if (pps[0] != NULL)
+		pmap_pv_unlock(pps[0]);
  fail0:
 	pm_unlock(pm);
 
@@ -1888,7 +1912,7 @@ _pmap_remove(struct pmap *pm, vaddr_t sv
     struct pv_entry **pvtofree)
 {
 	pt_entry_t pte, *ptep = NULL;
-	struct vm_page *pg;
+	struct pmap_page *pp;
 	struct pv_entry *opv;
 	paddr_t pa;
 	vaddr_t va;
@@ -1916,12 +1940,12 @@ _pmap_remove(struct pmap *pm, vaddr_t sv
 
 		if (!kremove) {
 			pa = lxpde_pa(pte);
-			pg = PHYS_TO_VM_PAGE(pa);
-			if (pg != NULL) {
+			pp = phys_to_pp(pa);
+			if (pp != NULL) {
 
-				pmap_pv_lock(VM_PAGE_TO_MD(pg));
-				opv = _pmap_remove_pv(pg, pm, va, pte);
-				pmap_pv_unlock(VM_PAGE_TO_MD(pg));
+				pmap_pv_lock(pp);
+				opv = _pmap_remove_pv(pp, pm, va, pte);
+				pmap_pv_unlock(pp);
 				if (opv != NULL) {
 					opv->pv_next = *pvtofree;
 					*pvtofree = opv;
@@ -1974,7 +1998,7 @@ pmap_remove(struct pmap *pm, vaddr_t sva
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
-	struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+	struct pmap_page *pp = VM_PAGE_TO_PP(pg);
 	struct pv_entry *pv, *pvtmp;
 	pt_entry_t opte;
 
@@ -1983,8 +2007,8 @@ pmap_page_protect(struct vm_page *pg, vm
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
-	UVMHIST_LOG(pmaphist, "pg=%p, md=%p, pa=%016lx, prot=%08x",
-	    pg, md, VM_PAGE_TO_PHYS(pg), prot);
+	UVMHIST_LOG(pmaphist, "pg=%p, pp=%p, pa=%016lx, prot=%08x",
+	    pg, pp, VM_PAGE_TO_PHYS(pg), prot);
 
 
 	if ((prot & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
@@ -1992,8 +2016,8 @@ pmap_page_protect(struct vm_page *pg, vm
 		struct pv_entry *pvtofree = NULL;
 
 		/* remove all pages reference to this physical page */
-		pmap_pv_lock(md);
-		TAILQ_FOREACH_SAFE(pv, &md->mdpg_pvhead, pv_link, pvtmp) {
+		pmap_pv_lock(pp);
+		TAILQ_FOREACH_SAFE(pv, &pp->pp_pvhead, pv_link, pvtmp) {
 
 			opte = atomic_swap_64(pv->pv_ptep, 0);
 			if (lxpde_valid(opte)) {
@@ -2008,24 +2032,24 @@ pmap_page_protect(struct vm_page *pg, vm
 				}
 				PMSTAT_DEC_RESIDENT_COUNT(pv->pv_pmap);
 			}
-			TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
+			TAILQ_REMOVE(&pp->pp_pvhead, pv, pv_link);
 			PMAP_COUNT(pv_remove);
 
 			pv->pv_next = pvtofree;
 			pvtofree = pv;
 		}
-		pmap_pv_unlock(md);
+		pmap_pv_unlock(pp);
 
 		for (pv = pvtofree; pv != NULL; pv = pvtmp) {
 			pvtmp = pv->pv_next;
 			pool_cache_put(&_pmap_pv_pool, pv);
 		}
 	} else {
-		pmap_pv_lock(md);
-		TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
-			_pmap_protect_pv(pg, pv, prot);
+		pmap_pv_lock(pp);
+		TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
+			_pmap_protect_pv(pp, pv, prot);
 		}
-		pmap_pv_unlock(md);
+		pmap_pv_unlock(pp);
 	}
 }
 
@@ -2067,8 +2091,7 @@ pmap_unwire(struct pmap *pm, vaddr_t va)
 bool
 pmap_fault_fixup(struct pmap *pm, vaddr_t va, vm_prot_t accessprot, bool user)
 {
-	struct vm_page *pg;
-	struct vm_page_md *md;
+	struct pmap_page *pp;
 	pt_entry_t *ptep, pte;
 	vm_prot_t pmap_prot;
 	paddr_t pa;
@@ -2113,12 +2136,11 @@ pmap_fault_fixup(struct pmap *pm, vaddr_
 	}
 
 	pa = l3pte_pa(*ptep);
-	pg = PHYS_TO_VM_PAGE(pa);
-	if (pg == NULL) {
-		UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0);
+	pp = phys_to_pp(pa);
+	if (pp == NULL) {
+		UVMHIST_LOG(pmaphist, "pmap_page not found: va=%016lx", va, 0, 0, 0);
 		goto done;
 	}
-	md = VM_PAGE_TO_MD(pg);
 
 	/* get prot by pmap_enter() (stored in software use bit in pte) */
 	switch (pte & (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE)) {
@@ -2157,14 +2179,14 @@ pmap_fault_fixup(struct pmap *pm, vaddr_
 	if ((pte & LX_BLKPAG_AF) && ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW))
 		goto done;
 
-	pmap_pv_lock(md);
+	pmap_pv_lock(pp);
 	if ((pte & LX_BLKPAG_AF) == 0) {
 		/* pte has no AF bit, set referenced and AF bit */
 		UVMHIST_LOG(pmaphist,
 		    "REFERENCED:"
 		    " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
 		    va, pa, pmap_prot, accessprot);
-		md->mdpg_flags |= VM_PROT_READ;		/* set referenced */
+		pp->pp_flags |= VM_PROT_READ;		/* set referenced */
 		pte |= LX_BLKPAG_AF;
 
 		PMAP_COUNT(fixup_referenced);
@@ -2176,13 +2198,13 @@ pmap_fault_fixup(struct pmap *pm, vaddr_
 		UVMHIST_LOG(pmaphist, "MODIFIED:"
 		    " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
 		    va, pa, pmap_prot, accessprot);
-		md->mdpg_flags |= VM_PROT_WRITE;	/* set modified */
+		pp->pp_flags |= VM_PROT_WRITE;	/* set modified */
 		pte &= ~LX_BLKPAG_AP;
 		pte |= LX_BLKPAG_AP_RW;
 
 		PMAP_COUNT(fixup_modified);
 	}
-	pmap_pv_unlock(md);
+	pmap_pv_unlock(pp);
 
 	atomic_swap_64(ptep, pte);
 	AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
@@ -2198,27 +2220,27 @@ bool
 pmap_clear_modify(struct vm_page *pg)
 {
 	struct pv_entry *pv;
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
 	pt_entry_t *ptep, pte, opte;
 	vaddr_t va;
 
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
-	UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x",
-	    pg, md->mdpg_flags, 0, 0);
+	UVMHIST_LOG(pmaphist, "pg=%p, pp_flags=%08x",
+	    pg, pp->pp_flags, 0, 0);
 
-	pmap_pv_lock(md);
+	pmap_pv_lock(pp);
 
-	if ((md->mdpg_flags & VM_PROT_WRITE) == 0) {
-		pmap_pv_unlock(md);
+	if ((pp->pp_flags & VM_PROT_WRITE) == 0) {
+		pmap_pv_unlock(pp);
 		return false;
 	}
 
-	md->mdpg_flags &= ~VM_PROT_WRITE;
+	pp->pp_flags &= ~VM_PROT_WRITE;
 
 	PMAP_COUNT(clear_modify);
-	TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+	TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
 		PMAP_COUNT(clear_modify_pages);
 
 		va = pv->pv_va;
@@ -2246,7 +2268,7 @@ pmap_clear_modify(struct vm_page *pg)
 		    va, ptep, l3pte_pa(pte), 0);
 	}
 
-	pmap_pv_unlock(md);
+	pmap_pv_unlock(pp);
 
 	return true;
 }
@@ -2255,26 +2277,26 @@ bool
 pmap_clear_reference(struct vm_page *pg)
 {
 	struct pv_entry *pv;
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
 	pt_entry_t *ptep, pte, opte;
 	vaddr_t va;
 
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
-	UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x",
-	    pg, md->mdpg_flags, 0, 0);
+	UVMHIST_LOG(pmaphist, "pg=%p, pp=%p, pp_flags=%08x",
+	    pg, pp, pp->pp_flags, 0);
 
-	pmap_pv_lock(md);
+	pmap_pv_lock(pp);
 
-	if ((md->mdpg_flags & VM_PROT_READ) == 0) {
-		pmap_pv_unlock(md);
+	if ((pp->pp_flags & VM_PROT_READ) == 0) {
+		pmap_pv_unlock(pp);
 		return false;
 	}
-	md->mdpg_flags &= ~VM_PROT_READ;
+	pp->pp_flags &= ~VM_PROT_READ;
 
 	PMAP_COUNT(clear_reference);
-	TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+	TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
 		PMAP_COUNT(clear_reference_pages);
 
 		va = pv->pv_va;
@@ -2300,7 +2322,7 @@ pmap_clear_reference(struct vm_page *pg)
 		    va, ptep, l3pte_pa(pte), 0);
 	}
 
-	pmap_pv_unlock(md);
+	pmap_pv_unlock(pp);
 
 	return true;
 }
@@ -2308,17 +2330,17 @@ pmap_clear_reference(struct vm_page *pg)
 bool
 pmap_is_modified(struct vm_page *pg)
 {
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
 
-	return (md->mdpg_flags & VM_PROT_WRITE);
+	return (pp->pp_flags & VM_PROT_WRITE);
 }
 
 bool
 pmap_is_referenced(struct vm_page *pg)
 {
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
 
-	return (md->mdpg_flags & VM_PROT_READ);
+	return (pp->pp_flags & VM_PROT_READ);
 }
 
 #ifdef DDB
@@ -2488,11 +2510,11 @@ void
 pmap_db_pteinfo(vaddr_t va, void (*pr)(const char *, ...) __printflike(1, 2))
 {
 	struct vm_page *pg;
+	struct pmap_page *pp;
 	bool user;
 	pd_entry_t *l0, *l1, *l2, *l3;
 	pd_entry_t pde;
 	pt_entry_t pte;
-	struct vm_page_md *md;
 	uint64_t ttbr;
 	paddr_t pa;
 	unsigned int idx;
@@ -2558,12 +2580,15 @@ pmap_db_pteinfo(vaddr_t va, void (*pr)(c
 
 	pa = l3pte_pa(pte);
 	pg = PHYS_TO_VM_PAGE(pa);
-	if (pg == NULL) {
-		pr("No VM_PAGE\n");
+	pp = phys_to_pp(pa);
+	if (pp == NULL) {
+		pr("No VM_PAGE nor PMAP_PAGE\n");
 	} else {
-		pg_dump(pg, pr);
-		md = VM_PAGE_TO_MD(pg);
-		pv_dump(md, pr);
+		if (pg != NULL)
+			pg_dump(pg, pr);
+		else
+			pr("no VM_PAGE. pv-tracked page?\n");
+		pv_dump(pp, pr);
 	}
 }
 

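A note on the trickiest hunk above: in _pmap_enter(), both the old and the
new page may have pv state to update, so their pp_pvlock mutexes are taken
in address order, which keeps two threads entering mappings on the same
pair of pages from deadlocking ABBA-style. Because an unmanaged mapping now
carries pp == NULL, each slot is NULL-checked before locking. Distilled
from the hunk (pp and opp are the new and the old page's pv state, as in
_pmap_enter() itself):

	struct pmap_page *pps[2];

	/* address order: any two CPUs take the two locks the same way */
	if (pp < opp) {
		pps[0] = pp;
		pps[1] = opp;
	} else {
		pps[0] = opp;
		pps[1] = pp;
	}
	if (pps[0] != NULL)
		pmap_pv_lock(pps[0]);
	if (pps[1] != NULL)
		pmap_pv_lock(pps[1]);

	/* ... update both pages' pv lists ... */

	if (pps[1] != NULL)
		pmap_pv_unlock(pps[1]);
	if (pps[0] != NULL)
		pmap_pv_unlock(pps[0]);
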
Index: src/sys/arch/aarch64/include/pmap.h
diff -u src/sys/arch/aarch64/include/pmap.h:1.31 src/sys/arch/aarch64/include/pmap.h:1.32
--- src/sys/arch/aarch64/include/pmap.h:1.31	Sun Jan 26 15:52:00 2020
+++ src/sys/arch/aarch64/include/pmap.h	Mon Feb  3 13:35:44 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.31 2020/01/26 15:52:00 skrll Exp $ */
+/* $NetBSD: pmap.h,v 1.32 2020/02/03 13:35:44 ryo Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -83,22 +83,27 @@ struct pmap {
 };
 
 struct pv_entry;
+
+struct pmap_page {
+	kmutex_t pp_pvlock;
+	TAILQ_HEAD(, pv_entry) pp_pvhead;
+
+	/* VM_PROT_READ means referenced, VM_PROT_WRITE means modified */
+	uint32_t pp_flags;
+};
+
 struct vm_page_md {
-	kmutex_t mdpg_pvlock;
 	TAILQ_ENTRY(vm_page) mdpg_vmlist;	/* L[0123] table vm_page list */
-	TAILQ_HEAD(, pv_entry) mdpg_pvhead;
-
 	pd_entry_t *mdpg_ptep_parent;	/* for page descriptor page only */
 
-	/* VM_PROT_READ means referenced, VM_PROT_WRITE means modified */
-	uint32_t mdpg_flags;
+	struct pmap_page mdpg_pp;
 };
 
-/* each mdpg_pvlock will be initialized in pmap_init() */
-#define VM_MDPAGE_INIT(pg)				\
-	do {						\
-		TAILQ_INIT(&(pg)->mdpage.mdpg_pvhead);	\
-		(pg)->mdpage.mdpg_flags = 0;		\
+/* each mdpg_pp.pp_pvlock will be initialized in pmap_init() */
+#define VM_MDPAGE_INIT(pg)					\
+	do {							\
+		TAILQ_INIT(&(pg)->mdpage.mdpg_pp.pp_pvhead);	\
+		(pg)->mdpage.mdpg_pp.pp_flags = 0;		\
 	} while (/*CONSTCOND*/ 0)
 
 

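Looking ahead: with the pv state no longer tied to struct vm_page, the
natural follow-up is to let phys_to_pp() also resolve pv-tracked unmanaged
pages. This commit does not do that yet; the sketch below is hypothetical,
assuming the pmap_pv(9) lookup helper pmap_pv_tracked() and the
__HAVE_PMAP_PV_TRACK option as used by the x86 pmap:

	static inline struct pmap_page *
	phys_to_pp(paddr_t pa)
	{
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pa);
		if (pg != NULL)
			return VM_PAGE_TO_PP(pg);

	#ifdef __HAVE_PMAP_PV_TRACK
		/* hypothetical: pages registered via pmap_pv_track() */
		return pmap_pv_tracked(pa);
	#else
		return NULL;
	#endif
	}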