Module Name: src
Committed By: ad
Date: Sat Mar 21 18:47:54 UTC 2020
Modified Files:
src/sys/arch/vax/include: pmap.h
src/sys/arch/vax/vax: pmap.c
Log Message:
PR port-vax/55094: vax pmap needs locking adjustments
Make the adjustments noted in the PR and don't call uvm_wait() or do
WAITOK ever - UVM takes care of that.
To generate a diff of this commit:
cvs rdiff -u -r1.81 -r1.82 src/sys/arch/vax/include/pmap.h
cvs rdiff -u -r1.189 -r1.190 src/sys/arch/vax/vax/pmap.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/vax/include/pmap.h
diff -u src/sys/arch/vax/include/pmap.h:1.81 src/sys/arch/vax/include/pmap.h:1.82
--- src/sys/arch/vax/include/pmap.h:1.81 Sat Mar 14 14:05:44 2020
+++ src/sys/arch/vax/include/pmap.h Sat Mar 21 18:47:54 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.81 2020/03/14 14:05:44 ad Exp $ */
+/* $NetBSD: pmap.h,v 1.82 2020/03/21 18:47:54 ad Exp $ */
/*
* Copyright (c) 1991 Regents of the University of California.
@@ -189,9 +189,6 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa
return (false);
}
-bool pmap_clear_modify_long(const struct pv_entry *);
-bool pmap_clear_reference_long(const struct pv_entry *);
-bool pmap_is_modified_long_p(const struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
@@ -209,38 +206,6 @@ pmap_is_referenced(struct vm_page *pg)
return (pv->pv_attr & PG_V) != 0;
}
-static __inline bool
-pmap_clear_reference(struct vm_page *pg)
-{
- struct pv_entry * const pv = pmap_pg_to_pv(pg);
- bool rv = (pv->pv_attr & PG_V) != 0;
-
- pv->pv_attr &= ~PG_V;
- if (pv->pv_pmap != NULL || pv->pv_next != NULL)
- rv |= pmap_clear_reference_long(pv);
- return rv;
-}
-
-static __inline bool
-pmap_clear_modify(struct vm_page *pg)
-{
- struct pv_entry * const pv = pmap_pg_to_pv(pg);
- bool rv = (pv->pv_attr & PG_M) != 0;
-
- pv->pv_attr &= ~PG_M;
- if (pv->pv_pmap != NULL || pv->pv_next != NULL)
- rv |= pmap_clear_modify_long(pv);
- return rv;
-}
-
-static __inline bool
-pmap_is_modified(struct vm_page *pg)
-{
- const struct pv_entry * const pv = pmap_pg_to_pv(pg);
-
- return (pv->pv_attr & PG_M) != 0 || pmap_is_modified_long_p(pv);
-}
-
static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
Index: src/sys/arch/vax/vax/pmap.c
diff -u src/sys/arch/vax/vax/pmap.c:1.189 src/sys/arch/vax/vax/pmap.c:1.190
--- src/sys/arch/vax/vax/pmap.c:1.189 Sat Feb 15 18:12:14 2020
+++ src/sys/arch/vax/vax/pmap.c Sat Mar 21 18:47:54 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.189 2020/02/15 18:12:14 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.190 2020/03/21 18:47:54 ad Exp $ */
/*
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
* All rights reserved.
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.189 2020/02/15 18:12:14 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.190 2020/03/21 18:47:54 ad Exp $");
#include "opt_ddb.h"
#include "opt_cputype.h"
@@ -114,29 +114,9 @@ extern void *msgbufaddr;
static inline void
pmap_decrement_stats(struct pmap *pm, bool wired)
{
-#if defined(MULTIPROCESSOR)
- atomic_dec_ulong(&pm->pm_stats.resident_count);
- if (wired)
- atomic_dec_ulong(&pm->pm_stats.wired_count);
-#else
pm->pm_stats.resident_count--;
if (wired)
pm->pm_stats.wired_count--;
-#endif
-}
-
-static inline void
-pmap_increment_stats(struct pmap *pm, bool wired)
-{
-#if defined(MULTIPROCESSOR)
- atomic_inc_ulong(&pm->pm_stats.resident_count);
- if (wired)
- atomic_inc_ulong(&pm->pm_stats.wired_count);
-#else
- pm->pm_stats.resident_count++;
- if (wired)
- pm->pm_stats.wired_count++;
-#endif
}
/*
@@ -171,27 +151,18 @@ ptpinuse(void *pte)
}
#ifdef PMAPDEBUG
-volatile int recurse;
-#define RECURSESTART { \
- if (recurse) \
- printf("enter at %d, previous %d\n", __LINE__, recurse);\
- recurse = __LINE__; \
-}
-#define RECURSEEND {recurse = 0; }
#define PMDEBUG(x) if (startpmapdebug)printf x
#else
-#define RECURSESTART
-#define RECURSEEND
#define PMDEBUG(x)
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-static kmutex_t pvtable_lock;
-#define PVTABLE_LOCK mutex_spin_enter(&pvtable_lock);
-#define PVTABLE_UNLOCK mutex_spin_enter(&pvtable_lock);
+static kmutex_t pmap_lock;
+#define PMAP_LOCK mutex_spin_enter(&pmap_lock);
+#define PMAP_UNLOCK mutex_spin_exit(&pmap_lock);
#else
-#define PVTABLE_LOCK
-#define PVTABLE_UNLOCK
+#define PMAP_LOCK
+#define PMAP_UNLOCK
#endif
#ifdef PMAPDEBUG
@@ -204,7 +175,7 @@ vaddr_t virtual_avail, virtual_end; /*
struct pv_entry *get_pventry(void);
void free_pventry(struct pv_entry *);
void more_pventries(void);
-vaddr_t get_ptp(bool);
+vaddr_t get_ptp(void);
void free_ptp(paddr_t);
/*
@@ -434,7 +405,7 @@ pmap_bootstrap(void)
SIMPLEQ_FIRST(&cpus) = ci;
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- mutex_init(&pvtable_lock, MUTEX_DEFAULT, IPL_VM);
+ mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_VM);
#endif
/*
@@ -606,18 +577,13 @@ update_pcbs(struct pmap *pm)
* Allocate a page through direct-mapped segment.
*/
static vaddr_t
-getpage(bool wait)
+getpage(void)
{
struct vm_page *pg;
- for (;;) {
- pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
- if (pg != NULL)
- break;
- if (!wait)
- return 0;
- uvm_wait("getpage");
- }
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+ if (pg == NULL)
+ return 0;
return (VM_PAGE_TO_PHYS(pg)|KERNBASE);
}
@@ -813,9 +779,9 @@ grow_p0(struct pmap *pm, int reqlen)
p0lr = pm->pm_p0lr;
inuse = p0lr != 0;
len = round_page((reqlen+1) * PPTESZ);
- RECURSEEND;
+ PMAP_UNLOCK;
nptespc = pmap_getusrptes(pm, len);
- RECURSESTART;
+ PMAP_LOCK;
if (nptespc == 0)
return 0;
@@ -856,9 +822,9 @@ grow_p1(struct pmap *pm, int len)
/* Get new pte space */
nlen = (NPTEPERREG*PPTESZ) - trunc_page(len * PPTESZ);
- RECURSEEND;
+ PMAP_UNLOCK;
nptespc = pmap_getusrptes(pm, nlen);
- RECURSESTART;
+ PMAP_LOCK;
if (nptespc == 0)
return 0;
@@ -1086,7 +1052,7 @@ pmap_enter(pmap_t pmap, vaddr_t v, paddr
PMDEBUG(("pmap_enter: pmap %p v %lx p %lx prot %x wired %d access %x\n",
pmap, v, p, prot, (flags & PMAP_WIRED) != 0, flags & VM_PROT_ALL));
- RECURSESTART;
+ PMAP_LOCK;
/* Find address of correct pte */
switch (SEGTYPE(v)) {
@@ -1124,10 +1090,12 @@ pmap_enter(pmap_t pmap, vaddr_t v, paddr
if (*ptpptr == 0) {
paddr_t phys;
- phys = get_ptp((flags & PMAP_CANFAIL) != 0);
+ phys = get_ptp();
if (phys == 0) {
- RECURSEEND;
- return ENOMEM;
+ PMAP_UNLOCK;
+ if ((flags & PMAP_CANFAIL) != 0)
+ return ENOMEM;
+ panic("pmap_enter: out of memory");
}
*ptpptr = PG_V | PG_KW | PG_PFNUM(phys);
}
@@ -1138,7 +1106,7 @@ pmap_enter(pmap_t pmap, vaddr_t v, paddr
*/
if (IOSPACE_P(p)) {
mapin8(pteptr, newpte);
- RECURSEEND;
+ PMAP_UNLOCK;
return 0;
}
@@ -1152,13 +1120,13 @@ pmap_enter(pmap_t pmap, vaddr_t v, paddr
if (newpte == (oldpte | PG_W)) {
*pteptr |= PG_W;
pmap->pm_stats.wired_count++;
- RECURSEEND;
+ PMAP_UNLOCK;
return 0;
}
/* mapping unchanged? just return. */
if (newpte == oldpte) {
- RECURSEEND;
+ PMAP_UNLOCK;
return 0;
}
@@ -1174,15 +1142,14 @@ pmap_enter(pmap_t pmap, vaddr_t v, paddr
*/
if (oldpte & PG_FRAME) {
- RECURSEEND;
if ((oldpte & PG_SREF) == 0)
rmpage(pmap, pteptr);
- else
+ else {
+ PMAP_UNLOCK;
panic("pmap_enter on PG_SREF page");
- RECURSESTART;
+ }
}
- PVTABLE_LOCK;
if (pv->pv_pmap == NULL) {
pv->pv_vaddr = v;
pv->pv_pmap = pmap;
@@ -1193,28 +1160,28 @@ pmap_enter(pmap_t pmap, vaddr_t v, paddr
tmp->pv_next = pv->pv_next;
pv->pv_next = tmp;
}
- PVTABLE_UNLOCK;
}
- pmap_increment_stats(pmap, (flags & PMAP_WIRED) != 0);
+ pmap->pm_stats.resident_count++;
+ if ((flags & PMAP_WIRED) != 0)
+ pmap->pm_stats.wired_count++;
- PVTABLE_LOCK;
if (flags & (VM_PROT_READ|VM_PROT_WRITE)) {
pv->pv_attr |= PG_V;
newpte |= PG_V;
}
if (flags & VM_PROT_WRITE)
pv->pv_attr |= PG_M;
- PVTABLE_UNLOCK;
if (flags & PMAP_WIRED)
newpte |= PG_V; /* Not allowed to be invalid */
mapin8(pteptr, newpte);
- RECURSEEND;
if (pventries < 10)
more_pventries();
+ PMAP_UNLOCK;
+
mtpr(0, PR_TBIA); /* Always; safety belt */
return 0;
@@ -1294,7 +1261,7 @@ pmap_protect_long(pmap_t pmap, vaddr_t s
PMDEBUG(("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n",
pmap, start, end,prot));
- RECURSESTART;
+ PMAP_LOCK;
switch (SEGTYPE(start)) {
case SYSSEG:
@@ -1310,7 +1277,7 @@ pmap_protect_long(pmap_t pmap, vaddr_t s
case P1SEG:
if (vax_btop(end - 0x40000000) <= pmap->pm_p1lr) {
- RECURSEEND;
+ PMAP_UNLOCK;
return;
}
if (vax_btop(start - 0x40000000) < pmap->pm_p1lr)
@@ -1326,7 +1293,7 @@ pmap_protect_long(pmap_t pmap, vaddr_t s
/* Anything to care about at all? */
if (vax_btop(start) > lr) {
- RECURSEEND;
+ PMAP_UNLOCK;
return;
}
if (vax_btop(end) > lr)
@@ -1350,14 +1317,14 @@ pmap_protect_long(pmap_t pmap, vaddr_t s
while (pts < ptd) {
if (kvtopte(pts)->pg_pfn && *(int *)pts) {
if (prot == VM_PROT_NONE) {
- RECURSEEND;
if ((*(int *)pts & PG_SREF) == 0)
rmpage(pmap, (u_int *)pts);
#ifdef DEBUG
- else
+ else {
+ PMAP_UNLOCK;
panic("pmap_remove PG_SREF page");
+ }
#endif
- RECURSESTART;
memset(pts, 0, sizeof(struct pte) * LTOHPN);
if (pt != Sysmap) {
if (ptpinuse(pts) == 0)
@@ -1376,7 +1343,7 @@ pmap_protect_long(pmap_t pmap, vaddr_t s
}
pts += LTOHPN;
}
- RECURSEEND;
+ PMAP_UNLOCK;
#ifdef MULTIPROCESSOR
cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
@@ -1433,12 +1400,12 @@ pmap_simulref(int bits, int addr)
pte[6] |= PG_V;
pte[7] |= PG_V;
if (!IOSPACE_P(pa)) { /* No pv_table fiddling in iospace */
- PVTABLE_LOCK;
+ PMAP_LOCK;
pv = pv_table + (pa >> PGSHIFT);
pv->pv_attr |= PG_V; /* Referenced */
if (bits & 4) /* (will be) modified. XXX page tables */
pv->pv_attr |= PG_M;
- PVTABLE_UNLOCK;
+ PMAP_UNLOCK;
}
return 0;
}
@@ -1447,15 +1414,17 @@ pmap_simulref(int bits, int addr)
* Clears valid bit in all ptes referenced to this physical page.
*/
bool
-pmap_clear_reference_long(const struct pv_entry *pv)
+pmap_clear_reference(struct vm_page *pg)
{
+ struct pv_entry *pv = pmap_pg_to_pv(pg);
struct pte *pte;
- int ref = 0;
+ bool ref;
PMDEBUG(("pmap_clear_reference: pv_entry %p\n", pv));
- RECURSESTART;
- PVTABLE_LOCK;
+ PMAP_LOCK;
+ ref = ISSET(pv->pv_attr, PG_V);
+ CLR(pv->pv_attr, PG_V);
if (pv->pv_pmap != NULL) do {
pte = vaddrtopte(pv);
if (pte[0].pg_w == 0) {
@@ -1465,8 +1434,7 @@ pmap_clear_reference_long(const struct p
pte[6].pg_v = 0; pte[7].pg_v = 0;
}
} while ((pv = pv->pv_next) != NULL);
- PVTABLE_UNLOCK;
- RECURSEEND;
+ PMAP_UNLOCK;
#ifdef MULTIPROCESSOR
cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
@@ -1478,14 +1446,16 @@ pmap_clear_reference_long(const struct p
* Checks if page is modified; returns true or false depending on result.
*/
bool
-pmap_is_modified_long_p(const struct pv_entry *pv)
+pmap_is_modified(struct vm_page *pg)
{
- bool rv = false;
+ struct pv_entry *pv = pmap_pg_to_pv(pg);
+ bool rv;
PMDEBUG(("pmap_is_modified: pv_entry %p ", pv));
- PVTABLE_LOCK;
- if (pv->pv_pmap != NULL) do {
+ PMAP_LOCK;
+ rv = ISSET(pv->pv_attr, PG_M);
+ if (rv == false && pv->pv_pmap != NULL) do {
const struct pte * const pte = vaddrtopte(pv);
if (pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m
| pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m) {
@@ -1493,7 +1463,7 @@ pmap_is_modified_long_p(const struct pv_
break;
}
} while ((pv = pv->pv_next) != NULL);
- PVTABLE_UNLOCK;
+ PMAP_UNLOCK;
return rv;
}
@@ -1501,13 +1471,16 @@ pmap_is_modified_long_p(const struct pv_
* Clears modify bit in all ptes referenced to this physical page.
*/
bool
-pmap_clear_modify_long(const struct pv_entry *pv)
+pmap_clear_modify(struct vm_page *pg)
{
+ struct pv_entry *pv = pmap_pg_to_pv(pg);
bool rv = false;
PMDEBUG(("pmap_clear_modify: pv_entry %p\n", pv));
- PVTABLE_LOCK;
+ PMAP_LOCK;
+ rv = ISSET(pv->pv_attr, PG_M);
+ CLR(pv->pv_attr, PG_M);
if (pv->pv_pmap != NULL) do {
struct pte * const pte = vaddrtopte(pv);
if (pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m |
@@ -1517,7 +1490,7 @@ pmap_clear_modify_long(const struct pv_e
pte[0].pg_m = pte[1].pg_m = pte[2].pg_m = pte[3].pg_m = 0;
pte[4].pg_m = pte[5].pg_m = pte[6].pg_m = pte[7].pg_m = 0;
} while ((pv = pv->pv_next) != NULL);
- PVTABLE_UNLOCK;
+ PMAP_UNLOCK;
return rv;
}
@@ -1538,8 +1511,7 @@ pmap_page_protect_long(struct pv_entry *
if (prot == VM_PROT_ALL) /* 'cannot happen' */
return;
- RECURSESTART;
- PVTABLE_LOCK;
+ PMAP_LOCK;
if (prot == VM_PROT_NONE) {
g = (int *)vaddrtopte(pv);
if (g) {
@@ -1586,8 +1558,7 @@ pmap_page_protect_long(struct pv_entry *
pt[6].pg_prot = pr; pt[7].pg_prot = pr;
} while ((pv = pv->pv_next));
}
- PVTABLE_UNLOCK;
- RECURSEEND;
+ PMAP_UNLOCK;
#ifdef MULTIPROCESSOR
cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
@@ -1682,7 +1653,7 @@ pmap_unwire(pmap_t pmap, vaddr_t v)
PMDEBUG(("pmap_unwire: pmap %p v %lx\n", pmap, v));
- RECURSESTART;
+ PMAP_LOCK;
if (v & KERNBASE) {
pte = (int *)kvtopte(v);
} else {
@@ -1692,8 +1663,8 @@ pmap_unwire(pmap_t pmap, vaddr_t v)
pte = (int *)&pmap->pm_p1br[PG_PFNUM(v)];
}
pte[0] &= ~PG_W;
- RECURSEEND;
pmap->pm_stats.wired_count--;
+ PMAP_UNLOCK;
}
/*
@@ -1735,15 +1706,15 @@ free_pventry(struct pv_entry *pv)
/*
* more_pventries().
- * The pv_table lock must _not_ be held before calling this.
+ * The pmap_lock must be held before calling this.
*/
void
more_pventries(void)
{
struct pv_entry *pv;
- int s, i, count;
+ int i, count;
- pv = (struct pv_entry *)getpage(false);
+ pv = (struct pv_entry *)getpage();
if (pv == NULL)
return;
count = PAGE_SIZE/sizeof(struct pv_entry);
@@ -1751,13 +1722,9 @@ more_pventries(void)
for (i = 0; i < count - 1; i++)
pv[i].pv_next = &pv[i + 1];
- s = splvm();
- PVTABLE_LOCK;
pv[count - 1].pv_next = pv_list;
pv_list = pv;
pventries += count;
- PVTABLE_UNLOCK;
- splx(s);
}
static int *ptpp;
@@ -1766,7 +1733,7 @@ static int *ptpp;
* Get a (vax-size) page, to use for page tables.
*/
vaddr_t
-get_ptp(bool wait)
+get_ptp(void)
{
int *a;
@@ -1775,7 +1742,7 @@ get_ptp(bool wait)
memset(a, 0, VAX_NBPG);
return (vaddr_t)a;
}
- a = (int *)getpage(wait);
+ a = (int *)getpage();
if (a != NULL) {
a[128] = (int)&a[256];
a[256] = (int)&a[384];