Module Name:    src
Committed By:   chs
Date:           Fri Aug 14 09:06:15 UTC 2020

Modified Files:
        src/sys/miscfs/genfs: genfs_io.c
        src/sys/uvm: uvm_extern.h uvm_object.c uvm_object.h uvm_page.c
            uvm_page_status.c uvm_pager.c uvm_vnode.c

Log Message:
centralize calls from UVM to radixtree into a few functions.
in those functions, assert that the object lock is held in
the correct mode.
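
As context for the diffs below, here is a minimal, hypothetical caller-side
sketch of the new accessors. The wrapper function and its printf output are
illustrative only and not part of this commit, but the accessor names and the
lock-mode assertions match the code added in uvm_object.c: the predicates
KASSERT that vmobjlock is held in either mode (rw_lock_held), while the
mutators KASSERT that it is write-held (rw_write_held).

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/rwlock.h>

	#include <uvm/uvm.h>

	/*
	 * Hypothetical example (not from the commit): exercising the
	 * centralized radixtree tag accessors under the object lock.
	 */
	static void
	uvm_obj_tag_example(struct uvm_object *uobj, struct vm_page *pg)
	{

		/* Predicates require the lock held in either mode. */
		rw_enter(uobj->vmobjlock, RW_READER);
		if (uvm_obj_page_dirty_p(pg))
			printf("page at offset %jx is tagged dirty\n",
			    (uintmax_t)pg->offset);
		if (uvm_obj_clean_p(uobj))
			printf("object has no pages tagged dirty\n");
		rw_exit(uobj->vmobjlock);

		/*
		 * Mutators require the lock write-held.  In the tree they
		 * are called from helpers such as uvm_pagemarkdirty(),
		 * which also keep the PG_CLEAN/PG_DIRTY page flags in
		 * sync with the radixtree tag.
		 */
		rw_enter(uobj->vmobjlock, RW_WRITER);
		uvm_obj_page_set_writeback(pg);
		uvm_obj_page_clear_writeback(pg);
		rw_exit(uobj->vmobjlock);
	}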


To generate a diff of this commit:
cvs rdiff -u -r1.99 -r1.100 src/sys/miscfs/genfs/genfs_io.c
cvs rdiff -u -r1.230 -r1.231 src/sys/uvm/uvm_extern.h
cvs rdiff -u -r1.23 -r1.24 src/sys/uvm/uvm_object.c
cvs rdiff -u -r1.38 -r1.39 src/sys/uvm/uvm_object.h
cvs rdiff -u -r1.244 -r1.245 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.5 -r1.6 src/sys/uvm/uvm_page_status.c
cvs rdiff -u -r1.128 -r1.129 src/sys/uvm/uvm_pager.c
cvs rdiff -u -r1.115 -r1.116 src/sys/uvm/uvm_vnode.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/miscfs/genfs/genfs_io.c
diff -u src/sys/miscfs/genfs/genfs_io.c:1.99 src/sys/miscfs/genfs/genfs_io.c:1.100
--- src/sys/miscfs/genfs/genfs_io.c:1.99	Mon Aug 10 11:09:15 2020
+++ src/sys/miscfs/genfs/genfs_io.c	Fri Aug 14 09:06:14 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.99 2020/08/10 11:09:15 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.100 2020/08/14 09:06:14 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -913,8 +913,7 @@ retry:
 	 * shortcut if we have no pages to process.
 	 */
 
-	nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
-            UVM_PAGE_DIRTY_TAG);
+	nodirty = uvm_obj_clean_p(uobj);
 #ifdef DIAGNOSTIC
 	mutex_enter(vp->v_interlock);
 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
@@ -922,9 +921,8 @@ retry:
 #endif
 	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
 		mutex_enter(vp->v_interlock);
-		if (vp->v_iflag & VI_ONWORKLST) {
-			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
-				vn_syncer_remove_from_worklist(vp);
+		if (vp->v_iflag & VI_ONWORKLST && LIST_EMPTY(&vp->v_dirtyblkhd)) {
+			vn_syncer_remove_from_worklist(vp);
 		}
 		mutex_exit(vp->v_interlock);
 		if (trans_mp) {
@@ -978,8 +976,7 @@ retry:
 	}
 
 	error = 0;
-	wasclean = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
-            UVM_PAGE_WRITEBACK_TAG);
+	wasclean = uvm_obj_nowriteback_p(uobj);
 	nextoff = startoff;
 	if (endoff == 0 || flags & PGO_ALLPAGES) {
 		endoff = trunc_page(LLONG_MAX);
@@ -1030,8 +1027,7 @@ retry:
 		KASSERT(pg->offset >= nextoff);
 		KASSERT(!dirtyonly ||
 		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
-		    radix_tree_get_tag(&uobj->uo_pages,
-			pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
+		    uvm_obj_page_writeback_p(pg));
 
 		if (pg->offset >= endoff) {
 			break;
@@ -1245,9 +1241,7 @@ retry:
 				 * mark pages as WRITEBACK so that concurrent
 				 * fsync can find and wait for our activities.
 				 */
-				radix_tree_set_tag(&uobj->uo_pages,
-				    pgs[i]->offset >> PAGE_SHIFT,
-				    UVM_PAGE_WRITEBACK_TAG);
+				uvm_obj_page_set_writeback(pgs[i]);
 			}
 			if (tpg->offset < startoff || tpg->offset >= endoff)
 				continue;
@@ -1332,11 +1326,9 @@ retry:
 	 * syncer list.
 	 */
 
-	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
-	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
-	    UVM_PAGE_DIRTY_TAG)) {
-		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
-			vn_syncer_remove_from_worklist(vp);
+	if ((vp->v_iflag & VI_ONWORKLST) != 0 && uvm_obj_clean_p(uobj) &&
+	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
+		vn_syncer_remove_from_worklist(vp);
 	}
 
 #if !defined(DEBUG)

Index: src/sys/uvm/uvm_extern.h
diff -u src/sys/uvm/uvm_extern.h:1.230 src/sys/uvm/uvm_extern.h:1.231
--- src/sys/uvm/uvm_extern.h:1.230	Sun Jun 14 22:25:15 2020
+++ src/sys/uvm/uvm_extern.h	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_extern.h,v 1.230 2020/06/14 22:25:15 ad Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.231 2020/08/14 09:06:15 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -776,6 +776,14 @@ void			uvm_obj_destroy(struct uvm_object
 int			uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
 			    struct pglist *);
 void			uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
+bool			uvm_obj_clean_p(struct uvm_object *);
+bool			uvm_obj_nowriteback_p(struct uvm_object *);
+bool			uvm_obj_page_dirty_p(struct vm_page *);
+void			uvm_obj_page_set_dirty(struct vm_page *);
+void			uvm_obj_page_clear_dirty(struct vm_page *);
+bool			uvm_obj_page_writeback_p(struct vm_page *);
+void			uvm_obj_page_set_writeback(struct vm_page *);
+void			uvm_obj_page_clear_writeback(struct vm_page *);
 
 /* uvm_page.c */
 int			uvm_availmem(bool);
@@ -826,7 +834,6 @@ int			uvn_findpages(struct uvm_object *,
 			    unsigned int *, struct vm_page **,
 			    struct uvm_page_array *, unsigned int);
 bool			uvn_text_p(struct uvm_object *);
-bool			uvn_clean_p(struct uvm_object *);
 bool			uvn_needs_writefault_p(struct uvm_object *);
 
 /* kern_malloc.c */

Index: src/sys/uvm/uvm_object.c
diff -u src/sys/uvm/uvm_object.c:1.23 src/sys/uvm/uvm_object.c:1.24
--- src/sys/uvm/uvm_object.c:1.23	Mon May 25 21:15:10 2020
+++ src/sys/uvm/uvm_object.c	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $	*/
+/*	$NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $	*/
 
 /*
  * Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.23 2020/05/25 21:15:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.24 2020/08/14 09:06:15 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -233,6 +233,103 @@ uvm_obj_unwirepages(struct uvm_object *u
 	rw_exit(uobj->vmobjlock);
 }
 
+static inline bool
+uvm_obj_notag_p(struct uvm_object *uobj, int tag)
+{
+
+	KASSERT(rw_lock_held(uobj->vmobjlock));
+	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages, tag);
+}
+
+bool
+uvm_obj_clean_p(struct uvm_object *uobj)
+{
+
+	return uvm_obj_notag_p(uobj, UVM_PAGE_DIRTY_TAG);
+}
+
+bool
+uvm_obj_nowriteback_p(struct uvm_object *uobj)
+{
+
+	return uvm_obj_notag_p(uobj, UVM_PAGE_WRITEBACK_TAG);
+}
+
+static inline bool
+uvm_obj_page_tag_p(struct vm_page *pg, int tag)
+{
+	struct uvm_object *uobj = pg->uobject;
+	int pgidx = pg->offset >> PAGE_SHIFT;
+
+	KASSERT(uobj != NULL);
+	KASSERT(rw_lock_held(uobj->vmobjlock));
+	return radix_tree_get_tag(&uobj->uo_pages, pgidx, tag) != 0;
+}
+
+static inline void
+uvm_obj_page_set_tag(struct vm_page *pg, int tag)
+{
+	struct uvm_object *uobj = pg->uobject;
+	int pgidx = pg->offset >> PAGE_SHIFT;
+
+	KASSERT(uobj != NULL);
+	KASSERT(rw_write_held(uobj->vmobjlock));
+	radix_tree_set_tag(&uobj->uo_pages, pgidx, tag);
+}
+
+static inline void
+uvm_obj_page_clear_tag(struct vm_page *pg, int tag)
+{
+	struct uvm_object *uobj = pg->uobject;
+	int pgidx = pg->offset >> PAGE_SHIFT;
+
+	KASSERT(uobj != NULL);
+	KASSERT(rw_write_held(uobj->vmobjlock));
+	radix_tree_clear_tag(&uobj->uo_pages, pgidx, tag);
+}
+
+bool
+uvm_obj_page_dirty_p(struct vm_page *pg)
+{
+
+	return uvm_obj_page_tag_p(pg, UVM_PAGE_DIRTY_TAG);
+}
+
+void
+uvm_obj_page_set_dirty(struct vm_page *pg)
+{
+
+	uvm_obj_page_set_tag(pg, UVM_PAGE_DIRTY_TAG);
+}
+
+void
+uvm_obj_page_clear_dirty(struct vm_page *pg)
+{
+
+	uvm_obj_page_clear_tag(pg, UVM_PAGE_DIRTY_TAG);
+}
+
+bool
+uvm_obj_page_writeback_p(struct vm_page *pg)
+{
+
+	return uvm_obj_page_tag_p(pg, UVM_PAGE_WRITEBACK_TAG);
+}
+
+void
+uvm_obj_page_set_writeback(struct vm_page *pg)
+{
+
+	uvm_obj_page_set_tag(pg, UVM_PAGE_WRITEBACK_TAG);
+}
+
+void
+uvm_obj_page_clear_writeback(struct vm_page *pg)
+{
+
+	uvm_obj_page_clear_tag(pg, UVM_PAGE_WRITEBACK_TAG);
+}
+
 #if defined(DDB) || defined(DEBUGPRINT)
 
 /*

Index: src/sys/uvm/uvm_object.h
diff -u src/sys/uvm/uvm_object.h:1.38 src/sys/uvm/uvm_object.h:1.39
--- src/sys/uvm/uvm_object.h:1.38	Sat Mar 14 20:45:23 2020
+++ src/sys/uvm/uvm_object.h	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.h,v 1.38 2020/03/14 20:45:23 ad Exp $	*/
+/*	$NetBSD: uvm_object.h,v 1.39 2020/08/14 09:06:15 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -104,7 +104,7 @@ extern const struct uvm_pagerops aobj_pa
 	(UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))
 
 #define	UVM_OBJ_IS_CLEAN(uobj)						\
-	(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
+	(UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
 
 /*
  * UVM_OBJ_NEEDS_WRITEFAULT: true if the uobj needs to detect modification.
@@ -114,7 +114,7 @@ extern const struct uvm_pagerops aobj_pa
  */
 
 #define	UVM_OBJ_NEEDS_WRITEFAULT(uobj)					\
-	(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
+	(UVM_OBJ_IS_VNODE(uobj) && uvm_obj_clean_p(uobj))
 
 #define	UVM_OBJ_IS_AOBJ(uobj)						\
 	((uobj)->pgops == &aobj_pager)

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.244 src/sys/uvm/uvm_page.c:1.245
--- src/sys/uvm/uvm_page.c:1.244	Thu Jul  9 05:57:15 2020
+++ src/sys/uvm/uvm_page.c	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $	*/
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.245 2020/08/14 09:06:15 chs Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -240,15 +240,17 @@ uvm_pageinsert_tree(struct uvm_object *u
 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
 	int error;
 
+	KASSERT(rw_write_held(uobj->vmobjlock));
+
 	error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
 	if (error != 0) {
 		return error;
 	}
 	if ((pg->flags & PG_CLEAN) == 0) {
-		radix_tree_set_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG);
+		uvm_obj_page_set_dirty(pg);
 	}
 	KASSERT(((pg->flags & PG_CLEAN) == 0) ==
-	    radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
+		uvm_obj_page_dirty_p(pg));
 	return 0;
 }
 
@@ -297,6 +299,8 @@ uvm_pageremove_tree(struct uvm_object *u
 {
 	struct vm_page *opg __unused;
 
+	KASSERT(rw_write_held(uobj->vmobjlock));
+
 	opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
 	KASSERT(pg == opg);
 }
@@ -1363,11 +1367,9 @@ uvm_pagereplace(struct vm_page *oldpg, s
 	KASSERT(pg == oldpg);
 	if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
 		if ((newpg->flags & PG_CLEAN) != 0) {
-			radix_tree_clear_tag(&uobj->uo_pages, idx,
-			    UVM_PAGE_DIRTY_TAG);
+			uvm_obj_page_clear_dirty(newpg);
 		} else {
-			radix_tree_set_tag(&uobj->uo_pages, idx,
-			    UVM_PAGE_DIRTY_TAG);
+			uvm_obj_page_set_dirty(newpg);
 		}
 	}
 	/*
@@ -1788,8 +1790,13 @@ struct vm_page *
 uvm_pagelookup(struct uvm_object *obj, voff_t off)
 {
 	struct vm_page *pg;
+	bool ddb = false;
+#ifdef DDB
+	extern int db_active;
+	ddb = db_active != 0;
+#endif
 
-	/* No - used from DDB. KASSERT(rw_lock_held(obj->vmobjlock)); */
+	KASSERT(ddb || rw_lock_held(obj->vmobjlock));
 
 	pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);
 

Index: src/sys/uvm/uvm_page_status.c
diff -u src/sys/uvm/uvm_page_status.c:1.5 src/sys/uvm/uvm_page_status.c:1.6
--- src/sys/uvm/uvm_page_status.c:1.5	Fri May 15 22:25:18 2020
+++ src/sys/uvm/uvm_page_status.c	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $	*/
+/*	$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $	*/
 
 /*-
  * Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -60,12 +60,11 @@ unsigned int
 uvm_pagegetdirty(struct vm_page *pg)
 {
 	struct uvm_object * const uobj __diagused = pg->uobject;
-	const uint64_t idx __diagused = pg->offset >> PAGE_SHIFT;
 
 	KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
 	KASSERT(uvm_page_owner_locked_p(pg, false));
 	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
-	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
+		uvm_obj_page_dirty_p(pg));
 	return pg->flags & (PG_CLEAN|PG_DIRTY);
 }
 
@@ -85,7 +84,6 @@ void
 uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
 {
 	struct uvm_object * const uobj = pg->uobject;
-	const uint64_t idx = pg->offset >> PAGE_SHIFT;
 	const unsigned int oldstatus = uvm_pagegetdirty(pg);
 	enum cpu_count base;
 
@@ -93,7 +91,7 @@ uvm_pagemarkdirty(struct vm_page *pg, un
 	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
 	KASSERT(uvm_page_owner_locked_p(pg, true));
 	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
-	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
+		uvm_obj_page_dirty_p(pg));
 
 	if (oldstatus == newstatus) {
 		return;
@@ -106,20 +104,17 @@ uvm_pagemarkdirty(struct vm_page *pg, un
 
 	if (uobj != NULL) {
 		if (newstatus == UVM_PAGE_STATUS_CLEAN) {
-			radix_tree_clear_tag(&uobj->uo_pages, idx,
-			    UVM_PAGE_DIRTY_TAG);
+			uvm_obj_page_clear_dirty(pg);
 		} else if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
 			/*
 			 * on first dirty page, mark the object dirty.
 			 * for vnodes this inserts to the syncer worklist.
 			 */
-			if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
-		            UVM_PAGE_DIRTY_TAG) &&
+			if (uvm_obj_clean_p(uobj) &&
 		            uobj->pgops->pgo_markdirty != NULL) {
 				(*uobj->pgops->pgo_markdirty)(uobj);
 			}
-			radix_tree_set_tag(&uobj->uo_pages, idx,
-			    UVM_PAGE_DIRTY_TAG);
+			uvm_obj_page_set_dirty(pg);
 		}
 	}
 	if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
@@ -131,7 +126,7 @@ uvm_pagemarkdirty(struct vm_page *pg, un
 	pg->flags &= ~(PG_CLEAN|PG_DIRTY);
 	pg->flags |= newstatus;
 	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
-	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
+		uvm_obj_page_dirty_p(pg));
 	if ((pg->flags & PG_STAT) != 0) {
 		if ((pg->flags & PG_SWAPBACKED) != 0) {
 			base = CPU_COUNT_ANONUNKNOWN;

Index: src/sys/uvm/uvm_pager.c
diff -u src/sys/uvm/uvm_pager.c:1.128 src/sys/uvm/uvm_pager.c:1.129
--- src/sys/uvm/uvm_pager.c:1.128	Thu Jul  9 05:57:15 2020
+++ src/sys/uvm/uvm_pager.c	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $	*/
+/*	$NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -391,10 +391,8 @@ uvm_aio_aiodone_pages(struct vm_page **p
 #endif /* defined(VMSWAP) */
 
 		if (write && uobj != NULL) {
-			KASSERT(radix_tree_get_tag(&uobj->uo_pages,
-			    pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
-			radix_tree_clear_tag(&uobj->uo_pages,
-			    pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG);
+			KASSERT(uvm_obj_page_writeback_p(pg));
+			uvm_obj_page_clear_writeback(pg);
 		}
 
 		/*

Index: src/sys/uvm/uvm_vnode.c
diff -u src/sys/uvm/uvm_vnode.c:1.115 src/sys/uvm/uvm_vnode.c:1.116
--- src/sys/uvm/uvm_vnode.c:1.115	Thu Jul  9 05:57:15 2020
+++ src/sys/uvm/uvm_vnode.c	Fri Aug 14 09:06:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -316,10 +316,9 @@ uvn_findpage(struct uvm_object *uobj, vo
 			KASSERT(
 			    ((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
 			    == (pg->offset < offset));
-			KASSERT(uvm_pagelookup(uobj, offset) == NULL
-			    || ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0
-			    && radix_tree_get_tag(&uobj->uo_pages,
-			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
+			KASSERT(uvm_pagelookup(uobj, offset) == NULL ||
+				((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
+				 !uvm_obj_page_dirty_p(pg)));
 			pg = NULL;
 			if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
 				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
@@ -501,14 +500,6 @@ uvn_text_p(struct uvm_object *uobj)
 	return (iflag & VI_EXECMAP) != 0;
 }
 
-bool
-uvn_clean_p(struct uvm_object *uobj)
-{
-
-	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
-            UVM_PAGE_DIRTY_TAG);
-}
-
 static void
 uvn_alloc_ractx(struct uvm_object *uobj)
 {
