Module Name:    src
Committed By:   yamt
Date:           Sat Nov 12 02:54:04 UTC 2011

Modified Files:
        src/sys/uvm [yamt-pagecache]: uvm.h uvm_extern.h uvm_meter.c uvm_page.c
            uvm_page_status.c

Log Message:
redo the page clean/dirty/unknown accounting separately for file and
anonymous pages


To generate a diff of this commit:
cvs rdiff -u -r1.62.4.1 -r1.62.4.2 src/sys/uvm/uvm.h
cvs rdiff -u -r1.176.2.1 -r1.176.2.2 src/sys/uvm/uvm_extern.h
cvs rdiff -u -r1.56.4.1 -r1.56.4.2 src/sys/uvm/uvm_meter.c
cvs rdiff -u -r1.178.2.3 -r1.178.2.4 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.1.2.2 -r1.1.2.3 src/sys/uvm/uvm_page_status.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm.h
diff -u src/sys/uvm/uvm.h:1.62.4.1 src/sys/uvm/uvm.h:1.62.4.2
--- src/sys/uvm/uvm.h:1.62.4.1	Fri Nov 11 10:34:24 2011
+++ src/sys/uvm/uvm.h	Sat Nov 12 02:54:04 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm.h,v 1.62.4.1 2011/11/11 10:34:24 yamt Exp $	*/
+/*	$NetBSD: uvm.h,v 1.62.4.2 2011/11/12 02:54:04 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -82,7 +82,12 @@ struct uvm_cpu {
 	int pages[PGFL_NQUEUES];	/* total of pages in page_free */
 	u_int emap_gen;			/* emap generation number */
 
-	int64_t pagestate[UVM_PAGE_NUM_STATUS];
+	/*
+	 * pagestate
+	 * 	[0] non-anonymous
+	 * 	[1] anonymous (PQ_SWAPBACKED)
+	 */
+	int64_t pagestate[2][UVM_PAGE_NUM_STATUS];
 };
 
 /*

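The core of the change is visible above: the per-CPU pagestate[]
counters gain a second dimension, with row 0 counting non-anonymous
(file-backed) pages and row 1 counting anonymous (PQ_SWAPBACKED)
pages.  The later hunks index the new array directly with a bool,
which converts to 0 or 1 in C.  A minimal sketch of the convention;
the helper itself is hypothetical, not part of the commit:

	/*
	 * Sketch only: adjust one per-CPU clean/dirty/unknown counter.
	 * "isanon" selects the row, matching how the hunks below index
	 * pagestate[] with the result of UVM_OBJ_IS_AOBJ().
	 */
	static inline void
	pagestate_adjust(struct uvm_cpu *ucpu, bool isanon,
	    unsigned int status, int delta)
	{

		KASSERT(status < UVM_PAGE_NUM_STATUS);
		ucpu->pagestate[isanon][status] += delta;
	}
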
Index: src/sys/uvm/uvm_extern.h
diff -u src/sys/uvm/uvm_extern.h:1.176.2.1 src/sys/uvm/uvm_extern.h:1.176.2.2
--- src/sys/uvm/uvm_extern.h:1.176.2.1	Fri Nov 11 10:34:24 2011
+++ src/sys/uvm/uvm_extern.h	Sat Nov 12 02:54:04 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_extern.h,v 1.176.2.1 2011/11/11 10:34:24 yamt Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.176.2.2 2011/11/12 02:54:04 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -459,6 +459,9 @@ struct uvmexp_sysctl {
 	int64_t mightdirtypages;
 	int64_t cleanpages;
 	int64_t dirtypages;
+	int64_t mightdirtyanonpages;
+	int64_t cleananonpages;
+	int64_t dirtyanonpages;
 };
 
 #ifdef _KERNEL

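The three new int64_t fields extend struct uvmexp_sysctl, the
structure exported to userland.  A hedged sketch of a consumer,
assuming the vm.uvmexp2 sysctl node (served by sysctl_vm_uvmexp2()
in the next file) and that <uvm/uvm_extern.h> provides the struct
to userland, as existing statistics tools do:

	#include <sys/param.h>
	#include <sys/sysctl.h>
	#include <uvm/uvm_extern.h>

	#include <inttypes.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct uvmexp_sysctl u;
		size_t len = sizeof(u);

		if (sysctlbyname("vm.uvmexp2", &u, &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return 1;
		}
		printf("file: clean=%" PRId64 " dirty=%" PRId64
		    " unknown=%" PRId64 "\n",
		    u.cleanpages, u.dirtypages, u.mightdirtypages);
		printf("anon: clean=%" PRId64 " dirty=%" PRId64
		    " unknown=%" PRId64 "\n",
		    u.cleananonpages, u.dirtyanonpages, u.mightdirtyanonpages);
		return 0;
	}
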
Index: src/sys/uvm/uvm_meter.c
diff -u src/sys/uvm/uvm_meter.c:1.56.4.1 src/sys/uvm/uvm_meter.c:1.56.4.2
--- src/sys/uvm/uvm_meter.c:1.56.4.1	Fri Nov 11 10:34:24 2011
+++ src/sys/uvm/uvm_meter.c	Sat Nov 12 02:54:04 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_meter.c,v 1.56.4.1 2011/11/11 10:34:24 yamt Exp $	*/
+/*	$NetBSD: uvm_meter.c,v 1.56.4.2 2011/11/12 02:54:04 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.1 2011/11/11 10:34:24 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.2 2011/11/12 02:54:04 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -179,9 +179,14 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
 	for (CPU_INFO_FOREACH(cii, ci)) {
 		struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;
 
-		u.mightdirtypages += ucpu->pagestate[UVM_PAGE_STATUS_UNKNOWN];
-		u.cleanpages += ucpu->pagestate[UVM_PAGE_STATUS_CLEAN];
-		u.dirtypages += ucpu->pagestate[UVM_PAGE_STATUS_DIRTY];
+		u.mightdirtypages +=
+		    ucpu->pagestate[0][UVM_PAGE_STATUS_UNKNOWN];
+		u.cleanpages += ucpu->pagestate[0][UVM_PAGE_STATUS_CLEAN];
+		u.dirtypages += ucpu->pagestate[0][UVM_PAGE_STATUS_DIRTY];
+		u.mightdirtyanonpages +=
+		    ucpu->pagestate[1][UVM_PAGE_STATUS_UNKNOWN];
+		u.cleananonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN];
+		u.dirtyanonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY];
 	}
 	node = *rnode;
 	node.sysctl_data = &u;

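Note that the handler sums the counters over all CPUs rather than
reading any single one.  The per-CPU entries are signed deltas: a
page counted up on one CPU and counted down on another leaves +1 on
the first and -1 on the second, so an individual pagestate[] entry
can be negative and only the total is meaningful.  A hypothetical
helper making that explicit (not part of the commit):

	static int64_t
	pagestate_total(bool isanon, unsigned int status)
	{
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		int64_t total = 0;

		for (CPU_INFO_FOREACH(cii, ci)) {
			struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;

			/* may be negative in isolation */
			total += ucpu->pagestate[isanon][status];
		}
		return total;
	}
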
Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.178.2.3 src/sys/uvm/uvm_page.c:1.178.2.4
--- src/sys/uvm/uvm_page.c:1.178.2.3	Fri Nov 11 10:34:24 2011
+++ src/sys/uvm/uvm_page.c	Sat Nov 12 02:54:04 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.178.2.3 2011/11/11 10:34:24 yamt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.178.2.4 2011/11/12 02:54:04 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.3 2011/11/11 10:34:24 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.4 2011/11/12 02:54:04 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -162,15 +162,15 @@ static void uvm_pageremove(struct uvm_ob
  * uvm_pageinsert: insert a page in the object.
  *
  * => caller must lock object
- * => caller must lock page queues
  * => call should have already set pg's object and offset pointers
- *    and bumped the version counter
  */
 
 static inline void
 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
     struct vm_page *where)
 {
+	const bool isvnode = UVM_OBJ_IS_VNODE(uobj);
+	const bool isaobj = UVM_OBJ_IS_AOBJ(uobj);
 
 	KASSERT(uobj == pg->uobject);
 	KASSERT(mutex_owned(uobj->vmobjlock));
@@ -178,32 +178,39 @@ uvm_pageinsert_list(struct uvm_object *u
 	KASSERT(where == NULL || (where->flags & PG_TABLED));
 	KASSERT(where == NULL || (where->uobject == uobj));
 
-	if (UVM_OBJ_IS_VNODE(uobj)) {
-		if (uobj->uo_npages == 0) {
-			struct vnode *vp = (struct vnode *)uobj;
+	if (isvnode || isaobj) {
+		struct uvm_cpu *ucpu;
+		const unsigned int status = uvm_pagegetdirty(pg);
+
+		kpreempt_disable();
+		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu->pagestate[isaobj][status]++;
+		kpreempt_enable();
+		if (isvnode) {
+			if (uobj->uo_npages == 0) {
+				struct vnode *vp = (struct vnode *)uobj;
 
-			vholdl(vp);
-		}
-		if (UVM_OBJ_IS_VTEXT(uobj)) {
-			atomic_inc_uint(&uvmexp.execpages);
+				vholdl(vp);
+			}
+			if (UVM_OBJ_IS_VTEXT(uobj)) {
+				atomic_inc_uint(&uvmexp.execpages);
+			} else {
+				atomic_inc_uint(&uvmexp.filepages);
+			}
 		} else {
-			atomic_inc_uint(&uvmexp.filepages);
+			atomic_inc_uint(&uvmexp.anonpages);
 		}
-	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
-		atomic_inc_uint(&uvmexp.anonpages);
 	}
 	pg->flags |= PG_TABLED;
 	uobj->uo_npages++;
 }
 
-
 static inline int
 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
 {
 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
 	int error;
 
-	KASSERT(uobj == pg->uobject);
 	error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
 	if (error != 0) {
 		return error;
@@ -222,6 +229,7 @@ uvm_pageinsert(struct uvm_object *uobj, 
 	int error;
 
 	KDASSERT(uobj != NULL);
+	KASSERT(uobj == pg->uobject);
 	error = uvm_pageinsert_tree(uobj, pg);
 	if (error != 0) {
 		KASSERT(error == ENOMEM);
@@ -235,33 +243,41 @@ uvm_pageinsert(struct uvm_object *uobj, 
  * uvm_page_remove: remove page from object.
  *
  * => caller must lock object
- * => caller must lock page queues
  */
 
 static inline void
 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
 {
+	const bool isvnode = UVM_OBJ_IS_VNODE(uobj);
+	const bool isaobj = UVM_OBJ_IS_AOBJ(uobj);
 
 	KASSERT(uobj == pg->uobject);
 	KASSERT(mutex_owned(uobj->vmobjlock));
 	KASSERT(pg->flags & PG_TABLED);
 
-	if (UVM_OBJ_IS_VNODE(uobj)) {
-		if (uobj->uo_npages == 1) {
-			struct vnode *vp = (struct vnode *)uobj;
+	if (isvnode || isaobj) {
+		struct uvm_cpu *ucpu;
+		const unsigned int status = uvm_pagegetdirty(pg);
+
+		kpreempt_disable();
+		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu->pagestate[isaobj][status]--;
+		kpreempt_enable();
+		if (isvnode) {
+			if (uobj->uo_npages == 1) {
+				struct vnode *vp = (struct vnode *)uobj;
 
-			holdrelel(vp);
-		}
-		if (UVM_OBJ_IS_VTEXT(uobj)) {
-			atomic_dec_uint(&uvmexp.execpages);
+				holdrelel(vp);
+			}
+			if (UVM_OBJ_IS_VTEXT(uobj)) {
+				atomic_dec_uint(&uvmexp.execpages);
+			} else {
+				atomic_dec_uint(&uvmexp.filepages);
+			}
 		} else {
-			atomic_dec_uint(&uvmexp.filepages);
+			atomic_dec_uint(&uvmexp.anonpages);
 		}
-	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
-		atomic_dec_uint(&uvmexp.anonpages);
 	}
-
-	/* object should be locked */
 	uobj->uo_npages--;
 	pg->flags &= ~PG_TABLED;
 	pg->uobject = NULL;
@@ -272,7 +288,6 @@ uvm_pageremove_tree(struct uvm_object *u
 {
 	struct vm_page *opg;
 
-	KASSERT(uobj == pg->uobject);
 	opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
 	KASSERT(pg == opg);
 }
@@ -282,8 +297,9 @@ uvm_pageremove(struct uvm_object *uobj, 
 {
 
 	KDASSERT(uobj != NULL);
-	uvm_pageremove_tree(uobj, pg);
+	KASSERT(uobj == pg->uobject);
 	uvm_pageremove_list(uobj, pg);
+	uvm_pageremove_tree(uobj, pg);
 }
 
 static void
@@ -423,15 +439,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 			if (atop(paddr) >= seg->avail_start &&
 			    atop(paddr) < seg->avail_end) {
 				uvmexp.npages++;
-				/*
-				 * add page to free pool
-				 *
-				 * adjust pagestate[] so that it won't go
-				 * negative.
-				 */
-				KASSERT(uvm_pagegetdirty(&seg->pgs[i])
-				    == UVM_PAGE_STATUS_UNKNOWN);
-				boot_cpu.pagestate[UVM_PAGE_STATUS_UNKNOWN]++;
+				/* add page to free pool */
 				uvm_pagefree(&seg->pgs[i]);
 			}
 		}
@@ -1316,7 +1324,9 @@ uvm_pagealloc_strat(struct uvm_object *o
 	 * otherwise we race with uvm_pglistalloc.
 	 */
 	pg->pqflags = 0;
-	ucpu->pagestate[UVM_PAGE_STATUS_CLEAN]++;
+	if (anon) {
+		ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]++;
+	}
 	mutex_spin_exit(&uvm_fpageqlock);
 	if (anon) {
 		anon->an_page = pg;
@@ -1348,7 +1358,9 @@ uvm_pagealloc_strat(struct uvm_object *o
 		 * A zero'd page is not clean.  If we got a page not already
 		 * zero'd, then we have to zero it ourselves.
 		 */
-		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
+		if (obj != NULL || anon != NULL) {
+			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
+		}
 		if (zeroit)
 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
 	}
@@ -1523,6 +1535,11 @@ uvm_pagefree(struct vm_page *pg)
 			} else {
 				pg->pqflags &= ~PQ_ANON;
 				atomic_dec_uint(&uvmexp.anonpages);
+				status = uvm_pagegetdirty(pg);
+				kpreempt_disable();
+				ucpu = curcpu()->ci_data.cpu_uvm;
+				ucpu->pagestate[1][status]--;
+				kpreempt_enable();
 			}
 			pg->uanon->an_page = NULL;
 			pg->uanon = NULL;
@@ -1552,6 +1569,11 @@ uvm_pagefree(struct vm_page *pg)
 	} else if (pg->uanon != NULL) {
 		pg->uanon->an_page = NULL;
 		atomic_dec_uint(&uvmexp.anonpages);
+		status = uvm_pagegetdirty(pg);
+		kpreempt_disable();
+		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu->pagestate[1][status]--;
+		kpreempt_enable();
 	}
 
 	/*
@@ -1578,7 +1600,6 @@ uvm_pagefree(struct vm_page *pg)
 	color = VM_PGCOLOR_BUCKET(pg);
 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
 
-	status = uvm_pagegetdirty(pg);
 #ifdef DEBUG
 	pg->uobject = (void *)0xdeadbeef;
 	pg->uanon = (void *)0xdeadbeef;
@@ -1610,7 +1631,6 @@ uvm_pagefree(struct vm_page *pg)
 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
 		ucpu->page_idle_zero = vm_page_zero_enable;
 	}
-	ucpu->pagestate[status]--;
 
 	mutex_spin_exit(&uvm_fpageqlock);
 }

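Two things are worth noting in the uvm_page.c hunks.  First, the
counter lifecycle as it reads from the diff: anonymous pages are
counted when allocated to an anon and uncounted in uvm_pagefree();
object pages (vnode or aobj) are counted in uvm_pageinsert_list()
and uncounted in uvm_pageremove_list(); pages owned by neither are
never counted, which is why uvm_pagealloc_strat() now marks a page
dirty only when it has an owner, and why the boot-time pagestate[]
pre-adjustment in uvm_page_init() can go away.  Second, the update
pattern: the counters are touched either under kpreempt_disable()
or while holding the spin lock uvm_fpageqlock, both of which keep
the thread on one CPU.  A recap of the pattern (sketch only):

	/*
	 * Disabling preemption keeps curcpu() stable across the
	 * non-atomic read-modify-write, so the update cannot be
	 * split across CPUs.
	 */
	kpreempt_disable();
	ucpu = curcpu()->ci_data.cpu_uvm;
	ucpu->pagestate[isanon][status]++;	/* or -- on removal */
	kpreempt_enable();
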
Index: src/sys/uvm/uvm_page_status.c
diff -u src/sys/uvm/uvm_page_status.c:1.1.2.2 src/sys/uvm/uvm_page_status.c:1.1.2.3
--- src/sys/uvm/uvm_page_status.c:1.1.2.2	Fri Nov 11 10:34:24 2011
+++ src/sys/uvm/uvm_page_status.c	Sat Nov 12 02:54:04 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_status.c,v 1.1.2.2 2011/11/11 10:34:24 yamt Exp $	*/
+/*	$NetBSD: uvm_page_status.c,v 1.1.2.3 2011/11/12 02:54:04 yamt Exp $	*/
 
 /*-
  * Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.2 2011/11/11 10:34:24 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.3 2011/11/12 02:54:04 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -68,15 +68,15 @@ uvm_pagegetdirty(struct vm_page *pg)
 }
 
 static void
-stat_update(unsigned int oldstatus, unsigned int newstatus)
+stat_update(bool isanon, unsigned int oldstatus, unsigned int newstatus)
 {
 	struct uvm_cpu *ucpu;
 
 	KASSERT(oldstatus != newstatus);
 	kpreempt_disable();
 	ucpu = curcpu()->ci_data.cpu_uvm;
-	ucpu->pagestate[oldstatus]--;
-	ucpu->pagestate[newstatus]++;
+	ucpu->pagestate[isanon][oldstatus]--;
+	ucpu->pagestate[isanon][newstatus]++;
 	kpreempt_enable();
 }
 
@@ -92,6 +92,7 @@ uvm_pagemarkdirty(struct vm_page *pg, un
 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
 	const unsigned int oldstatus = uvm_pagegetdirty(pg);
 
+	KASSERT(uobj != NULL || pg->uanon != NULL);
 	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
 	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
 	KASSERT(uvm_page_locked_p(pg));
@@ -124,7 +125,16 @@ uvm_pagemarkdirty(struct vm_page *pg, un
 	pg->flags |= newstatus;
 	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
 	    radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
-	stat_update(oldstatus, newstatus);
+	if (uobj != NULL) {
+		const bool isvnode = UVM_OBJ_IS_VNODE(uobj);
+		const bool isaobj = UVM_OBJ_IS_AOBJ(uobj);
+
+		if (isvnode || isaobj) {
+			stat_update(isaobj, oldstatus, newstatus);
+		}
+	} else {
+		stat_update(true, oldstatus, newstatus);
+	}
 }
 
 /*

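Finally, stat_update() now takes the row explicitly, and
uvm_pagemarkdirty() derives it from the page's owner; the new
KASSERT guarantees an owner exists.  The resulting classification,
written out as a hypothetical helper (the committed code inlines
this logic):

	/*
	 * Hypothetical helper mirroring uvm_pagemarkdirty()'s routing.
	 * Returns true iff the page participates in the accounting,
	 * and sets *isanonp to the pagestate[] row to use.
	 */
	static bool
	pagestate_classify(struct uvm_object *uobj, bool *isanonp)
	{

		if (uobj == NULL) {
			/* owned by an anon only */
			*isanonp = true;
			return true;
		}
		if (UVM_OBJ_IS_AOBJ(uobj)) {
			*isanonp = true;	/* swap-backed */
			return true;
		}
		if (UVM_OBJ_IS_VNODE(uobj)) {
			*isanonp = false;	/* file-backed */
			return true;
		}
		return false;		/* other pagers: not counted */
	}

This matches the insert/remove accounting in uvm_page.c, which
likewise only counts vnode and aobj pages.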