Module Name:    src
Committed By:   ad
Date:           Sat Dec 21 14:41:44 UTC 2019

Modified Files:
        src/sys/uvm: uvm_page.c uvm_page.h uvm_pager.c uvm_pglist.c
            uvm_physseg.c uvm_vnode.c

Log Message:
- Rename VM_PGCOLOR_BUCKET() to VM_PGCOLOR().  I want to reuse "bucket" for
  something else soon, and the shorter name better matches what this macro
  actually does.

- Add inlines to set/get locator values in the unused lower bits of
  pg->phys_addr.  Begin by using them to cache the freelist index, because
  computing it is expensive and shows up prominently during profiling (a
  standalone sketch of the idea follows below).  Discussed on tech-kern.
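
A minimal standalone sketch of the locator-bit idea, under the assumption
of a page size of at least 1kB: a page-aligned physical address then has
its low 10 bits clear, so those bits can carry small per-page values
without disturbing the address itself.  The names below (set_locators,
FL_BITS, FL_MASK, BUCKET_MASK) are invented for illustration only; the
real accessors are the uvm_page_get/set_freelist() and
uvm_page_get/set_bucket() inlines in the uvm_page.h diff further down.

    /* Illustration only; the layout mirrors the new uvm_page.h comment. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FL_BITS     5       /* bits 0-4: freelist index */
    #define FL_MASK     0x1fu
    #define BUCKET_MASK 0x3e0u  /* bits 5-9: bucket */

    /* Pack the two locators into the low bits of a page-aligned address. */
    static inline uint64_t
    set_locators(uint64_t pa, unsigned fl, unsigned bucket)
    {
        assert(fl < 32 && bucket < 32);
        return (pa & ~(uint64_t)(FL_MASK | BUCKET_MASK)) |
            ((uint64_t)bucket << FL_BITS) | fl;
    }

    int
    main(void)
    {
        uint64_t v = set_locators(0x12345000, 3, 7);

        /* Mask the locators back out to recover each field. */
        printf("freelist=%u bucket=%u pa=0x%llx\n",
            (unsigned)(v & FL_MASK),
            (unsigned)((v & BUCKET_MASK) >> FL_BITS),
            (unsigned long long)(v & ~(uint64_t)(FL_MASK | BUCKET_MASK)));
        return 0;
    }

The kernel version additionally asserts that the cached freelist index
matches uvm_page_lookup_freelist(), so the cache cannot silently go stale.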


To generate a diff of this commit:
cvs rdiff -u -r1.208 -r1.209 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.87 -r1.88 src/sys/uvm/uvm_page.h
cvs rdiff -u -r1.116 -r1.117 src/sys/uvm/uvm_pager.c
cvs rdiff -u -r1.75 -r1.76 src/sys/uvm/uvm_pglist.c
cvs rdiff -u -r1.12 -r1.13 src/sys/uvm/uvm_physseg.c
cvs rdiff -u -r1.103 -r1.104 src/sys/uvm/uvm_vnode.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.208 src/sys/uvm/uvm_page.c:1.209
--- src/sys/uvm/uvm_page.c:1.208	Sat Dec 21 14:33:18 2019
+++ src/sys/uvm/uvm_page.c	Sat Dec 21 14:41:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.208 2019/12/21 14:33:18 ad Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.209 2019/12/21 14:41:44 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.208 2019/12/21 14:33:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.209 2019/12/21 14:41:44 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -672,10 +672,10 @@ uvm_page_recolor(int newncolors)
 					LIST_REMOVE(pg, pageq.list); /* global */
 					LIST_REMOVE(pg, listq.list); /* cpu */
 					LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
-					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
+					    VM_PGCOLOR(pg)].pgfl_queues[
 					    i], pg, pageq.list);
 					LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
-					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
+					    VM_PGCOLOR(pg)].pgfl_queues[
 					    i], pg, listq.list);
 				}
 			}
@@ -1282,8 +1282,8 @@ uvm_pagefree(struct vm_page *pg)
 	 */
 
 	iszero = (pg->flags & PG_ZERO);
-	index = uvm_page_lookup_freelist(pg);
-	color = VM_PGCOLOR_BUCKET(pg);
+	index = uvm_page_get_freelist(pg);
+	color = VM_PGCOLOR(pg);
 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
 
 #ifdef DEBUG
@@ -1805,6 +1805,8 @@ uvm_page_printit(struct vm_page *pg, boo
 	    pgbuf, pg->pqflags, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
 	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
 	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
+	(*pr)("  bucket=%d freelist=%d\n",
+	    uvm_page_get_bucket(pg), uvm_page_get_freelist(pg));
 #if defined(UVM_PAGE_TRKOWN)
 	if (pg->flags & PG_BUSY)
 		(*pr)("  owning process = %d, tag=%s\n",
@@ -1841,8 +1843,8 @@ uvm_page_printit(struct vm_page *pg, boo
 
 	/* cross-verify page queue */
 	if (pg->flags & PG_FREE) {
-		int fl = uvm_page_lookup_freelist(pg);
-		int color = VM_PGCOLOR_BUCKET(pg);
+		int fl = uvm_page_get_freelist(pg);
+		int color = VM_PGCOLOR(pg);
 		pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
 		    ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
 	} else {

Index: src/sys/uvm/uvm_page.h
diff -u src/sys/uvm/uvm_page.h:1.87 src/sys/uvm/uvm_page.h:1.88
--- src/sys/uvm/uvm_page.h:1.87	Sun Dec 15 21:11:35 2019
+++ src/sys/uvm/uvm_page.h	Sat Dec 21 14:41:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.87 2019/12/15 21:11:35 ad Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.88 2019/12/21 14:41:44 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -343,16 +343,59 @@ int uvm_direct_process(struct vm_page **
 #endif
 
 /*
- * Compute the page color bucket for a given page.
+ * Compute the page color for a given page.
  */
-#define	VM_PGCOLOR_BUCKET(pg) \
+#define	VM_PGCOLOR(pg) \
 	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
-
 #define	PHYS_TO_VM_PAGE(pa)	uvm_phys_to_vm_page(pa)
-
 #define VM_PAGE_IS_FREE(entry)  ((entry)->flags & PG_FREE)
 #define	VM_FREE_PAGE_TO_CPU(pg)	((struct uvm_cpu *)((uintptr_t)pg->offset))
 
+/*
+ * Use the lower 10 bits of pg->phys_addr to cache some locators for
+ * the page.  This implies that the smallest possible page size is 1kB, and
+ * that nobody should use pg->phys_addr directly (use VM_PAGE_TO_PHYS()).
+ * 
+ * - 5 bits for the freelist index, because uvm_page_lookup_freelist()
+ *   traverses an rbtree and therefore features prominently in traces
+ *   captured during performance tests.  It would probably be more useful to
+ *   cache physseg index here because freelist can be inferred from physseg,
+ *   but it requires changes to allocation for UVM_HOTPLUG, so for now we'll
+ *   go with freelist.
+ *
+ * - 5 bits for "bucket", a way for us to categorise pages further as
+ *   needed (e.g. NUMA node).
+ *
+ * None of this is set in stone; it can be adjusted as needed.
+ */
+static inline unsigned
+uvm_page_get_freelist(struct vm_page *pg)
+{
+	unsigned fl = pg->phys_addr & 0x1f;
+	KASSERT(fl == (unsigned)uvm_page_lookup_freelist(pg));
+	return fl;
+}
+
+static inline unsigned
+uvm_page_get_bucket(struct vm_page *pg)
+{
+	return (pg->phys_addr & 0x3e0) >> 5;
+}
+
+static inline void
+uvm_page_set_freelist(struct vm_page *pg, unsigned fl)
+{
+	KASSERT(fl < 32);
+	pg->phys_addr = (pg->phys_addr & ~0x1f) | fl;
+}
+
+static inline void
+uvm_page_set_bucket(struct vm_page *pg, unsigned b)
+{
+	KASSERT(b < 32);
+	pg->phys_addr = (pg->phys_addr & ~0x3e0) | (b << 5);
+}
+
 #ifdef DEBUG
 void uvm_pagezerocheck(struct vm_page *);
 #endif /* DEBUG */

Index: src/sys/uvm/uvm_pager.c
diff -u src/sys/uvm/uvm_pager.c:1.116 src/sys/uvm/uvm_pager.c:1.117
--- src/sys/uvm/uvm_pager.c:1.116	Sat Dec 14 21:36:00 2019
+++ src/sys/uvm/uvm_pager.c	Sat Dec 21 14:41:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.c,v 1.116 2019/12/14 21:36:00 ad Exp $	*/
+/*	$NetBSD: uvm_pager.c,v 1.117 2019/12/21 14:41:44 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.116 2019/12/14 21:36:00 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.117 2019/12/21 14:41:44 ad Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -176,7 +176,7 @@ uvm_pagermapin(struct vm_page **pps, int
 	struct vm_page *pp;
 	vm_prot_t prot;
 	const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
-	const u_int first_color = VM_PGCOLOR_BUCKET(*pps);
+	const u_int first_color = VM_PGCOLOR(*pps);
 	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);
 
 	UVMHIST_LOG(maphist,"(pps=0x%#jx, npages=%jd, first_color=%ju)",

Index: src/sys/uvm/uvm_pglist.c
diff -u src/sys/uvm/uvm_pglist.c:1.75 src/sys/uvm/uvm_pglist.c:1.76
--- src/sys/uvm/uvm_pglist.c:1.75	Sat Dec 21 13:00:25 2019
+++ src/sys/uvm/uvm_pglist.c	Sat Dec 21 14:41:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pglist.c,v 1.75 2019/12/21 13:00:25 ad Exp $	*/
+/*	$NetBSD: uvm_pglist.c,v 1.76 2019/12/21 14:41:44 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.75 2019/12/21 13:00:25 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.76 2019/12/21 14:41:44 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -87,8 +87,8 @@ uvm_pglist_add(struct vm_page *pg, struc
 #error uvm_pglistalloc needs to be updated
 #endif
 
-	free_list = uvm_page_lookup_freelist(pg);
-	color = VM_PGCOLOR_BUCKET(pg);
+	free_list = uvm_page_get_freelist(pg);
+	color = VM_PGCOLOR(pg);
 	pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
 #ifdef UVMDEBUG
 	struct vm_page *tp;
@@ -582,8 +582,8 @@ uvm_pglistfree(struct pglist *list)
 		if (iszero)
 			uvm_pagezerocheck(pg);
 #endif /* DEBUG */
-		index = uvm_page_lookup_freelist(pg);
-		color = VM_PGCOLOR_BUCKET(pg);
+		index = uvm_page_get_freelist(pg);
+		color = VM_PGCOLOR(pg);
 		queue = iszero ? PGFL_ZEROS : PGFL_UNKNOWN;
 		pg->offset = (uintptr_t)ucpu;
 		LIST_INSERT_HEAD(&uvm.page_free[index].pgfl_buckets[color].

Index: src/sys/uvm/uvm_physseg.c
diff -u src/sys/uvm/uvm_physseg.c:1.12 src/sys/uvm/uvm_physseg.c:1.13
--- src/sys/uvm/uvm_physseg.c:1.12	Fri Dec 20 19:03:17 2019
+++ src/sys/uvm/uvm_physseg.c	Sat Dec 21 14:41:44 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_physseg.c,v 1.12 2019/12/20 19:03:17 ad Exp $ */
+/* $NetBSD: uvm_physseg.c,v 1.13 2019/12/21 14:41:44 ad Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -96,7 +96,6 @@ struct uvm_physseg {
 	struct  extent *ext;		/* extent(9) structure to manage pgs[] */
 	int	free_list;		/* which free list they belong on */
 	u_int	start_hint;		/* start looking for free pages here */
-					/* protected by uvm_fpageqlock */
 #ifdef __HAVE_PMAP_PHYSSEG
 	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
 #endif
@@ -1103,15 +1102,17 @@ uvm_physseg_init_seg(uvm_physseg_t upm, 
 	/* init and free vm_pages (we've already zeroed them) */
 	paddr = ctob(seg->start);
 	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
-		seg->pgs[i].phys_addr = paddr;
+		pg = &seg->pgs[i];
+		pg->phys_addr = paddr;
 #ifdef __HAVE_VM_PAGE_MD
-		VM_MDPAGE_INIT(&seg->pgs[i]);
+		VM_MDPAGE_INIT(pg);
 #endif
 		if (atop(paddr) >= seg->avail_start &&
 		    atop(paddr) < seg->avail_end) {
 			uvmexp.npages++;
 			/* add page to free pool */
-			pg = &seg->pgs[i];
+			uvm_page_set_freelist(pg,
+			    uvm_page_lookup_freelist(pg));
 			/* Disable LOCKDEBUG: too many and too early. */
 			mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
 			uvm_pagefree(pg);

Index: src/sys/uvm/uvm_vnode.c
diff -u src/sys/uvm/uvm_vnode.c:1.103 src/sys/uvm/uvm_vnode.c:1.104
--- src/sys/uvm/uvm_vnode.c:1.103	Sat Oct 28 00:37:13 2017
+++ src/sys/uvm/uvm_vnode.c	Sat Dec 21 14:41:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.103 2017/10/28 00:37:13 pgoyette Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.104 2019/12/21 14:41:44 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.103 2017/10/28 00:37:13 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.104 2019/12/21 14:41:44 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -272,7 +272,7 @@ uvn_findpage(struct uvm_object *uobj, vo
 				continue;
 			}
 			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
-			    (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0);
+			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
 			break;
 		} else if (flags & UFP_NOCACHE) {
 			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
@@ -287,7 +287,7 @@ uvn_findpage(struct uvm_object *uobj, vo
 			}
 			pg->flags |= PG_WANTED;
 			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
-			    (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0);
+			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
 			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
 					    "uvn_fp2", 0);
 			mutex_enter(uobj->vmobjlock);
@@ -315,7 +315,7 @@ uvn_findpage(struct uvm_object *uobj, vo
 		pg->flags |= PG_BUSY;
 		UVM_PAGE_OWN(pg, "uvn_findpage");
 		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
-		    (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0);
+		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
 		break;
 	}
 	*pgp = pg;
