Module Name: src Committed By: yamt Date: Sat Nov 26 15:19:07 UTC 2011
Modified Files: src/sys/miscfs/genfs [yamt-pagecache]: genfs_io.c src/sys/uvm [yamt-pagecache]: uvm_aobj.c uvm_object.c uvm_page_array.c uvm_page_array.h uvm_vnode.c Log Message: - uvm_page_array_fill: add some more parameters - uvn_findpages: use gang-lookup - genfs_putpages: re-enable backward clustering - mechanical changes after the recent radixtree.h api changes To generate a diff of this commit: cvs rdiff -u -r1.53.2.3 -r1.53.2.4 src/sys/miscfs/genfs/genfs_io.c cvs rdiff -u -r1.116.2.5 -r1.116.2.6 src/sys/uvm/uvm_aobj.c cvs rdiff -u -r1.11.2.2 -r1.11.2.3 src/sys/uvm/uvm_object.c cvs rdiff -u -r1.1.2.2 -r1.1.2.3 src/sys/uvm/uvm_page_array.c cvs rdiff -u -r1.1.2.3 -r1.1.2.4 src/sys/uvm/uvm_page_array.h cvs rdiff -u -r1.97.2.1 -r1.97.2.2 src/sys/uvm/uvm_vnode.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/miscfs/genfs/genfs_io.c diff -u src/sys/miscfs/genfs/genfs_io.c:1.53.2.3 src/sys/miscfs/genfs/genfs_io.c:1.53.2.4 --- src/sys/miscfs/genfs/genfs_io.c:1.53.2.3 Sun Nov 20 10:49:20 2011 +++ src/sys/miscfs/genfs/genfs_io.c Sat Nov 26 15:19:06 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: genfs_io.c,v 1.53.2.3 2011/11/20 10:49:20 yamt Exp $ */ +/* $NetBSD: genfs_io.c,v 1.53.2.4 2011/11/26 15:19:06 yamt Exp $ */ /* * Copyright (c) 1982, 1986, 1989, 1993 @@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.3 2011/11/20 10:49:20 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.4 2011/11/26 15:19:06 yamt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -967,7 +967,8 @@ retry: for (;;) { bool protected; - pg = uvm_page_array_fill_and_peek(&a, uobj, off, dirtyonly); + pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, + dirtyonly ? UVM_PAGE_ARRAY_FILL_DIRTYONLY : 0); if (pg == NULL) { break; } @@ -1093,7 +1094,7 @@ retry: pg->flags |= PG_BUSY; UVM_PAGE_OWN(pg, "genfs_putpages"); -#if 0 /* XXX notyet */ +#if 1 /* XXX notyet */ /* * first look backward. */ @@ -1139,7 +1140,10 @@ retry: */ nextpg = uvm_page_array_fill_and_peek(&a, uobj, - pgs[npages - 1]->offset + PAGE_SIZE, true); + pgs[npages - 1]->offset + PAGE_SIZE, + maxpages - npages, + UVM_PAGE_ARRAY_FILL_DIRTYONLY | + UVM_PAGE_ARRAY_FILL_DENSE); if (nextpg == NULL) { break; } Index: src/sys/uvm/uvm_aobj.c diff -u src/sys/uvm/uvm_aobj.c:1.116.2.5 src/sys/uvm/uvm_aobj.c:1.116.2.6 --- src/sys/uvm/uvm_aobj.c:1.116.2.5 Fri Nov 18 00:57:33 2011 +++ src/sys/uvm/uvm_aobj.c Sat Nov 26 15:19:06 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_aobj.c,v 1.116.2.5 2011/11/18 00:57:33 yamt Exp $ */ +/* $NetBSD: uvm_aobj.c,v 1.116.2.6 2011/11/26 15:19:06 yamt Exp $ */ /* * Copyright (c) 1998 Chuck Silvers, Charles D. 
Cranor and @@ -38,7 +38,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.5 2011/11/18 00:57:33 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.6 2011/11/26 15:19:06 yamt Exp $"); #include "opt_uvmhist.h" @@ -663,7 +663,7 @@ uao_detach_locked(struct uvm_object *uob uvm_page_array_init(&a); mutex_enter(&uvm_pageqlock); - while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, false)) + while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, 0, 0)) != NULL) { uvm_page_array_advance(&a); pmap_page_protect(pg, VM_PROT_NONE); @@ -756,7 +756,7 @@ uao_put(struct uvm_object *uobj, voff_t /* locked: uobj */ uvm_page_array_init(&a); curoff = start; - while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, false)) != + while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, 0, 0)) != NULL) { if (pg->offset >= stop) { break; Index: src/sys/uvm/uvm_object.c diff -u src/sys/uvm/uvm_object.c:1.11.2.2 src/sys/uvm/uvm_object.c:1.11.2.3 --- src/sys/uvm/uvm_object.c:1.11.2.2 Sun Nov 6 22:05:00 2011 +++ src/sys/uvm/uvm_object.c Sat Nov 26 15:19:06 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_object.c,v 1.11.2.2 2011/11/06 22:05:00 yamt Exp $ */ +/* $NetBSD: uvm_object.c,v 1.11.2.3 2011/11/26 15:19:06 yamt Exp $ */ /* * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc. 
@@ -37,7 +37,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11.2.2 2011/11/06 22:05:00 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11.2.3 2011/11/26 15:19:06 yamt Exp $"); #include "opt_ddb.h" @@ -256,7 +256,7 @@ uvm_object_printit(struct uvm_object *uo (*pr)(" PAGES <pg,offset>:\n "); uvm_page_array_init(&a); off = 0; - while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, false)) + while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0)) != NULL) { cnt++; (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset); Index: src/sys/uvm/uvm_page_array.c diff -u src/sys/uvm/uvm_page_array.c:1.1.2.2 src/sys/uvm/uvm_page_array.c:1.1.2.3 --- src/sys/uvm/uvm_page_array.c:1.1.2.2 Sun Nov 6 22:04:07 2011 +++ src/sys/uvm/uvm_page_array.c Sat Nov 26 15:19:06 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_page_array.c,v 1.1.2.2 2011/11/06 22:04:07 yamt Exp $ */ +/* $NetBSD: uvm_page_array.c,v 1.1.2.3 2011/11/26 15:19:06 yamt Exp $ */ /*- * Copyright (c)2011 YAMAMOTO Takashi, @@ -27,7 +27,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.1.2.2 2011/11/06 22:04:07 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.1.2.3 2011/11/26 15:19:06 yamt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -115,6 +115,9 @@ uvm_page_array_advance(struct uvm_page_a * return 0 on success. in that case, cache the result in the array * so that they will be picked by later uvm_page_array_peek. * + * nwant is a number of pages to fetch. a caller should consider it a hint. + * nwant == 0 means a caller has no specific idea. + * * return ENOENT if no pages are found. * * called with object lock held. 
@@ -122,25 +125,33 @@ uvm_page_array_advance(struct uvm_page_a int uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj, - voff_t off, bool dirtyonly) + voff_t off, unsigned int nwant, unsigned int flags) { unsigned int npages; #if defined(DEBUG) unsigned int i; #endif /* defined(DEBUG) */ - const unsigned int maxpages = __arraycount(ar->ar_pages); + unsigned int maxpages = __arraycount(ar->ar_pages); + const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0; + const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0; + if (nwant != 0 && nwant < maxpages) { + maxpages = nwant; + } KASSERT(mutex_owned(uobj->vmobjlock)); KASSERT(uvm_page_array_peek(ar) == NULL); - if (dirtyonly) { - npages = radix_tree_gang_lookup_tagged_node( - &uobj->uo_pages, off >> PAGE_SHIFT, - (void **)ar->ar_pages, maxpages, - UVM_PAGE_DIRTY_TAG); + if ((flags & UVM_PAGE_ARRAY_FILL_DIRTYONLY) != 0) { + npages = + (backward ? radix_tree_gang_lookup_tagged_node_reverse : + radix_tree_gang_lookup_tagged_node)( + &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages, + maxpages, dense, UVM_PAGE_DIRTY_TAG); } else { - npages = radix_tree_gang_lookup_node( - &uobj->uo_pages, off >> PAGE_SHIFT, - (void **)ar->ar_pages, maxpages); + npages = + (backward ? 
radix_tree_gang_lookup_node_reverse : + radix_tree_gang_lookup_node)( + &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages, + maxpages, dense); } if (npages == 0) { uvm_page_array_clear(ar); @@ -170,7 +181,7 @@ uvm_page_array_fill(struct uvm_page_arra struct vm_page * uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj, - voff_t off, bool dirtyonly) + voff_t off, unsigned int nwant, unsigned int flags) { struct vm_page *pg; int error; @@ -179,7 +190,7 @@ uvm_page_array_fill_and_peek(struct uvm_ if (pg != NULL) { return pg; } - error = uvm_page_array_fill(a, uobj, off, dirtyonly); + error = uvm_page_array_fill(a, uobj, off, nwant, flags); if (error != 0) { return NULL; } Index: src/sys/uvm/uvm_page_array.h diff -u src/sys/uvm/uvm_page_array.h:1.1.2.3 src/sys/uvm/uvm_page_array.h:1.1.2.4 --- src/sys/uvm/uvm_page_array.h:1.1.2.3 Mon Nov 14 14:22:28 2011 +++ src/sys/uvm/uvm_page_array.h Sat Nov 26 15:19:06 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_page_array.h,v 1.1.2.3 2011/11/14 14:22:28 yamt Exp $ */ +/* $NetBSD: uvm_page_array.h,v 1.1.2.4 2011/11/26 15:19:06 yamt Exp $ */ /*- * Copyright (c)2011 YAMAMOTO Takashi, @@ -64,8 +64,15 @@ void uvm_page_array_clear(struct uvm_pag struct vm_page *uvm_page_array_peek(struct uvm_page_array *); void uvm_page_array_advance(struct uvm_page_array *); int uvm_page_array_fill(struct uvm_page_array *, struct uvm_object *, - voff_t, bool); + voff_t, unsigned int, unsigned int); struct vm_page *uvm_page_array_fill_and_peek(struct uvm_page_array *, - struct uvm_object *, voff_t, bool); + struct uvm_object *, voff_t, unsigned int, unsigned int); + +/* + * flags for uvm_page_array_fill and uvm_page_array_fill_and_peek + */ +#define UVM_PAGE_ARRAY_FILL_DIRTYONLY 1 /* skip known-clean pages */ +#define UVM_PAGE_ARRAY_FILL_DENSE 2 /* stop on a hole */ +#define UVM_PAGE_ARRAY_FILL_BACKWARD 4 /* descend order */ #endif /* defined(_UVM_UVM_ARRAY_H_) */ Index: src/sys/uvm/uvm_vnode.c diff -u 
src/sys/uvm/uvm_vnode.c:1.97.2.1 src/sys/uvm/uvm_vnode.c:1.97.2.2 --- src/sys/uvm/uvm_vnode.c:1.97.2.1 Wed Nov 2 21:54:01 2011 +++ src/sys/uvm/uvm_vnode.c Sat Nov 26 15:19:06 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_vnode.c,v 1.97.2.1 2011/11/02 21:54:01 yamt Exp $ */ +/* $NetBSD: uvm_vnode.c,v 1.97.2.2 2011/11/26 15:19:06 yamt Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -45,7 +45,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.1 2011/11/02 21:54:01 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.2 2011/11/26 15:19:06 yamt Exp $"); #include "opt_uvmhist.h" @@ -64,6 +64,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c, #include <uvm/uvm.h> #include <uvm/uvm_readahead.h> +#include <uvm/uvm_page_array.h> /* * functions @@ -76,7 +77,7 @@ static int uvn_put(struct uvm_object *, static void uvn_reference(struct uvm_object *); static int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, - int); + int, struct uvm_page_array *a, unsigned int); /* * master pager structure @@ -196,12 +197,15 @@ uvn_findpages(struct uvm_object *uobj, v struct vm_page **pgs, int flags) { int i, count, found, npages, rv; + struct uvm_page_array a; + uvm_page_array_init(&a); count = found = 0; npages = *npagesp; if (flags & UFP_BACKWARD) { for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) { - rv = uvn_findpage(uobj, offset, &pgs[i], flags); + rv = uvn_findpage(uobj, offset, &pgs[i], flags, &a, + npages - i); if (rv == 0) { if (flags & UFP_DIRTYONLY) break; @@ -211,7 +215,8 @@ uvn_findpages(struct uvm_object *uobj, v } } else { for (i = 0; i < npages; i++, offset += PAGE_SIZE) { - rv = uvn_findpage(uobj, offset, &pgs[i], flags); + rv = uvn_findpage(uobj, offset, &pgs[i], flags, &a, + npages - i); if (rv == 0) { if (flags & UFP_DIRTYONLY) break; @@ -220,16 +225,21 @@ uvn_findpages(struct uvm_object *uobj, v count++; } } + uvm_page_array_fini(&a); *npagesp = count; return (found); } static int 
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp, - int flags) + int flags, struct uvm_page_array *a, unsigned int nleft) { struct vm_page *pg; bool dirty; + const unsigned int fillflags = + ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) | + ((flags & UFP_DIRTYONLY) ? + (UVM_PAGE_ARRAY_FILL_DIRTYONLY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0); @@ -237,11 +247,19 @@ uvn_findpage(struct uvm_object *uobj, vo if (*pgp != NULL) { UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0); - return 0; + goto skip; } for (;;) { /* look for an existing page */ - pg = uvm_pagelookup(uobj, offset); + pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft, + fillflags); + if (pg != NULL && pg->offset != offset) { + KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); + KASSERT( + ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) + == (pg->offset < offset)); + pg = NULL; + } /* nope? 
allocate one now */ if (pg == NULL) { @@ -258,6 +276,7 @@ uvn_findpage(struct uvm_object *uobj, vo } mutex_exit(uobj->vmobjlock); uvm_wait("uvn_fp1"); + uvm_page_array_clear(a); mutex_enter(uobj->vmobjlock); continue; } @@ -266,20 +285,21 @@ uvn_findpage(struct uvm_object *uobj, vo break; } else if (flags & UFP_NOCACHE) { UVMHIST_LOG(ubchist, "nocache",0,0,0,0); - return 0; + goto skip; } /* page is there, see if we need to wait on it */ if ((pg->flags & PG_BUSY) != 0) { if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); - return 0; + goto skip; } pg->flags |= PG_WANTED; UVMHIST_LOG(ubchist, "wait %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, "uvn_fp2", 0); + uvm_page_array_clear(a); mutex_enter(uobj->vmobjlock); continue; } @@ -288,7 +308,7 @@ uvn_findpage(struct uvm_object *uobj, vo if ((flags & UFP_NORDONLY) && (pg->flags & (PG_RDONLY|PG_HOLE))) { UVMHIST_LOG(ubchist, "nordonly",0,0,0,0); - return 0; + goto skip; } /* stop on clean pages if requested */ @@ -305,10 +325,22 @@ uvn_findpage(struct uvm_object *uobj, vo UVM_PAGE_OWN(pg, "uvn_findpage"); UVMHIST_LOG(ubchist, "found %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); + uvm_page_array_advance(a); break; } *pgp = pg; return 1; + +skip: + pg = uvm_page_array_peek(a); + if (pg != NULL) { + if (pg->offset == offset) { + uvm_page_array_advance(a); + } else { + KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); + } + } + return 0; } /*