Module Name:	src
Committed By:	ad
Date:		Tue May 19 21:45:35 UTC 2020
Modified Files:
	src/sys/uvm: uvm_readahead.c

Log Message:
Drop & re-acquire vmobjlock less often.

To generate a diff of this commit:
cvs rdiff -u -r1.12 -r1.13 src/sys/uvm/uvm_readahead.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:

Index: src/sys/uvm/uvm_readahead.c
diff -u src/sys/uvm/uvm_readahead.c:1.12 src/sys/uvm/uvm_readahead.c:1.13
--- src/sys/uvm/uvm_readahead.c:1.12	Sun Mar  8 18:40:29 2020
+++ src/sys/uvm/uvm_readahead.c	Tue May 19 21:45:35 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_readahead.c,v 1.12 2020/03/08 18:40:29 ad Exp $	*/
+/*	$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $	*/

 /*-
  * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
@@ -40,7 +40,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.12 2020/03/08 18:40:29 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $");

 #include <sys/param.h>
 #include <sys/pool.h>
@@ -126,6 +126,8 @@ ra_startio(struct uvm_object *uobj, off_
 	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
 	    __func__, uobj, off, endoff));

+	KASSERT(rw_write_held(uobj->vmobjlock));
+
 	/*
 	 * Don't issue read-ahead if the last page of the range is already cached.
 	 * The assumption is that since the access is sequential, the intermediate
@@ -133,9 +135,7 @@ ra_startio(struct uvm_object *uobj, off_
 	 * too.  This speeds up I/O using cache, since it avoids lookups and temporary
 	 * allocations done by full pgo_get.
 	 */
-	rw_enter(uobj->vmobjlock, RW_READER);
 	struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
-	rw_exit(uobj->vmobjlock);
 	if (pg != NULL) {
 		DPRINTF(("%s: off=%" PRIu64 ", sz=%zu already cached\n",
 		    __func__, off, sz));
@@ -162,9 +162,9 @@ ra_startio(struct uvm_object *uobj, off_
 	 * use UVM_ADV_RANDOM to avoid recursion.
 	 */

-	rw_enter(uobj->vmobjlock, RW_WRITER);
 	error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
 	    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, PGO_NOTIMESTAMP);
+	rw_enter(uobj->vmobjlock, RW_WRITER);
 	DPRINTF(("%s: off=%" PRIu64 ", bytelen=%zu -> %d\n",
 	    __func__, off, bytelen, error));
 	if (error != 0 && error != EBUSY) {
@@ -332,9 +332,7 @@ do_readahead:
 	if (rasize >= RA_MINSIZE) {
 		off_t next;

-		rw_exit(uobj->vmobjlock);
 		next = ra_startio(uobj, raoff, rasize);
-		rw_enter(uobj->vmobjlock, RW_WRITER);
 		ra->ra_next = next;
 	}
 }
@@ -362,6 +360,8 @@ uvm_readahead(struct uvm_object *uobj, o
 	if (size > RA_WINSIZE_MAX) {
 		size = RA_WINSIZE_MAX;
 	}
+	rw_enter(uobj->vmobjlock, RW_WRITER);
 	ra_startio(uobj, off, size);
+	rw_exit(uobj->vmobjlock);
 	return 0;
 }
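
For readers skimming the hunks, the shape of the change is: ra_startio()
used to take vmobjlock itself (once as reader for the page lookup, once as
writer for pgo_get()), so callers had to drop their own hold around it; it
now asserts the caller's write hold and works under it, re-acquiring the
lock only after pgo_get(), which releases the object lock while it does
I/O.  Below is a minimal sketch of that before/after pattern.  It is not
the committed code: the sketch_*() functions and the lookup/getpages
placeholders are hypothetical, and only the krwlock_t calls (rw_enter,
rw_exit, rw_write_held, KASSERT) are the real NetBSD primitives used in
the diff.

	#include <sys/param.h>
	#include <sys/systm.h>		/* KASSERT */
	#include <sys/rwlock.h>		/* krwlock_t, rw_enter, rw_exit */

	/* Before: two lock acquisitions here, plus more in the caller. */
	static void
	sketch_before(krwlock_t *lock)
	{
		rw_enter(lock, RW_READER);	/* taken just for the lookup */
		/* ... uvm_pagelookup()-style check ... */
		rw_exit(lock);

		rw_enter(lock, RW_WRITER);	/* taken again for the I/O */
		/* ... pgo_get()-style call; it releases the lock during I/O ... */
	}

	/* After: the caller's single write hold covers everything. */
	static void
	sketch_after(krwlock_t *lock)
	{
		KASSERT(rw_write_held(lock));	/* caller already holds it */
		/* ... lookup under the existing hold ... */
		/* ... pgo_get()-style call consumes (releases) the lock ... */
		rw_enter(lock, RW_WRITER);	/* restore the hold for the caller */
	}

The caller-side hunks mirror this: the do_readahead path no longer
brackets ra_startio() with rw_exit()/rw_enter(), and uvm_readahead() now
takes and releases the write lock around its single call.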