Module Name:	src
Committed By:	yamt
Date:		Wed Jun 10 01:55:33 UTC 2009

Modified Files:
	src/sys/uvm: uvm_map.c uvm_map.h uvm_mmap.c

Log Message:
on MADV_WILLNEED, start prefetching backing object's pages.


To generate a diff of this commit:
cvs rdiff -u -r1.270 -r1.271 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.62 -r1.63 src/sys/uvm/uvm_map.h
cvs rdiff -u -r1.129 -r1.130 src/sys/uvm/uvm_mmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.270 src/sys/uvm/uvm_map.c:1.271
--- src/sys/uvm/uvm_map.c:1.270	Sun May  3 16:52:54 2009
+++ src/sys/uvm/uvm_map.c	Wed Jun 10 01:55:33 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.270 2009/05/03 16:52:54 pooka Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.271 2009/06/10 01:55:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.270 2009/05/03 16:52:54 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.271 2009/06/10 01:55:33 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -95,6 +95,7 @@
 #endif
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_readahead.h>
 
 #if defined(DDB) || defined(DEBUGPRINT)
 #include <uvm/uvm_ddb.h>
@@ -3335,6 +3336,57 @@
 }
 
 /*
+ * uvm_map_willneed: apply MADV_WILLNEED
+ */
+
+int
+uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
+{
+	struct vm_map_entry *entry;
+	UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
+	UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)",
+	    map, start, end, 0);
+
+	vm_map_lock_read(map);
+	VM_MAP_RANGE_CHECK(map, start, end);
+	if (!uvm_map_lookup_entry(map, start, &entry)) {
+		entry = entry->next;
+	}
+	while (entry->start < end) {
+		struct vm_amap * const amap = entry->aref.ar_amap;
+		struct uvm_object * const uobj = entry->object.uvm_obj;
+
+		KASSERT(entry != &map->header);
+		KASSERT(start < entry->end);
+		/*
+		 * XXX IMPLEMENT ME.
+		 * Should invent a "weak" mode for uvm_fault()
+		 * which would only do the PGO_LOCKED pgo_get().
+		 *
+		 * for now, we handle only the easy but common case.
+		 */
+		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
+			off_t offset;
+			off_t size;
+
+			offset = entry->offset;
+			if (start < entry->start) {
+				offset += entry->start - start;
+			}
+			size = entry->offset + (entry->end - entry->start);
+			if (entry->end < end) {
+				size -= end - entry->end;
+			}
+			uvm_readahead(uobj, offset, size);
+		}
+		entry = entry->next;
+	}
+	vm_map_unlock_read(map);
+	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
+	return 0;
+}
+
+/*
  * uvm_map_pageable: sets the pageability of a range in a map.
  *
  * => wires map entries. should not be used for transient page locking.

Index: src/sys/uvm/uvm_map.h
diff -u src/sys/uvm/uvm_map.h:1.62 src/sys/uvm/uvm_map.h:1.63
--- src/sys/uvm/uvm_map.h:1.62	Tue Jul 29 00:03:06 2008
+++ src/sys/uvm/uvm_map.h	Wed Jun 10 01:55:33 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.h,v 1.62 2008/07/29 00:03:06 matt Exp $	*/
+/*	$NetBSD: uvm_map.h,v 1.63 2009/06/10 01:55:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -297,6 +297,7 @@
 
 void		uvm_map_deallocate(struct vm_map *);
 
+int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
 int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
 void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
 		    vaddr_t, struct uvm_mapent_reservation *);

Index: src/sys/uvm/uvm_mmap.c
diff -u src/sys/uvm/uvm_mmap.c:1.129 src/sys/uvm/uvm_mmap.c:1.130
--- src/sys/uvm/uvm_mmap.c:1.129	Sat May 30 04:26:16 2009
+++ src/sys/uvm/uvm_mmap.c	Wed Jun 10 01:55:33 2009
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_mmap.c,v 1.129 2009/05/30 04:26:16 yamt Exp $	*/
+/*	$NetBSD: uvm_mmap.c,v 1.130 2009/06/10 01:55:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -51,7 +51,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.129 2009/05/30 04:26:16 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.130 2009/06/10 01:55:33 yamt Exp $");
 
 #include "opt_compat_netbsd.h"
 #include "opt_pax.h"
@@ -849,13 +849,9 @@
 		 * Activate all these pages, pre-faulting them in if
 		 * necessary.
 		 */
-		/*
-		 * XXX IMPLEMENT ME.
-		 * Should invent a "weak" mode for uvm_fault()
-		 * which would only do the PGO_LOCKED pgo_get().
-		 */
-
-		return (0);
+		error = uvm_map_willneed(&p->p_vmspace->vm_map,
+		    addr, addr + size);
+		break;
 
 	case MADV_DONTNEED:
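
For illustration only -- the following program is not part of the committed
diff.  It is a minimal userland sketch of the path this change affects:
before this commit, madvise(2) with MADV_WILLNEED returned 0 without doing
any work (the removed "XXX IMPLEMENT ME" block above); with it, sys_madvise()
forwards the hint to uvm_map_willneed(), which starts read-ahead on the
mapping's backing uvm_object via uvm_readahead() for plain object-backed
entries (no amap).  The file path used here is a made-up placeholder, and
how much is actually prefetched depends on uvm_readahead()'s heuristics and
on available memory.

#include <sys/mman.h>
#include <sys/stat.h>

#include <err.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/var/db/example.db";	/* hypothetical file */
	struct stat st;
	void *p;
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1)
		err(EXIT_FAILURE, "open %s", path);
	if (fstat(fd, &st) == -1)
		err(EXIT_FAILURE, "fstat");

	/* Map the whole file read-only. */
	p = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(EXIT_FAILURE, "mmap");

	/*
	 * Hint that the whole mapping will be needed soon.  On a kernel
	 * with this change, the hint starts read-ahead of the file's
	 * pages instead of being a no-op.
	 */
	if (madvise(p, (size_t)st.st_size, MADV_WILLNEED) == -1)
		err(EXIT_FAILURE, "madvise");

	/* ... read through the mapping; pages should largely be resident ... */

	munmap(p, (size_t)st.st_size);
	close(fd);
	return EXIT_SUCCESS;
}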