Module Name:    src
Committed By:   uebayasi
Date:           Wed Jul  7 14:29:39 UTC 2010

Modified Files:
        src/sys/miscfs/genfs [uebayasi-xip]: genfs_io.c
        src/sys/uvm [uebayasi-xip]: uvm_fault.c uvm_page.c uvm_page.h

Log Message:
To simplify things, revert the global vm_page_md hash and instead
allocate a struct vm_page [] array for each XIP physical segment.
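
In short: XIP device pages used to be represented by fake cookie
pointers (the physical frame number shifted up and tagged in the low
bits), with per-page MD state kept in a global hash.  They are now
backed by a real struct vm_page [] array allocated per segment at
physload time, so every lookup becomes plain index arithmetic.
Condensed, the new allocation amounts to the sketch below
(xip_seg_init_pages is a hypothetical name; error handling and the
__HAVE_VM_PAGE_MD branch are elided):

static void
xip_seg_init_pages(struct vm_physseg *seg, paddr_t start, paddr_t end)
{

	seg->pgs = kmem_zalloc(sizeof(struct vm_page) * (end - start),
	    KM_SLEEP);
	seg->endpg = seg->pgs + (end - start);
	seg->start = start;
	seg->end = end;

	for (paddr_t pf = start; pf < end; pf++) {
		struct vm_page *pg = &seg->pgs[pf - start];

		/* Frame number -> byte address; mark as a direct page. */
		pg->phys_addr = pf << PAGE_SHIFT;
		pg->flags |= PG_DIRECT;
	}
}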


To generate a diff of this commit:
cvs rdiff -u -r1.36.2.11 -r1.36.2.12 src/sys/miscfs/genfs/genfs_io.c
cvs rdiff -u -r1.166.2.7 -r1.166.2.8 src/sys/uvm/uvm_fault.c
cvs rdiff -u -r1.153.2.39 -r1.153.2.40 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.59.2.22 -r1.59.2.23 src/sys/uvm/uvm_page.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/miscfs/genfs/genfs_io.c
diff -u src/sys/miscfs/genfs/genfs_io.c:1.36.2.11 src/sys/miscfs/genfs/genfs_io.c:1.36.2.12
--- src/sys/miscfs/genfs/genfs_io.c:1.36.2.11	Tue Jul  6 07:20:27 2010
+++ src/sys/miscfs/genfs/genfs_io.c	Wed Jul  7 14:29:39 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.36.2.11 2010/07/06 07:20:27 uebayasi Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.36.2.12 2010/07/07 14:29:39 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.11 2010/07/06 07:20:27 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.12 2010/07/07 14:29:39 uebayasi Exp $");
 
 #include "opt_direct_page.h"
 #include "opt_xip.h"
@@ -828,11 +828,18 @@
 		KASSERT(error == 0);
 		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d", (long)lbn, (long)blkno, run, 0);
 
+		/*
+		 * XIP page metadata assignment
+		 * - An unallocated block is redirected to the dedicated
+		 *   zeroed page.
+		 * - Assume that this segment's struct vm_page [] array is
+		 *   allocated and linearly ordered by physical address.
+		 */
 		if (blkno < 0) {
 			static ONCE_DECL(xip_zero_page_inited);
 
 			RUN_ONCE(&xip_zero_page_inited, xip_zero_page_init);
-			phys_addr = xip_zero_page->phys_addr;
+			pps[i] = xip_zero_page;
 		} else {
 			struct vm_physseg *seg;
 
@@ -842,10 +849,12 @@
 			phys_addr = pmap_phys_address(seg->start) +
 			    (blkno << dev_bshift) +
 			    (off - (lbn << fs_bshift));
+			pps[i] = seg->pgs +
+			    ((phys_addr >> PAGE_SHIFT) - seg->start);
+			KASSERT(pps[i]->phys_addr == phys_addr);
+			KASSERT((pps[i]->flags & PG_DIRECT) != 0);
 		}
 
-		pps[i] = uvm_phys_to_vm_page_direct(phys_addr);
-
 		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
 			i,
 			(long)phys_addr,

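The genfs_io.c change above replaces the cookie translation
(uvm_phys_to_vm_page_direct) with a direct index into the segment's
page array, plus the shared zero page for holes.  The dispatch reduces
to roughly the following (a sketch only; xip_page_lookup is a
hypothetical name, and the negative-blkno convention for unallocated
blocks comes from the VOP_BMAP call above):

static struct vm_page *
xip_page_lookup(struct vm_physseg *seg, daddr_t blkno, paddr_t phys_addr)
{

	if (blkno < 0) {
		/* File hole: all holes share one pre-zeroed page. */
		return xip_zero_page;
	}
	/* Data block: index by frame number offset within the segment. */
	return seg->pgs + ((phys_addr >> PAGE_SHIFT) - seg->start);
}

The KASSERTs in the real code verify that the indexed page's phys_addr
and PG_DIRECT flag agree with this arithmetic.
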
Index: src/sys/uvm/uvm_fault.c
diff -u src/sys/uvm/uvm_fault.c:1.166.2.7 src/sys/uvm/uvm_fault.c:1.166.2.8
--- src/sys/uvm/uvm_fault.c:1.166.2.7	Wed Jun  9 15:29:58 2010
+++ src/sys/uvm/uvm_fault.c	Wed Jul  7 14:29:37 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault.c,v 1.166.2.7 2010/06/09 15:29:58 uebayasi Exp $	*/
+/*	$NetBSD: uvm_fault.c,v 1.166.2.8 2010/07/07 14:29:37 uebayasi Exp $	*/
 
 /*
  *
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.166.2.7 2010/06/09 15:29:58 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.166.2.8 2010/07/07 14:29:37 uebayasi Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_direct_page.h"
@@ -1657,9 +1657,9 @@
 	 *  - at this point uobjpage could be PG_WANTED (handle later)
 	 */
 
-	KASSERT(uvm_pageisdirect_p(uobjpage) || uobj == NULL ||
+	KASSERT(uobj == NULL || uvm_pageisdirect_p(uobjpage) ||
 	    uobj == uobjpage->uobject);
-	KASSERT(uvm_pageisdirect_p(uobjpage) || uobj == NULL ||
+	KASSERT(uobj == NULL || uvm_pageisdirect_p(uobjpage) ||
 	    !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
 	    (uobjpage->flags & PG_CLEAN) != 0);
 

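The uvm_fault.c hunk only swaps KASSERT operands, but the order now
matters: uvm_pageisdirect_p() used to test tag bits of the pointer
value itself, whereas it now dereferences the page (see the uvm_page.c
hunk below), and when uobj is NULL uobjpage need not point at a real
page.  So, presumably, the uobj == NULL disjunct has to short-circuit
first:

	/*
	 * Illustrative: with the flag-based predicate,
	 * uvm_pageisdirect_p(uobjpage) reads uobjpage->flags, so the
	 * cheap NULL test must come first for || to short-circuit
	 * before uobjpage is touched.
	 */
	KASSERT(uobj == NULL || uvm_pageisdirect_p(uobjpage) ||
	    uobj == uobjpage->uobject);
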
Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.153.2.39 src/sys/uvm/uvm_page.c:1.153.2.40
--- src/sys/uvm/uvm_page.c:1.153.2.39	Mon May 31 13:26:38 2010
+++ src/sys/uvm/uvm_page.c	Wed Jul  7 14:29:38 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.153.2.39 2010/05/31 13:26:38 uebayasi Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.153.2.40 2010/07/07 14:29:38 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.39 2010/05/31 13:26:38 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.40 2010/07/07 14:29:38 uebayasi Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -195,10 +195,6 @@
 
 static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
 static void uvm_pageremove(struct uvm_object *, struct vm_page *);
-#ifdef DIRECT_PAGE
-static void vm_page_direct_mdpage_insert(paddr_t);
-static void vm_page_direct_mdpage_remove(paddr_t);
-#endif
 
 /*
  * per-object tree of pages
@@ -780,7 +776,7 @@
 static struct vm_physseg *
 uvm_page_physload_common(struct vm_physseg_freelist * const,
     struct vm_physseg **, int,
-    const paddr_t, const paddr_t, const paddr_t, const paddr_t);
+    const paddr_t, const paddr_t);
 static void
 uvm_page_physunload_common(struct vm_physseg_freelist *,
     struct vm_physseg **, struct vm_physseg *);
@@ -804,7 +800,7 @@
 		panic("uvm_page_physload: bad free list %d", free_list);
 
 	seg = uvm_page_physload_common(&vm_physmem_freelist, vm_physmem_ptrs,
-	    vm_nphysmem, start, end, avail_start, avail_end);
+	    vm_nphysmem, start, end);
 	KASSERT(seg != NULL);
 
 	seg->avail_start = avail_start;
@@ -848,15 +844,38 @@
     paddr_t avail_end, int prot, int flags)
 {
 	struct vm_physseg *seg;
+	int i;
 
 	seg = uvm_page_physload_common(&vm_physdev_freelist, vm_physdev_ptrs,
-	    vm_nphysdev, start, end, avail_start, avail_end);
+	    vm_nphysdev, start, end);
 	KASSERT(seg != NULL);
 
 	seg->prot = prot;
 	seg->flags = flags;	/* XXXUEBS BUS_SPACE_MAP_* */
-	for (paddr_t pf = start; pf < end; pf++)
-		vm_page_direct_mdpage_insert(pf);
+
+	/*
+	 * XIP page metadata
+	 * - Only "phys_addr" and "vm_page_md" (== "PV" management) are used.
+	 * - No "pageq" operations are performed.
+	 */
+	seg->pgs = kmem_zalloc(sizeof(struct vm_page) * (end - start),
+	    KM_SLEEP);
+	KASSERT(seg->pgs != NULL);
+	seg->endpg = seg->pgs + (end - start);
+	seg->start = start;
+	seg->end = end;
+
+	for (i = 0; i < end - start; i++) {
+		struct vm_page *pg = seg->pgs + i;
+		paddr_t paddr = (start + i) << PAGE_SHIFT;
+
+		pg->phys_addr = paddr;
+		pg->flags |= PG_DIRECT;
+#ifdef __HAVE_VM_PAGE_MD
+		VM_MDPAGE_INIT(&pg->mdpage, paddr);
+#endif
+	}
+
 	vm_nphysdev++;
 	return seg;
 }
@@ -866,8 +885,7 @@
 {
 	struct vm_physseg *seg = cookie;
 
-	for (paddr_t pf = seg->start; pf < seg->end; pf++)
-		vm_page_direct_mdpage_remove(pf);
+	kmem_free(seg->pgs, sizeof(struct vm_page) * (seg->end - seg->start));
 	uvm_page_physunload_common(&vm_physdev_freelist, vm_physdev_ptrs, seg);
 	vm_nphysdev--;
 }
@@ -876,8 +894,7 @@
 static struct vm_physseg *
 uvm_page_physload_common(struct vm_physseg_freelist *freelist,
     struct vm_physseg **segs, int nsegs,
-    const paddr_t start, const paddr_t end,
-    const paddr_t avail_start, const paddr_t avail_end)
+    const paddr_t start, const paddr_t end)
 {
 	struct vm_physseg *ps;
 	static int uvm_page_physseg_inited;
@@ -1156,52 +1173,13 @@
 	}
 }
 
-
-#ifdef DIRECT_PAGE
-/*
- * Device pages don't have struct vm_page objects for various reasons:
- *
- * - Device pages are volatile; not paging involved.  Which means we have
- *   much less state information to keep for each page.
- *
- * - Volatile, directly memory-mappable devices (framebuffers, audio devices,
- *   etc.) only need physical address and attribute (protection and some
- *   accelaration specific to physical bus) common to all the pages.
- *   Allocating vm_page objects to keep such informations is wasteful.
- *
- * - Per-page MD information is only used for XIP vnodes' copy-on-write from
- *   a device page to anon.
- */
-
-/* Assume struct vm_page * is aligned to 4 bytes. */
-/* XXXUEBS Consider to improve this. */
-#define	VM_PAGE_DIRECT_MAGIC		0x2
-#define	VM_PAGE_DIRECT_MAGIC_MASK	0x3
-#define	VM_PAGE_DIRECT_MAGIC_SHIFT	2
-
-struct vm_page *
-uvm_phys_to_vm_page_direct(paddr_t pa)
-{
-	paddr_t pf = pa >> PAGE_SHIFT;
-	uintptr_t cookie = pf << VM_PAGE_DIRECT_MAGIC_SHIFT;
-	return (void *)(cookie | VM_PAGE_DIRECT_MAGIC);
-}
-
-static inline paddr_t
-VM_PAGE_DIRECT_TO_PHYS(const struct vm_page *pg)
-{
-	uintptr_t cookie = (uintptr_t)pg & ~VM_PAGE_DIRECT_MAGIC_MASK;
-	paddr_t pf = cookie >> VM_PAGE_DIRECT_MAGIC_SHIFT;
-	return pf << PAGE_SHIFT;
-}
-
 bool
 uvm_pageisdirect_p(const struct vm_page *pg)
 {
 
-	return ((uintptr_t)pg & VM_PAGE_DIRECT_MAGIC_MASK) == VM_PAGE_DIRECT_MAGIC;
+	KASSERT(pg != NULL);
+	return (pg->flags & PG_DIRECT) != 0;
 }
-#endif
 
 
 /*
@@ -1219,7 +1197,7 @@
 #ifdef DIRECT_PAGE
 	psi = vm_physseg_find_direct(pf, &off);
 	if (psi != -1)
-		return(uvm_phys_to_vm_page_direct(pa));
+		return(&vm_physdev_ptrs[psi]->pgs[off]);
 #endif
 	psi = vm_physseg_find(pf, &off);
 	if (psi != -1)
@@ -1231,142 +1209,9 @@
 uvm_vm_page_to_phys(const struct vm_page *pg)
 {
 
-#ifdef DIRECT_PAGE
-	if (uvm_pageisdirect_p(pg)) {
-		return VM_PAGE_DIRECT_TO_PHYS(pg);
-	}
-#endif
 	return pg->phys_addr;
 }
 
-
-#ifdef __HAVE_VM_PAGE_MD
-#ifdef XIP
-/*
- * Device page's mdpage lookup.
- *
- * - Needed when promoting an XIP vnode page and invalidating its old mapping.
- *
- * - Hashing code is based on sys/arch/x86/x86/pmap.c.
- *
- * XXX Consider to allocate slots on-demand.
- */
-
-static struct vm_page_md *vm_page_direct_mdpage_lookup(struct vm_page *);
-
-struct vm_page_md *
-uvm_vm_page_to_md(struct vm_page *pg)
-{
-
-	return uvm_pageisdirect_p(pg) ?
-	    vm_page_direct_mdpage_lookup(pg) : &pg->mdpage;
-}
-
-struct vm_page_direct_mdpage_entry {
-	struct vm_page_md mde_mdpage;
-	SLIST_ENTRY(vm_page_direct_mdpage_entry) mde_hash;
-	paddr_t mde_pf;
-};
-
-/*
- * These can be optimized depending on the size of XIP'ed executables' .data
- * segments.  If page size is 4K and .data is 1M, .data spans across 256
- * pages.  Considering these pages' physical addresses are continuous, linear
- * hash should suffice.
- */
-#define	MDPG_HASH_SIZE		256	/* XXX */
-#define	MDPG_HASH_LOCK_CNT	4	/* XXX */
-
-struct vm_page_direct_mdpage {
-	kmutex_t locks[MDPG_HASH_LOCK_CNT];
-	struct vm_page_direct_mdpage_head {
-		SLIST_HEAD(, vm_page_direct_mdpage_entry) list;
-	} heads[MDPG_HASH_SIZE];
-};
-
-/* Global for now.  Consider to make this per-vm_physseg. */
-struct vm_page_direct_mdpage vm_page_direct_mdpage;
-
-static struct vm_page_direct_mdpage_head *
-vm_page_direct_mdpage_head(u_int hash)
-{
-
-	return &vm_page_direct_mdpage.heads[hash % MDPG_HASH_SIZE];
-}
-
-static kmutex_t *
-vm_page_direct_mdpage_lock(u_int hash)
-{
-
-	return &vm_page_direct_mdpage.locks[hash % MDPG_HASH_LOCK_CNT];
-}
-
-static void
-vm_page_direct_mdpage_insert(paddr_t pf)
-{
-	u_int hash = (u_int)pf;
-	kmutex_t *lock = vm_page_direct_mdpage_lock(hash);
-	struct vm_page_direct_mdpage_head *head = vm_page_direct_mdpage_head(hash);
-
-	struct vm_page_direct_mdpage_entry *mde = kmem_zalloc(sizeof(*mde), KM_SLEEP);
-
-	VM_MDPAGE_INIT(&mde->mde_mdpage, pf << PAGE_SHIFT);
-	mde->mde_pf = pf;
-
-	mutex_spin_enter(lock);
-	SLIST_INSERT_HEAD(&head->list, mde, mde_hash);
-	mutex_spin_exit(lock);
-}
-
-static void
-vm_page_direct_mdpage_remove(paddr_t pf)
-{
-	u_int hash = (u_int)pf;
-	kmutex_t *lock = vm_page_direct_mdpage_lock(hash);
-	struct vm_page_direct_mdpage_head *head = vm_page_direct_mdpage_head(hash);
-
-	struct vm_page_direct_mdpage_entry *mde;
-	struct vm_page_direct_mdpage_entry *prev = NULL;
-
-	mutex_spin_enter(lock);
-	SLIST_FOREACH(mde, &head->list, mde_hash) {
-		if (mde->mde_pf == pf) {
-			if (prev != NULL) {
-				SLIST_REMOVE_AFTER(prev, mde_hash);
-			} else {
-				SLIST_REMOVE_HEAD(&head->list, mde_hash);
-			}
-			break;
-		}
-		prev = mde;
-	}
-	mutex_spin_exit(lock);
-	KASSERT(mde != NULL);
-	kmem_free(mde, sizeof(*mde));
-}
-
-static struct vm_page_md *
-vm_page_direct_mdpage_lookup(struct vm_page *pg)
-{
-	paddr_t pf = VM_PAGE_DIRECT_TO_PHYS(pg) >> PAGE_SHIFT;
-	u_int hash = (u_int)pf;
-	kmutex_t *lock = vm_page_direct_mdpage_lock(hash);
-	struct vm_page_direct_mdpage_head *head = vm_page_direct_mdpage_head(hash);
-
-	struct vm_page_direct_mdpage_entry *mde = NULL;
-
-	mutex_spin_enter(lock);
-	SLIST_FOREACH(mde, &head->list, mde_hash)
-		if (mde->mde_pf == pf)
-			break;
-	mutex_spin_exit(lock);
-	KASSERT(mde != NULL);
-	return &mde->mde_mdpage;
-}
-#endif
-#endif
-
-
 /*
  * uvm_page_recolor: Recolor the pages if the new bucket count is
  * larger than the old one.

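The bulk of the uvm_page.c diff deletes the global mdpage hash.
Because a direct page now embeds its own vm_page_md like any managed
page, the MD lookup collapses from a locked hash walk to a simple
field access; condensed before/after (names as in the diff):

/* Before: fake pointer -> hashed mdpage entry, under a spin lock. */
struct vm_page_md *
uvm_vm_page_to_md(struct vm_page *pg)
{

	return uvm_pageisdirect_p(pg) ?
	    vm_page_direct_mdpage_lookup(pg) : &pg->mdpage;
}

/* After: one definition for managed and direct pages alike. */
#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

Likewise the direct-page predicate becomes a flag test on a real page,
(pg->flags & PG_DIRECT) != 0, instead of inspecting magic bits of the
pointer value.
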
Index: src/sys/uvm/uvm_page.h
diff -u src/sys/uvm/uvm_page.h:1.59.2.22 src/sys/uvm/uvm_page.h:1.59.2.23
--- src/sys/uvm/uvm_page.h:1.59.2.22	Mon May 31 13:26:38 2010
+++ src/sys/uvm/uvm_page.h	Wed Jul  7 14:29:38 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.59.2.22 2010/05/31 13:26:38 uebayasi Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.59.2.23 2010/07/07 14:29:38 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -181,12 +181,13 @@
 #define	PG_FAKE		0x0040		/* page is not yet initialized */
 #define	PG_RDONLY	0x0080		/* page must be mapped read-only */
 #define	PG_ZERO		0x0100		/* page is pre-zero'd */
+#define	PG_DIRECT	0x0200		/* page is direct vnode data */
 
 #define PG_PAGER1	0x1000		/* pager-specific flag */
 
 #define	UVM_PGFLAGBITS \
 	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
-	"\11ZERO\15PAGER1"
+	"\11ZERO\12DIRECT\15PAGER1"
 
 #define PQ_FREE		0x0001		/* page is on free list */
 #define PQ_ANON		0x0002		/* page is part of an anon, rather
@@ -304,11 +305,7 @@
 void uvm_pagewire(struct vm_page *);
 void uvm_pagezero(struct vm_page *);
 bool uvm_pageismanaged(paddr_t);
-#ifdef DIRECT_PAGE
 bool uvm_pageisdirect_p(const struct vm_page *);
-#else
-#define	uvm_pageisdirect_p(x)	false
-#endif
 
 int uvm_page_lookup_freelist(struct vm_page *);
 
@@ -317,8 +314,6 @@
 paddr_t uvm_vm_page_to_phys(const struct vm_page *);
 #ifdef XIP
 int vm_physseg_find_direct(paddr_t, int *);
-struct vm_page *uvm_phys_to_vm_page_direct(paddr_t);
-paddr_t uvm_vm_page_to_phys_direct(const struct vm_page *);
 #endif
 
 /*
@@ -330,12 +325,7 @@
 #define VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)
 
 #ifdef __HAVE_VM_PAGE_MD
-#ifndef XIP
 #define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
-#else
-struct vm_page_md *uvm_vm_page_to_md(struct vm_page *);
-#define	VM_PAGE_TO_MD(pg)	uvm_vm_page_to_md(pg)
-#endif
 #endif
 
 /*

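One note on the UVM_PGFLAGBITS change in uvm_page.h: the string is in
the kernel's snprintb(9) "%b" bitmask format, where the leading "\20"
selects hexadecimal output and each "\N" octal escape names 1-origin
bit N.  PG_DIRECT is 0x0200, i.e. bit 10, octal 012, hence the new
"\12DIRECT".  For example (a sketch with a hypothetical helper, not
part of the diff):

void
print_pg_flags(const struct vm_page *pg)
{
	char buf[64];

	/* flags 0x0201 (BUSY|DIRECT) formats as "0x201<BUSY,DIRECT>". */
	snprintb(buf, sizeof(buf), UVM_PGFLAGBITS, pg->flags);
	printf("%s\n", buf);
}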