Module Name:    src
Committed By:   para
Date:           Sat Jan 26 13:50:34 UTC 2013

Modified Files:
        src/sys/kern: subr_vmem.c
        src/sys/rump/librump/rumpkern: vm.c
        src/sys/sys: vmem.h
        src/sys/uvm: uvm_km.c
Added Files:
        src/sys/sys: vmem_impl.h

Log Message:
make vmem(9) ready to be used early during bootstrap to replace extent(9).
pass memory for vmem structs into the initialization functions and
do away with the static pools for this.
factor out the vmem internal structures into a private header.
remove special bootstrapping of the kmem_va_arena as all necessary memory
comes from pool_allocator_meta which is fully operational at this point.
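
For illustration, a minimal sketch of the new calling convention, mirroring
the uvm_km.c and subr_vmem.c hunks below.  The example_* names are
hypothetical; PAGE_SIZE, the VM_* flags and IPL_VM are the usual kernel
definitions pulled in via sys/param.h.

    #include <sys/param.h>
    #include <sys/vmem.h>
    #include <sys/vmem_impl.h>	/* struct vmem is now declared here */

    /* caller-provided storage replaces the old static_vmems[] pool */
    static struct vmem example_arena_store;
    static vmem_t *example_arena;

    void
    example_bootstrap(vmem_addr_t base, vmem_size_t size)
    {
    	/*
    	 * vmem_init() bootstraps vmem itself on first use, so no
    	 * separate vmem_bootstrap() call is needed any more.
    	 */
    	example_arena = vmem_init(&example_arena_store, "example",
    	    base, size, PAGE_SIZE,
    	    NULL, NULL, NULL,	/* no import/release backend */
    	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    }

As before, vmem_create_arenas() (the renamed one-argument vmem_init()) is
still called once on the kmem arena to set up kmem_va_meta_arena and
kmem_meta_arena.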


To generate a diff of this commit:
cvs rdiff -u -r1.77 -r1.78 src/sys/kern/subr_vmem.c
cvs rdiff -u -r1.132 -r1.133 src/sys/rump/librump/rumpkern/vm.c
cvs rdiff -u -r1.17 -r1.18 src/sys/sys/vmem.h
cvs rdiff -u -r0 -r1.1 src/sys/sys/vmem_impl.h
cvs rdiff -u -r1.135 -r1.136 src/sys/uvm/uvm_km.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/subr_vmem.c
diff -u src/sys/kern/subr_vmem.c:1.77 src/sys/kern/subr_vmem.c:1.78
--- src/sys/kern/subr_vmem.c:1.77	Fri Jan  4 08:28:38 2013
+++ src/sys/kern/subr_vmem.c	Sat Jan 26 13:50:33 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_vmem.c,v 1.77 2013/01/04 08:28:38 para Exp $	*/
+/*	$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $	*/
 
 /*-
  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.77 2013/01/04 08:28:38 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $");
 
 #if defined(_KERNEL)
 #include "opt_ddb.h"
@@ -53,6 +53,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,
 #include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/vmem.h>
+#include <sys/vmem_impl.h>
 #include <sys/workqueue.h>
 #include <sys/atomic.h>
 #include <uvm/uvm.h>
@@ -61,7 +62,13 @@ __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,
 #include <uvm/uvm_page.h>
 #include <uvm/uvm_pdaemon.h>
 #else /* defined(_KERNEL) */
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
 #include "../sys/vmem.h"
+#include "../sys/vmem_impl.h"
 #endif /* defined(_KERNEL) */
 
 
@@ -78,28 +85,23 @@ VMEM_EVCNT_DEFINE(bt_pages)
 VMEM_EVCNT_DEFINE(bt_count)
 VMEM_EVCNT_DEFINE(bt_inuse)
 
-#define	LOCK_DECL(name)		\
-    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
-
-#define CONDVAR_DECL(name)	\
-    kcondvar_t name
+#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
+#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
+#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
+#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
 
 #else /* defined(_KERNEL) */
-#include <stdio.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
 
 #define VMEM_EVCNT_INCR(ev)	/* nothing */
 #define VMEM_EVCNT_DECR(ev)	/* nothing */
 
-#define	UNITTEST
-#define	KASSERT(a)		assert(a)
-#define	LOCK_DECL(name)		/* nothing */
-#define	CONDVAR_DECL(name)	/* nothing */
 #define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
+#define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
+#define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
 #define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */
+
+#define	UNITTEST
+#define	KASSERT(a)		assert(a)
 #define	mutex_init(a, b, c)	/* nothing */
 #define	mutex_destroy(a)	/* nothing */
 #define	mutex_enter(a)		/* nothing */
@@ -110,74 +112,25 @@ VMEM_EVCNT_DEFINE(bt_inuse)
 #define	panic(...)		printf(__VA_ARGS__); abort()
 #endif /* defined(_KERNEL) */
 
-struct vmem;
-struct vmem_btag;
-
 #if defined(VMEM_SANITY)
 static void vmem_check(vmem_t *);
 #else /* defined(VMEM_SANITY) */
 #define vmem_check(vm)	/* nothing */
 #endif /* defined(VMEM_SANITY) */
 
-#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)
-
 #define	VMEM_HASHSIZE_MIN	1	/* XXX */
 #define	VMEM_HASHSIZE_MAX	65536	/* XXX */
 #define	VMEM_HASHSIZE_INIT	1
 
 #define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)
 
-CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
-LIST_HEAD(vmem_freelist, vmem_btag);
-LIST_HEAD(vmem_hashlist, vmem_btag);
-
-#if defined(QCACHE)
-#define	VMEM_QCACHE_IDX_MAX	32
-
-#define	QC_NAME_MAX	16
-
-struct qcache {
-	pool_cache_t qc_cache;
-	vmem_t *qc_vmem;
-	char qc_name[QC_NAME_MAX];
-};
-typedef struct qcache qcache_t;
-#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
-#endif /* defined(QCACHE) */
-
-#define	VMEM_NAME_MAX	16
-
-/* vmem arena */
-struct vmem {
-	CONDVAR_DECL(vm_cv);
-	LOCK_DECL(vm_lock);
-	vm_flag_t vm_flags;
-	vmem_import_t *vm_importfn;
-	vmem_release_t *vm_releasefn;
-	size_t vm_nfreetags;
-	LIST_HEAD(, vmem_btag) vm_freetags;
-	void *vm_arg;
-	struct vmem_seglist vm_seglist;
-	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
-	size_t vm_hashsize;
-	size_t vm_nbusytag;
-	struct vmem_hashlist *vm_hashlist;
-	struct vmem_hashlist vm_hash0;
-	size_t vm_quantum_mask;
-	int vm_quantum_shift;
-	size_t vm_size;
-	size_t vm_inuse;
-	char vm_name[VMEM_NAME_MAX+1];
-	LIST_ENTRY(vmem) vm_alllist;
+#if defined(_KERNEL)
+static bool vmem_bootstrapped = false;
+static kmutex_t vmem_list_lock;
+static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
+#endif /* defined(_KERNEL) */
 
-#if defined(QCACHE)
-	/* quantum cache */
-	size_t vm_qcache_max;
-	struct pool_allocator vm_qcache_allocator;
-	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
-	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
-#endif /* defined(QCACHE) */
-};
+/* ---- misc */
 
 #define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
 #define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
@@ -186,44 +139,6 @@ struct vmem {
 #define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
 #define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))
 
-#if defined(_KERNEL)
-#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
-#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
-#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
-#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
-#endif /* defined(_KERNEL) */
-
-/* boundary tag */
-struct vmem_btag {
-	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
-	union {
-		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
-		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
-	} bt_u;
-#define	bt_hashlist	bt_u.u_hashlist
-#define	bt_freelist	bt_u.u_freelist
-	vmem_addr_t bt_start;
-	vmem_size_t bt_size;
-	int bt_type;
-};
-
-#define	BT_TYPE_SPAN		1
-#define	BT_TYPE_SPAN_STATIC	2
-#define	BT_TYPE_FREE		3
-#define	BT_TYPE_BUSY		4
-#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
-
-#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
-
-typedef struct vmem_btag bt_t;
-
-#if defined(_KERNEL)
-static kmutex_t vmem_list_lock;
-static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
-#endif /* defined(_KERNEL) */
-
-/* ---- misc */
-
 #define	VMEM_ALIGNUP(addr, align) \
 	(-(-(addr) & -(align)))
 
@@ -241,36 +156,26 @@ static LIST_HEAD(, vmem) vmem_list = LIS
 #else /* defined(_KERNEL) */
 
 #define	xmalloc(sz, flags) \
-    kmem_intr_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
-#define	xfree(p, sz)		kmem_intr_free(p, sz);
+    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
+#define	xfree(p, sz)		kmem_free(p, sz);
 
 /*
- * Memory for arenas initialized during bootstrap.
- * There is memory for STATIC_VMEM_COUNT bootstrap arenas.
- *
  * BT_RESERVE calculation:
  * we allocate memory for boundry tags with vmem, therefor we have
  * to keep a reserve of bts used to allocated memory for bts. 
  * This reserve is 4 for each arena involved in allocating vmems memory.
  * BT_MAXFREE: don't cache excessive counts of bts in arenas
  */
-#define STATIC_VMEM_COUNT 4
 #define STATIC_BT_COUNT 200
 #define BT_MINRESERVE 4
 #define BT_MAXFREE 64
-/* must be equal or greater then qcache multiplier for kmem_va_arena */
-#define STATIC_QC_POOL_COUNT 8
-
-static struct vmem static_vmems[STATIC_VMEM_COUNT];
-static int static_vmem_count = STATIC_VMEM_COUNT;
 
 static struct vmem_btag static_bts[STATIC_BT_COUNT];
 static int static_bt_count = STATIC_BT_COUNT;
 
-static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
-static int static_qc_pool_count = STATIC_QC_POOL_COUNT;
-
+static struct vmem kmem_va_meta_arena_store;
 vmem_t *kmem_va_meta_arena;
+static struct vmem kmem_meta_arena_store;
 vmem_t *kmem_meta_arena;
 
 static kmutex_t vmem_refill_lock;
@@ -652,30 +557,17 @@ qc_init(vmem_t *vm, size_t qcache_max, i
 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
 		    vm->vm_name, size);
 
-		if (vm->vm_flags & VM_BOOTSTRAP) {
-			KASSERT(static_qc_pool_count > 0);
-			pc = &static_qc_pools[--static_qc_pool_count];
-			pool_cache_bootstrap(pc, size,
-			    ORDER2SIZE(vm->vm_quantum_shift), 0,
-			    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
-			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
-		} else {
-			pc = pool_cache_init(size,
-			    ORDER2SIZE(vm->vm_quantum_shift), 0,
-			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
-			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
-		}
+		pc = pool_cache_init(size,
+		    ORDER2SIZE(vm->vm_quantum_shift), 0,
+		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
+		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
+
 		qc->qc_cache = pc;
 		KASSERT(qc->qc_cache != NULL);	/* XXX */
 		if (prevqc != NULL &&
 		    qc->qc_cache->pc_pool.pr_itemsperpage ==
 		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
-			if (vm->vm_flags & VM_BOOTSTRAP) {
-				pool_cache_bootstrap_destroy(pc);
-				//static_qc_pool_count++;
-			} else {
-				pool_cache_destroy(qc->qc_cache);
-			}
+			pool_cache_destroy(qc->qc_cache);
 			vm->vm_qcache[i - 1] = prevqc;
 			continue;
 		}
@@ -700,18 +592,14 @@ qc_destroy(vmem_t *vm)
 		if (prevqc == qc) {
 			continue;
 		}
-		if (vm->vm_flags & VM_BOOTSTRAP) {
-			pool_cache_bootstrap_destroy(qc->qc_cache);
-		} else {
-			pool_cache_destroy(qc->qc_cache);
-		}
+		pool_cache_destroy(qc->qc_cache);
 		prevqc = qc;
 	}
 }
 #endif
 
 #if defined(_KERNEL)
-void
+static void
 vmem_bootstrap(void)
 {
 
@@ -725,18 +613,20 @@ vmem_bootstrap(void)
 		VMEM_EVCNT_INCR(bt_count);
 		vmem_btag_freelist_count++;
 	}
+	vmem_bootstrapped = TRUE;
 }
 
 void
-vmem_init(vmem_t *vm)
+vmem_create_arenas(vmem_t *vm)
 {
 
-	kmem_va_meta_arena = vmem_create("vmem-va", 0, 0, PAGE_SIZE,
-	    vmem_alloc, vmem_free, vm,
+	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
+	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
 	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
 	    IPL_VM);
 
-	kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
+	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
+	    0, 0, PAGE_SIZE,
 	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
 }
@@ -818,6 +708,7 @@ vmem_destroy1(vmem_t *vm)
 		mutex_exit(&vmem_btag_lock);
 	}
 
+	VMEM_CONDVAR_DESTROY(vm);
 	VMEM_LOCK_DESTROY(vm);
 	xfree(vm, sizeof(*vm));
 }
@@ -959,29 +850,32 @@ vmem_fit(const bt_t *bt, vmem_size_t siz
 	return ENOMEM;
 }
 
+/* ---- vmem API */
 
 /*
  * vmem_create_internal: creates a vmem arena.
  */
 
-static vmem_t *
-vmem_create_internal(const char *name, vmem_addr_t base, vmem_size_t size,
-    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
-    void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
+vmem_t *
+vmem_init(vmem_t *vm, const char *name,
+    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
+    vmem_import_t *importfn, vmem_release_t *releasefn,
+    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
 {
-	vmem_t *vm = NULL;
 	int i;
 
 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
 	KASSERT(quantum > 0);
 
-	if (flags & VM_BOOTSTRAP) {
 #if defined(_KERNEL)
-		KASSERT(static_vmem_count > 0);
-		vm = &static_vmems[--static_vmem_count];
+	/* XXX: SMP, we get called early... */
+	if (!vmem_bootstrapped) {
+		vmem_bootstrap();
+	}
 #endif /* defined(_KERNEL) */
-	} else {
+
+	if (vm == NULL) {
 		vm = xmalloc(sizeof(*vm), flags);
 	}
 	if (vm == NULL) {
@@ -1011,14 +905,9 @@ vmem_create_internal(const char *name, v
 	for (i = 0; i < VMEM_MAXORDER; i++) {
 		LIST_INIT(&vm->vm_freelist[i]);
 	}
-	vm->vm_hashlist = NULL;
-	if (flags & VM_BOOTSTRAP) {
-		vm->vm_hashsize = 1;
-		vm->vm_hashlist = &vm->vm_hash0;
-	} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
-		vmem_destroy1(vm);
-		return NULL;
-	}
+	memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
+	vm->vm_hashsize = 1;
+	vm->vm_hashlist = &vm->vm_hash0;
 
 	if (size != 0) {
 		if (vmem_add(vm, base, size, flags) != 0) {
@@ -1041,7 +930,6 @@ vmem_create_internal(const char *name, v
 }
 
 
-/* ---- vmem API */
 
 /*
  * vmem_create: create an arena.
@@ -1055,11 +943,9 @@ vmem_create(const char *name, vmem_addr_
     vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
 {
 
-	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
-	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
 	KASSERT((flags & (VM_XIMPORT)) == 0);
 
-	return vmem_create_internal(name, base, size, quantum,
+	return vmem_init(NULL, name, base, size, quantum,
 	    importfn, releasefn, source, qcache_max, flags, ipl);
 }
 
@@ -1075,11 +961,9 @@ vmem_xcreate(const char *name, vmem_addr
     vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
 {
 
-	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
-	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
 	KASSERT((flags & (VM_XIMPORT)) == 0);
 
-	return vmem_create_internal(name, base, size, quantum,
+	return vmem_init(NULL, name, base, size, quantum,
 	    (vmem_import_t *)importfn, releasefn, source,
 	    qcache_max, flags | VM_XIMPORT, ipl);
 }

Index: src/sys/rump/librump/rumpkern/vm.c
diff -u src/sys/rump/librump/rumpkern/vm.c:1.132 src/sys/rump/librump/rumpkern/vm.c:1.133
--- src/sys/rump/librump/rumpkern/vm.c:1.132	Mon Jan 14 16:45:47 2013
+++ src/sys/rump/librump/rumpkern/vm.c	Sat Jan 26 13:50:33 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm.c,v 1.132 2013/01/14 16:45:47 pooka Exp $	*/
+/*	$NetBSD: vm.c,v 1.133 2013/01/26 13:50:33 para Exp $	*/
 
 /*
  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.132 2013/01/14 16:45:47 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.133 2013/01/26 13:50:33 para Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -338,12 +338,11 @@ uvm_init(void)
 	pool_subsystem_init();
 
 #ifndef RUMP_UNREAL_ALLOCATORS
-	vmem_bootstrap();
 	kmem_arena = vmem_create("kmem", 0, 1024*1024, PAGE_SIZE,
 	    NULL, NULL, NULL,
 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
 
-	vmem_init(kmem_arena);
+	vmem_create_arenas(kmem_arena);
 
 	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
 	    vmem_alloc, vmem_free, kmem_arena,

Index: src/sys/sys/vmem.h
diff -u src/sys/sys/vmem.h:1.17 src/sys/sys/vmem.h:1.18
--- src/sys/sys/vmem.h:1.17	Fri Jan 27 19:48:41 2012
+++ src/sys/sys/vmem.h	Sat Jan 26 13:50:33 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: vmem.h,v 1.17 2012/01/27 19:48:41 para Exp $	*/
+/*	$NetBSD: vmem.h,v 1.18 2013/01/26 13:50:33 para Exp $	*/
 
 /*-
  * Copyright (c)2006 YAMAMOTO Takashi,
@@ -54,8 +54,7 @@ extern vmem_t *kmem_arena;
 extern vmem_t *kmem_meta_arena;
 extern vmem_t *kmem_va_arena;
 
-void vmem_bootstrap(void);
-void vmem_init(vmem_t *vm);
+void vmem_create_arenas(vmem_t *vm);
 
 vmem_t *vmem_create(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
     vmem_import_t *, vmem_release_t *, vmem_t *, vmem_size_t,
@@ -63,6 +62,9 @@ vmem_t *vmem_create(const char *, vmem_a
 vmem_t *vmem_xcreate(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
     vmem_ximport_t *, vmem_release_t *, vmem_t *, vmem_size_t,
     vm_flag_t, int);
+vmem_t *vmem_init(vmem_t *, const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
+    vmem_import_t *, vmem_release_t *, vmem_t *, vmem_size_t,
+    vm_flag_t, int);
 void vmem_destroy(vmem_t *);
 int vmem_alloc(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *);
 void vmem_free(vmem_t *, vmem_addr_t, vmem_size_t);

Index: src/sys/uvm/uvm_km.c
diff -u src/sys/uvm/uvm_km.c:1.135 src/sys/uvm/uvm_km.c:1.136
--- src/sys/uvm/uvm_km.c:1.135	Fri Sep  7 06:45:04 2012
+++ src/sys/uvm/uvm_km.c	Sat Jan 26 13:50:33 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.135 2012/09/07 06:45:04 para Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.135 2012/09/07 06:45:04 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -180,6 +180,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1
 #include <sys/proc.h>
 #include <sys/pool.h>
 #include <sys/vmem.h>
+#include <sys/vmem_impl.h>
 #include <sys/kmem.h>
 
 #include <uvm/uvm.h>
@@ -202,6 +203,7 @@ int nkmempages = 0;
 vaddr_t kmembase;
 vsize_t kmemsize;
 
+static struct vmem kmem_arena_store;
 vmem_t *kmem_arena = NULL;
 vmem_t *kmem_va_arena;
 
@@ -324,10 +326,9 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t 
 	kernel_map = &kernel_map_store;
 
 	pool_subsystem_init();
-	vmem_bootstrap();
 
-	kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
-	    NULL, NULL, NULL,
+	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
+	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
 #ifdef PMAP_GROWKERNEL
 	/*
@@ -342,7 +343,7 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t 
 	}
 #endif
 
-	vmem_init(kmem_arena);
+	vmem_create_arenas(kmem_arena);
 
 	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
 	    ", size=%#"PRIxVSIZE, kmembase, kmemsize, 0,0);
@@ -350,7 +351,7 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t 
 	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
 	    vmem_alloc, vmem_free, kmem_arena,
 	    (kmem_arena_small ? 4 : 8) * PAGE_SIZE,
-	    VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
+	    VM_NOSLEEP, IPL_VM);
 
 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
 }

Added files:

Index: src/sys/sys/vmem_impl.h
diff -u /dev/null src/sys/sys/vmem_impl.h:1.1
--- /dev/null	Sat Jan 26 13:50:34 2013
+++ src/sys/sys/vmem_impl.h	Sat Jan 26 13:50:33 2013
@@ -0,0 +1,138 @@
+/*	$NetBSD: vmem_impl.h,v 1.1 2013/01/26 13:50:33 para Exp $	*/
+
+/*-
+ * Copyright (c)2006 YAMAMOTO Takashi,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Data structures private to vmem.
+ */
+
+#ifndef _SYS_VMEM_IMPL_H_
+#define	_SYS_VMEM_IMPL_H_
+
+#include <sys/types.h>
+
+#if defined(_KERNEL)
+#include <sys/vmem.h>
+
+#define	LOCK_DECL(name)		\
+    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
+
+#define CONDVAR_DECL(name)	\
+    kcondvar_t name
+
+#else /* defined(_KERNEL) */
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "vmem.h"
+
+#define	LOCK_DECL(name)		/* nothing */
+#define	CONDVAR_DECL(name)	/* nothing */
+#endif /* defined(_KERNEL) */
+
+#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)
+
+typedef struct vmem_btag bt_t;
+
+CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
+LIST_HEAD(vmem_freelist, vmem_btag);
+LIST_HEAD(vmem_hashlist, vmem_btag);
+
+#if defined(QCACHE)
+#define	VMEM_QCACHE_IDX_MAX	32
+
+#define	QC_NAME_MAX	16
+
+struct qcache {
+	pool_cache_t qc_cache;
+	vmem_t *qc_vmem;
+	char qc_name[QC_NAME_MAX];
+};
+typedef struct qcache qcache_t;
+#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
+#endif /* defined(QCACHE) */
+
+#define	VMEM_NAME_MAX	16
+
+/* vmem arena */
+struct vmem {
+	CONDVAR_DECL(vm_cv);
+	LOCK_DECL(vm_lock);
+	vm_flag_t vm_flags;
+	vmem_import_t *vm_importfn;
+	vmem_release_t *vm_releasefn;
+	size_t vm_nfreetags;
+	LIST_HEAD(, vmem_btag) vm_freetags;
+	void *vm_arg;
+	struct vmem_seglist vm_seglist;
+	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
+	size_t vm_hashsize;
+	size_t vm_nbusytag;
+	struct vmem_hashlist *vm_hashlist;
+	struct vmem_hashlist vm_hash0;
+	size_t vm_quantum_mask;
+	int vm_quantum_shift;
+	size_t vm_size;
+	size_t vm_inuse;
+	char vm_name[VMEM_NAME_MAX+1];
+	LIST_ENTRY(vmem) vm_alllist;
+
+#if defined(QCACHE)
+	/* quantum cache */
+	size_t vm_qcache_max;
+	struct pool_allocator vm_qcache_allocator;
+	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
+	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
+#endif /* defined(QCACHE) */
+};
+
+/* boundary tag */
+struct vmem_btag {
+	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
+	union {
+		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
+		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
+	} bt_u;
+#define	bt_hashlist	bt_u.u_hashlist
+#define	bt_freelist	bt_u.u_freelist
+	vmem_addr_t bt_start;
+	vmem_size_t bt_size;
+	int bt_type;
+};
+
+#define	BT_TYPE_SPAN		1
+#define	BT_TYPE_SPAN_STATIC	2
+#define	BT_TYPE_FREE		3
+#define	BT_TYPE_BUSY		4
+#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
+
+#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
+
+#endif /* !_SYS_VMEM_IMPL_H_ */
