Index: conf/files
===================================================================
--- conf/files	(revision 143550)
+++ conf/files	(working copy)
@@ -2266,31 +2266,32 @@ ufs/ufs/ufs_extattr.c		optional ffs
 ufs/ufs/ufs_gjournal.c		optional ffs
 ufs/ufs/ufs_inode.c		optional ffs
 ufs/ufs/ufs_lookup.c		optional ffs
 ufs/ufs/ufs_quota.c		optional ffs
 ufs/ufs/ufs_vfsops.c		optional ffs
 ufs/ufs/ufs_vnops.c		optional ffs
 vm/default_pager.c		standard
 vm/device_pager.c		standard
 vm/phys_pager.c			standard
 vm/redzone.c			optional DEBUG_REDZONE
 vm/sg_pager.c			standard
 vm/swap_pager.c			standard
 vm/uma_core.c			standard
 vm/uma_dbg.c			standard
 vm/vm_contig.c			standard
-vm/memguard.c			optional DEBUG_MEMGUARD
+#vm/memguard.c			optional DEBUG_MEMGUARD
+vm/isi_memguard.c		optional DEBUG_MEMGUARD
 vm/vm_fault.c			standard
 vm/vm_glue.c			standard
 vm/vm_init.c			standard
 vm/vm_kern.c			standard
 vm/vm_map.c			standard
 vm/vm_meter.c			standard
 vm/vm_mmap.c			standard
 vm/vm_object.c			standard
 vm/vm_page.c			standard
 vm/vm_pageout.c			standard
 vm/vm_pager.c			standard
 vm/vm_phys.c			standard
 vm/vm_reserv.c			standard
 vm/vm_unix.c			standard
 vm/vm_zeroidle.c		standard
Index: kern/kern_malloc.c
===================================================================
--- kern/kern_malloc.c	(revision 143550)
+++ kern/kern_malloc.c	(working copy)
@@ -68,33 +68,31 @@ __FBSDID("$FreeBSD$");
 #ifdef MALLOC_LEAKS_CHECK
 #include <sys/isi_format.h>
 #endif /* MALLOC_LEAKS_CHECK */
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/vm_param.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
 #include <vm/uma_dbg.h>
 
-#ifdef DEBUG_MEMGUARD
 #include <vm/memguard.h>
-#endif
 #ifdef DEBUG_REDZONE
 #include <vm/redzone.h>
 #endif
 
 #if defined(INVARIANTS) && defined(__i386__)
 #include <machine/cpu.h>
 #endif
 
 #include <ddb/ddb.h>
 
 #ifdef KDTRACE_HOOKS
 #include <sys/dtrace_bsd.h>
 
 dtrace_malloc_probe_func_t	dtrace_malloc_probe;
 #endif
@@ -642,34 +640,36 @@ malloc(unsigned long size, struct malloc
 	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
 		atomic_add_int(&malloc_nowait_count, 1);
 		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
 			atomic_add_int(&malloc_failure_count, 1);
 			t_malloc_fail = time_uptime;
 			return (NULL);
 		}
 	}
 #endif
 #if 0	/* XXX Presently Isilon allows blocking in interrupt context. */
 	if (flags & M_WAITOK)
 		KASSERT(curthread->td_intr_nesting_level == 0,
 		   ("malloc(M_WAITOK) in interrupt context"));
 #endif
 
-#ifdef DEBUG_MEMGUARD
-	if (memguard_cmp(mtp))
-		return memguard_alloc(size, flags);
-#endif
+	if (memguard_cmp(mtp, size)) {
+		va = memguard_alloc(size, flags);
+		if (va != NULL)
+			return va;
+		/* This is unfortunate but should not be fatal. */
+	}
 
 #ifdef DEBUG_REDZONE
 	size = redzone_size_ntor(size);
 #endif
 
 #ifdef MALLOC_LEAKS_CHECK
 	if (mtp->ks_check_leaks) {
 		for (pad = 64; pad < size && pad < PAGE_SIZE; pad <<= 1)
 			;
 		size += pad;
 	} else
 		pad = 0;
 #endif /* MALLOC_LEAKS_CHECK */
 
 	if (size <= KMEM_ZMAX) {
@@ -743,36 +743,34 @@ malloc(unsigned long size, struct malloc
  *	This routine may not block.
  */
 void
 free(void *addr, struct malloc_type *mtp)
 {
 	uma_slab_t slab;
 	u_long size;
 #ifdef MALLOC_LEAKS_CHECK
 	struct leak_node *node;
 #endif /* MALLOC_LEAKS_CHECK */
 
 	/* free(NULL, ...) does nothing */
 	if (addr == NULL)
 		return;
 
-#ifdef DEBUG_MEMGUARD
-	if (memguard_cmp(mtp)) {
+	if (is_memguard_addr(addr)) {
 		memguard_free(addr);
 		return;
 	}
-#endif
 
 #ifdef DEBUG_REDZONE
 	redzone_check(addr);
 	addr = redzone_addr_ntor(addr);
 #endif
 
 	size = 0;
 
 #ifdef MALLOC_LEAKS_CHECK
 	if (mtp->ks_check_leaks) {
 		node = ((struct leak_node **)addr)[-1];
 		mtx_lock(&leak_mtx);
 		LIST_REMOVE(node, leak_link);
 		mtx_unlock(&leak_mtx);
 		call_stack_clean(&node->stack);
@@ -821,75 +819,70 @@ free(void *addr, struct malloc_type *mtp
 void *
 realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
 {
 	uma_slab_t slab;
 	unsigned long alloc;
 	void *newaddr;
 
 	/* realloc(NULL, ...) is equivalent to malloc(...) */
 	if (addr == NULL)
 		return (malloc(size, mtp, flags));
 
 	/*
 	 * XXX: Should report free of old memory and alloc of new memory to
 	 * per-CPU stats.
 	 */
-
-#ifdef DEBUG_MEMGUARD
-if (memguard_cmp(mtp)) {
-	slab = NULL;
-	alloc = size;
-} else {
-#endif
-
+	if (is_memguard_addr(addr)) {
+		slab = NULL;
+		alloc = size;
+		goto remalloc;
+	}
 #ifdef DEBUG_REDZONE
 	slab = NULL;
 	alloc = redzone_get_size(addr);
-#else
+	goto remalloc;
+#endif /* DEBUG_REDZONE */
+
 #ifdef MALLOC_LEAKS_CHECK
 	if (mtp->ks_check_leaks)
 		slab = vtoslab(((vm_offset_t *)addr)[-1] & ~(UMA_SLAB_MASK));
 	else
 #endif /* MALLOC_LEAKS_CHECK */
 		slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
 
 	/* Sanity check */
 	KASSERT(slab != NULL,
 	    ("realloc: address %p out of range", (void *)addr));
 
 	/* Get the size of the original block */
 	if (!(slab->us_flags & UMA_SLAB_MALLOC))
 		alloc = slab->us_keg->uk_size;
 	else
 		alloc = slab->us_size;
 
 #ifdef MALLOC_LEAKS_CHECK
 	if (mtp->ks_check_leaks)
 		/** Adjust the alloc size by subtracting the size of the pad. */
 		alloc -= ((caddr_t)addr - ((caddr_t *)addr)[-1]);
 #endif /* MALLOC_LEAKS_CHECK */
 
 	/* Reuse the original block if appropriate */
 	if (size <= alloc
 	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
 		return (addr);
-#endif /* !DEBUG_REDZONE */
-
-#ifdef DEBUG_MEMGUARD
-}
-#endif
 
+remalloc:
 	/* Allocate a new, bigger (or smaller) block */
 	if ((newaddr = malloc(size, mtp, flags)) == NULL)
 		return (NULL);
 
 	/* Copy over original contents */
 	bcopy(addr, newaddr, min(size, alloc));
 	free(addr, mtp);
 	return (newaddr);
 }
 
 /*
  *	reallocf: same as realloc() but free memory on failure.
  */
 void *
 reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
@@ -900,31 +893,31 @@ reallocf(void *addr, unsigned long size,
 		free(addr, mtp);
 	return (mem);
 }
 
 /* XXXRSS turn some extra checks on if we're seeing scribblers */
 #define SCRIBBLER_CHECKS 1
 
 /*
  * Initialize the kernel memory allocator
  */
 /* ARGSUSED*/
 static void
 kmeminit(void *dummy)
 {
 	u_int8_t indx;
-	u_long mem_size;
+	u_long mem_size, tmp;
 	int i;
  
 	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
 	mtx_init(&malloc_pigs_mtx, "malloc_pigs", NULL, MTX_DEF);
 #ifdef MALLOC_LEAKS_CHECK
 	mtx_init(&leak_mtx, "malloc_leaks", NULL, MTX_DEF);
 #endif /* MALLOC_LEAKS_CHECK */
 
 	/*
 	 * Try to auto-tune the kernel memory size, so that it is
 	 * more applicable for a wider range of machine sizes.
 	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
 	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
 	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
 	 * available, and on an X86 with a total KVA space of 256MB,
@@ -968,49 +961,40 @@ kmeminit(void *dummy)
 
 	/*
 	 * Limit kmem virtual size to twice the physical memory.
 	 * This allows for kmem map sparseness, but limits the size
 	 * to something sane. Be careful to not overflow the 32bit
 	 * ints while doing the check.
 	 */
 	if (((vm_kmem_size / 2) / PAGE_SIZE) > vm_cnt.v_page_count)
 		vm_kmem_size = 2 * vm_cnt.v_page_count * PAGE_SIZE;
 
 	/*
 	 * Tune settings based on the kmem map's size at this time.
 	 */
 	init_param3(vm_kmem_size / PAGE_SIZE);
 
+	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
 	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
-	    vm_kmem_size, TRUE);
+	    tmp, TRUE);
 	kmem_map->system_map = 1;
-
-#ifdef DEBUG_MEMGUARD
 	/*
 	 * Initialize MemGuard if support compiled in.  MemGuard is a
 	 * replacement allocator used for detecting tamper-after-free
 	 * scenarios as they occur.  It is only used for debugging.
 	 */
-	vm_memguard_divisor = 10;
-	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
-
-	/* Pick a conservative value if provided value sucks. */
-	if ((vm_memguard_divisor <= 0) ||
-	    ((vm_kmem_size / vm_memguard_divisor) == 0))
-		vm_memguard_divisor = 10;
-	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
-#endif
+	memguard_init(kmem_map);
 
 	uma_startup2();
 
 	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
 #if defined(INVARIANTS) || defined(SCRIBBLER_CHECKS)
 	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
 #else
 	    NULL, NULL, NULL, NULL,
 #endif
 	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC
 #ifdef SCRIBBLER_CHECKS
 	    | UMA_ZONE_MAXBUCKET
 #endif
 	    );
 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
Index: vm/vm_kern.c
===================================================================
--- vm/vm_kern.c	(revision 143550)
+++ vm/vm_kern.c	(working copy)
@@ -290,35 +290,32 @@ isi_kmem_malloc_failed(vm_map_t map, vm_
  * 	(kmem_object).  This, combined with the fact that only malloc uses
  * 	this routine, ensures that we will never block in map or object waits.
  *
  * 	We don't worry about expanding the map (adding entries) since entries
  * 	for wired maps are statically allocated.
  *
  *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
  *	which we never free.
  */
 vm_offset_t
 kmem_malloc(map, size, flags)
 	vm_map_t map;
 	vm_size_t size;
 	int flags;
 {
-	vm_offset_t offset, i;
-	vm_map_entry_t entry;
 	vm_offset_t addr;
-	vm_page_t m;
-	int pflags;
+	int i, rv;
 
 	size = round_page(size);
 	addr = vm_map_min(map);
 
 	/*
 	 * Locate sufficient space in the map.  This will give us the final
 	 * virtual address for the new memory, and thus will tell us the
 	 * offset within the kernel map.
 	 */
 	vm_map_lock(map);
 	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
 		vm_map_unlock(map);
 
                 if ((flags & M_NOWAIT) == 0) {
 			for (i = 0; i < 8; i++) {
@@ -338,30 +335,57 @@ kmem_malloc(map, size, flags)
 				panic_start("kmem_malloc(%ld): %s too small.",
 				    (long)size, map == kmem_map ? "kmem_map" :
 				    "mbuf space");
 				if (m_exhausted_cb)
 					m_exhausted_cb();
 				else
 					print_mbuf_usage();
 				print_alloc_pigs();
 				panic_finish();
 			}
 		} else {
 			isi_kmem_malloc_failed(map, size);
 			return (0);
 		}
 	}
+
+	/* begin Isilon */
+	rv = kmem_back(map, addr, size, flags);
+	/* end Isilon */
+	vm_map_unlock(map);
+
+	return (rv == KERN_SUCCESS ? addr : (vm_offset_t)NULL);
+}
+
+/*
+ * Isilon: split up the work of kmem_malloc into one routine that
+ * finds the address to use, and another to get pages for the
+ * addresses.
+ */
+int
+kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
+{
+	vm_offset_t offset, i;
+	vm_map_entry_t entry;
+	vm_page_t m;
+	int pflags;
+
+	/*
+	 * XXX the map must be locked for write on entry, but there's
+	 * no way to assert that.
+	 */
+
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	vm_object_reference(kmem_object);
 	vm_map_insert(map, kmem_object, offset, addr, addr + size,
 		VM_PROT_ALL, VM_PROT_ALL, 0);
 
 	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
 		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
 	else
 		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
 
 	if (flags & M_ZERO)
 		pflags |= VM_ALLOC_ZERO;
 	if (flags & M_NODUMP)
 		pflags |= VM_ALLOC_NODUMP;
 
@@ -390,32 +414,31 @@ retry:
 			 * They are already marked busy.  Calling
 			 * vm_map_delete before the pages has been freed or
 			 * unbusied will cause a deadlock.
 			 */
 			while (i != 0) {
 				i -= PAGE_SIZE;
 				m = vm_page_lookup(kmem_object,
 						   OFF_TO_IDX(offset + i));
 				vm_page_lock_queues();
 				vm_page_unwire(m, 0);
 				vm_page_free(m);
 				vm_page_unlock_queues();
 			}
 			VM_OBJECT_UNLOCK(kmem_object);
 			vm_map_delete(map, addr, addr + size);
-			vm_map_unlock(map);
-			return (0);
+			return (KERN_NO_SPACE);
 		}
 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		m->valid = VM_PAGE_BITS_ALL;
 		KASSERT((m->flags & PG_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 	}
 	VM_OBJECT_UNLOCK(kmem_object);
 
 	/*
 	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
 	 * be able to extend the previous entry so there will be a new entry
 	 * exactly corresponding to this address range and it will have
 	 * wired_count == 0.
 	 */
@@ -434,33 +457,31 @@ retry:
 
 	/*
 	 * Loop thru pages, entering them in the pmap.
 	 */
 	VM_OBJECT_LOCK(kmem_object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
 		/*
 		 * Because this is kernel_pmap, this call will not block.
 		 */
 		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
 		    TRUE);
 		vm_page_wakeup(m);
 	}
 	VM_OBJECT_UNLOCK(kmem_object);
-	vm_map_unlock(map);
-
-	return (addr);
+	return (KERN_SUCCESS);
 }
 
 /*
  *	kmem_alloc_wait:
  *
  *	Allocates pageable memory from a sub-map of the kernel.  If the submap
  *	has no room, the caller sleeps waiting for more memory in the submap.
  *
  *	This routine may block.
  */
 vm_offset_t
 kmem_alloc_wait(map, size)
 	vm_map_t map;
 	vm_size_t size;
 {
Index: vm/memguard.h
===================================================================
--- vm/memguard.h	(revision 143550)
+++ vm/memguard.h	(working copy)
@@ -14,21 +14,32 @@
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
-extern u_int vm_memguard_divisor;
+#include "opt_vm.h"
 
-void	memguard_init(vm_map_t parent_map, unsigned long size);
-void 	*memguard_alloc(unsigned long size, int flags);
-void	memguard_free(void *addr);
-int	memguard_cmp(struct malloc_type *mtp);
+#ifdef DEBUG_MEMGUARD
+unsigned long	memguard_fudge(unsigned long, unsigned long);
+void	memguard_init(struct vm_map *);
+void 	*memguard_alloc(unsigned long, int);
+void	memguard_free(void *);
+int	memguard_cmp(struct malloc_type *, unsigned long);
+int	is_memguard_addr(void *);
+#else
+#define	memguard_fudge(size, xxx)	(size)
+#define	memguard_init(map)		do { } while (0)
+#define	memguard_alloc(size, flags)	NULL
+#define	memguard_free(addr)		do { } while (0)
+#define	memguard_cmp(mtp, size)		0
+#define	is_memguard_addr(addr)		0
+#endif
Index: vm/isi_memguard.c
===================================================================
--- vm/isi_memguard.c	(revision 0)
+++ vm/isi_memguard.c	(revision 0)
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2005,
+ *     Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * MemGuard is a simple replacement allocator for debugging only
+ * which provides ElectricFence-style memory barrier protection on
+ * objects being allocated, and is used to detect tampering-after-free
+ * scenarios.
+ *
+ * See the memguard(9) man page for more information on using MemGuard.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/uma.h>
+#include <vm/vm_param.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/memguard.h>
+
+SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
+/*
+ * The vm_memguard_divisor variable controls how much of kmem_map should be
+ * reserved for MemGuard.
+ */
+static u_int vm_memguard_divisor;
+SYSCTL_UINT(_vm_memguard, OID_AUTO, divisor, CTLFLAG_RDTUN,
+    &vm_memguard_divisor,
+    0, "(kmem_size/memguard_divisor) == memguard submap size");
+
+/*
+ * Short description (ks_shortdesc) of memory type to monitor.
+ */
+static char vm_memguard_desc[128] = "";
+static struct malloc_type *vm_memguard_mtype = NULL;
+TUNABLE_STR("vm.memguard.desc", vm_memguard_desc, sizeof(vm_memguard_desc));
+static int
+memguard_sysctl_desc(SYSCTL_HANDLER_ARGS)
+{
+	char desc[sizeof(vm_memguard_desc)];
+	int error;
+
+	strlcpy(desc, vm_memguard_desc, sizeof(desc));
+	error = sysctl_handle_string(oidp, desc, sizeof(desc), req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+
+	mtx_lock(&malloc_mtx);
+	/*
+	 * If mtp is NULL, it will be initialized in memguard_cmp().
+	 */
+	vm_memguard_mtype = malloc_desc2type(desc);
+	strlcpy(vm_memguard_desc, desc, sizeof(vm_memguard_desc));
+	mtx_unlock(&malloc_mtx);
+	return (error);
+}
+SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
+    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
+    memguard_sysctl_desc, "A", "Short description of memory type to monitor");
+
+static struct vm_map *memguard_map;
+static vm_offset_t memguard_cursor;
+static vm_size_t memguard_mapsize;
+static vm_size_t memguard_physlimit;
+static u_long memguard_wrap;
+static u_long memguard_succ;
+static u_long memguard_fail_kva;
+static u_long memguard_fail_pgs;
+
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
+    &memguard_cursor, 0, "MemGuard cursor");
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
+    &memguard_mapsize, 0, "MemGuard private vm_map size");
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
+    &memguard_physlimit, 0, "Limit on MemGuard memory consumption");
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, wrapcnt, CTLFLAG_RD,
+    &memguard_wrap, 0, "MemGuard cursor wrap count");
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, numalloc, CTLFLAG_RD,
+    &memguard_succ, 0, "Count of successful MemGuard allocations");
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
+    &memguard_fail_kva, 0, "MemGuard failures due to lack of KVA");
+SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
+    &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");
+
+/*
+ * Return a fudged value for vm_kmem_size for allocating the
+ * kernel_map.  The memguard memory will be a submap.
+ */
+unsigned long
+memguard_fudge(unsigned long vm_kmem_size, unsigned long vm_kmem_max)
+{
+	u_long mem_pgs = vm_cnt.v_page_count;
+
+	vm_memguard_divisor = 10;
+	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
+
+	/* Pick a conservative value if provided value sucks. */
+	if ((vm_memguard_divisor <= 0) ||
+	    ((vm_kmem_size / vm_memguard_divisor) == 0))
+		vm_memguard_divisor = 10;
+	/*
+	 * Limit consumption of pages to 1/vm_memguard_divisor of
+	 * system memory.  If the KVA is smaller than this then the
+	 * KVA limit comes into play first.  This prevents memguard's
+	 * page promotions from completely using up memory, since most
+	 * malloc(9) calls are sub-page.
+	 */
+	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
+	/*
+	 * We want as much KVA as we can take safely.  Use at most our
+	 * allotted fraction of kmem_max.  Limit this to twice the
+	 * physical memory to avoid using too much memory as pagetable
+	 * pages.
+	 */
+	memguard_mapsize = vm_kmem_max / vm_memguard_divisor;
+	/* size must be multiple of PAGE_SIZE */
+	memguard_mapsize = round_page(memguard_mapsize);
+	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
+		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
+	if (vm_kmem_size + memguard_mapsize > vm_kmem_max)
+		return (vm_kmem_max);
+	return (vm_kmem_size + memguard_mapsize);
+}
+
+/*
+ * Initialize the MemGuard mock allocator.  All objects from MemGuard come
+ * out of a single VM map (contiguous chunk of address space).
+ */
+void
+memguard_init(vm_map_t parent_map)
+{
+	vm_offset_t base, limit;
+
+	memguard_map = kmem_suballoc(parent_map, &base, &limit,
+	    memguard_mapsize, FALSE);
+	memguard_map->system_map = 1;
+	KASSERT(memguard_mapsize == limit - base,
+	    ("Expected %ld, got %ld", memguard_mapsize, limit - base));
+	memguard_cursor = base;
+
+	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
+	printf("\tMEMGUARD map base: 0x%lx\n", base);
+	printf("\tMEMGUARD map limit: 0x%lx\n", limit);
+	printf("\tMEMGUARD map size: %ld KBytes\n", memguard_mapsize >> 10);
+}
+
+/*
+ * Run things that can't be done as early as memguard_init().
+ */
+static void
+memguard_sysinit(void)
+{
+	struct sysctl_oid_list *parent;
+
+	parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);
+
+	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
+	    &memguard_map->min_offset, "MemGuard KVA base");
+	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
+	    &memguard_map->max_offset, "MemGuard KVA end");
+	SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
+	    &memguard_map->size, "MemGuard KVA used");
+}
+SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);
+
+/*
+ * v2sizep() converts a virtual address of the first page allocated for
+ * an item to a pointer to u_long recording the size of the original
+ * allocation request.
+ *
+ * This routine is very similar to those defined by UMA in uma_int.h.
+ * The difference is that this routine stores the mgfifo in one of the
+ * page's fields that is unused when the page is wired rather than the
+ * object field, which is used.
+ */
+static u_long *
+v2sizep(vm_offset_t va)
+{
+	struct vm_page *p;
+
+	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
+	return (u_long *)&p->pageq.tqe_next;
+}
+
+/*
+ * Allocate a single object of specified size with specified flags
+ * (either M_WAITOK or M_NOWAIT).
+ */
+void *
+memguard_alloc(unsigned long req_size, int flags)
+{
+	vm_offset_t addr;
+	u_long size;
+	int rv;
+
+	size = round_page(req_size);
+	if (size == 0)
+		return NULL;
+	vm_map_lock(memguard_map);
+	/*
+	 * When we pass our memory limit, reject sub-page allocations.
+	 * Page-size and larger allocations will use the same amount
+	 * of physical memory whether we allocate or hand off to
+	 * uma_large_alloc(), so keep those.
+	 */
+	if (memguard_map->size >= memguard_physlimit &&
+	    req_size < PAGE_SIZE) {
+		addr = (vm_offset_t)NULL;
+		memguard_fail_pgs++;
+		goto out;
+	}
+	/*
+	 * Keep a moving cursor so we don't recycle KVA as long as
+	 * possible.  It's not perfect, since we don't know in what
+	 * order previous allocations will be free'd, but it's simple
+	 * and fast, and requires O(1) additional storage.
+	 *
+	 * XXX This scheme will lead to greater fragmentation of the
+	 * map, unless vm_map_findspace() is tweaked.
+	 */
+	for (;;) {
+		rv = vm_map_findspace(memguard_map, memguard_cursor,
+		    size, &addr);
+		if (rv == KERN_SUCCESS)
+			break;
+		/*
+		 * The map has no space.  This may be due to
+		 * fragmentation, or because the cursor is near the
+		 * end of the map.
+		 */
+		if (memguard_cursor == vm_map_min(memguard_map)) {
+			memguard_fail_kva++;
+			addr = (vm_offset_t)NULL;
+			goto out;
+		}
+		memguard_wrap++;
+		memguard_cursor = vm_map_min(memguard_map);
+	}
+	rv = kmem_back(memguard_map, addr, size, flags);
+	if (rv != KERN_SUCCESS) {
+		memguard_fail_pgs++;
+		addr = (vm_offset_t)NULL;
+		goto out;
+	}
+	memguard_cursor = addr + size;
+	*v2sizep(trunc_page(addr)) = req_size;
+	memguard_succ++;
+out:
+	vm_map_unlock(memguard_map);
+	return ((void *)addr);
+}
+
+int
+is_memguard_addr(void *addr)
+{
+	vm_offset_t a = (vm_offset_t)(uintptr_t)addr;
+
+	return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
+}
+
+/*
+ * Free specified single object.
+ */
+void
+memguard_free(void *ptr)
+{
+	vm_offset_t addr;
+	u_long req_size, size;
+	char *temp;
+	int i;
+
+	addr = trunc_page((uintptr_t)ptr);
+	req_size = *v2sizep(addr);
+	size = round_page(req_size);
+
+	/*
+	 * Page should not be guarded right now, so force a write.
+	 * The purpose of this is to increase the likelihood of
+	 * catching a double-free, but not necessarily a
+	 * tamper-after-free (the second thread freeing might not
+	 * write before freeing, so this forces it to and,
+	 * subsequently, trigger a fault).
+	 */
+	temp = ptr;
+	for (i = 0; i < size; i += PAGE_SIZE)
+		temp[i] = 'M';
+
+	kmem_free(memguard_map, addr, size);
+}
+
+int
+memguard_cmp(struct malloc_type *mtp, unsigned long size)
+{
+
+#if 1
+	/*
+	 * The safest way of comparison is to always compare short description
+	 * string of memory type, but it is also the slowest way.
+	 */
+	return (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0);
+#else
+	/*
+	 * If we compare pointers, there are two possible problems:
+	 * 1. Memory type was unloaded and new memory type was allocated at the
+	 *    same address.
+	 * 2. Memory type was unloaded and loaded again, but allocated at a
+	 *    different address.
+	 */
+	if (vm_memguard_mtype != NULL)
+		return (mtp == vm_memguard_mtype);
+	if (strcmp(mtp->ks_shortdesc, vm_memguard_desc) == 0) {
+		vm_memguard_mtype = mtp;
+		return (1);
+	}
+	return (0);
+#endif
+}

Property changes on: vm/isi_memguard.c
___________________________________________________________________
Added: svn:mime-type
   + text/plain
Added: svn:eol-style
   + native

Index: vm/vm_extern.h
===================================================================
--- vm/vm_extern.h	(revision 143550)
+++ vm/vm_extern.h	(working copy)
@@ -54,30 +54,31 @@ int sbrk(struct thread *, void *, int *)
 int sstk(struct thread *, void *, int *);
 int swapon(struct thread *, void *, int *);
 #endif			/* TYPEDEF_FOR_UAP */
 
 int kernacc(void *, int, int);
 vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
     unsigned long boundary, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
 void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
 void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
 void kmem_init(vm_offset_t, vm_offset_t);
 vm_offset_t kmem_malloc(vm_map_t, vm_size_t, boolean_t);
+int kmem_back(vm_map_t, vm_offset_t, vm_size_t, int);
 vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
     boolean_t);
 void swapout_procs(int);
 int useracc(void *, int, int);
 int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
 void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
 int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
 void vm_waitproc(struct proc *);
 int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
 void vm_set_page_size(void);
 struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
 struct vmspace *vmspace_fork(struct vmspace *);
 int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
