diff --git a/sys/vfs/tmpfs/tmpfs.h b/sys/vfs/tmpfs/tmpfs.h
index 294c187..9adb293
--- a/sys/vfs/tmpfs/tmpfs.h
+++ b/sys/vfs/tmpfs/tmpfs.h
@@ -70,16 +70,16 @@ MALLOC_DECLARE(M_TMPFS);
 #define kmutex_t		struct lock
 
 #define mutex_init(mtx, a, b)	lockinit(mtx, "mutex", 0, 0)
-#define mutex_destroy(mtx)		lockuninit(mtx)
-#define mutex_enter(mtx)		lockmgr(mtx, LK_EXCLUSIVE)
-#define mutex_exit(mtx)			lockmgr(mtx, LK_RELEASE)
+#define mutex_destroy(mtx)	lockuninit(mtx)
+#define mutex_enter(mtx)	lockmgr(mtx, LK_EXCLUSIVE)
+#define mutex_exit(mtx)		lockmgr(mtx, LK_RELEASE)
 
-#define MNT_GETARGS 0
-#define INT_MAX		0xffffffff
-#define MAXNAMLEN	MNAMELEN
-#define IMNT_MPSAFE	0
+#define MNT_GETARGS		0
+#define INT_MAX			0xffffffff
+#define MAXNAMLEN		MNAMELEN
+#define IMNT_MPSAFE		0
 
-#define v_interlock	v_lock
+#define v_interlock		v_lock
 
 #define UPDATE_CLOSE		0
 
@@ -92,17 +92,7 @@ MALLOC_DECLARE(M_TMPFS);
 #define vm_page_lock_queues()
 #define vm_page_unlock_queues()
 
-
-/* --------------------------------------------------------------------- */
-/* For the kernel and anyone who likes peeking into kernel memory        */
-/* --------------------------------------------------------------------- */
-
-#if defined(_KERNEL)
 #include <vfs/tmpfs/tmpfs_pool.h>
-#endif /* defined(_KERNEL) */
-
-/* --------------------------------------------------------------------- */
-
 /*
  * Internal representation of a tmpfs directory entry.
  */
diff --git a/sys/vfs/tmpfs/tmpfs_pool.c b/sys/vfs/tmpfs/tmpfs_pool.c
deleted file mode 100644
index 6a4c12d..0000000
--- a/sys/vfs/tmpfs/tmpfs_pool.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*	$NetBSD: tmpfs_pool.c,v 1.14 2008/04/28 20:24:02 martin Exp $	*/
-
-/*
- * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
- * 2005 program.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Pool allocator and convenience routines for tmpfs.
- */
-
-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tmpfs_pool.c,v 1.14 2008/04/28 20:24:02 martin Exp $");
-
-#include <sys/param.h>
-#include <sys/pool.h>
-#include <sys/atomic.h>
-
-#include <uvm/uvm.h>
-
-#include <fs/tmpfs/tmpfs.h>
-
-/* --------------------------------------------------------------------- */
-
-void *	tmpfs_pool_page_alloc(struct pool *, int);
-void	tmpfs_pool_page_free(struct pool *, void *);
-
-/* XXX: Will go away when our pool allocator does what it has to do by
- * itself. */
-extern void*	pool_page_alloc_nointr(struct pool *, int);
-extern void	pool_page_free_nointr(struct pool *, void *);
-
-/* --------------------------------------------------------------------- */
-
-/*
- * tmpfs provides a custom pool allocator mostly to exactly keep track of
- * how many memory is used for each file system instance.  These pools are
- * never shared across multiple mount points for the reasons described
- * below:
- *
- * - It is very easy to control how many memory is associated with a
- *   given file system.  tmpfs provides a custom pool allocator that
- *   controls memory usage according to previously specified usage
- *   limits, by simply increasing or decreasing a counter when pages
- *   are allocated or released, respectively.
- *
- *   If the pools were shared, we could easily end up with unaccounted
- *   memory, thus taking incorrect decisions on the amount of memory
- *   use.  As an example to prove this point, consider two mounted
- *   instances of tmpfs, one mounted on A and another one on B. Assume
- *   that each memory page can hold up to four directory entries and
- *   that, for each entry you create on A, you create three on B
- *   afterwards.  After doing this, each memory page will be holding an
- *   entry from A and three for B.  If you sum up all the space taken by
- *   the total amount of allocated entries, rounded up to a page
- *   boundary, that number will match the number of allocated pages, so
- *   everything is fine.
- *
- *   Now suppose we unmount B.  Given that the file system has to
- *   disappear, we have to delete all the directory entries attached to
- *   it.  But the problem is that freeing those entries will not release
- *   any memory page.  Instead, each page will be filled up to a 25%,
- *   and the rest, a 75%, will be lost.  Not lost in a strict term,
- *   because the memory can be reused by new entries, but lost in the
- *   sense that it is not accounted by any file system.  Despite A will
- *   think it is using an amount 'X' of memory, it will be really using
- *   fourth times that number, thus causing mistakes when it comes to
- *   decide if there is more free space for that specific instance of
- *   tmpfs.
- *
- * - The number of page faults and cache misses is reduced given that all
- *   entries of a given file system are stored in less pages.  Note that
- *   this is true because it is common to allocate and/or access many
- *   entries at once on a specific file system.
- *
- *   Following the example given above, listing a directory on file system
- *   A could result, in the worst case scenario, in fourth times more page
- *   faults if we shared the pools.
- */
-struct pool_allocator tmpfs_pool_allocator = {
-	.pa_alloc = tmpfs_pool_page_alloc,
-	.pa_free = tmpfs_pool_page_free,
-};
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Initializes the pool pointed to by tpp and associates it to the mount
- * point tmp.  The size of its elements is set to size.  Its wait channel
- * is derived from the string given in what and the mount point given in
- * 'tmp', which should result in a unique string among all existing pools.
- */
-void
-tmpfs_pool_init(struct tmpfs_pool *tpp, size_t size, const char *what,
-    struct tmpfs_mount *tmp)
-{
-	int cnt;
-
-	cnt = snprintf(tpp->tp_name, sizeof(tpp->tp_name),
-	    "%s_tmpfs_%p", what, tmp);
-	KASSERT(cnt < sizeof(tpp->tp_name));
-
-	pool_init(&tpp->tp_pool, size, 0, 0, 0, tpp->tp_name,
-	    &tmpfs_pool_allocator, IPL_NONE);
-	tpp->tp_mount = tmp;
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Destroys the pool pointed to by 'tpp'.
- */
-void
-tmpfs_pool_destroy(struct tmpfs_pool *tpp)
-{
-
-	pool_destroy((struct pool *)tpp);
-}
-
-/* --------------------------------------------------------------------- */
-
-void *
-tmpfs_pool_page_alloc(struct pool *pp, int flags)
-{
-	struct tmpfs_pool *tpp;
-	struct tmpfs_mount *tmp;
-	unsigned int pages;
-	void *page;
-
-	tpp = (struct tmpfs_pool *)pp;
-	tmp = tpp->tp_mount;
-
-	pages = atomic_inc_uint_nv(&tmp->tm_pages_used);
-	if (pages >= TMPFS_PAGES_MAX(tmp)) {
-		atomic_dec_uint(&tmp->tm_pages_used);
-		return NULL;
-	}
-	/*
-	 * tmpfs never specifies PR_WAITOK as we enforce local limits
-	 * on memory allocation.  However, we should wait for memory
-	 * to become available if under our limit.  XXX The result of
-	 * the TMPFS_PAGES_MAX() check is stale.
-	 */
-	page = pool_page_alloc_nointr(pp, flags | PR_WAITOK);
-	if (page == NULL) {
-		atomic_dec_uint(&tmp->tm_pages_used);
-	}
-
-	return page;
-}
-
-/* --------------------------------------------------------------------- */
-
-void
-tmpfs_pool_page_free(struct pool *pp, void *v)
-{
-	struct tmpfs_pool *tpp;
-	struct tmpfs_mount *tmp;
-
-	tpp = (struct tmpfs_pool *)pp;
-	tmp = tpp->tp_mount;
-
-	atomic_dec_uint(&tmp->tm_pages_used);
-	pool_page_free_nointr(pp, v);
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Initialize the string pool pointed to by 'tsp' and attach it to the
- * 'tmp' mount point.
- */
-void
-tmpfs_str_pool_init(struct tmpfs_str_pool *tsp, struct tmpfs_mount *tmp)
-{
-
-	tmpfs_pool_init(&tsp->tsp_pool_16,   16,   "str", tmp);
-	tmpfs_pool_init(&tsp->tsp_pool_32,   32,   "str", tmp);
-	tmpfs_pool_init(&tsp->tsp_pool_64,   64,   "str", tmp);
-	tmpfs_pool_init(&tsp->tsp_pool_128,  128,  "str", tmp);
-	tmpfs_pool_init(&tsp->tsp_pool_256,  256,  "str", tmp);
-	tmpfs_pool_init(&tsp->tsp_pool_512,  512,  "str", tmp);
-	tmpfs_pool_init(&tsp->tsp_pool_1024, 1024, "str", tmp);
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Destroy the given string pool.
- */
-void
-tmpfs_str_pool_destroy(struct tmpfs_str_pool *tsp)
-{
-
-	tmpfs_pool_destroy(&tsp->tsp_pool_16);
-	tmpfs_pool_destroy(&tsp->tsp_pool_32);
-	tmpfs_pool_destroy(&tsp->tsp_pool_64);
-	tmpfs_pool_destroy(&tsp->tsp_pool_128);
-	tmpfs_pool_destroy(&tsp->tsp_pool_256);
-	tmpfs_pool_destroy(&tsp->tsp_pool_512);
-	tmpfs_pool_destroy(&tsp->tsp_pool_1024);
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Allocate a new string with a minimum length of len from the 'tsp'
- * string pool.  The pool can return a bigger string, but the caller must
- * not make any assumptions about the real object size.
- */
-char *
-tmpfs_str_pool_get(struct tmpfs_str_pool *tsp, size_t len, int flags)
-{
-	struct tmpfs_pool *p;
-
-	KASSERT(len <= 1024);
-
-	if      (len <= 16)   p = &tsp->tsp_pool_16;
-	else if (len <= 32)   p = &tsp->tsp_pool_32;
-	else if (len <= 64)   p = &tsp->tsp_pool_64;
-	else if (len <= 128)  p = &tsp->tsp_pool_128;
-	else if (len <= 256)  p = &tsp->tsp_pool_256;
-	else if (len <= 512)  p = &tsp->tsp_pool_512;
-	else if (len <= 1024) p = &tsp->tsp_pool_1024;
-	else {
-		KASSERT(0);
-		p = NULL; /* Silence compiler warnings */
-	}
-
-	return (char *)TMPFS_POOL_GET(p, flags);
-}
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Destroy the str string, which was allocated from the 'tsp' string pool
- * with a length of 'len'.  The length must match the one given during
- * allocation.
- */
-void
-tmpfs_str_pool_put(struct tmpfs_str_pool *tsp, char *str, size_t len)
-{
-	struct tmpfs_pool *p;
-
-	KASSERT(len <= 1024);
-
-	if      (len <= 16)   p = &tsp->tsp_pool_16;
-	else if (len <= 32)   p = &tsp->tsp_pool_32;
-	else if (len <= 64)   p = &tsp->tsp_pool_64;
-	else if (len <= 128)  p = &tsp->tsp_pool_128;
-	else if (len <= 256)  p = &tsp->tsp_pool_256;
-	else if (len <= 512)  p = &tsp->tsp_pool_512;
-	else if (len <= 1024) p = &tsp->tsp_pool_1024;
-	else {
-		KASSERT(0);
-		p = NULL; /* Silence compiler warnings */
-	}
-
-	TMPFS_POOL_PUT(p, str);
-}
diff --git a/sys/vfs/tmpfs/tmpfs_pool.h b/sys/vfs/tmpfs/tmpfs_pool.h
index 017c7d8..f444452
--- a/sys/vfs/tmpfs/tmpfs_pool.h
+++ b/sys/vfs/tmpfs/tmpfs_pool.h
@@ -43,6 +43,7 @@ struct pool
 void *pool_get(struct pool* pp, int flags);
 void  pool_put(struct pool* pp, void *p);
 
+
 /* --------------------------------------------------------------------- */
 
 /*
@@ -107,6 +108,7 @@ void	tmpfs_pool_destroy(struct tmpfs_pool *tpp);
 
 /* --------------------------------------------------------------------- */
 
+
 /*
  * Functions to manipulate a tmpfs_str_pool.
  */
diff --git a/sys/vfs/tmpfs/tmpfs_subr.c b/sys/vfs/tmpfs/tmpfs_subr.c
index ba0633e..3e9b8a4
--- a/sys/vfs/tmpfs/tmpfs_subr.c
+++ b/sys/vfs/tmpfs/tmpfs_subr.c
@@ -861,14 +861,18 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *node;
 	off_t oldsize;
+	int truncating;
+	off_t aligned_size;
 
-	DP("tmpfs_reg_resize vp %p newsize %d\n", vp, (int)newsize);
 	KKASSERT(vp->v_type == VREG);
 	KKASSERT(newsize >= 0);
 
 	node = VP_TO_TMPFS_NODE(vp);
 	tmp = VFS_TO_TMPFS(vp->v_mount);
 
+	DP("tmpfs_reg_resize vp %p oldsize %d newsize %d\n",
+		vp, (int) node->tn_size, (int) newsize);
+
 	/* Convert the old and new sizes to the number of pages needed to
 	 * store them.  It may happen that we do not need to do anything
 	 * because the last allocated page can accommodate the change on
@@ -890,8 +894,53 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 	tmp->tm_pages_used += (newpages - oldpages);
 	TMPFS_UNLOCK(tmp);
 
+	/*
+	 * XXX break atomicity, we can deadlock the backend
+	 * if we do not release the lock.  Probably not a
+	 * big deal here.
+	 */
+
+
+//	vnode_pager_setsize(vp, newsize);
+
+	if (newsize < node->tn_size) {
+		vtruncbuf(vp, newsize, PAGE_SIZE);
+		truncating = 1;
+	} else {
+		vnode_pager_setsize(vp, newsize);
+		truncating = 0;
+	}
 	node->tn_size = newsize;
-	vnode_pager_setsize(vp, newsize);
+//#if 0
+//	modflags |= HAMMER_INODE_DDIRTY;
+
+	/*
+	 * If truncating we have to clean out a portion of
+	 * the last block on-disk.  We do this in the
+	 * front-end buffer cache.
+	 */
+#if 0
+	aligned_size = round_page(newsize);
+	if (truncating && newsize < aligned_size) {
+		struct buf *bp;
+		int offset;
+
+		aligned_size -= PAGE_SIZE;
+
+		offset = (int) newsize & PAGE_MASK;
+		error = bread(vp, aligned_size, PAGE_SIZE, &bp);
+		//hammer_ip_frontend_trunc(ip, aligned_size);
+		if (error == 0) {
+			bzero(bp->b_data + offset, PAGE_SIZE - offset);
+			/* must de-cache direct-io offset */
+			bp->b_bio2.bio_offset = NOOFFSET;
+			bdwrite(bp);
+		} else {
+			kprintf("tmpfs_reg_resize ERROR %d\n", error);
+			brelse(bp);
+		}
+	}
+#endif
 	if (newsize < oldsize) {
 		size_t zerolen = round_page(newsize) - newsize;
 		vm_object_t uobj = node->tn_spec.tn_reg.tn_aobj;
@@ -899,6 +948,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 
 		/*  free "backing store" */
 		/* XXX */
+
 		crit_enter();
 		if (newpages < oldpages) {
 			swap_pager_freespace(uobj,
@@ -906,16 +956,18 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 			vm_object_page_remove(uobj,
 				OFF_TO_IDX(newsize + PAGE_MASK), 0, FALSE);
 		}
-
+#if 0
 		/* zero out the truncated part of the last page. */
-		if (zerolen > 0) {
+		if (zerolen >= 0) {
 			m = vm_page_grab(uobj, OFF_TO_IDX(newsize),
 					VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 			pmap_zero_page_area((vm_paddr_t)(vm_offset_t)m,
 			    PAGE_SIZE - zerolen, zerolen);
 			vm_page_wakeup(m);
 		}
+#endif
 		crit_exit();
+
 	}
 	error = 0;
 out:
@@ -1348,14 +1400,14 @@ tmpfs_pool_destroy(struct tmpfs_pool *tpp)
 }
 
 void *
-pool_get(struct pool* pp, int flags)
+pool_get(struct pool *pp, int flags)
 {
 	pp->p_nallocs++;
 	return kmalloc(pp->p_size, M_TMPFS, M_WAITOK | M_ZERO);
 }
 
 void
-pool_put(struct pool* pp, void *p)
+pool_put(struct pool *pp, void *p)
 {
 	pp->p_nfrees++;
 	kfree(p, M_TMPFS);
diff --git a/sys/vfs/tmpfs/tmpfs_vnops.c b/sys/vfs/tmpfs/tmpfs_vnops.c
index 5e6cafd..4b1dc45
--- a/sys/vfs/tmpfs/tmpfs_vnops.c
+++ b/sys/vfs/tmpfs/tmpfs_vnops.c
@@ -179,10 +179,6 @@ struct vop_ops tmpfs_fifo_vops = {
 	/* XXX .vop_kqfilter =			tmpfs_fifo_kqfilter */
 };
 
-int tmpfs_spec_write(struct vop_write_args *ap);
-int tmpfs_spec_read(struct vop_read_args *ap);
-int tmpfs_spec_close(struct vop_close_args *ap);
-
 int
 tmpfs_nresolve(struct vop_nresolve_args *ap)
 {
@@ -1253,13 +1249,12 @@ tmpfs_strategy_read(struct vop_strategy_args *ap)
 	}
 
 	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-	va = (caddr_t)sf_buf_kva(sf);
+	va = (caddr_t) sf_buf_kva(sf);
 	bcopy(va, bp->b_data, PAGE_SIZE);
 	sf_buf_free(sf);
 out:
 	vm_page_unwire(m, 0);
 	vm_page_activate(m);
-
 	vm_page_wakeup(m);
 	vm_object_pip_subtract(tobj, 1);
 
@@ -1291,7 +1286,7 @@ tmpfs_strategy_write(struct vop_strategy_args *ap)
 	struct tmpfs_node *node;
 
 	DP("tmpfs_strategy_write buf %p boff %x resid %x bcnt %x %x\n",
-			bp, bio->bio_offset, bp->b_resid, bp->b_bcount, bp->b_bufsize);
+		bp, bio->bio_offset, bp->b_resid, bp->b_bcount, bp->b_bufsize);
 
 	node = VP_TO_TMPFS_NODE(ap->a_vp);
 	addr = bio->bio_offset;
@@ -1324,7 +1319,7 @@ tmpfs_strategy_write(struct vop_strategy_args *ap)
 
 out:
 	if (!error) {
-		vm_page_set_validclean(tpg, 0, PAGE_SIZE);
+		vm_page_set_validclean(tpg, 0, PAGE_SIZE);/* XXX */
 		vm_page_zero_invalid(tpg, TRUE);
 		vm_page_dirty(tpg);
 	}
@@ -2060,3 +2055,5 @@ tmpfs_fifo_write(struct vop_write_args *ap)
 	int error = VOCALL(&fifo_vnode_vops, &ap->a_head);
 	return error;
 }
+
+
