Module Name:    src
Committed By:   maxv
Date:           Mon Jul 27 09:24:28 UTC 2015

Modified Files:
        src/sys/kern: subr_kmem.c
        src/sys/uvm: files.uvm
Removed Files:
        src/sys/uvm: uvm_kmguard.c uvm_kmguard.h

Log Message:
Several changes and improvements to KMEM_GUARD:
 - merge uvm_kmguard.{c,h} into subr_kmem.c. The code is only used there,
   and the merge makes things more consistent. It also allows KMEM_GUARD to
   be enabled without enabling DEBUG.
 - rename uvm_kmguard_XXX to kmem_guard_XXX, for consistency
 - improve kmem_guard_alloc() so that it supports allocations bigger than
   PAGE_SIZE (see the layout sketch below)
 - remove the canary value, and use the kmem header directly as the
   underflow pattern.
 - fix some comments
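
A rough sketch of the new allocation layout follows; it mirrors the code of
kmem_guard_alloc() in the diff below (SIZE_SIZE is the size of the kmem
header):

	/*
	 * Layout of a guarded allocation:
	 *
	 *  | ... mapped pages ... [kmem header][object] | unmapped guard page |
	 *
	 * The mapped area is rounded up to whole pages, plus one extra page
	 * that is never mapped, so that a read/write overflow faults.
	 */
	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;

	/*
	 * Push the object as close to the guard page as alignment allows.
	 * The header just in front of it stores the requested size and
	 * doubles as the underflow pattern checked at free time.
	 */
	p = (void **)((va + (size - PAGE_SIZE) - requested_size) &
	    ~(uintptr_t)ALIGNBYTES);
	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);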

(The UAF fifo is disabled for the moment; we actually need to register both
the va and its size, and add weight support so that it does not consume too
much memory.)
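
For illustration only, registering both the va and its size could be done by
storing small entries in the fifo and serializing updates with a lock; the
struct, field and function names below are hypothetical and not part of this
commit:

	struct kmem_guard_ent {
		vaddr_t		ge_va;
		vsize_t		ge_size;
	};

	static void
	kmem_guard_fifo_put(struct kmem_guard *kg, vaddr_t va, vsize_t size)
	{
		struct kmem_guard_ent old;
		u_int rotor;

		/* kg_lock and kg_ents are hypothetical fields. */
		mutex_enter(&kg->kg_lock);
		rotor = kg->kg_rotor++ % kg->kg_depth;
		old = kg->kg_ents[rotor];
		kg->kg_ents[rotor].ge_va = va;
		kg->kg_ents[rotor].ge_size = size;
		mutex_exit(&kg->kg_lock);

		/* Free whatever was pushed out of the fifo, if anything. */
		if (old.ge_va != 0) {
			vmem_free(kg->kg_vmem, old.ge_va, old.ge_size);
		}
	}

The weight support mentioned above would additionally have to track the total
amount of memory parked in the fifo, and start recycling entries early when it
grows too large.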


To generate a diff of this commit:
cvs rdiff -u -r1.60 -r1.61 src/sys/kern/subr_kmem.c
cvs rdiff -u -r1.24 -r1.25 src/sys/uvm/files.uvm
cvs rdiff -u -r1.11 -r0 src/sys/uvm/uvm_kmguard.c
cvs rdiff -u -r1.2 -r0 src/sys/uvm/uvm_kmguard.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/subr_kmem.c
diff -u src/sys/kern/subr_kmem.c:1.60 src/sys/kern/subr_kmem.c:1.61
--- src/sys/kern/subr_kmem.c:1.60	Tue Jul 22 07:38:41 2014
+++ src/sys/kern/subr_kmem.c	Mon Jul 27 09:24:28 2015
@@ -1,11 +1,11 @@
-/*	$NetBSD: subr_kmem.c,v 1.60 2014/07/22 07:38:41 maxv Exp $	*/
+/*	$NetBSD: subr_kmem.c,v 1.61 2015/07/27 09:24:28 maxv Exp $	*/
 
 /*-
- * Copyright (c) 2009 The NetBSD Foundation, Inc.
+ * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
- * by Andrew Doran.
+ * by Andrew Doran and Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -87,10 +87,10 @@
  *	Check the pattern on allocation.
  *
  * KMEM_GUARD
- *	A kernel with "option DEBUG" has "kmguard" debugging feature compiled
- *	in. See the comment in uvm/uvm_kmguard.c for what kind of bugs it tries
- *	to detect.  Even if compiled in, it's disabled by default because it's
- *	very expensive.  You can enable it on boot by:
+ *	A kernel with "option DEBUG" has "kmem_guard" debugging feature compiled
+ *	in. See the comment below for what kind of bugs it tries to detect. Even
+ *	if compiled in, it's disabled by default because it's very expensive.
+ *	You can enable it on boot by:
  *		boot -d
  *		db> w kmem_guard_depth 0t30000
  *		db> c
@@ -100,7 +100,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.60 2014/07/22 07:38:41 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.61 2015/07/27 09:24:28 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/callback.h>
@@ -112,7 +112,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_map.h>
-#include <uvm/uvm_kmguard.h>
 
 #include <lib/libkern/libkern.h>
 
@@ -182,8 +181,10 @@ static size_t kmem_cache_big_maxidx __re
 #endif /* defined(DIAGNOSTIC) */
 
 #if defined(DEBUG) && defined(_HARDKERNEL)
+#define	KMEM_SIZE
 #define	KMEM_POISON
 #define	KMEM_GUARD
+static void *kmem_freecheck;
 #endif /* defined(DEBUG) */
 
 #if defined(KMEM_POISON)
@@ -222,10 +223,20 @@ static void kmem_size_check(void *, size
 #ifndef KMEM_GUARD_DEPTH
 #define KMEM_GUARD_DEPTH 0
 #endif
+struct kmem_guard {
+	u_int		kg_depth;
+	intptr_t *	kg_fifo;
+	u_int		kg_rotor;
+	vmem_t *	kg_vmem;
+};
+
+static bool	kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
+static void *kmem_guard_alloc(struct kmem_guard *, size_t, bool);
+static void kmem_guard_free(struct kmem_guard *, size_t, void *);
+
 int kmem_guard_depth = KMEM_GUARD_DEPTH;
-size_t kmem_guard_size;
-static struct uvm_kmguard kmem_guard;
-static void *kmem_freecheck;
+static bool kmem_guard_enabled;
+static struct kmem_guard kmem_guard;
 #endif /* defined(KMEM_GUARD) */
 
 CTASSERT(KM_SLEEP == PR_WAITOK);
@@ -246,8 +257,8 @@ kmem_intr_alloc(size_t requested_size, k
 	KASSERT(requested_size > 0);
 
 #ifdef KMEM_GUARD
-	if (requested_size <= kmem_guard_size) {
-		return uvm_kmguard_alloc(&kmem_guard, requested_size,
+	if (kmem_guard_enabled) {
+		return kmem_guard_alloc(&kmem_guard, requested_size,
 		    (kmflags & KM_SLEEP) != 0);
 	}
 #endif
@@ -324,8 +335,8 @@ kmem_intr_free(void *p, size_t requested
 	KASSERT(requested_size > 0);
 
 #ifdef KMEM_GUARD
-	if (requested_size <= kmem_guard_size) {
-		uvm_kmguard_free(&kmem_guard, requested_size, p);
+	if (kmem_guard_enabled) {
+		kmem_guard_free(&kmem_guard, requested_size, p);
 		return;
 	}
 #endif
@@ -372,7 +383,6 @@ kmem_intr_free(void *p, size_t requested
 void *
 kmem_alloc(size_t size, km_flag_t kmflags)
 {
-
 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
 	    "kmem(9) should not be used from the interrupt context");
 	return kmem_intr_alloc(size, kmflags);
@@ -386,7 +396,6 @@ kmem_alloc(size_t size, km_flag_t kmflag
 void *
 kmem_zalloc(size_t size, km_flag_t kmflags)
 {
-
 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
 	    "kmem(9) should not be used from the interrupt context");
 	return kmem_intr_zalloc(size, kmflags);
@@ -400,7 +409,6 @@ kmem_zalloc(size_t size, km_flag_t kmfla
 void
 kmem_free(void *p, size_t size)
 {
-
 	KASSERT(!cpu_intr_p());
 	KASSERT(!cpu_softintr_p());
 	kmem_intr_free(p, size);
@@ -466,9 +474,8 @@ kmem_create_caches(const struct kmem_cac
 void
 kmem_init(void)
 {
-
 #ifdef KMEM_GUARD
-	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
+	kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
 	    kmem_va_arena);
 #endif
 	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
@@ -480,10 +487,34 @@ kmem_init(void)
 size_t
 kmem_roundup_size(size_t size)
 {
-
 	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
 }
 
+/*
+ * Used to dynamically allocate string with kmem accordingly to format.
+ */
+char *
+kmem_asprintf(const char *fmt, ...)
+{
+	int size __diagused, len;
+	va_list va;
+	char *str;
+
+	va_start(va, fmt);
+	len = vsnprintf(NULL, 0, fmt, va);
+	va_end(va);
+
+	str = kmem_alloc(len + 1, KM_SLEEP);
+
+	va_start(va, fmt);
+	size = vsnprintf(str, len + 1, fmt, va);
+	va_end(va);
+
+	KASSERT(size == len);
+
+	return str;
+}
+
 /* ------------------ DEBUG / DIAGNOSTIC ------------------ */
 
 #if defined(KMEM_POISON) || defined(KMEM_REDZONE)
@@ -626,27 +657,162 @@ kmem_redzone_check(void *p, size_t sz)
 #endif /* defined(KMEM_REDZONE) */
 
 
+#if defined(KMEM_GUARD)
 /*
- * Used to dynamically allocate string with kmem accordingly to format.
- */
-char *
-kmem_asprintf(const char *fmt, ...)
+ * The ultimate memory allocator for debugging, baby.  It tries to catch:
+ *
+ * 1. Overflow, in realtime. A guard page sits immediately after the
+ *    requested area; a read/write overflow therefore triggers a page
+ *    fault.
+ * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
+ *    just before the requested area, and holds the allocated size. Any
+ *    difference with what is given at free triggers a panic.
+ * 3. Underflow, at free. If an underflow occurs, the kmem header will be
+ *    modified, and 2. will trigger a panic.
+ * 4. Use-after-free. When freeing, the memory is unmapped, and depending
+ *    on the value of kmem_guard_depth, the kernel will more or less delay
+ *    the recycling of that memory. Which means that any ulterior read/write
+ *    access to the memory will trigger a page fault, given it hasn't been
+ *    recycled yet.
+ */
+
+#include <sys/atomic.h>
+#include <uvm/uvm.h>
+
+static bool
+kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
+{
+	vaddr_t va;
+
+	/* If not enabled, we have nothing to do. */
+	if (depth == 0) {
+		return false;
+	}
+	depth = roundup(depth, PAGE_SIZE / sizeof(void *));
+	KASSERT(depth != 0);
+
+	/*
+	 * Allocate fifo.
+	 */
+	va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
+	    UVM_KMF_WIRED | UVM_KMF_ZERO);
+	if (va == 0) {
+		return false;
+	}
+
+	/*
+	 * Init object.
+	 */
+	kg->kg_vmem = vm;
+	kg->kg_fifo = (void *)va;
+	kg->kg_depth = depth;
+	kg->kg_rotor = 0;
+
+	printf("kmem_guard(%p): depth %d\n", kg, depth);
+	return true;
+}
+
+static void *
+kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
+{
+	struct vm_page *pg;
+	vm_flag_t flags;
+	vmem_addr_t va;
+	vaddr_t loopva;
+	vsize_t loopsize;
+	size_t size;
+	void **p;
+
+	/*
+	 * Compute the size: take the kmem header into account, and add a guard
+	 * page at the end.
+	 */
+	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
+
+	/* Allocate pages of kernel VA, but do not map anything in yet. */
+	flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
+	if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
+		return NULL;
+	}
+
+	loopva = va;
+	loopsize = size - PAGE_SIZE;
+
+	while (loopsize) {
+		pg = uvm_pagealloc(NULL, loopva, NULL, 0);
+		if (__predict_false(pg == NULL)) {
+			if (waitok) {
+				uvm_wait("kmem_guard");
+				continue;
+			} else {
+				uvm_km_pgremove_intrsafe(kernel_map, va,
+				    va + size);
+				vmem_free(kg->kg_vmem, va, size);
+				return NULL;
+			}
+		}
+
+		pg->flags &= ~PG_BUSY;	/* new page */
+		UVM_PAGE_OWN(pg, NULL);
+		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
+		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
+
+		loopva += PAGE_SIZE;
+		loopsize -= PAGE_SIZE;
+	}
+
+	pmap_update(pmap_kernel());
+
+	/*
+	 * Offset the returned pointer so that the unmapped guard page sits
+	 * immediately after the returned object.
+	 */
+	p = (void **)((va + (size - PAGE_SIZE) - requested_size) & ~(uintptr_t)ALIGNBYTES);
+	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);
+	return (void *)p;
+}
+
+static void
+kmem_guard_free(struct kmem_guard *kg, size_t requested_size, void *p)
 {
-	int size __diagused, len;
-	va_list va;
-	char *str;
+	vaddr_t va;
+	u_int rotor;
+	size_t size;
+	uint8_t *ptr;
 
-	va_start(va, fmt);
-	len = vsnprintf(NULL, 0, fmt, va);
-	va_end(va);
+	ptr = (uint8_t *)p - SIZE_SIZE;
+	kmem_size_check(ptr, requested_size);
+	va = trunc_page((vaddr_t)ptr);
+	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
 
-	str = kmem_alloc(len + 1, KM_SLEEP);
+	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
+	KASSERT(!pmap_extract(pmap_kernel(), va + (size - PAGE_SIZE), NULL));
 
-	va_start(va, fmt);
-	size = vsnprintf(str, len + 1, fmt, va);
-	va_end(va);
+	/*
+	 * Unmap and free the pages. The last one is never allocated.
+	 */
+	uvm_km_pgremove_intrsafe(kernel_map, va, va + size);
+	pmap_update(pmap_kernel());
 
-	KASSERT(size == len);
+#if 0
+	/*
+	 * XXX: Here, we need to atomically register the va and its size in the
+	 * fifo.
+	 */
 
-	return str;
+	/*
+	 * Put the VA allocation into the list and swap an old one out to free.
+	 * This behaves mostly like a fifo.
+	 */
+	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
+	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
+	if (va != 0) {
+		vmem_free(kg->kg_vmem, va, size);
+	}
+#else
+	(void)rotor;
+	vmem_free(kg->kg_vmem, va, size);
+#endif
 }
+
+#endif /* defined(KMEM_GUARD) */

Index: src/sys/uvm/files.uvm
diff -u src/sys/uvm/files.uvm:1.24 src/sys/uvm/files.uvm:1.25
--- src/sys/uvm/files.uvm:1.24	Sun Apr 12 12:44:13 2015
+++ src/sys/uvm/files.uvm	Mon Jul 27 09:24:28 2015
@@ -1,4 +1,4 @@
-#	$NetBSD: files.uvm,v 1.24 2015/04/12 12:44:13 joerg Exp $
+#	$NetBSD: files.uvm,v 1.25 2015/07/27 09:24:28 maxv Exp $
 
 #
 # UVM options
@@ -28,7 +28,6 @@ file	uvm/uvm_glue.c			uvm
 file	uvm/uvm_init.c			uvm
 file	uvm/uvm_io.c			uvm
 file	uvm/uvm_km.c			uvm
-file	uvm/uvm_kmguard.c		debug
 file	uvm/uvm_loan.c			uvm
 file	uvm/uvm_map.c			uvm
 file	uvm/uvm_meter.c			uvm
