Module Name:    src
Committed By:   maxv
Date:           Thu Nov 14 16:23:53 UTC 2019

Modified Files:
        src/sys/arch/amd64/amd64: amd64_trap.S busfunc.S cpu_in_cksum.S
            cpufunc.S lock_stubs.S locore.S machdep.c mptramp.S spl.S
        src/sys/arch/amd64/conf: GENERIC Makefile.amd64
        src/sys/arch/amd64/include: cpu.h frameasm.h param.h pmap.h types.h
        src/sys/arch/x86/include: bus_defs.h pmap.h
        src/sys/arch/x86/x86: bus_dma.c pmap.c
        src/sys/conf: files
        src/sys/kern: files.kern kern_lwp.c kern_malloc.c subr_kmem.c
            subr_pool.c
        src/sys/lib/libkern: libkern.h
        src/sys/net: if.c
        src/sys/sys: atomic.h bus_proto.h cdefs.h lwp.h systm.h
        src/sys/uvm: uvm_km.c
Added Files:
        src/sys/arch/amd64/include: msan.h
        src/sys/kern: subr_msan.c
        src/sys/sys: msan.h

Log Message:
Add support for Kernel Memory Sanitizer (kMSan). It detects uninitialized
memory used by the kernel at run time, and just like kASan and kCSan, it
is an excellent feature. It has already detected 38 uninitialized variables
in the kernel during my testing, which I have since discreetly fixed.

We use two shadows:
 - "shad", to track uninitialized memory with a bit granularity (1:1).
   Each bit set to 1 in the shad corresponds to one uninitialized bit of
   real kernel memory.
 - "orig", to track the origin of the memory with a 4-byte granularity
   (1:1). Each uint32_t cell in the orig indicates the origin of the
   associated uint32_t of real kernel memory.

The memory consumption of these shadows is considerable, so at least 4GB of
RAM is recommended to run kMSan.

The compiler inserts calls to specific __msan_* functions on each memory
access, to manage both the shad and the orig and detect uninitialized
memory accesses that change the execution flow (like an "if" on an
uninitialized variable).

We mark as uninit several types of memory buffers (stack, pools, kmem,
malloc, uvm_km), and check each buffer passed to copyout, copyoutstr,
bwrite, if_transmit_lock and DMA operations, to detect uninitialized memory
that leaves the system. This allows us to detect kernel info leaks in a way
that is more efficient and also more user-friendly than KLEAK.

Contrary to kASan, kMSan requires comprehensive coverage, i.e. we cannot
tolerate having one non-instrumented function, because this could cause
false positives. kMSan cannot instrument ASM functions, so I converted
most of them to __asm__ inlines, which kMSan is able to instrument. Those
that remain receive special treatment.

Contrary to kASan again, kMSan uses a TLS, so we must context-switch this
TLS during interrupts. We use different contexts depending on the interrupt
level.

The orig tracks precisely the origin of a buffer. We use a special encoding
for the orig values, and pack together in each uint32_t cell of the orig:
 - a code designating the type of memory (Stack, Pool, etc), and
 - a compressed pointer, which points either (1) to a string containing
   the name of the variable associated with the cell, or (2) to an area
   in the kernel .text section which we resolve to a symbol name + offset.

This encoding allows us to avoid consuming extra memory for associating
information with each cell, and produces a precise output that can tell
for example the name of an uninitialized variable on the stack, the
function in which it was pushed on the stack, and the function where we
accessed this uninitialized variable.

kMSan is available with LLVM, but not with GCC.

The code is organized in a way that is similar to kASan and kCSan, so it
means that other architectures than amd64 can be supported.


To generate a diff of this commit:
cvs rdiff -u -r1.49 -r1.50 src/sys/arch/amd64/amd64/amd64_trap.S
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/amd64/amd64/busfunc.S
cvs rdiff -u -r1.3 -r1.4 src/sys/arch/amd64/amd64/cpu_in_cksum.S
cvs rdiff -u -r1.46 -r1.47 src/sys/arch/amd64/amd64/cpufunc.S
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/amd64/amd64/lock_stubs.S
cvs rdiff -u -r1.189 -r1.190 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.338 -r1.339 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.27 -r1.28 src/sys/arch/amd64/amd64/mptramp.S
cvs rdiff -u -r1.41 -r1.42 src/sys/arch/amd64/amd64/spl.S
cvs rdiff -u -r1.545 -r1.546 src/sys/arch/amd64/conf/GENERIC
cvs rdiff -u -r1.79 -r1.80 src/sys/arch/amd64/conf/Makefile.amd64
cvs rdiff -u -r1.64 -r1.65 src/sys/arch/amd64/include/cpu.h
cvs rdiff -u -r1.45 -r1.46 src/sys/arch/amd64/include/frameasm.h
cvs rdiff -u -r0 -r1.1 src/sys/arch/amd64/include/msan.h
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/amd64/include/param.h
cvs rdiff -u -r1.63 -r1.64 src/sys/arch/amd64/include/pmap.h \
    src/sys/arch/amd64/include/types.h
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/x86/include/bus_defs.h
cvs rdiff -u -r1.104 -r1.105 src/sys/arch/x86/include/pmap.h
cvs rdiff -u -r1.80 -r1.81 src/sys/arch/x86/x86/bus_dma.c
cvs rdiff -u -r1.338 -r1.339 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.1243 -r1.1244 src/sys/conf/files
cvs rdiff -u -r1.36 -r1.37 src/sys/kern/files.kern
cvs rdiff -u -r1.207 -r1.208 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.157 -r1.158 src/sys/kern/kern_malloc.c
cvs rdiff -u -r1.76 -r1.77 src/sys/kern/subr_kmem.c
cvs rdiff -u -r0 -r1.1 src/sys/kern/subr_msan.c
cvs rdiff -u -r1.261 -r1.262 src/sys/kern/subr_pool.c
cvs rdiff -u -r1.133 -r1.134 src/sys/lib/libkern/libkern.h
cvs rdiff -u -r1.464 -r1.465 src/sys/net/if.c
cvs rdiff -u -r1.16 -r1.17 src/sys/sys/atomic.h
cvs rdiff -u -r1.10 -r1.11 src/sys/sys/bus_proto.h
cvs rdiff -u -r1.148 -r1.149 src/sys/sys/cdefs.h
cvs rdiff -u -r1.187 -r1.188 src/sys/sys/lwp.h
cvs rdiff -u -r0 -r1.1 src/sys/sys/msan.h
cvs rdiff -u -r1.288 -r1.289 src/sys/sys/systm.h
cvs rdiff -u -r1.146 -r1.147 src/sys/uvm/uvm_km.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/amd64_trap.S
diff -u src/sys/arch/amd64/amd64/amd64_trap.S:1.49 src/sys/arch/amd64/amd64/amd64_trap.S:1.50
--- src/sys/arch/amd64/amd64/amd64_trap.S:1.49	Sat Oct 12 06:31:03 2019
+++ src/sys/arch/amd64/amd64/amd64_trap.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: amd64_trap.S,v 1.49 2019/10/12 06:31:03 maxv Exp $	*/
+/*	$NetBSD: amd64_trap.S,v 1.50 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@@ -224,6 +224,7 @@ IDTVEC(trap01)
 	cld
 	SMAP_ENABLE
 	IBRS_ENTER
+	KMSAN_ENTER
 	movw	%gs,TF_GS(%rsp)
 	movw	%fs,TF_FS(%rsp)
 	movw	%es,TF_ES(%rsp)
@@ -267,6 +268,7 @@ IDTVEC(trap02)
 	movw	%ds,TF_DS(%rsp)
 
 	SVS_ENTER_NMI
+	KMSAN_ENTER
 
 	movl	$MSR_GSBASE,%ecx
 	rdmsr
@@ -292,6 +294,7 @@ IDTVEC(trap02)
 	IBRS_LEAVE
 1:
 
+	KMSAN_LEAVE
 	SVS_LEAVE_NMI
 	INTR_RESTORE_GPRS
 	addq	$TF_REGSIZE+16,%rsp
@@ -668,6 +671,7 @@ calltrap:
 	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
+	KMSAN_INIT_ARG(8)
 	call	_C_LABEL(trap)
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 3:	CHECK_DEFERRED_SWITCH

Index: src/sys/arch/amd64/amd64/busfunc.S
diff -u src/sys/arch/amd64/amd64/busfunc.S:1.11 src/sys/arch/amd64/amd64/busfunc.S:1.12
--- src/sys/arch/amd64/amd64/busfunc.S:1.11	Sat Jun 22 05:20:57 2013
+++ src/sys/arch/amd64/amd64/busfunc.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: busfunc.S,v 1.11 2013/06/22 05:20:57 uebayasi Exp $	*/
+/*	$NetBSD: busfunc.S,v 1.12 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,6 +30,7 @@
  */
 
 #include <machine/asm.h>
+#include <machine/frameasm.h>
 
 #include "assym.h"
 
@@ -47,10 +48,12 @@ ENTRY(bus_space_read_1)
 	cmpl	$X86_BUS_SPACE_IO, BST_TYPE(%rdi)
 	je	1f
 	movzbl	(%rdx), %eax
+	KMSAN_INIT_RET(1)
 	ret
 1:
 	xorl	%eax, %eax
 	inb	%dx, %al
+	KMSAN_INIT_RET(1)
 	ret
 END(bus_space_read_1)
 
@@ -63,10 +66,12 @@ ENTRY(bus_space_read_2)
 	cmpl	$X86_BUS_SPACE_IO, BST_TYPE(%rdi)
 	je	1f
 	movzwl	(%rdx), %eax
+	KMSAN_INIT_RET(2)
 	ret
 1:
 	xorl	%eax, %eax
 	inw	%dx, %ax
+	KMSAN_INIT_RET(2)
 	ret
 END(bus_space_read_2)
 
@@ -79,9 +84,11 @@ ENTRY(bus_space_read_4)
 	cmpl	$X86_BUS_SPACE_IO, BST_TYPE(%rdi)
 	je	1f
 	movl	(%rdx), %eax
+	KMSAN_INIT_RET(4)
 	ret
 1:
 	inl	%dx, %eax
+	KMSAN_INIT_RET(4)
 	ret
 END(bus_space_read_4)
 
@@ -94,6 +101,7 @@ ENTRY(bus_space_read_8)
 	cmpl	$X86_BUS_SPACE_IO, BST_TYPE(%rdi)
 	je	.Ldopanic
 	movq	(%rdx), %rax
+	KMSAN_INIT_RET(8)
 	ret
 END(bus_space_read_8)
 

Index: src/sys/arch/amd64/amd64/cpu_in_cksum.S
diff -u src/sys/arch/amd64/amd64/cpu_in_cksum.S:1.3 src/sys/arch/amd64/amd64/cpu_in_cksum.S:1.4
--- src/sys/arch/amd64/amd64/cpu_in_cksum.S:1.3	Tue Jun 30 21:08:24 2015
+++ src/sys/arch/amd64/amd64/cpu_in_cksum.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_in_cksum.S,v 1.3 2015/06/30 21:08:24 christos Exp $ */
+/* $NetBSD: cpu_in_cksum.S,v 1.4 2019/11/14 16:23:52 maxv Exp $ */
 
 /*-
  * Copyright (c) 2008 Joerg Sonnenberger <jo...@netbsd.org>.
@@ -30,6 +30,7 @@
  */
 
 #include <machine/asm.h>
+#include <machine/frameasm.h>
 #include "assym.h"
 
 ENTRY(cpu_in_cksum)
@@ -282,6 +283,7 @@ ENTRY(cpu_in_cksum)
 .Mreturn:
 	popq	%rbx
 	popq	%rbp
+	KMSAN_INIT_RET(4)
 	ret
 
 .Mout_of_mbufs:

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.46 src/sys/arch/amd64/amd64/cpufunc.S:1.47
--- src/sys/arch/amd64/amd64/cpufunc.S:1.46	Wed Oct 30 17:06:57 2019
+++ src/sys/arch/amd64/amd64/cpufunc.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.46 2019/10/30 17:06:57 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.47 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -153,6 +153,7 @@ END(setusergs)
 ENTRY(x86_read_flags)
 	pushfq
 	popq	%rax
+	KMSAN_INIT_RET(8)
 	ret
 END(x86_read_flags)
 
@@ -174,6 +175,7 @@ ENTRY(tsc_get_timecount)
 	addl	CPUVAR(CC_SKEW), %eax
 	cmpq	%rdi, L_NCSW(%rcx)
 	jne	2f
+	KMSAN_INIT_RET(4)
 	ret
 2:
 	jmp	1b
@@ -194,6 +196,13 @@ ENTRY(rdmsr_safe)
 
 	xorq	%rax, %rax
 	movq	%rax, PCB_ONFAULT(%r8)
+#ifdef KMSAN
+	movq	%rsi,%rdi
+	movq	$8,%rsi
+	xorq	%rdx,%rdx
+	callq	_C_LABEL(kmsan_mark)
+#endif
+	KMSAN_INIT_RET(4)
 	ret
 END(rdmsr_safe)
 
@@ -211,12 +220,14 @@ ENTRY(cpu_counter)
 	shlq	$32, %rdx
 	orq	%rdx, %rax
 	addq	CPUVAR(CC_SKEW), %rax
+	KMSAN_INIT_RET(8)
 	ret
 END(cpu_counter)
 
 ENTRY(cpu_counter32)
 	rdtsc
 	addl	CPUVAR(CC_SKEW), %eax
+	KMSAN_INIT_RET(4)
 	ret
 END(cpu_counter32)
 
@@ -230,11 +241,13 @@ END(breakpoint)
 
 ENTRY(x86_curcpu)
 	movq	%gs:(CPU_INFO_SELF), %rax
+	KMSAN_INIT_RET(8)
 	ret
 END(x86_curcpu)
 
 ENTRY(x86_curlwp)
 	movq	%gs:(CPU_INFO_CURLWP), %rax
+	KMSAN_INIT_RET(8)
 	ret
 END(x86_curlwp)
 
@@ -246,12 +259,14 @@ END(cpu_set_curpri)
 ENTRY(__byte_swap_u32_variable)
 	movl	%edi, %eax
 	bswapl	%eax
+	KMSAN_INIT_RET(4)
 	ret
 END(__byte_swap_u32_variable)
 
 ENTRY(__byte_swap_u16_variable)
 	movl	%edi, %eax
 	xchgb	%al, %ah
+	KMSAN_INIT_RET(2)
 	ret
 END(__byte_swap_u16_variable)
 
@@ -330,6 +345,7 @@ ENTRY(inb)
 	movq	%rdi, %rdx
 	xorq	%rax, %rax
 	inb	%dx, %al
+	KMSAN_INIT_RET(1)
 	ret
 END(inb)
 
@@ -346,6 +362,7 @@ ENTRY(inw)
 	movq	%rdi, %rdx
 	xorq	%rax, %rax
 	inw	%dx, %ax
+	KMSAN_INIT_RET(2)
 	ret
 END(inw)
 
@@ -362,6 +379,7 @@ ENTRY(inl)
 	movq	%rdi, %rdx
 	xorq	%rax, %rax
 	inl	%dx, %eax
+	KMSAN_INIT_RET(4)
 	ret
 END(inl)
 

Index: src/sys/arch/amd64/amd64/lock_stubs.S
diff -u src/sys/arch/amd64/amd64/lock_stubs.S:1.32 src/sys/arch/amd64/amd64/lock_stubs.S:1.33
--- src/sys/arch/amd64/amd64/lock_stubs.S:1.32	Thu Sep  5 12:57:30 2019
+++ src/sys/arch/amd64/amd64/lock_stubs.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: lock_stubs.S,v 1.32 2019/09/05 12:57:30 maxv Exp $	*/
+/*	$NetBSD: lock_stubs.S,v 1.33 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -343,6 +343,7 @@ ENTRY(__cpu_simple_lock_try)
 	cmpxchgb %ah, (%rdi)
 	movl	$0, %eax
 	setz	%al
+	KMSAN_INIT_RET(4)
 	RET
 END(__cpu_simple_lock_try)
 

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.189 src/sys/arch/amd64/amd64/locore.S:1.190
--- src/sys/arch/amd64/amd64/locore.S:1.189	Sat Oct 12 06:31:03 2019
+++ src/sys/arch/amd64/amd64/locore.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.189 2019/10/12 06:31:03 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.190 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -1235,6 +1235,7 @@ ENTRY(cpu_switchto)
 
 .Lswitch_return:
 	/* Return to the new LWP, returning 'oldlwp' in %rax. */
+	KMSAN_INIT_RET(8)
 	movq	%r13,%rax
 	popq	%r15
 	popq	%r14
@@ -1321,6 +1322,7 @@ ENTRY(handle_syscall)
 	STI(si)
 	/* Pushed T_ASTFLT into tf_trapno on entry. */
 	movq	%rsp,%rdi
+	KMSAN_INIT_ARG(8)
 	call	_C_LABEL(trap)
 	jmp	.Lsyscall_checkast	/* re-check ASTs */
 END(handle_syscall)
@@ -1336,8 +1338,10 @@ ENTRY(lwp_trampoline)
 	movq	%rbp,%r14	/* for .Lsyscall_checkast */
 	movq	%rax,%rdi
 	xorq	%rbp,%rbp
+	KMSAN_INIT_ARG(16)
 	call	_C_LABEL(lwp_startup)
 	movq	%r13,%rdi
+	KMSAN_INIT_ARG(8)
 	call	*%r12
 	jmp	.Lsyscall_checkast
 END(lwp_trampoline)
@@ -1410,6 +1414,7 @@ IDTVEC(\name)
 	.if	\is_svs
 		SVS_ENTER
 	.endif
+	KMSAN_ENTER
 	jmp	handle_syscall
 IDTVEC_END(\name)
 .endm
@@ -1453,6 +1458,7 @@ IDTVEC_END(osyscall)
 	TEXT_USER_BEGIN
 	_ALIGN_TEXT
 LABEL(syscall_sysret)
+	KMSAN_LEAVE
 	MDS_LEAVE
 	SVS_LEAVE
 	IBRS_LEAVE
@@ -1501,10 +1507,12 @@ ENTRY(sse2_idlezero_page)
 	sfence
 	incl	%eax
 	popq	%rbp
+	KMSAN_INIT_RET(1)
 	ret
 2:
 	sfence
 	popq	%rbp
+	KMSAN_INIT_RET(1)
 	ret
 END(sse2_idlezero_page)
 
@@ -1546,6 +1554,7 @@ END(pagezero)
 	.type intrfastexit,@function
 LABEL(intrfastexit)
 	NOT_XEN(cli;)
+	KMSAN_LEAVE
 
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jz	.Lkexit

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.338 src/sys/arch/amd64/amd64/machdep.c:1.339
--- src/sys/arch/amd64/amd64/machdep.c:1.338	Tue Nov  5 20:19:17 2019
+++ src/sys/arch/amd64/amd64/machdep.c	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.338 2019/11/05 20:19:17 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.339 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.338 2019/11/05 20:19:17 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.339 2019/11/14 16:23:52 maxv Exp $");
 
 #include "opt_modular.h"
 #include "opt_user_ldt.h"
@@ -153,6 +153,7 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 
 #include <sys/proc.h>
 #include <sys/asan.h>
 #include <sys/csan.h>
+#include <sys/msan.h>
 
 #ifdef KGDB
 #include <sys/kgdb.h>
@@ -1637,6 +1638,13 @@ init_slotspace(void)
 	slotspace.area[SLAREA_ASAN].active = true;
 #endif
 
+#ifdef KMSAN
+	/* MSAN. */
+	slotspace.area[SLAREA_MSAN].sslot = L4_SLOT_KMSAN;
+	slotspace.area[SLAREA_MSAN].nslot = NL4_SLOT_KMSAN;
+	slotspace.area[SLAREA_MSAN].active = true;
+#endif
+
 	/* Kernel. */
 	slotspace.area[SLAREA_KERN].sslot = L4_SLOT_KERNBASE;
 	slotspace.area[SLAREA_KERN].nslot = 1;
@@ -1763,6 +1771,7 @@ init_x86_64(paddr_t first_avail)
 	kasan_init();
 #endif
 	kcsan_init();
+	kmsan_init((void *)lwp0uarea);
 
 	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);
 

Index: src/sys/arch/amd64/amd64/mptramp.S
diff -u src/sys/arch/amd64/amd64/mptramp.S:1.27 src/sys/arch/amd64/amd64/mptramp.S:1.28
--- src/sys/arch/amd64/amd64/mptramp.S:1.27	Tue Nov  5 20:19:17 2019
+++ src/sys/arch/amd64/amd64/mptramp.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: mptramp.S,v 1.27 2019/11/05 20:19:17 maxv Exp $	*/
+/*	$NetBSD: mptramp.S,v 1.28 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2000, 2016 The NetBSD Foundation, Inc.
@@ -76,6 +76,7 @@
 
 #include "assym.h"
 #include "opt_kcsan.h"
+#include "opt_kmsan.h"
 #include <machine/asm.h>
 #include <machine/specialreg.h>
 #include <machine/segments.h>
@@ -245,7 +246,7 @@ _C_LABEL(cpu_spinup_trampoline_end):	/* 
 	movl	PCB_CR0(%rsi),%eax
 	movq	%rax,%cr0
 
-#ifdef KCSAN
+#if defined(KCSAN) || defined(KMSAN)
 	/*
 	 * The C instrumentation uses GS.base, so initialize it right now. It
 	 * gets re-initialized later, that's fine.

Index: src/sys/arch/amd64/amd64/spl.S
diff -u src/sys/arch/amd64/amd64/spl.S:1.41 src/sys/arch/amd64/amd64/spl.S:1.42
--- src/sys/arch/amd64/amd64/spl.S:1.41	Sat Oct 12 06:31:03 2019
+++ src/sys/arch/amd64/amd64/spl.S	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: spl.S,v 1.41 2019/10/12 06:31:03 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.42 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
@@ -66,6 +66,7 @@
 
 #include "opt_ddb.h"
 #include "opt_kasan.h"
+#include "opt_kmsan.h"
 
 #define ALIGN_TEXT	.align 16,0x90
 
@@ -86,6 +87,7 @@ ENTRY(splraise)
 	cmpl	%edi,%eax
 	cmoval	%eax,%edi
 	movl	%edi,CPUVAR(ILEVEL)
+	KMSAN_INIT_RET(4)
 	ret
 END(splraise)
 
@@ -128,6 +130,16 @@ IDTVEC(softintr)
 	popq	%rax
 #endif
 
+#ifdef KMSAN
+	pushq	%rax
+	pushq	%rdx
+	pushq	%rcx
+	callq	_C_LABEL(kmsan_softint)
+	popq	%rcx
+	popq	%rdx
+	popq	%rax
+#endif
+
 	/* save old context */
 	movq	%rsp,PCB_RSP(%rcx)
 	movq	%rbp,PCB_RBP(%rcx)
@@ -187,6 +199,7 @@ IDTVEC(recurse_preempt)
 	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
 	xorq	%rdi,%rdi
+	KMSAN_INIT_ARG(8)
 	call	_C_LABEL(kpreempt)
 	cli
 	jmp	*%r13			/* back to Xspllower */
@@ -203,6 +216,7 @@ IDTVEC(resume_preempt)
 	testq	$SEL_RPL,TF_CS(%rsp)
 	jnz	1f
 	movq	TF_RIP(%rsp),%rdi
+	KMSAN_INIT_ARG(8)
 	call	_C_LABEL(kpreempt)	/* from kernel */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
@@ -391,6 +405,7 @@ LABEL(doreti_checkast)
 	movl	$T_ASTFLT,TF_TRAPNO(%rsp)	/* XXX undo later.. */
 	/* Pushed T_ASTFLT into tf_trapno on entry. */
 	movq	%rsp,%rdi
+	KMSAN_INIT_ARG(8)
 	call	_C_LABEL(trap)
 	CLI(si)
 	jmp	doreti_checkast

Index: src/sys/arch/amd64/conf/GENERIC
diff -u src/sys/arch/amd64/conf/GENERIC:1.545 src/sys/arch/amd64/conf/GENERIC:1.546
--- src/sys/arch/amd64/conf/GENERIC:1.545	Tue Nov  5 20:19:17 2019
+++ src/sys/arch/amd64/conf/GENERIC	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-# $NetBSD: GENERIC,v 1.545 2019/11/05 20:19:17 maxv Exp $
+# $NetBSD: GENERIC,v 1.546 2019/11/14 16:23:52 maxv Exp $
 #
 # GENERIC machine description file
 #
@@ -22,7 +22,7 @@ include 	"arch/amd64/conf/std.amd64"
 
 options 	INCLUDE_CONFIG_FILE	# embed config file in kernel binary
 
-#ident		"GENERIC-$Revision: 1.545 $"
+#ident		"GENERIC-$Revision: 1.546 $"
 
 maxusers	64		# estimated number of users
 
@@ -133,6 +133,17 @@ options 	KDTRACE_HOOKS	# kernel DTrace h
 #options 	KCSAN		# mandatory
 #options 	KCSAN_PANIC	# optional
 
+# Kernel Memory Sanitizer (kMSan). You need to disable SVS and kernel modules
+# to use it. The quarantine is optional and can help KMSAN find uninitialized
+# memory in pool caches. Note that KMSAN requires at least 4GB of RAM.
+#makeoptions 	KMSAN=1		# mandatory
+#options 	KMSAN		# mandatory
+#no options	SVS		# mandatory
+#no options 	MODULAR		# mandatory
+#no options 	MODULAR_DEFAULT_AUTOLOAD	# mandatory
+#options	POOL_QUARANTINE	# optional
+#options 	KMSAN_PANIC	# optional
+
 # Kernel Info Leak Detector.
 #makeoptions 	KLEAK=1
 #options 	KLEAK

Index: src/sys/arch/amd64/conf/Makefile.amd64
diff -u src/sys/arch/amd64/conf/Makefile.amd64:1.79 src/sys/arch/amd64/conf/Makefile.amd64:1.80
--- src/sys/arch/amd64/conf/Makefile.amd64:1.79	Tue Nov  5 20:19:17 2019
+++ src/sys/arch/amd64/conf/Makefile.amd64	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-#	$NetBSD: Makefile.amd64,v 1.79 2019/11/05 20:19:17 maxv Exp $
+#	$NetBSD: Makefile.amd64,v 1.80 2019/11/14 16:23:52 maxv Exp $
 
 # Makefile for NetBSD
 #
@@ -69,6 +69,14 @@ KCSANFLAGS.${f}=	# empty
 CFLAGS+=	${KCSANFLAGS.${.IMPSRC:T}:U${KCSANFLAGS}}
 .endif
 
+.if ${KMSAN:U0} > 0 && ${HAVE_LLVM:Uno} == "yes"
+KMSANFLAGS=	-fsanitize=kernel-memory
+.for f in subr_msan.c
+KMSANFLAGS.${f}=	# empty
+.endfor
+CFLAGS+=	${KMSANFLAGS.${.IMPSRC:T}:U${KMSANFLAGS}}
+.endif
+
 ##
 ## (3) libkern and compat
 ##

Index: src/sys/arch/amd64/include/cpu.h
diff -u src/sys/arch/amd64/include/cpu.h:1.64 src/sys/arch/amd64/include/cpu.h:1.65
--- src/sys/arch/amd64/include/cpu.h:1.64	Mon Feb 11 14:59:32 2019
+++ src/sys/arch/amd64/include/cpu.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.64 2019/02/11 14:59:32 cherry Exp $	*/
+/*	$NetBSD: cpu.h,v 1.65 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -43,7 +43,19 @@
 
 #ifdef _KERNEL
 
+#ifdef _KERNEL_OPT
+#include "opt_kmsan.h"
+#endif
+
 #if defined(__GNUC__) && !defined(_MODULE)
+
+/*
+ * KMSAN: disable the inlines below, to force the use of the ASM functions,
+ * where no KMSAN instrumentation is added. This is because the instrumentation
+ * does not handle the segment registers correctly. And there appears to be no
+ * way to tell LLVM not to add KMSAN instrumentation in these __asm blocks.
+ */
+#if !defined(KMSAN) || defined(KMSAN_NO_INST)
 static struct cpu_info *x86_curcpu(void);
 static lwp_t *x86_curlwp(void);
 
@@ -81,6 +93,12 @@ cpu_set_curpri(int pri)
 	    "r" (pri)
 	);
 }
+#else
+struct cpu_info *x86_curcpu(void);
+lwp_t *x86_curlwp(void);
+void cpu_set_curpri(int);
+#endif
+
 #endif	/* __GNUC__ && !_MODULE */
 
 #ifdef XENPV

Index: src/sys/arch/amd64/include/frameasm.h
diff -u src/sys/arch/amd64/include/frameasm.h:1.45 src/sys/arch/amd64/include/frameasm.h:1.46
--- src/sys/arch/amd64/include/frameasm.h:1.45	Sat Oct 12 06:31:03 2019
+++ src/sys/arch/amd64/include/frameasm.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: frameasm.h,v 1.45 2019/10/12 06:31:03 maxv Exp $	*/
+/*	$NetBSD: frameasm.h,v 1.46 2019/11/14 16:23:52 maxv Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
@@ -6,6 +6,7 @@
 #ifdef _KERNEL_OPT
 #include "opt_xen.h"
 #include "opt_svs.h"
+#include "opt_kmsan.h"
 #endif
 
 /*
@@ -205,6 +206,67 @@
 #define SVS_LEAVE_ALTSTACK	/* nothing */
 #endif
 
+#ifdef KMSAN
+#define KMSAN_ENTER	\
+	movq	%rsp,%rdi		; \
+	movq	$TF_REGSIZE+16+40,%rsi	; \
+	xorq	%rdx,%rdx		; \
+	callq	kmsan_mark		; \
+	callq	kmsan_intr_enter
+#define KMSAN_LEAVE	\
+	pushq	%rbp			; \
+	movq	%rsp,%rbp		; \
+	callq	kmsan_intr_leave	; \
+	popq	%rbp
+#define KMSAN_INIT_ARG(sz)	\
+	pushq	%rax			; \
+	pushq	%rcx			; \
+	pushq	%rdx			; \
+	pushq	%rsi			; \
+	pushq	%rdi			; \
+	pushq	%r8			; \
+	pushq	%r9			; \
+	pushq	%r10			; \
+	pushq	%r11			; \
+	movq	$sz,%rdi		; \
+	callq	_C_LABEL(kmsan_init_arg); \
+	popq	%r11			; \
+	popq	%r10			; \
+	popq	%r9			; \
+	popq	%r8			; \
+	popq	%rdi			; \
+	popq	%rsi			; \
+	popq	%rdx			; \
+	popq	%rcx			; \
+	popq	%rax
+#define KMSAN_INIT_RET(sz)	\
+	pushq	%rax			; \
+	pushq	%rcx			; \
+	pushq	%rdx			; \
+	pushq	%rsi			; \
+	pushq	%rdi			; \
+	pushq	%r8			; \
+	pushq	%r9			; \
+	pushq	%r10			; \
+	pushq	%r11			; \
+	movq	$sz,%rdi		; \
+	callq	_C_LABEL(kmsan_init_ret); \
+	popq	%r11			; \
+	popq	%r10			; \
+	popq	%r9			; \
+	popq	%r8			; \
+	popq	%rdi			; \
+	popq	%rsi			; \
+	popq	%rdx			; \
+	popq	%rcx			; \
+	popq	%rax
+#else
+#define KMSAN_ENTER		/* nothing */
+#define KMSAN_LEAVE		/* nothing */
+#define KMSAN_INIT_ARG(sz)	/* nothing */
+#define KMSAN_INIT_RET(sz)	/* nothing */
+#endif
+
 #define	INTRENTRY \
 	subq	$TF_REGSIZE,%rsp	; \
 	INTR_SAVE_GPRS			; \
@@ -219,7 +281,7 @@
 	movw	%fs,TF_FS(%rsp)		; \
 	movw	%es,TF_ES(%rsp)		; \
 	movw	%ds,TF_DS(%rsp)		; \
-98:
+98:	KMSAN_ENTER
 
 #define INTRFASTEXIT \
 	jmp	intrfastexit
@@ -238,7 +300,8 @@
 #define INTR_RECURSE_ENTRY \
 	subq	$TF_REGSIZE,%rsp	; \
 	INTR_SAVE_GPRS			; \
-	cld
+	cld				; \
+	KMSAN_ENTER
 
 #define	CHECK_DEFERRED_SWITCH \
 	cmpl	$0, CPUVAR(WANT_PMAPLOAD)

Index: src/sys/arch/amd64/include/param.h
diff -u src/sys/arch/amd64/include/param.h:1.32 src/sys/arch/amd64/include/param.h:1.33
--- src/sys/arch/amd64/include/param.h:1.32	Sat Sep 28 15:11:53 2019
+++ src/sys/arch/amd64/include/param.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: param.h,v 1.32 2019/09/28 15:11:53 christos Exp $	*/
+/*	$NetBSD: param.h,v 1.33 2019/11/14 16:23:52 maxv Exp $	*/
 
 #ifdef __x86_64__
 
@@ -12,6 +12,7 @@
 #if defined(_KERNEL_OPT)
 #include "opt_kasan.h"
 #include "opt_kleak.h"
+#include "opt_kmsan.h"
 #endif
 #endif
 
@@ -44,7 +45,11 @@
 /*
  * Maximum physical memory supported by the implementation.
  */
+#if defined(KMSAN)
+#define MAXPHYSMEM	0x008000000000ULL /* 512GB */
+#else
 #define MAXPHYSMEM	0x100000000000ULL /* 16TB */
+#endif
 
 /*
  * XXXfvdl change this (after bootstrap) to take # of bits from
@@ -63,7 +68,7 @@
 #define	SSIZE		1		/* initial stack size/NBPG */
 #define	SINCR		1		/* increment of stack/NBPG */
 
-#if defined(KASAN) || defined(KLEAK)
+#if defined(KASAN) || defined(KLEAK) || defined(KMSAN)
 #define	UPAGES		8
 #elif defined(DIAGNOSTIC)
 #define	UPAGES		5		/* pages of u-area (1 for redzone) */

Index: src/sys/arch/amd64/include/pmap.h
diff -u src/sys/arch/amd64/include/pmap.h:1.63 src/sys/arch/amd64/include/pmap.h:1.64
--- src/sys/arch/amd64/include/pmap.h:1.63	Fri Nov  1 15:11:43 2019
+++ src/sys/arch/amd64/include/pmap.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.63 2019/11/01 15:11:43 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.64 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -68,6 +68,7 @@
 #if defined(_KERNEL_OPT)
 #include "opt_xen.h"
 #include "opt_kasan.h"
+#include "opt_kmsan.h"
 #include "opt_kubsan.h"
 #endif
 
@@ -98,6 +99,11 @@
 #define NL4_SLOT_KASAN		32
 #endif
 
+#ifdef KMSAN
+#define L4_SLOT_KMSAN		256
+#define NL4_SLOT_KMSAN		4
+#endif
+
 #define NL4_SLOT_DIRECT		32
 
 #ifndef XENPV
@@ -133,14 +139,18 @@ extern pt_entry_t *pte_base;
 
 #define PDP_BASE	L4_BASE
 
+#if defined(KMSAN)
+#define NKL4_MAX_ENTRIES	(unsigned long)1	/* 512GB only */
+#else
 #define NKL4_MAX_ENTRIES	(unsigned long)64
+#endif
 #define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
 #define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
 #define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)
 
 #define NKL4_KIMG_ENTRIES	1
 #define NKL3_KIMG_ENTRIES	1
-#if defined(KUBSAN)
+#if defined(KUBSAN) || defined(KMSAN)
 #define NKL2_KIMG_ENTRIES	64	/* really big kernel */
 #else
 #define NKL2_KIMG_ENTRIES	48
Index: src/sys/arch/amd64/include/types.h
diff -u src/sys/arch/amd64/include/types.h:1.63 src/sys/arch/amd64/include/types.h:1.64
--- src/sys/arch/amd64/include/types.h:1.63	Fri Oct  4 06:27:42 2019
+++ src/sys/arch/amd64/include/types.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.63 2019/10/04 06:27:42 maxv Exp $	*/
+/*	$NetBSD: types.h,v 1.64 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -106,12 +106,13 @@ typedef	unsigned char		__cpu_simple_lock
 
 #include "opt_xen.h"
 #include "opt_kasan.h"
+#include "opt_kmsan.h"
 #ifdef KASAN
 #define __HAVE_KASAN_INSTR_BUS
 #define __HAVE_KASAN_INSTR_DMA
 #endif
 #if defined(__x86_64__) && !defined(XENPV)
-#if !defined(KASAN)
+#if !defined(KASAN) && !defined(KMSAN)
 #define	__HAVE_PCPU_AREA 1
 #define	__HAVE_DIRECT_MAP 1
 #endif

Index: src/sys/arch/x86/include/bus_defs.h
diff -u src/sys/arch/x86/include/bus_defs.h:1.4 src/sys/arch/x86/include/bus_defs.h:1.5
--- src/sys/arch/x86/include/bus_defs.h:1.4	Fri Oct  4 06:27:42 2019
+++ src/sys/arch/x86/include/bus_defs.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_defs.h,v 1.4 2019/10/04 06:27:42 maxv Exp $	*/
+/*	$NetBSD: bus_defs.h,v 1.5 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
@@ -66,6 +66,7 @@
 
 #ifdef _KERNEL_OPT
 #include "opt_kasan.h"
+#include "opt_kmsan.h"
 #endif
 
 #include <x86/busdefs.h>
@@ -145,7 +146,7 @@ struct x86_bus_dmamap {
 	/*
 	 * PUBLIC MEMBERS: these are used by machine-independent code.
 	 */
-#if defined(KASAN)
+#if defined(KASAN) || defined(KMSAN)
 	void		*dm_buf;
 	bus_size_t	dm_buflen;
 	int		dm_buftype;

Index: src/sys/arch/x86/include/pmap.h
diff -u src/sys/arch/x86/include/pmap.h:1.104 src/sys/arch/x86/include/pmap.h:1.105
--- src/sys/arch/x86/include/pmap.h:1.104	Wed Nov 13 12:55:10 2019
+++ src/sys/arch/x86/include/pmap.h	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.104 2019/11/13 12:55:10 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.105 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -172,8 +172,9 @@ struct bootspace {
 #define SLAREA_DMAP	4
 #define SLAREA_HYPV	5
 #define SLAREA_ASAN	6
-#define SLAREA_KERN	7
-#define SLSPACE_NAREAS	8
+#define SLAREA_MSAN	7
+#define SLAREA_KERN	8
+#define SLSPACE_NAREAS	9
 
 struct slotspace {
 	struct {

Index: src/sys/arch/x86/x86/bus_dma.c
diff -u src/sys/arch/x86/x86/bus_dma.c:1.80 src/sys/arch/x86/x86/bus_dma.c:1.81
--- src/sys/arch/x86/x86/bus_dma.c:1.80	Fri Oct  4 06:27:42 2019
+++ src/sys/arch/x86/x86/bus_dma.c	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_dma.c,v 1.80 2019/10/04 06:27:42 maxv Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.81 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2007 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.80 2019/10/04 06:27:42 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.81 2019/11/14 16:23:52 maxv Exp $");
 
 /*
  * The following is included because _bus_dma_uiomove is derived from
@@ -96,6 +96,7 @@ __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 
 #include <sys/mbuf.h>
 #include <sys/proc.h>
 #include <sys/asan.h>
+#include <sys/msan.h>
 
 #include <sys/bus.h>
 #include <machine/bus_private.h>
@@ -1329,6 +1330,7 @@ bus_dmamap_sync(bus_dma_tag_t t, bus_dma
 	bus_dma_tag_t it;
 
 	kasan_dma_sync(p, o, l, ops);
+	kmsan_dma_sync(p, o, l, ops);
 
 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
 		;	/* skip override */
@@ -1390,6 +1392,7 @@ bus_dmamap_load(bus_dma_tag_t t, bus_dma
 	bus_dma_tag_t it;
 
 	kasan_dma_load(dmam, buf, buflen, KASAN_DMA_LINEAR);
+	kmsan_dma_load(dmam, buf, buflen, KMSAN_DMA_LINEAR);
 
 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
 		;	/* skip override */
@@ -1410,6 +1413,7 @@ bus_dmamap_load_mbuf(bus_dma_tag_t t, bu
 	bus_dma_tag_t it;
 
 	kasan_dma_load(dmam, chain, 0, KASAN_DMA_MBUF);
+	kmsan_dma_load(dmam, chain, 0, KMSAN_DMA_MBUF);
 
 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
 		;	/* skip override */
@@ -1430,6 +1434,7 @@ bus_dmamap_load_uio(bus_dma_tag_t t, bus
 	bus_dma_tag_t it;
 
 	kasan_dma_load(dmam, uio, 0, KASAN_DMA_UIO);
+	kmsan_dma_load(dmam, uio, 0, KMSAN_DMA_UIO);
 
 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
 		;	/* skip override */
@@ -1451,6 +1456,7 @@ bus_dmamap_load_raw(bus_dma_tag_t t, bus
 	bus_dma_tag_t it;
 
 	kasan_dma_load(dmam, NULL, 0, KASAN_DMA_RAW);
+	kmsan_dma_load(dmam, NULL, 0, KMSAN_DMA_RAW);
 
 	if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
 		;	/* skip override */

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.338 src/sys/arch/x86/x86/pmap.c:1.339
--- src/sys/arch/x86/x86/pmap.c:1.338	Wed Nov 13 12:55:10 2019
+++ src/sys/arch/x86/x86/pmap.c	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.338 2019/11/13 12:55:10 maxv Exp $	*/
+/*	$NetBSD: pmap.c,v 1.339 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.338 2019/11/13 12:55:10 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.339 2019/11/14 16:23:52 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -151,6 +151,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.3
 #include <sys/xcall.h>
 #include <sys/kcore.h>
 #include <sys/asan.h>
+#include <sys/msan.h>
 
 #include <uvm/uvm.h>
 #include <uvm/pmap/pmap_pvt.h>
@@ -1299,7 +1300,7 @@ pmap_pagetree_nentries_range(vaddr_t sta
 }
 #endif
 
-#if defined(__HAVE_DIRECT_MAP) || defined(KASAN)
+#if defined(__HAVE_DIRECT_MAP) || defined(KASAN) || defined(KMSAN)
 static inline void
 slotspace_copy(int type, pd_entry_t *dst, pd_entry_t *src)
 {
@@ -2283,6 +2284,9 @@ pmap_pdp_ctor(void *arg, void *v, int fl
 #ifdef KASAN
 	slotspace_copy(SLAREA_ASAN, pdir, PDP_BASE);
 #endif
+#ifdef KMSAN
+	slotspace_copy(SLAREA_MSAN, pdir, PDP_BASE);
+#endif
 #endif /* XENPV  && __x86_64__*/
 
 #ifdef XENPV
@@ -4574,6 +4578,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	    (size_t)(maxkvaddr - pmap_maxkvaddr));
 #endif
 
+	kmsan_shadow_map((void *)pmap_maxkvaddr,
+	    (size_t)(maxkvaddr - pmap_maxkvaddr));
+
 	pmap_alloc_level(cpm, pmap_maxkvaddr, needed_kptp);
 
 	/*

Index: src/sys/conf/files
diff -u src/sys/conf/files:1.1243 src/sys/conf/files:1.1244
--- src/sys/conf/files:1.1243	Mon Nov 11 04:04:29 2019
+++ src/sys/conf/files	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-#	$NetBSD: files,v 1.1243 2019/11/11 04:04:29 msaitoh Exp $
+#	$NetBSD: files,v 1.1244 2019/11/14 16:23:52 maxv Exp $
 #	@(#)files.newconf	7.5 (Berkeley) 5/10/93
 
 version 	20171118
@@ -33,6 +33,8 @@ defflag				KASAN
 defflag opt_kasan.h		KASAN_PANIC
 defflag				KCSAN
 defflag opt_kcsan.h		KCSAN_PANIC
+defflag				KMSAN
+defflag opt_kmsan.h		KMSAN_PANIC
 defflag				KLEAK
 defflag				KCOV
 defflag opt_pool.h		POOL_QUARANTINE

Index: src/sys/kern/files.kern
diff -u src/sys/kern/files.kern:1.36 src/sys/kern/files.kern:1.37
--- src/sys/kern/files.kern:1.36	Tue Nov  5 20:19:17 2019
+++ src/sys/kern/files.kern	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-#	$NetBSD: files.kern,v 1.36 2019/11/05 20:19:17 maxv Exp $
+#	$NetBSD: files.kern,v 1.37 2019/11/14 16:23:52 maxv Exp $
 
 #
 # kernel sources
@@ -132,6 +132,7 @@ file	kern/subr_localcount.c		kern
 file	kern/subr_lockdebug.c		kern
 file	kern/subr_log.c			kern
 file	kern/subr_lwp_specificdata.c	kern
+file	kern/subr_msan.c		kmsan
 file	kern/subr_once.c		kern
 file	kern/subr_optstr.c		kern
 file	kern/subr_pcq.c			kern

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.207 src/sys/kern/kern_lwp.c:1.208
--- src/sys/kern/kern_lwp.c:1.207	Sun Nov 10 23:39:03 2019
+++ src/sys/kern/kern_lwp.c	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.207 2019/11/10 23:39:03 joerg Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.208 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -211,7 +211,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.207 2019/11/10 23:39:03 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.208 2019/11/14 16:23:52 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -244,6 +244,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v
 #include <sys/uidinfo.h>
 #include <sys/sysctl.h>
 #include <sys/psref.h>
+#include <sys/msan.h>
 
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_object.h>
@@ -844,6 +845,7 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_
 	l2->l_pflag = LP_MPSAFE;
 	TAILQ_INIT(&l2->l_ld_locks);
 	l2->l_psrefs = 0;
+	kmsan_lwp_alloc(l2);
 
 	/*
 	 * For vfork, borrow parent's lwpctl context if it exists.
@@ -1298,6 +1300,7 @@ lwp_free(struct lwp *l, bool recycle, bo
 	if (l->l_name != NULL)
 		kmem_free(l->l_name, MAXCOMLEN);
 
+	kmsan_lwp_free(l);
 	cpu_lwp_free2(l);
 	uvm_lwp_exit(l);
 

Index: src/sys/kern/kern_malloc.c
diff -u src/sys/kern/kern_malloc.c:1.157 src/sys/kern/kern_malloc.c:1.158
--- src/sys/kern/kern_malloc.c:1.157	Sun Apr  7 09:20:04 2019
+++ src/sys/kern/kern_malloc.c	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_malloc.c,v 1.157 2019/04/07 09:20:04 maxv Exp $	*/
+/*	$NetBSD: kern_malloc.c,v 1.158 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 1987, 1991, 1993
@@ -70,12 +70,13 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.157 2019/04/07 09:20:04 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.158 2019/11/14 16:23:52 maxv Exp $");
 
 #include <sys/param.h>
 #include <sys/malloc.h>
 #include <sys/kmem.h>
 #include <sys/asan.h>
+#include <sys/msan.h>
 
 /*
  * Built-in malloc types.  Note: ought to be removed.
@@ -129,6 +130,9 @@ kern_malloc(unsigned long reqsize, int f
 	if (p == NULL)
 		return NULL;
 
+	kmsan_mark(p, allocsize, KMSAN_STATE_UNINIT);
+	kmsan_orig(p, allocsize, KMSAN_TYPE_MALLOC, __RET_ADDR);
+
 	if ((flags & M_ZERO) != 0) {
 		memset(p, 0, allocsize);
 	}
@@ -155,11 +159,16 @@ kern_free(void *addr)
 	kasan_mark(addr, mh->mh_size - sizeof(struct malloc_header),
 	    mh->mh_size - sizeof(struct malloc_header), KASAN_MALLOC_REDZONE);
 
-	if (mh->mh_size >= PAGE_SIZE + sizeof(struct malloc_header))
+	if (mh->mh_size >= PAGE_SIZE + sizeof(struct malloc_header)) {
+		kmsan_mark((char *)addr - PAGE_SIZE,
+		    mh->mh_size + PAGE_SIZE - sizeof(struct malloc_header),
+		    KMSAN_STATE_INITED);
 		kmem_intr_free((char *)addr - PAGE_SIZE,
 		    mh->mh_size + PAGE_SIZE - sizeof(struct malloc_header));
-	else
+	} else {
+		kmsan_mark(mh, mh->mh_size, KMSAN_STATE_INITED);
 		kmem_intr_free(mh, mh->mh_size);
+	}
 }
 
 void *

Index: src/sys/kern/subr_kmem.c
diff -u src/sys/kern/subr_kmem.c:1.76 src/sys/kern/subr_kmem.c:1.77
--- src/sys/kern/subr_kmem.c:1.76	Thu Aug 15 12:06:42 2019
+++ src/sys/kern/subr_kmem.c	Thu Nov 14 16:23:52 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_kmem.c,v 1.76 2019/08/15 12:06:42 maxv Exp $	*/
+/*	$NetBSD: subr_kmem.c,v 1.77 2019/11/14 16:23:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
@@ -78,7 +78,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.76 2019/08/15 12:06:42 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.77 2019/11/14 16:23:52 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_kmem.h"
@@ -92,6 +92,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 #include <sys/lockdebug.h>
 #include <sys/cpu.h>
 #include <sys/asan.h>
+#include <sys/msan.h>
 
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_map.h>
@@ -304,6 +305,10 @@ kmem_alloc(size_t size, km_flag_t kmflag
 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
 	    "kmem(9) should not be used from the interrupt context");
 	v = kmem_intr_alloc(size, kmflags);
+	if (__predict_true(v != NULL)) {
+		kmsan_mark(v, size, KMSAN_STATE_UNINIT);
+		kmsan_orig(v, size, KMSAN_TYPE_KMEM, __RET_ADDR);
+	}
 	KASSERT(v || (kmflags & KM_NOSLEEP) != 0);
 	return v;
 }
@@ -334,6 +339,7 @@ kmem_free(void *p, size_t size)
 	KASSERT(!cpu_intr_p());
 	KASSERT(!cpu_softintr_p());
 	kmem_intr_free(p, size);
+	kmsan_mark(p, size, KMSAN_STATE_INITED);
 }
 
 static size_t

Index: src/sys/kern/subr_pool.c
diff -u src/sys/kern/subr_pool.c:1.261 src/sys/kern/subr_pool.c:1.262
--- src/sys/kern/subr_pool.c:1.261	Wed Oct 16 18:29:49 2019
+++ src/sys/kern/subr_pool.c	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.261 2019/10/16 18:29:49 christos Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.262 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.261 2019/10/16 18:29:49 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.262 2019/11/14 16:23:53 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -58,6 +58,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include <sys/cpu.h>
 #include <sys/atomic.h>
 #include <sys/asan.h>
+#include <sys/msan.h>
 
 #include <uvm/uvm_extern.h>
 
@@ -83,7 +84,7 @@ static struct pool phpool[PHPOOL_MAX];
 #define	PHPOOL_FREELIST_NELEM(idx) \
 	(((idx) == 0) ? BITMAP_MIN_SIZE : BITMAP_SIZE * (1 << (idx)))
 
-#if defined(DIAGNOSTIC) || defined(KASAN)
+#if !defined(KMSAN) && (defined(DIAGNOSTIC) || defined(KASAN))
 #define POOL_REDZONE
 #endif
 
@@ -104,6 +105,18 @@ static void pool_cache_redzone_check(poo
 # define pool_cache_redzone_check(pc, ptr)	__nothing
 #endif
 
+#ifdef KMSAN
+static inline void pool_get_kmsan(struct pool *, void *);
+static inline void pool_put_kmsan(struct pool *, void *);
+static inline void pool_cache_get_kmsan(pool_cache_t, void *);
+static inline void pool_cache_put_kmsan(pool_cache_t, void *);
+#else
+#define pool_get_kmsan(pp, ptr)		__nothing
+#define pool_put_kmsan(pp, ptr)		__nothing
+#define pool_cache_get_kmsan(pc, ptr)	__nothing
+#define pool_cache_put_kmsan(pc, ptr)	__nothing
+#endif
+
 #ifdef KLEAK
 static void pool_kleak_fill(struct pool *, void *);
 static void pool_cache_kleak_fill(pool_cache_t, void *);
@@ -128,10 +141,8 @@ static bool pool_cache_put_quarantine(po
 #define NO_CTOR	__FPTRCAST(int (*)(void *, void *, int), nullop)
 #define NO_DTOR	__FPTRCAST(void (*)(void *, void *), nullop)
 
-#if defined(KASAN) || defined(KLEAK)
 #define pc_has_ctor(pc) ((pc)->pc_ctor != NO_CTOR)
 #define pc_has_dtor(pc) ((pc)->pc_dtor != NO_DTOR)
-#endif
 
 /*
  * Pool backend allocators.
@@ -1194,6 +1205,7 @@ pool_get(struct pool *pp, int flags)
 	KASSERT((((vaddr_t)v) & (pp->pr_align - 1)) == 0);
 	FREECHECK_OUT(&pp->pr_freecheck, v);
 	pool_redzone_fill(pp, v);
+	pool_get_kmsan(pp, v);
 	if (flags & PR_ZERO)
 		memset(v, 0, pp->pr_reqsize);
 	else
@@ -1211,6 +1223,7 @@ pool_do_put(struct pool *pp, void *v, st
 
 	KASSERT(mutex_owned(&pp->pr_lock));
 	pool_redzone_check(pp, v);
+	pool_put_kmsan(pp, v);
 	FREECHECK_IN(&pp->pr_freecheck, v);
 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
 
@@ -2563,6 +2576,7 @@ pool_cache_get_paddr(pool_cache_t pc, in
 			splx(s);
 			FREECHECK_OUT(&pc->pc_freecheck, object);
 			pool_redzone_fill(&pc->pc_pool, object);
+			pool_cache_get_kmsan(pc, object);
 			pool_cache_kleak_fill(pc, object);
 			return object;
 		}
@@ -2712,6 +2726,7 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 	int s;
 
 	KASSERT(object != NULL);
+	pool_cache_put_kmsan(pc, object);
 	pool_cache_redzone_check(pc, object);
 	FREECHECK_IN(&pc->pc_freecheck, object);
 
@@ -2896,6 +2911,36 @@ pool_page_free_meta(struct pool *pp, voi
 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
 }
 
+#ifdef KMSAN
+static inline void
+pool_get_kmsan(struct pool *pp, void *p)
+{
+	kmsan_orig(p, pp->pr_size, KMSAN_TYPE_POOL, __RET_ADDR);
+	kmsan_mark(p, pp->pr_size, KMSAN_STATE_UNINIT);
+}
+
+static inline void
+pool_put_kmsan(struct pool *pp, void *p)
+{
+	kmsan_mark(p, pp->pr_size, KMSAN_STATE_INITED);
+}
+
+static inline void
+pool_cache_get_kmsan(pool_cache_t pc, void *p)
+{
+	if (__predict_false(pc_has_ctor(pc))) {
+		return;
+	}
+	pool_get_kmsan(&pc->pc_pool, p);
+}
+
+static inline void
+pool_cache_put_kmsan(pool_cache_t pc, void *p)
+{
+	pool_put_kmsan(&pc->pc_pool, p);
+}
+#endif
+
 #ifdef KLEAK
 static void
 pool_kleak_fill(struct pool *pp, void *p)

Index: src/sys/lib/libkern/libkern.h
diff -u src/sys/lib/libkern/libkern.h:1.133 src/sys/lib/libkern/libkern.h:1.134
--- src/sys/lib/libkern/libkern.h:1.133	Tue Nov  5 20:19:18 2019
+++ src/sys/lib/libkern/libkern.h	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: libkern.h,v 1.133 2019/11/05 20:19:18 maxv Exp $	*/
+/*	$NetBSD: libkern.h,v 1.134 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -384,6 +384,13 @@ void	*kcsan_memset(void *, int, size_t);
 #define	memcpy(d, s, l)		kcsan_memcpy(d, s, l)
 #define	memcmp(a, b, l)		kcsan_memcmp(a, b, l)
 #define	memset(d, v, l)		kcsan_memset(d, v, l)
+#elif defined(_KERNEL) && defined(KMSAN)
+void	*kmsan_memcpy(void *, const void *, size_t);
+int	 kmsan_memcmp(const void *, const void *, size_t);
+void	*kmsan_memset(void *, int, size_t);
+#define	memcpy(d, s, l)		kmsan_memcpy(d, s, l)
+#define	memcmp(a, b, l)		kmsan_memcmp(a, b, l)
+#define	memset(d, v, l)		kmsan_memset(d, v, l)
 #else
 #define	memcpy(d, s, l)		__builtin_memcpy(d, s, l)
 #define	memcmp(a, b, l)		__builtin_memcmp(a, b, l)
@@ -411,6 +418,13 @@ size_t	 kcsan_strlen(const char *);
 #define	strcpy(d, s)		kcsan_strcpy(d, s)
 #define	strcmp(a, b)		kcsan_strcmp(a, b)
 #define	strlen(a)		kcsan_strlen(a)
+#elif defined(_KERNEL) && defined(KMSAN)
+char	*kmsan_strcpy(char *, const char *);
+int	 kmsan_strcmp(const char *, const char *);
+size_t	 kmsan_strlen(const char *);
+#define	strcpy(d, s)		kmsan_strcpy(d, s)
+#define	strcmp(a, b)		kmsan_strcmp(a, b)
+#define	strlen(a)		kmsan_strlen(a)
 #else
 #define	strcpy(d, s)		__builtin_strcpy(d, s)
 #define	strcmp(a, b)		__builtin_strcmp(a, b)
@@ -460,6 +474,9 @@ void	*kasan_memmove(void *, const void *
 #elif defined(_KERNEL) && defined(KCSAN)
 void	*kcsan_memmove(void *, const void *, size_t);
 #define	memmove(d, s, l)	kcsan_memmove(d, s, l)
+#elif defined(_KERNEL) && defined(KMSAN)
+void	*kmsan_memmove(void *, const void *, size_t);
+#define	memmove(d, s, l)	kmsan_memmove(d, s, l)
 #endif
 
 int	 pmatch(const char *, const char *, const char **);

Index: src/sys/net/if.c
diff -u src/sys/net/if.c:1.464 src/sys/net/if.c:1.465
--- src/sys/net/if.c:1.464	Wed Nov 13 02:51:22 2019
+++ src/sys/net/if.c	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: if.c,v 1.464 2019/11/13 02:51:22 ozaki-r Exp $	*/
+/*	$NetBSD: if.c,v 1.465 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2008 The NetBSD Foundation, Inc.
@@ -90,7 +90,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.464 2019/11/13 02:51:22 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.465 2019/11/14 16:23:53 maxv Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_inet.h"
@@ -121,6 +121,7 @@ __KERNEL_RCSID(0, "$NetBSD: if.c,v 1.464
 #include <sys/intr.h>
 #include <sys/module_hook.h>
 #include <sys/compat_stub.h>
+#include <sys/msan.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -3610,6 +3611,8 @@ if_transmit_lock(struct ifnet *ifp, stru
 {
 	int error;
 
+	kmsan_check_mbuf(m);
+
 #ifdef ALTQ
 	KERNEL_LOCK(1, NULL);
 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {

Index: src/sys/sys/atomic.h
diff -u src/sys/sys/atomic.h:1.16 src/sys/sys/atomic.h:1.17
--- src/sys/sys/atomic.h:1.16	Tue Nov  5 20:19:18 2019
+++ src/sys/sys/atomic.h	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: atomic.h,v 1.16 2019/11/05 20:19:18 maxv Exp $	*/
+/*	$NetBSD: atomic.h,v 1.17 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@@ -40,6 +40,7 @@
 #if defined(_KERNEL) && defined(_KERNEL_OPT)
 #include "opt_kasan.h"
 #include "opt_kcsan.h"
+#include "opt_kmsan.h"
 #endif
 
 #if defined(KASAN)
@@ -84,6 +85,27 @@
 #define ATOMIC_PROTO_INC(name, tret, targ1) \
 	void kcsan_atomic_inc_##name(volatile targ1 *); \
 	tret kcsan_atomic_inc_##name##_nv(volatile targ1 *)
+#elif defined(KMSAN)
+#define ATOMIC_PROTO_ADD(name, tret, targ1, targ2) \
+	void kmsan_atomic_add_##name(volatile targ1 *, targ2); \
+	tret kmsan_atomic_add_##name##_nv(volatile targ1 *, targ2)
+#define ATOMIC_PROTO_AND(name, tret, targ1, targ2) \
+	void kmsan_atomic_and_##name(volatile targ1 *, targ2); \
+	tret kmsan_atomic_and_##name##_nv(volatile targ1 *, targ2)
+#define ATOMIC_PROTO_OR(name, tret, targ1, targ2) \
+	void kmsan_atomic_or_##name(volatile targ1 *, targ2); \
+	tret kmsan_atomic_or_##name##_nv(volatile targ1 *, targ2)
+#define ATOMIC_PROTO_CAS(name, tret, targ1, targ2) \
+	tret kmsan_atomic_cas_##name(volatile targ1 *, targ2, targ2); \
+	tret kmsan_atomic_cas_##name##_ni(volatile targ1 *, targ2, targ2)
+#define ATOMIC_PROTO_SWAP(name, tret, targ1, targ2) \
+	tret kmsan_atomic_swap_##name(volatile targ1 *, targ2)
+#define ATOMIC_PROTO_DEC(name, tret, targ1) \
+	void kmsan_atomic_dec_##name(volatile targ1 *); \
+	tret kmsan_atomic_dec_##name##_nv(volatile targ1 *)
+#define ATOMIC_PROTO_INC(name, tret, targ1) \
+	void kmsan_atomic_inc_##name(volatile targ1 *); \
+	tret kmsan_atomic_inc_##name##_nv(volatile targ1 *)
 #else
 #define ATOMIC_PROTO_ADD(name, tret, targ1, targ2) \
 	void atomic_add_##name(volatile targ1 *, targ2); \
@@ -297,6 +319,68 @@ __END_DECLS
 #define atomic_inc_ulong_nv	kcsan_atomic_inc_ulong_nv
 #define atomic_inc_ptr_nv	kcsan_atomic_inc_ptr_nv
 #define atomic_inc_64_nv	kcsan_atomic_inc_64_nv
+#elif defined(KMSAN)
+#define atomic_add_32		kmsan_atomic_add_32
+#define atomic_add_int		kmsan_atomic_add_int
+#define atomic_add_long		kmsan_atomic_add_long
+#define atomic_add_ptr		kmsan_atomic_add_ptr
+#define atomic_add_64		kmsan_atomic_add_64
+#define atomic_add_32_nv	kmsan_atomic_add_32_nv
+#define atomic_add_int_nv	kmsan_atomic_add_int_nv
+#define atomic_add_long_nv	kmsan_atomic_add_long_nv
+#define atomic_add_ptr_nv	kmsan_atomic_add_ptr_nv
+#define atomic_add_64_nv	kmsan_atomic_add_64_nv
+#define atomic_and_32		kmsan_atomic_and_32
+#define atomic_and_uint		kmsan_atomic_and_uint
+#define atomic_and_ulong	kmsan_atomic_and_ulong
+#define atomic_and_64		kmsan_atomic_and_64
+#define atomic_and_32_nv	kmsan_atomic_and_32_nv
+#define atomic_and_uint_nv	kmsan_atomic_and_uint_nv
+#define atomic_and_ulong_nv	kmsan_atomic_and_ulong_nv
+#define atomic_and_64_nv	kmsan_atomic_and_64_nv
+#define atomic_or_32		kmsan_atomic_or_32
+#define atomic_or_uint		kmsan_atomic_or_uint
+#define atomic_or_ulong		kmsan_atomic_or_ulong
+#define atomic_or_64		kmsan_atomic_or_64
+#define atomic_or_32_nv		kmsan_atomic_or_32_nv
+#define atomic_or_uint_nv	kmsan_atomic_or_uint_nv
+#define atomic_or_ulong_nv	kmsan_atomic_or_ulong_nv
+#define atomic_or_64_nv		kmsan_atomic_or_64_nv
+#define atomic_cas_32		kmsan_atomic_cas_32
+#define atomic_cas_uint		kmsan_atomic_cas_uint
+#define atomic_cas_ulong	kmsan_atomic_cas_ulong
+#define atomic_cas_ptr		kmsan_atomic_cas_ptr
+#define atomic_cas_64		kmsan_atomic_cas_64
+#define atomic_cas_32_ni	kmsan_atomic_cas_32_ni
+#define atomic_cas_uint_ni	kmsan_atomic_cas_uint_ni
+#define atomic_cas_ulong_ni	kmsan_atomic_cas_ulong_ni
+#define atomic_cas_ptr_ni	kmsan_atomic_cas_ptr_ni
+#define atomic_cas_64_ni	kmsan_atomic_cas_64_ni
+#define atomic_swap_32		kmsan_atomic_swap_32
+#define atomic_swap_uint	kmsan_atomic_swap_uint
+#define atomic_swap_ulong	kmsan_atomic_swap_ulong
+#define atomic_swap_ptr		kmsan_atomic_swap_ptr
+#define atomic_swap_64		kmsan_atomic_swap_64
+#define atomic_dec_32		kmsan_atomic_dec_32
+#define atomic_dec_uint		kmsan_atomic_dec_uint
+#define atomic_dec_ulong	kmsan_atomic_dec_ulong
+#define atomic_dec_ptr		kmsan_atomic_dec_ptr
+#define atomic_dec_64		kmsan_atomic_dec_64
+#define atomic_dec_32_nv	kmsan_atomic_dec_32_nv
+#define atomic_dec_uint_nv	kmsan_atomic_dec_uint_nv
+#define atomic_dec_ulong_nv	kmsan_atomic_dec_ulong_nv
+#define atomic_dec_ptr_nv	kmsan_atomic_dec_ptr_nv
+#define atomic_dec_64_nv	kmsan_atomic_dec_64_nv
+#define atomic_inc_32		kmsan_atomic_inc_32
+#define atomic_inc_uint		kmsan_atomic_inc_uint
+#define atomic_inc_ulong	kmsan_atomic_inc_ulong
+#define atomic_inc_ptr		kmsan_atomic_inc_ptr
+#define atomic_inc_64		kmsan_atomic_inc_64
+#define atomic_inc_32_nv	kmsan_atomic_inc_32_nv
+#define atomic_inc_uint_nv	kmsan_atomic_inc_uint_nv
+#define atomic_inc_ulong_nv	kmsan_atomic_inc_ulong_nv
+#define atomic_inc_ptr_nv	kmsan_atomic_inc_ptr_nv
+#define atomic_inc_64_nv	kmsan_atomic_inc_64_nv
 #endif
 
 #endif /* ! _SYS_ATOMIC_H_ */

Index: src/sys/sys/bus_proto.h
diff -u src/sys/sys/bus_proto.h:1.10 src/sys/sys/bus_proto.h:1.11
--- src/sys/sys/bus_proto.h:1.10	Tue Nov  5 20:19:18 2019
+++ src/sys/sys/bus_proto.h	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_proto.h,v 1.10 2019/11/05 20:19:18 maxv Exp $	*/
+/*	$NetBSD: bus_proto.h,v 1.11 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2001, 2007 The NetBSD Foundation, Inc.
@@ -67,6 +67,7 @@
 #ifdef _KERNEL_OPT
 #include "opt_kasan.h"
 #include "opt_kcsan.h"
+#include "opt_kmsan.h"
 #endif
 
 /*
@@ -185,6 +186,32 @@ void kcsan_bus_space_read_region_stream_
 #define bus_space_read_region_stream_2 kcsan_bus_space_read_region_stream_2
 #define bus_space_read_region_stream_4 kcsan_bus_space_read_region_stream_4
 #define bus_space_read_region_stream_8 kcsan_bus_space_read_region_stream_8
+#elif defined(KMSAN)
+#define BUS_SPACE_READ_MEM_PROTOS(bytes, bits)					\
+void kmsan_bus_space_read_multi_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+    bus_size_t, uint##bits##_t *, bus_size_t);					\
+void kmsan_bus_space_read_multi_stream_##bytes(bus_space_tag_t,			\
+    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);		\
+void kmsan_bus_space_read_region_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+    bus_size_t, uint##bits##_t *, bus_size_t);					\
+void kmsan_bus_space_read_region_stream_##bytes(bus_space_tag_t,		\
+    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);
+#define bus_space_read_multi_1 kmsan_bus_space_read_multi_1
+#define bus_space_read_multi_2 kmsan_bus_space_read_multi_2
+#define bus_space_read_multi_4 kmsan_bus_space_read_multi_4
+#define bus_space_read_multi_8 kmsan_bus_space_read_multi_8
+#define bus_space_read_multi_stream_1 kmsan_bus_space_read_multi_stream_1
+#define bus_space_read_multi_stream_2 kmsan_bus_space_read_multi_stream_2
+#define bus_space_read_multi_stream_4 kmsan_bus_space_read_multi_stream_4
+#define bus_space_read_multi_stream_8 kmsan_bus_space_read_multi_stream_8
+#define bus_space_read_region_1 kmsan_bus_space_read_region_1
+#define bus_space_read_region_2 kmsan_bus_space_read_region_2
+#define bus_space_read_region_4 kmsan_bus_space_read_region_4
+#define bus_space_read_region_8 kmsan_bus_space_read_region_8
+#define bus_space_read_region_stream_1 kmsan_bus_space_read_region_stream_1
+#define bus_space_read_region_stream_2 kmsan_bus_space_read_region_stream_2
+#define bus_space_read_region_stream_4 kmsan_bus_space_read_region_stream_4
+#define bus_space_read_region_stream_8 kmsan_bus_space_read_region_stream_8
 #else
 #define BUS_SPACE_READ_MEM_PROTOS(bytes, bits)				\
 void bus_space_read_multi_##bytes(bus_space_tag_t, bus_space_handle_t,	\
@@ -274,6 +301,32 @@ void kcsan_bus_space_write_region_stream
 #define bus_space_write_region_stream_2 kcsan_bus_space_write_region_stream_2
 #define bus_space_write_region_stream_4 kcsan_bus_space_write_region_stream_4
 #define bus_space_write_region_stream_8 kcsan_bus_space_write_region_stream_8
+#elif defined(KMSAN)
+#define BUS_SPACE_WRITE_MEM_PROTOS(bytes, bits)					\
+void kmsan_bus_space_write_multi_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+    bus_size_t, const uint##bits##_t *, bus_size_t);				\
+void kmsan_bus_space_write_multi_stream_##bytes(bus_space_tag_t,		\
+    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);	\
+void kmsan_bus_space_write_region_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+    bus_size_t, const uint##bits##_t *, bus_size_t);				\
+void kmsan_bus_space_write_region_stream_##bytes(bus_space_tag_t,		\
+    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);
+#define bus_space_write_multi_1 kmsan_bus_space_write_multi_1
+#define bus_space_write_multi_2 kmsan_bus_space_write_multi_2
+#define bus_space_write_multi_4 kmsan_bus_space_write_multi_4
+#define bus_space_write_multi_8 kmsan_bus_space_write_multi_8
+#define bus_space_write_multi_stream_1 kmsan_bus_space_write_multi_stream_1
+#define bus_space_write_multi_stream_2 kmsan_bus_space_write_multi_stream_2
+#define bus_space_write_multi_stream_4 kmsan_bus_space_write_multi_stream_4
+#define bus_space_write_multi_stream_8 kmsan_bus_space_write_multi_stream_8
+#define bus_space_write_region_1 kmsan_bus_space_write_region_1
+#define bus_space_write_region_2 kmsan_bus_space_write_region_2
+#define bus_space_write_region_4 kmsan_bus_space_write_region_4
+#define bus_space_write_region_8 kmsan_bus_space_write_region_8
+#define bus_space_write_region_stream_1 kmsan_bus_space_write_region_stream_1
+#define bus_space_write_region_stream_2 kmsan_bus_space_write_region_stream_2
+#define bus_space_write_region_stream_4 kmsan_bus_space_write_region_stream_4
+#define bus_space_write_region_stream_8 kmsan_bus_space_write_region_stream_8
 #else
 #define BUS_SPACE_WRITE_MEM_PROTOS(bytes, bits)				\
 void bus_space_write_multi_##bytes(bus_space_tag_t, bus_space_handle_t,	\

Index: src/sys/sys/cdefs.h
diff -u src/sys/sys/cdefs.h:1.148 src/sys/sys/cdefs.h:1.149
--- src/sys/sys/cdefs.h:1.148	Tue Nov  5 20:19:18 2019
+++ src/sys/sys/cdefs.h	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cdefs.h,v 1.148 2019/11/05 20:19:18 maxv Exp $	*/
+/*	$NetBSD: cdefs.h,v 1.149 2019/11/14 16:23:53 maxv Exp $	*/
 
 /* * Copyright (c) 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
@@ -348,6 +348,12 @@
 #define	__nocsan	/* nothing */
 #endif
 
+#if defined(__clang__) && defined(KMSAN)
+#define	__nomsan	__attribute__((no_sanitize("memory")))
+#else
+#define	__nomsan	/* nothing */
+#endif
+
 #if defined(__clang__)
 #define __noubsan	__attribute__((no_sanitize("undefined")))
 #elif __GNUC_PREREQ__(4, 9)

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.187 src/sys/sys/lwp.h:1.188
--- src/sys/sys/lwp.h:1.187	Thu Oct  3 22:26:43 2019
+++ src/sys/sys/lwp.h	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.187 2019/10/03 22:26:43 kamil Exp $	*/
+/*	$NetBSD: lwp.h,v 1.188 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -53,6 +53,9 @@ struct lwp;
 /* forward declare this for <machine/cpu.h> so it can get l_cpu. */
 static __inline struct cpu_info *lwp_getcpu(struct lwp *);
 #include <machine/cpu.h>		/* curcpu() and cpu_info */
+#ifdef _KERNEL_OPT
+#include "opt_kmsan.h"
+#endif
 #endif
 
 #include <machine/proc.h>		/* Machine-dependent proc substruct. */
@@ -204,6 +207,10 @@ struct lwp {
 	uint64_t	*l_syscall_counter; /* !: counter for current process */
 
 	struct kdtrace_thread *l_dtrace; /* (: DTrace-specific data. */
+
+#ifdef KMSAN
+	void		*l_kmsan; /* !: KMSAN private data. */
+#endif
 };
 
 /*

Index: src/sys/sys/systm.h
diff -u src/sys/sys/systm.h:1.288 src/sys/sys/systm.h:1.289
--- src/sys/sys/systm.h:1.288	Tue Nov  5 20:19:18 2019
+++ src/sys/sys/systm.h	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: systm.h,v 1.288 2019/11/05 20:19:18 maxv Exp $	*/
+/*	$NetBSD: systm.h,v 1.289 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1988, 1991, 1993
@@ -271,6 +271,9 @@ int	kasan_kcopy(const void *, void *, si
 #elif defined(_KERNEL) && defined(KCSAN)
 int	kcsan_kcopy(const void *, void *, size_t);
 #define kcopy		kcsan_kcopy
+#elif defined(_KERNEL) && defined(KMSAN)
+int	kmsan_kcopy(const void *, void *, size_t);
+#define kcopy		kmsan_kcopy
 #else
 int	kcopy(const void *, void *, size_t);
 #endif
@@ -286,6 +289,7 @@ int	kasan_copystr(const void *, void *, 
 int	kasan_copyinstr(const void *, void *, size_t, size_t *);
 int	kasan_copyoutstr(const void *, void *, size_t, size_t *);
 int	kasan_copyin(const void *, void *, size_t);
+int	copyout(const void *, void *, size_t);
 #define copystr		kasan_copystr
 #define copyinstr	kasan_copyinstr
 #define copyoutstr	kasan_copyoutstr
@@ -295,17 +299,29 @@ int	kcsan_copystr(const void *, void *, 
 int	kcsan_copyinstr(const void *, void *, size_t, size_t *);
 int	kcsan_copyoutstr(const void *, void *, size_t, size_t *);
 int	kcsan_copyin(const void *, void *, size_t);
+int	copyout(const void *, void *, size_t);
 #define copystr		kcsan_copystr
 #define copyinstr	kcsan_copyinstr
 #define copyoutstr	kcsan_copyoutstr
 #define copyin		kcsan_copyin
+#elif defined(_KERNEL) && defined(KMSAN)
+int	kmsan_copystr(const void *, void *, size_t, size_t *);
+int	kmsan_copyinstr(const void *, void *, size_t, size_t *);
+int	kmsan_copyoutstr(const void *, void *, size_t, size_t *);
+int	kmsan_copyin(const void *, void *, size_t);
+int	kmsan_copyout(const void *, void *, size_t);
+#define copystr		kmsan_copystr
+#define copyinstr	kmsan_copyinstr
+#define copyoutstr	kmsan_copyoutstr
+#define copyin		kmsan_copyin
+#define copyout		kmsan_copyout
 #else
 int	copystr(const void *, void *, size_t, size_t *);
 int	copyinstr(const void *, void *, size_t, size_t *);
 int	copyoutstr(const void *, void *, size_t, size_t *);
 int	copyin(const void *, void *, size_t);
-#endif
 int	copyout(const void *, void *, size_t);
+#endif
 
 #ifdef KLEAK
 #define copyout		kleak_copyout

Index: src/sys/uvm/uvm_km.c
diff -u src/sys/uvm/uvm_km.c:1.146 src/sys/uvm/uvm_km.c:1.147
--- src/sys/uvm/uvm_km.c:1.146	Sun Dec  2 21:00:13 2018
+++ src/sys/uvm/uvm_km.c	Thu Nov 14 16:23:53 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.146 2018/12/02 21:00:13 maxv Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.147 2019/11/14 16:23:53 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.146 2018/12/02 21:00:13 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.147 2019/11/14 16:23:53 maxv Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -182,6 +182,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1
 #include <sys/vmem.h>
 #include <sys/vmem_impl.h>
 #include <sys/kmem.h>
+#include <sys/msan.h>
 
 #include <uvm/uvm.h>
 
@@ -224,7 +225,9 @@ kmeminit_nkmempages(void)
 		return;
 	}
 
-#if defined(PMAP_MAP_POOLPAGE)
+#if defined(KMSAN)
+	npages = (physmem / 8);
+#elif defined(PMAP_MAP_POOLPAGE)
 	npages = (physmem / 4);
 #else
 	npages = (physmem / 3) * 2;
@@ -703,6 +706,8 @@ uvm_km_alloc(struct vm_map *map, vsize_t
 
 	if ((flags & UVM_KMF_ZERO) == 0) {
 		kleak_fill_area((void *)kva, size);
+		kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR);
+		kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT);
 	}
 
 	UVMHIST_LOG(maphist,"<- done (kva=0x%jx)", kva,0,0,0);

Added files:

Index: src/sys/arch/amd64/include/msan.h
diff -u /dev/null src/sys/arch/amd64/include/msan.h:1.1
--- /dev/null	Thu Nov 14 16:23:54 2019
+++ src/sys/arch/amd64/include/msan.h	Thu Nov 14 16:23:52 2019
@@ -0,0 +1,241 @@
+/*	$NetBSD: msan.h,v 1.1 2019/11/14 16:23:52 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2019 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/ksyms.h>
+
+#include <amd64/pmap.h>
+#include <amd64/vmparam.h>
+
+#ifdef __HAVE_PCPU_AREA
+#error "PCPU area not allowed with KMSAN"
+#endif
+#ifdef __HAVE_DIRECT_MAP
+#error "DMAP not allowed with KMSAN"
+#endif
+
+/*
+ * One big shadow, divided in two sub-shadows (SHAD and ORIG), themselves
+ * divided in two regions (MAIN and KERN).
+ */
+
+#define __MD_SHADOW_SIZE	0x20000000000ULL	/* 4 * NBPD_L4 */
+#define __MD_SHADOW_START	(VA_SIGN_NEG((L4_SLOT_KMSAN * NBPD_L4)))
+#define __MD_SHADOW_END		(__MD_SHADOW_START + __MD_SHADOW_SIZE)
+
+#define __MD_SHAD_MAIN_START	(__MD_SHADOW_START)
+#define __MD_SHAD_KERN_START	(__MD_SHADOW_START + 0x8000000000ULL)
+
+#define __MD_ORIG_MAIN_START	(__MD_SHAD_KERN_START + 0x8000000000ULL)
+#define __MD_ORIG_KERN_START	(__MD_ORIG_MAIN_START + 0x8000000000ULL)
+
+#define __MD_PTR_BASE		0xFFFFFFFF80000000ULL
+#define __MD_ORIG_TYPE		__BITS(31,28)
+
+/*
+ * Translate a kernel VA into the VA of its 1:1 "shad" shadow byte.
+ * Two regions are handled: the main kernel map (between
+ * vm_min_kernel_address and vm_max_kernel_address) and the kernel image
+ * region above KERNBASE. Any other address is a bug.
+ */
+static inline int8_t *
+kmsan_md_addr_to_shad(const void *addr)
+{
+	vaddr_t va = (vaddr_t)addr;
+
+	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
+		return (int8_t *)(__MD_SHAD_MAIN_START + (va - vm_min_kernel_address));
+	} else if (va >= KERNBASE) {
+		return (int8_t *)(__MD_SHAD_KERN_START + (va - KERNBASE));
+	} else {
+		panic("%s: impossible, va=%p", __func__, (void *)va);
+	}
+}
+
+/*
+ * Same translation as above, but into the "orig" shadow, which records
+ * the origin of each 4-byte cell of kernel memory.
+ */
+static inline int8_t *
+kmsan_md_addr_to_orig(const void *addr)
+{
+	vaddr_t va = (vaddr_t)addr;
+
+	if (va >= vm_min_kernel_address && va < vm_max_kernel_address) {
+		return (int8_t *)(__MD_ORIG_MAIN_START + (va - vm_min_kernel_address));
+	} else if (va >= KERNBASE) {
+		return (int8_t *)(__MD_ORIG_KERN_START + (va - KERNBASE));
+	} else {
+		panic("%s: impossible, va=%p", __func__, (void *)va);
+	}
+}
+
+/*
+ * True for addresses the shadow does not track: here, the recursive
+ * PTE area (one L4 slot starting at PTE_BASE).
+ */
+static inline bool
+kmsan_md_unsupported(vaddr_t addr)
+{
+	return (addr >= (vaddr_t)PTE_BASE &&
+	    addr < ((vaddr_t)PTE_BASE + NBPD_L4));
+}
+
+/* Allocate a physical page for the shadow and its page tables. */
+static inline paddr_t
+__md_palloc(void)
+{
+	return pmap_get_physpage();
+}
+
+/*
+ * Ensure the shadow page containing 'va' is mapped: walk the page-table
+ * hierarchy via the L4..L1 recursive bases and allocate any missing
+ * level. Leaf pages are mapped writable, global and non-executable.
+ */
+static void
+kmsan_md_shadow_map_page(vaddr_t va)
+{
+	paddr_t pa;
+
+	KASSERT(va >= __MD_SHADOW_START && va < __MD_SHADOW_END);
+
+	if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
+		pa = __md_palloc();
+		L4_BASE[pl4_i(va)] = pa | PTE_W | pmap_pg_nx | PTE_P;
+	}
+	if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
+		pa = __md_palloc();
+		L3_BASE[pl3_i(va)] = pa | PTE_W | pmap_pg_nx | PTE_P;
+	}
+	if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
+		pa = __md_palloc();
+		L2_BASE[pl2_i(va)] = pa | PTE_W | pmap_pg_nx | PTE_P;
+	}
+	if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
+		pa = __md_palloc();
+		/* Only the leaf is global (pmap_pg_g). */
+		L1_BASE[pl1_i(va)] = pa | PTE_W | pmap_pg_g | pmap_pg_nx | PTE_P;
+	}
+}
+
+/*
+ * MD initialization: create the shadow of every memory area that already
+ * exists at boot time — the kernel segments, the boot region, the module
+ * map, and the bootstrap spare VA.
+ */
+static void
+kmsan_md_init(void)
+{
+	extern struct bootspace bootspace;
+	size_t i;
+
+	CTASSERT((__MD_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KMSAN);
+
+	/* Kernel. */
+	for (i = 0; i < BTSPACE_NSEGS; i++) {
+		if (bootspace.segs[i].type == BTSEG_NONE) {
+			continue;
+		}
+		kmsan_shadow_map((void *)bootspace.segs[i].va,
+		    bootspace.segs[i].sz);
+	}
+
+	/* Boot region. */
+	kmsan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);
+
+	/* Module map. */
+	kmsan_shadow_map((void *)bootspace.smodule,
+	    (size_t)(bootspace.emodule - bootspace.smodule));
+
+	/* The bootstrap spare va. */
+	kmsan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
+}
+
+/*
+ * Encode an origin cell: the low 28 bits of 'ptr' plus the origin type
+ * in bits 31:28 (__MD_ORIG_TYPE). The top 32 bits of the pointer are
+ * dropped; decode below restores them with __MD_PTR_BASE, which assumes
+ * the pointer targets the kernel image region — TODO confirm for all
+ * callers.
+ */
+static inline msan_orig_t
+kmsan_md_orig_encode(int type, uintptr_t ptr)
+{
+	msan_orig_t ret;
+
+	ret = (ptr & 0xFFFFFFFF) & ~__MD_ORIG_TYPE;
+	ret |= __SHIFTIN(type, __MD_ORIG_TYPE);
+
+	return ret;
+}
+
+/* Inverse of the above: recover the type and the full 64-bit pointer. */
+static inline void
+kmsan_md_orig_decode(msan_orig_t orig, int *type, uintptr_t *ptr)
+{
+	*type = __SHIFTOUT(orig, __MD_ORIG_TYPE);
+	*ptr = (uintptr_t)(orig & ~__MD_ORIG_TYPE) | __MD_PTR_BASE;
+}
+
+/*
+ * Heuristic: addresses below __rodata_start lie in the text segment, so
+ * 'ptr' is a code address; otherwise it points to a compiler-emitted
+ * "var@function" descriptor string in rodata.
+ */
+static inline bool
+kmsan_md_is_pc(uintptr_t ptr)
+{
+	extern uint8_t __rodata_start;
+
+	return (ptr < (uintptr_t)&__rodata_start);
+}
+
+/*
+ * True if 'name' is a trap/interrupt/syscall entry point, ie the bottom
+ * of a valid frame-pointer chain: the unwinder must stop there.
+ */
+static inline bool
+__md_unwind_end(const char *name)
+{
+	if (!strcmp(name, "syscall") ||
+	    !strcmp(name, "alltraps") ||
+	    !strcmp(name, "handle_syscall") ||
+	    !strncmp(name, "Xtrap", 5) ||
+	    !strncmp(name, "Xintr", 5) ||
+	    !strncmp(name, "Xhandle", 7) ||
+	    !strncmp(name, "Xresume", 7) ||
+	    !strncmp(name, "Xstray", 6) ||
+	    !strncmp(name, "Xhold", 5) ||
+	    !strncmp(name, "Xrecurse", 8) ||
+	    !strcmp(name, "Xdoreti") ||
+	    !strncmp(name, "Xsoft", 5)) {
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Print a backtrace of the current LWP by walking the %rbp frame chain,
+ * resolving each saved %rip via ksyms. Stops at a non-kernel address,
+ * an unresolvable symbol, an entry-point symbol, a NULL frame pointer,
+ * or after 15 frames.
+ */
+static void
+kmsan_md_unwind(void)
+{
+	uint64_t *rbp, rip;
+	const char *mod;
+	const char *sym;
+	size_t nsym;
+	int error;
+
+	rbp = (uint64_t *)__builtin_frame_address(0);
+	nsym = 0;
+
+	while (1) {
+		/* 8(%rbp) contains the saved %rip. */
+		rip = *(rbp + 1);
+
+		if (rip < KERNBASE) {
+			break;
+		}
+		error = ksyms_getname(&mod, &sym, (vaddr_t)rip, KSYMS_PROC);
+		if (error) {
+			break;
+		}
+		kmsan_printf("#%zu %p in %s <%s>\n", nsym, (void *)rip, sym, mod);
+		if (__md_unwind_end(sym)) {
+			break;
+		}
+
+		/* 0(%rbp) contains the caller's %rbp. */
+		rbp = (uint64_t *)*(rbp);
+		if (rbp == 0) {
+			break;
+		}
+		nsym++;
+
+		if (nsym >= 15) {
+			break;
+		}
+	}
+}

Index: src/sys/kern/subr_msan.c
diff -u /dev/null src/sys/kern/subr_msan.c:1.1
--- /dev/null	Thu Nov 14 16:23:54 2019
+++ src/sys/kern/subr_msan.c	Thu Nov 14 16:23:52 2019
@@ -0,0 +1,1356 @@
+/*	$NetBSD: subr_msan.c,v 1.1 2019/11/14 16:23:52 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2019 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define KMSAN_NO_INST
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: subr_msan.c,v 1.1 2019/11/14 16:23:52 maxv Exp $");
+
+#include <sys/param.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/kprintf.h>
+#include <sys/kmem.h>
+#include <sys/mbuf.h>
+#include <sys/buf.h>
+#include <sys/cpu.h>
+#include <sys/msan.h>
+
+#include <uvm/uvm.h>
+
+static void kmsan_printf(const char *, ...);
+
+/* Reports either panic or just print, depending on the kernel config. */
+#ifdef KMSAN_PANIC
+#define REPORT panic
+#else
+#define REPORT kmsan_printf
+#endif
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Part of the compiler ABI.
+ */
+
+/* Origin cell: encodes an origin type plus a code/descriptor pointer. */
+typedef uint32_t msan_orig_t;
+
+/* Pair of shadow pointers handed back to instrumented loads/stores. */
+typedef struct {
+	uint8_t *shad;
+	msan_orig_t *orig;
+} msan_meta_t;
+
+#define MSAN_PARAM_SIZE		800
+#define MSAN_RETVAL_SIZE	800
+/*
+ * Per-context TLS block, fetched by the compiler via
+ * __msan_get_context_state(): shadows of the function parameters, the
+ * return value and the va_args, plus their origins.
+ */
+typedef struct {
+	uint8_t param[MSAN_PARAM_SIZE];
+	uint8_t retval[MSAN_RETVAL_SIZE];
+	uint8_t _va_arg[MSAN_PARAM_SIZE];
+	uint8_t va_arg_origin[MSAN_PARAM_SIZE];
+	uint64_t va_arg_overflow_size;
+	msan_orig_t param_origin[MSAN_PARAM_SIZE];
+	msan_orig_t retval_origin;
+	msan_orig_t origin;
+} msan_tls_t;
+
+/* -------------------------------------------------------------------------- */
+
+/* The MD code. */
+#include <machine/msan.h>
+
+/* -------------------------------------------------------------------------- */
+
+#define __RET_ADDR	(uintptr_t)__builtin_return_address(0)
+#define MSAN_NCONTEXT	16
+
+/* One TLS block per context: the LWP itself plus nested interrupts. */
+typedef struct {
+	size_t ctx;
+	msan_tls_t tls[MSAN_NCONTEXT];
+} msan_lwp_t;
+
+/* TLS block returned while kMSan is not yet enabled. */
+static msan_tls_t dummy_tls;
+
+/* Dummy shadow pages, returned for untracked/disabled addresses. */
+static uint8_t msan_dummy_shad[PAGE_SIZE] __aligned(PAGE_SIZE);
+static uint8_t msan_dummy_orig[PAGE_SIZE] __aligned(PAGE_SIZE);
+static msan_lwp_t msan_lwp0;
+static bool kmsan_enabled __read_mostly;
+
+/* -------------------------------------------------------------------------- */
+
+/* Prevents report recursion while a report is being printed. */
+static bool kmsan_reporting = false;
+
+/* Print directly to the console via kprintf(), bypassing log buffering. */
+static inline void
+kmsan_printf(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	kprintf(fmt, TOCONS, NULL, NULL, ap);
+	va_end(ap);
+}
+
+/* Human-readable label for a KMSAN_TYPE_* origin type. */
+static inline const char *
+kmsan_orig_name(int type)
+{
+	switch (type) {
+	case KMSAN_TYPE_STACK:
+		return "Stack";
+	case KMSAN_TYPE_KMEM:
+		return "Kmem";
+	case KMSAN_TYPE_MALLOC:
+		return "Malloc";
+	case KMSAN_TYPE_POOL:
+		return "Pool";
+	case KMSAN_TYPE_UVM:
+		return "Uvm";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * Report an uninitialized access detected by one of the function hooks
+ * (copyout, memcmp, etc). The origin cell of the faulting address is
+ * decoded to recover either the code address that marked the memory, or
+ * a compiler-emitted "----var@function" descriptor, which we parse to
+ * display a nice warning.
+ */
+static void
+kmsan_report_hook(const void *addr, size_t size, size_t off, const char *hook)
+{
+	const char *mod, *sym;
+	extern int db_active;
+	msan_orig_t *orig;
+	const char *typename;
+	char *var, *fn;
+	uintptr_t ptr;
+	char buf[128];
+	int type;
+
+	/* Stay quiet while panicking, in ddb, or already reporting. */
+	if (__predict_false(panicstr != NULL || db_active || kmsan_reporting))
+		return;
+
+	kmsan_reporting = true;
+	__insn_barrier();
+
+	orig = (msan_orig_t *)kmsan_md_addr_to_orig(addr);
+	orig = (msan_orig_t *)((uintptr_t)orig & ~0x3);
+
+	if (*orig == 0) {
+		REPORT("MSan: Uninitialized Memory In %s() At Offset "
+		    "%zu\n", hook, off);
+		goto out;
+	}
+
+	kmsan_md_orig_decode(*orig, &type, &ptr);
+	typename = kmsan_orig_name(type);
+
+	if (kmsan_md_is_pc(ptr)) {
+		if (ksyms_getname(&mod, &sym, (vaddr_t)ptr, KSYMS_PROC)) {
+			REPORT("MSan: Uninitialized %s Memory In %s() "
+			    "At Offset %zu, IP %p\n", typename, hook, off,
+			    (void *)ptr);
+		} else {
+			REPORT("MSan: Uninitialized %s Memory In %s() "
+			    "At Offset %zu, From %s()\n", typename, hook, off,
+			    sym);
+		}
+	} else {
+		/* Skip the leading "----" of the descriptor. */
+		var = (char *)ptr + 4;
+		strlcpy(buf, var, sizeof(buf));
+		var = buf;
+		fn = strchr(buf, '@');
+		if (fn != NULL) {
+			*fn++ = '\0';
+			REPORT("MSan: Uninitialized %s Memory In %s() At Offset "
+			    "%zu, Variable '%s' From %s()\n", typename, hook,
+			    off, var, fn);
+		} else {
+			/*
+			 * strlcpy() may have truncated the "var@function"
+			 * descriptor; do not dereference a NULL strchr().
+			 */
+			REPORT("MSan: Uninitialized %s Memory In %s() At Offset "
+			    "%zu, Variable '%s'\n", typename, hook, off, var);
+		}
+	}
+
+out:
+	kmsan_md_unwind();
+	__insn_barrier();
+	kmsan_reporting = false;
+}
+
+/*
+ * Same as kmsan_report_hook(), but for reports triggered directly by the
+ * instrumentation (__msan_warning), when an uninitialized value changes
+ * the execution flow.
+ */
+static void
+kmsan_report_inline(msan_orig_t orig, unsigned long pc)
+{
+	const char *mod, *sym;
+	extern int db_active;
+	const char *typename;
+	char *var, *fn;
+	uintptr_t ptr;
+	char buf[128];
+	int type;
+
+	/* Stay quiet while panicking, in ddb, or already reporting. */
+	if (__predict_false(panicstr != NULL || db_active || kmsan_reporting))
+		return;
+
+	kmsan_reporting = true;
+	__insn_barrier();
+
+	if (orig == 0) {
+		REPORT("MSan: Uninitialized Variable In %p\n",
+		    (void *)pc);
+		goto out;
+	}
+
+	kmsan_md_orig_decode(orig, &type, &ptr);
+	typename = kmsan_orig_name(type);
+
+	if (kmsan_md_is_pc(ptr)) {
+		if (ksyms_getname(&mod, &sym, (vaddr_t)ptr, KSYMS_PROC)) {
+			REPORT("MSan: Uninitialized %s Memory, "
+			    "Origin %x\n", typename, orig);
+		} else {
+			REPORT("MSan: Uninitialized %s Memory "
+			    "From %s()\n", typename, sym);
+		}
+	} else {
+		/* Skip the leading "----" of the descriptor. */
+		var = (char *)ptr + 4;
+		strlcpy(buf, var, sizeof(buf));
+		var = buf;
+		fn = strchr(buf, '@');
+		if (fn != NULL) {
+			*fn++ = '\0';
+			REPORT("MSan: Uninitialized Variable '%s' From %s()\n",
+			    var, fn);
+		} else {
+			/*
+			 * strlcpy() may have truncated the "var@function"
+			 * descriptor; do not dereference a NULL strchr().
+			 */
+			REPORT("MSan: Uninitialized Variable '%s'\n", var);
+		}
+	}
+
+out:
+	kmsan_md_unwind();
+	__insn_barrier();
+	kmsan_reporting = false;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Return the (shad, orig) shadow pointers for 'addr'. Falls back to the
+ * dummy pages when kMSan is disabled or the address is untracked, so
+ * instrumented code always gets writable shadow memory.
+ */
+static inline msan_meta_t
+kmsan_meta_get(void *addr, size_t size)
+{
+	msan_meta_t ret;
+
+	if (__predict_false(!kmsan_enabled)) {
+		ret.shad = msan_dummy_shad;
+		ret.orig = (msan_orig_t *)msan_dummy_orig;
+	} else if (__predict_false(kmsan_md_unsupported((vaddr_t)addr))) {
+		ret.shad = msan_dummy_shad;
+		ret.orig = (msan_orig_t *)msan_dummy_orig;
+	} else {
+		ret.shad = (void *)kmsan_md_addr_to_shad(addr);
+		ret.orig = (msan_orig_t *)kmsan_md_addr_to_orig(addr);
+		/* The orig pointer must be 4-byte aligned. */
+		ret.orig = (msan_orig_t *)((uintptr_t)ret.orig & ~0x3);
+	}
+
+	return ret;
+}
+
+/* Set the origin of 'size' bytes at 'addr' to 'o', one 4-byte cell at a time. */
+static inline void
+kmsan_origin_fill(void *addr, msan_orig_t o, size_t size)
+{
+	msan_orig_t *orig;
+	size_t i;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	if (__predict_false(kmsan_md_unsupported((vaddr_t)addr)))
+		return;
+
+	orig = (msan_orig_t *)kmsan_md_addr_to_orig(addr);
+	/* Align down to a cell boundary, and widen the fill accordingly. */
+	size += ((uintptr_t)orig & 0x3);
+	orig = (msan_orig_t *)((uintptr_t)orig & ~0x3);
+
+	for (i = 0; i < size; i += 4) {
+		orig[i / 4] = o;
+	}
+}
+
+/* Fill the bit-shadow of [addr, addr+size) with 'c' (KMSAN_STATE_*). */
+static inline void
+kmsan_shadow_fill(void *addr, uint8_t c, size_t size)
+{
+	uint8_t *shad;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	if (__predict_false(kmsan_md_unsupported((vaddr_t)addr)))
+		return;
+
+	shad = kmsan_md_addr_to_shad(addr);
+	__builtin_memset(shad, c, size);
+}
+
+/*
+ * Propagate the shadow of 'src' to 'dst' after a copy: the bit-shadow
+ * is copied byte-for-byte, the origins cell-by-cell. If the source is
+ * untracked, the destination is conservatively marked initialized.
+ */
+static inline void
+kmsan_meta_copy(void *dst, const void *src, size_t size)
+{
+	uint8_t *orig_src, *orig_dst;
+	uint8_t *shad_src, *shad_dst;
+	msan_orig_t *_src, *_dst;
+	size_t i;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	if (__predict_false(kmsan_md_unsupported((vaddr_t)dst)))
+		return;
+	if (__predict_false(kmsan_md_unsupported((vaddr_t)src))) {
+		kmsan_shadow_fill(dst, KMSAN_STATE_INITED, size);
+		return;
+	}
+
+	shad_src = kmsan_md_addr_to_shad(src);
+	shad_dst = kmsan_md_addr_to_shad(dst);
+	__builtin_memmove(shad_dst, shad_src, size);
+
+	/*
+	 * Walk byte-by-byte, copying the aligned-down origin cell each
+	 * byte falls into; each dst cell may be written up to 4 times.
+	 */
+	orig_src = kmsan_md_addr_to_orig(src);
+	orig_dst = kmsan_md_addr_to_orig(dst);
+	for (i = 0; i < size; i++) {
+		_src = (msan_orig_t *)((uintptr_t)orig_src & ~0x3);
+		_dst = (msan_orig_t *)((uintptr_t)orig_dst & ~0x3);
+		*_dst = *_src;
+		orig_src++;
+		orig_dst++;
+	}
+}
+
+/*
+ * Report (once) if any byte of [addr, addr+size) is poisoned; 'hook' is
+ * the name of the caller, for the report.
+ */
+static inline void
+kmsan_shadow_check(const void *addr, size_t size, const char *hook)
+{
+	uint8_t *shad;
+	size_t i;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	if (__predict_false(kmsan_md_unsupported((vaddr_t)addr)))
+		return;
+
+	shad = kmsan_md_addr_to_shad(addr);
+	for (i = 0; i < size; i++) {
+		if (__predict_true(shad[i] == 0))
+			continue;
+		kmsan_report_hook((const char *)addr + i, size, i, hook);
+		break;
+	}
+}
+
+void kmsan_init_arg(size_t);
+void kmsan_init_ret(size_t);
+
+/*
+ * Mark the first 'n' bytes of the current context's parameter shadow as
+ * initialized — called before invoking a non-instrumented function.
+ */
+void
+kmsan_init_arg(size_t n)
+{
+	msan_lwp_t *lwp;
+	uint8_t *arg;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	lwp = curlwp->l_kmsan;
+	arg = lwp->tls[lwp->ctx].param;
+	__builtin_memset(arg, 0, n);
+}
+
+/*
+ * Mark the first 'n' bytes of the current context's return-value shadow
+ * as initialized — called when returning from a non-instrumented path.
+ */
+void
+kmsan_init_ret(size_t n)
+{
+	msan_lwp_t *lwp;
+	uint8_t *arg;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	lwp = curlwp->l_kmsan;
+	arg = lwp->tls[lwp->ctx].retval;
+	__builtin_memset(arg, 0, n);
+}
+
+/*
+ * Report if any of the first 'size' bytes of the parameter shadow is
+ * poisoned, ie if the caller passed an uninitialized argument.
+ */
+static void
+kmsan_check_arg(size_t size, const char *hook)
+{
+	msan_lwp_t *lwp;
+	uint8_t *arg;
+	size_t i;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	lwp = curlwp->l_kmsan;
+	arg = lwp->tls[lwp->ctx].param;
+
+	for (i = 0; i < size; i++) {
+		if (__predict_true(arg[i] == 0))
+			continue;
+		kmsan_report_hook((const char *)arg + i, size, i, hook);
+		break;
+	}
+}
+
+/*
+ * Allocate the kMSan state of a new LWP. The kmsan_init_arg() call
+ * pre-marks the arguments of the upcoming kmem_zalloc() as initialized.
+ * Contexts start at 1; slot 0 is kept as a scratch context.
+ */
+void
+kmsan_lwp_alloc(struct lwp *l)
+{
+	msan_lwp_t *lwp;
+
+	kmsan_init_arg(sizeof(size_t) + sizeof(km_flag_t));
+	lwp = kmem_zalloc(sizeof(msan_lwp_t), KM_SLEEP);
+	lwp->ctx = 1;
+
+	l->l_kmsan = lwp;
+}
+
+/* Free the kMSan state of a dying LWP. */
+void
+kmsan_lwp_free(struct lwp *l)
+{
+	kmsan_init_arg(sizeof(void *) + sizeof(size_t));
+	kmem_free(l->l_kmsan, sizeof(msan_lwp_t));
+}
+
+void kmsan_intr_enter(void);
+void kmsan_intr_leave(void);
+void kmsan_softint(struct lwp *);
+
+/*
+ * Push a new TLS context on interrupt entry, so the interrupt handler
+ * does not clobber the interrupted code's param/retval shadows. Panics
+ * (with kMSan disabled first) if interrupts nest deeper than
+ * MSAN_NCONTEXT.
+ */
+void
+kmsan_intr_enter(void)
+{
+	msan_lwp_t *lwp;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	lwp = curlwp->l_kmsan;
+
+	lwp->ctx++;
+	if (__predict_false(lwp->ctx >= MSAN_NCONTEXT)) {
+		kmsan_enabled = false;
+		panic("%s: lwp->ctx = %zu", __func__, lwp->ctx);
+	}
+
+	kmsan_init_arg(sizeof(void *));
+}
+
+/* Pop the TLS context on interrupt exit. */
+void
+kmsan_intr_leave(void)
+{
+	msan_lwp_t *lwp;
+
+	if (__predict_false(!kmsan_enabled))
+		return;
+	lwp = curlwp->l_kmsan;
+
+	if (__predict_false(lwp->ctx == 0)) {
+		kmsan_enabled = false;
+		panic("%s: lwp->ctx = %zu", __func__, lwp->ctx);
+	}
+	lwp->ctx--;
+}
+
+/* Pre-mark the softint handler's arguments as initialized. */
+void
+kmsan_softint(struct lwp *l)
+{
+	kmsan_init_arg(sizeof(lwp_t *) + sizeof(int));
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Map both the shad and orig shadows of the page-aligned area
+ * [addr, addr+size), allocating shadow pages as needed.
+ */
+void
+kmsan_shadow_map(void *addr, size_t size)
+{
+	size_t npages, i;
+	vaddr_t va;
+
+	KASSERT((vaddr_t)addr % PAGE_SIZE == 0);
+	KASSERT(size % PAGE_SIZE == 0);
+
+	npages = size / PAGE_SIZE;
+
+	va = (vaddr_t)kmsan_md_addr_to_shad(addr);
+	for (i = 0; i < npages; i++) {
+		kmsan_md_shadow_map_page(va + i * PAGE_SIZE);
+	}
+
+	va = (vaddr_t)kmsan_md_addr_to_orig(addr);
+	for (i = 0; i < npages; i++) {
+		kmsan_md_shadow_map_page(va + i * PAGE_SIZE);
+	}
+}
+
+/* Record 'pc' (tagged with 'type') as the origin of [addr, addr+size). */
+void
+kmsan_orig(void *addr, size_t size, int type, uintptr_t pc)
+{
+	msan_orig_t orig;
+
+	orig = kmsan_md_orig_encode(type, pc);
+	kmsan_origin_fill(addr, orig, size);
+}
+
+/* Mark [addr, addr+size) as initialized or uninitialized (KMSAN_STATE_*). */
+void
+kmsan_mark(void *addr, size_t size, uint8_t c)
+{
+	kmsan_shadow_fill(addr, c, size);
+}
+
+/*
+ * Check each mbuf of the chain before it is handed to a driver.
+ * NOTE(review): assumes 'buf' is a non-NULL mbuf chain — the first mbuf
+ * is dereferenced unconditionally.
+ */
+void
+kmsan_check_mbuf(void *buf)
+{
+	struct mbuf *m = buf;
+
+	do {
+		kmsan_shadow_check(mtod(m, void *), m->m_len, "if_transmit");
+	} while ((m = m->m_next) != NULL);
+}
+
+/* Check a buffer before it is written to disk. */
+void
+kmsan_check_buf(void *buf)
+{
+	buf_t *bp = buf;
+
+	kmsan_shadow_check(bp->b_data, bp->b_bcount, "bwrite");
+}
+
+/*
+ * Early initialization: set up the MD shadow mappings, map the shadow of
+ * the boot stack, install the static TLS for lwp0, and enable kMSan.
+ */
+void
+kmsan_init(void *stack)
+{
+	/* MD initialization. */
+	kmsan_md_init();
+
+	/* Map the stack. */
+	kmsan_shadow_map(stack, USPACE);
+
+	/* Initialize the TLS for curlwp. */
+	msan_lwp0.ctx = 1;
+	curlwp->l_kmsan = &msan_lwp0;
+
+	/* Now officially enabled. */
+	kmsan_enabled = true;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Compiler ABI: return the shadow pointers for an n-byte (or fixed-size)
+ * load/store. Loads and stores are treated identically here.
+ */
+msan_meta_t __msan_metadata_ptr_for_load_n(void *, size_t);
+msan_meta_t __msan_metadata_ptr_for_store_n(void *, size_t);
+
+msan_meta_t __msan_metadata_ptr_for_load_n(void *addr, size_t size)
+{
+	return kmsan_meta_get(addr, size);
+}
+
+msan_meta_t __msan_metadata_ptr_for_store_n(void *addr, size_t size)
+{
+	return kmsan_meta_get(addr, size);
+}
+
+/* Generate the fixed-size (1/2/4/8 bytes) load/store entry points. */
+#define MSAN_META_FUNC(size)						\
+	msan_meta_t __msan_metadata_ptr_for_load_##size(void *);	\
+	msan_meta_t __msan_metadata_ptr_for_load_##size(void *addr)	\
+	{								\
+		return kmsan_meta_get(addr, size);			\
+	}								\
+	msan_meta_t __msan_metadata_ptr_for_store_##size(void *);	\
+	msan_meta_t __msan_metadata_ptr_for_store_##size(void *addr)	\
+	{								\
+		return kmsan_meta_get(addr, size);			\
+	}
+
+MSAN_META_FUNC(1)
+MSAN_META_FUNC(2)
+MSAN_META_FUNC(4)
+MSAN_META_FUNC(8)
+
+void __msan_instrument_asm_store(void *, size_t);
+msan_orig_t __msan_chain_origin(msan_orig_t);
+void __msan_poison_alloca(void *, uint64_t, char *);
+void __msan_unpoison_alloca(void *, uint64_t);
+void __msan_warning(msan_orig_t);
+msan_tls_t *__msan_get_context_state(void);
+
+/* An inline asm wrote to memory: conservatively mark it initialized. */
+void __msan_instrument_asm_store(void *addr, size_t size)
+{
+	kmsan_shadow_fill(addr, KMSAN_STATE_INITED, size);
+}
+
+/* Origin chaining is not implemented; keep the original origin. */
+msan_orig_t __msan_chain_origin(msan_orig_t origin)
+{
+	return origin;
+}
+
+/*
+ * A stack variable came to life: poison it, with an origin pointing to
+ * the compiler-emitted "var@function" descriptor.
+ */
+void __msan_poison_alloca(void *addr, uint64_t size, char *descr)
+{
+	msan_orig_t orig;
+
+	orig = kmsan_md_orig_encode(KMSAN_TYPE_STACK, (uintptr_t)descr);
+	kmsan_origin_fill(addr, orig, size);
+	kmsan_shadow_fill(addr, KMSAN_STATE_UNINIT, size);
+}
+
+/* A stack variable went out of scope: unpoison it. */
+void __msan_unpoison_alloca(void *addr, uint64_t size)
+{
+	kmsan_shadow_fill(addr, KMSAN_STATE_INITED, size);
+}
+
+/* An uninitialized value changed the execution flow: report it. */
+void __msan_warning(msan_orig_t origin)
+{
+	if (__predict_false(!kmsan_enabled))
+		return;
+	kmsan_report_inline(origin, __RET_ADDR);
+}
+
+/* Return the TLS block of the current context (dummy if disabled). */
+msan_tls_t *__msan_get_context_state(void)
+{
+	msan_lwp_t *lwp;
+
+	if (__predict_false(!kmsan_enabled))
+		return &dummy_tls;
+	lwp = curlwp->l_kmsan;
+
+	return &lwp->tls[lwp->ctx];
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Function hooks. Mostly ASM functions which need KMSAN wrappers to handle
+ * initialized areas properly.
+ */
+
+/* memcpy() wrapper: copy the data, then propagate its shadow. */
+void *kmsan_memcpy(void *dst, const void *src, size_t len)
+{
+	/* No kmsan_check_arg, because inlined. */
+	kmsan_init_ret(sizeof(void *));
+	if (__predict_true(len != 0)) {
+		kmsan_meta_copy(dst, src, len);
+	}
+	return __builtin_memcpy(dst, src, len);
+}
+
+/*
+ * memcmp() wrapper: only the bytes actually compared (up to and
+ * including the first difference) are checked for initialization.
+ */
+int
+kmsan_memcmp(const void *b1, const void *b2, size_t len)
+{
+	const uint8_t *_b1 = b1, *_b2 = b2;
+	size_t i;
+
+	kmsan_check_arg(sizeof(b1) + sizeof(b2) + sizeof(len), "memcmp");
+	kmsan_init_ret(sizeof(int));
+
+	for (i = 0; i < len; i++) {
+		if (*_b1 != *_b2) {
+			kmsan_shadow_check(b1, i + 1, "memcmp");
+			kmsan_shadow_check(b2, i + 1, "memcmp");
+			return *_b1 - *_b2;
+		}
+		_b1++, _b2++;
+	}
+
+	return 0;
+}
+
+/* memset() wrapper: the destination becomes fully initialized. */
+void *kmsan_memset(void *dst, int c, size_t len)
+{
+	/* No kmsan_check_arg, because inlined. */
+	kmsan_shadow_fill(dst, KMSAN_STATE_INITED, len);
+	kmsan_init_ret(sizeof(void *));
+	return __builtin_memset(dst, c, len);
+}
+
+/* memmove() wrapper: copy the data, then propagate its shadow. */
+void *kmsan_memmove(void *dst, const void *src, size_t len)
+{
+	/* No kmsan_check_arg, because inlined. */
+	if (__predict_true(len != 0)) {
+		kmsan_meta_copy(dst, src, len);
+	}
+	kmsan_init_ret(sizeof(void *));
+	return __builtin_memmove(dst, src, len);
+}
+
+/* The compiler emits calls to these names for mem* intrinsics. */
+__strong_alias(__msan_memcpy, kmsan_memcpy)
+__strong_alias(__msan_memset, kmsan_memset)
+__strong_alias(__msan_memmove, kmsan_memmove)
+
+/*
+ * strcpy() wrapper: copy first, then check that the whole source string
+ * (terminator included) was initialized, and mark the destination.
+ */
+char *
+kmsan_strcpy(char *dst, const char *src)
+{
+	const char *_src = src;
+	char *_dst = dst;
+	size_t len = 0;
+
+	kmsan_check_arg(sizeof(dst) + sizeof(src), "strcpy");
+
+	while (1) {
+		len++;
+		*dst = *src;
+		if (*src == '\0')
+			break;
+		src++, dst++;
+	}
+
+	kmsan_shadow_check(_src, len, "strcpy");
+	kmsan_shadow_fill(_dst, KMSAN_STATE_INITED, len);
+	kmsan_init_ret(sizeof(char *));
+	return _dst;
+}
+
+/*
+ * strcmp() wrapper: only the bytes actually compared are checked for
+ * initialization.
+ */
+int
+kmsan_strcmp(const char *s1, const char *s2)
+{
+	const char *_s1 = s1, *_s2 = s2;
+	size_t len = 0;
+
+	kmsan_check_arg(sizeof(s1) + sizeof(s2), "strcmp");
+	kmsan_init_ret(sizeof(int));
+
+	while (1) {
+		len++;
+		if (*s1 != *s2)
+			break;
+		if (*s1 == '\0') {
+			kmsan_shadow_check(_s1, len, "strcmp");
+			kmsan_shadow_check(_s2, len, "strcmp");
+			return 0;
+		}
+		s1++, s2++;
+	}
+
+	kmsan_shadow_check(_s1, len, "strcmp");
+	kmsan_shadow_check(_s2, len, "strcmp");
+
+	return (*(const unsigned char *)s1 - *(const unsigned char *)s2);
+}
+
+/*
+ * strlen() wrapper: the string, terminator included, must be
+ * initialized. Returns the length without the terminator.
+ */
+size_t
+kmsan_strlen(const char *str)
+{
+	const char *s;
+
+	kmsan_check_arg(sizeof(str), "strlen");
+
+	s = str;
+	while (1) {
+		if (*s == '\0')
+			break;
+		s++;
+	}
+
+	kmsan_shadow_check(str, (size_t)(s - str) + 1, "strlen");
+	kmsan_init_ret(sizeof(size_t));
+	return (s - str);
+}
+
+/*
+ * Wrappers for the user<->kernel copy routines. systm.h redirects the
+ * real names to these; undo that here so we can call the real ones.
+ */
+#undef kcopy
+#undef copystr
+#undef copyin
+#undef copyout
+#undef copyinstr
+#undef copyoutstr
+
+int	kmsan_kcopy(const void *, void *, size_t);
+int	kmsan_copystr(const void *, void *, size_t, size_t *);
+int	kmsan_copyin(const void *, void *, size_t);
+int	kmsan_copyout(const void *, void *, size_t);
+int	kmsan_copyinstr(const void *, void *, size_t, size_t *);
+int	kmsan_copyoutstr(const void *, void *, size_t, size_t *);
+
+int	kcopy(const void *, void *, size_t);
+int	copystr(const void *, void *, size_t, size_t *);
+int	copyin(const void *, void *, size_t);
+int	copyout(const void *, void *, size_t);
+int	copyinstr(const void *, void *, size_t, size_t *);
+int	copyoutstr(const void *, void *, size_t, size_t *);
+
+/* Kernel-to-kernel copy: propagate the shadow along with the data. */
+int
+kmsan_kcopy(const void *src, void *dst, size_t len)
+{
+	kmsan_check_arg(sizeof(src) + sizeof(dst) + sizeof(len), "kcopy");
+	if (__predict_true(len != 0)) {
+		kmsan_meta_copy(dst, src, len);
+	}
+	kmsan_init_ret(sizeof(int));
+	return kcopy(src, dst, len);
+}
+
+/* Kernel-to-kernel string copy: propagate the shadow of the copied part. */
+int
+kmsan_copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
+{
+	size_t _done;
+	int ret;
+
+	kmsan_check_arg(sizeof(kfaddr) + sizeof(kdaddr) +
+	    sizeof(len) + sizeof(done), "copystr");
+	ret = copystr(kfaddr, kdaddr, len, &_done);
+	if (ret == 0)
+		kmsan_meta_copy(kdaddr, kfaddr, _done);
+	if (done != NULL) {
+		*done = _done;
+		kmsan_shadow_fill(done, KMSAN_STATE_INITED, sizeof(size_t));
+	}
+	kmsan_init_ret(sizeof(int));
+
+	return ret;
+}
+
+/* Userland data is considered initialized once copied in. */
+int
+kmsan_copyin(const void *uaddr, void *kaddr, size_t len)
+{
+	int ret;
+
+	kmsan_check_arg(sizeof(uaddr) + sizeof(kaddr) + sizeof(len), "copyin");
+	ret = copyin(uaddr, kaddr, len);
+	if (ret == 0)
+		kmsan_shadow_fill(kaddr, KMSAN_STATE_INITED, len);
+	kmsan_init_ret(sizeof(int));
+
+	return ret;
+}
+
+/* Refuse to leak uninitialized kernel memory to userland: check first. */
+int
+kmsan_copyout(const void *kaddr, void *uaddr, size_t len)
+{
+	kmsan_check_arg(sizeof(kaddr) + sizeof(uaddr) + sizeof(len), "copyout");
+	kmsan_shadow_check(kaddr, len, "copyout");
+	kmsan_init_ret(sizeof(int));
+	return copyout(kaddr, uaddr, len);
+}
+
+/* Like copyin, for NUL-terminated strings. */
+int
+kmsan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
+{
+	size_t _done;
+	int ret;
+
+	kmsan_check_arg(sizeof(uaddr) + sizeof(kaddr) +
+	    sizeof(len) + sizeof(done), "copyinstr");
+	ret = copyinstr(uaddr, kaddr, len, &_done);
+	if (ret == 0)
+		kmsan_shadow_fill(kaddr, KMSAN_STATE_INITED, _done);
+	if (done != NULL) {
+		*done = _done;
+		kmsan_shadow_fill(done, KMSAN_STATE_INITED, sizeof(size_t));
+	}
+	kmsan_init_ret(sizeof(int));
+
+	return ret;
+}
+
+/*
+ * Like copyout, for NUL-terminated strings. The shadow check uses
+ * '_done' unconditionally — NOTE(review): assumes copyoutstr() sets
+ * *done on all return paths; verify against the MD implementations.
+ */
+int
+kmsan_copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
+{
+	size_t _done;
+	int ret;
+
+	kmsan_check_arg(sizeof(kaddr) + sizeof(uaddr) +
+	    sizeof(len) + sizeof(done), "copyoutstr");
+	ret = copyoutstr(kaddr, uaddr, len, &_done);
+	kmsan_shadow_check(kaddr, _done, "copyoutstr");
+	if (done != NULL) {
+		*done = _done;
+		kmsan_shadow_fill(done, KMSAN_STATE_INITED, sizeof(size_t));
+	}
+	kmsan_init_ret(sizeof(int));
+
+	return ret;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * atomic.h redirects the atomic(3) names to kmsan_atomic_*; undo that
+ * here so the wrappers below can call the real implementations.
+ */
+#undef atomic_add_32
+#undef atomic_add_int
+#undef atomic_add_long
+#undef atomic_add_ptr
+#undef atomic_add_64
+#undef atomic_add_32_nv
+#undef atomic_add_int_nv
+#undef atomic_add_long_nv
+#undef atomic_add_ptr_nv
+#undef atomic_add_64_nv
+#undef atomic_and_32
+#undef atomic_and_uint
+#undef atomic_and_ulong
+#undef atomic_and_64
+#undef atomic_and_32_nv
+#undef atomic_and_uint_nv
+#undef atomic_and_ulong_nv
+#undef atomic_and_64_nv
+#undef atomic_or_32
+#undef atomic_or_uint
+#undef atomic_or_ulong
+#undef atomic_or_64
+#undef atomic_or_32_nv
+#undef atomic_or_uint_nv
+#undef atomic_or_ulong_nv
+#undef atomic_or_64_nv
+#undef atomic_cas_32
+#undef atomic_cas_uint
+#undef atomic_cas_ulong
+#undef atomic_cas_ptr
+#undef atomic_cas_64
+#undef atomic_cas_32_ni
+#undef atomic_cas_uint_ni
+#undef atomic_cas_ulong_ni
+#undef atomic_cas_ptr_ni
+#undef atomic_cas_64_ni
+#undef atomic_swap_32
+#undef atomic_swap_uint
+#undef atomic_swap_ulong
+#undef atomic_swap_ptr
+#undef atomic_swap_64
+#undef atomic_dec_32
+#undef atomic_dec_uint
+#undef atomic_dec_ulong
+#undef atomic_dec_ptr
+#undef atomic_dec_64
+#undef atomic_dec_32_nv
+#undef atomic_dec_uint_nv
+#undef atomic_dec_ulong_nv
+#undef atomic_dec_ptr_nv
+#undef atomic_dec_64_nv
+#undef atomic_inc_32
+#undef atomic_inc_uint
+#undef atomic_inc_ulong
+#undef atomic_inc_ptr
+#undef atomic_inc_64
+#undef atomic_inc_32_nv
+#undef atomic_inc_uint_nv
+#undef atomic_inc_ulong_nv
+#undef atomic_inc_ptr_nv
+#undef atomic_inc_64_nv
+#define MSAN_ATOMIC_FUNC_ADD(name, tret, targ1, targ2) \
+	void atomic_add_##name(volatile targ1 *, targ2); \
+	void kmsan_atomic_add_##name(volatile targ1 *, targ2); \
+	void kmsan_atomic_add_##name(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		atomic_add_##name(ptr, val); \
+	} \
+	tret atomic_add_##name##_nv(volatile targ1 *, targ2); \
+	tret kmsan_atomic_add_##name##_nv(volatile targ1 *, targ2); \
+	tret kmsan_atomic_add_##name##_nv(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_add_##name##_nv(ptr, val); \
+	}
+#define MSAN_ATOMIC_FUNC_AND(name, tret, targ1, targ2) \
+	void atomic_and_##name(volatile targ1 *, targ2); \
+	void kmsan_atomic_and_##name(volatile targ1 *, targ2); \
+	void kmsan_atomic_and_##name(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		atomic_and_##name(ptr, val); \
+	} \
+	tret atomic_and_##name##_nv(volatile targ1 *, targ2); \
+	tret kmsan_atomic_and_##name##_nv(volatile targ1 *, targ2); \
+	tret kmsan_atomic_and_##name##_nv(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_and_##name##_nv(ptr, val); \
+	}
+
+#define MSAN_ATOMIC_FUNC_OR(name, tret, targ1, targ2) \
+	void atomic_or_##name(volatile targ1 *, targ2); \
+	void kmsan_atomic_or_##name(volatile targ1 *, targ2); \
+	void kmsan_atomic_or_##name(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		atomic_or_##name(ptr, val); \
+	} \
+	tret atomic_or_##name##_nv(volatile targ1 *, targ2); \
+	tret kmsan_atomic_or_##name##_nv(volatile targ1 *, targ2); \
+	tret kmsan_atomic_or_##name##_nv(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_or_##name##_nv(ptr, val); \
+	}
+
+/*
+ * MSAN_ATOMIC_FUNC_CAS(name, tret, targ1, targ2):
+ *	Emit kMSan wrappers for atomic_cas_<name>() and the non-interlocked
+ *	atomic_cas_<name>_ni() variant. The wrappers check that all three
+ *	arguments (ptr, expected, new) are initialized, check the shadow of
+ *	the target word, and mark the return-value shadow as initialized,
+ *	since both variants return the previous memory contents.
+ */
+#define MSAN_ATOMIC_FUNC_CAS(name, tret, targ1, targ2) \
+	tret atomic_cas_##name(volatile targ1 *, targ2, targ2); \
+	tret kmsan_atomic_cas_##name(volatile targ1 *, targ2, targ2); \
+	tret kmsan_atomic_cas_##name(volatile targ1 *ptr, targ2 exp, targ2 new) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(exp) + sizeof(new), \
+		    __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_cas_##name(ptr, exp, new); \
+	} \
+	tret atomic_cas_##name##_ni(volatile targ1 *, targ2, targ2); \
+	tret kmsan_atomic_cas_##name##_ni(volatile targ1 *, targ2, targ2); \
+	tret kmsan_atomic_cas_##name##_ni(volatile targ1 *ptr, targ2 exp, targ2 new) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(exp) + sizeof(new), \
+		    __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_cas_##name##_ni(ptr, exp, new); \
+	}
+
+/*
+ * MSAN_ATOMIC_FUNC_SWAP(name, tret, targ1, targ2):
+ *	Emit a kMSan wrapper for atomic_swap_<name>(): check the argument
+ *	shadow, check the target word, and mark the return-value shadow as
+ *	initialized (the old memory contents are returned to the caller).
+ */
+#define MSAN_ATOMIC_FUNC_SWAP(name, tret, targ1, targ2) \
+	tret atomic_swap_##name(volatile targ1 *, targ2); \
+	tret kmsan_atomic_swap_##name(volatile targ1 *, targ2); \
+	tret kmsan_atomic_swap_##name(volatile targ1 *ptr, targ2 val) \
+	{ \
+		kmsan_check_arg(sizeof(ptr) + sizeof(val), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_swap_##name(ptr, val); \
+	}
+
+/*
+ * MSAN_ATOMIC_FUNC_DEC(name, tret, targ1):
+ *	Emit kMSan wrappers for atomic_dec_<name>() and
+ *	atomic_dec_<name>_nv(). Only the pointer argument is checked
+ *	(there is no value operand), along with the shadow of the target
+ *	word; the _nv variant additionally marks its return-value shadow
+ *	as initialized.
+ */
+#define MSAN_ATOMIC_FUNC_DEC(name, tret, targ1) \
+	void atomic_dec_##name(volatile targ1 *); \
+	void kmsan_atomic_dec_##name(volatile targ1 *); \
+	void kmsan_atomic_dec_##name(volatile targ1 *ptr) \
+	{ \
+		kmsan_check_arg(sizeof(ptr), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		atomic_dec_##name(ptr); \
+	} \
+	tret atomic_dec_##name##_nv(volatile targ1 *); \
+	tret kmsan_atomic_dec_##name##_nv(volatile targ1 *); \
+	tret kmsan_atomic_dec_##name##_nv(volatile targ1 *ptr) \
+	{ \
+		kmsan_check_arg(sizeof(ptr), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_dec_##name##_nv(ptr); \
+	}
+
+/*
+ * MSAN_ATOMIC_FUNC_INC(name, tret, targ1):
+ *	Emit kMSan wrappers for atomic_inc_<name>() and
+ *	atomic_inc_<name>_nv(). Mirrors MSAN_ATOMIC_FUNC_DEC: check the
+ *	pointer argument shadow and the target word; the _nv variant also
+ *	marks its return-value shadow as initialized.
+ */
+#define MSAN_ATOMIC_FUNC_INC(name, tret, targ1) \
+	void atomic_inc_##name(volatile targ1 *); \
+	void kmsan_atomic_inc_##name(volatile targ1 *); \
+	void kmsan_atomic_inc_##name(volatile targ1 *ptr) \
+	{ \
+		kmsan_check_arg(sizeof(ptr), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		atomic_inc_##name(ptr); \
+	} \
+	tret atomic_inc_##name##_nv(volatile targ1 *); \
+	tret kmsan_atomic_inc_##name##_nv(volatile targ1 *); \
+	tret kmsan_atomic_inc_##name##_nv(volatile targ1 *ptr) \
+	{ \
+		kmsan_check_arg(sizeof(ptr), __func__); \
+		kmsan_shadow_check((uintptr_t)ptr, sizeof(tret), __func__); \
+		kmsan_init_ret(sizeof(tret)); \
+		return atomic_inc_##name##_nv(ptr); \
+	}
+
+/*
+ * Instantiate the kMSan atomic wrappers for every width/type the kernel
+ * atomic_ops(3) API provides. All instantiations uniformly carry a
+ * trailing semicolon (the DEC/INC lines previously lacked one, which
+ * was merely inconsistent: the macros expand to full function
+ * definitions either way).
+ */
+MSAN_ATOMIC_FUNC_ADD(32, uint32_t, uint32_t, int32_t);
+MSAN_ATOMIC_FUNC_ADD(64, uint64_t, uint64_t, int64_t);
+MSAN_ATOMIC_FUNC_ADD(int, unsigned int, unsigned int, int);
+MSAN_ATOMIC_FUNC_ADD(long, unsigned long, unsigned long, long);
+MSAN_ATOMIC_FUNC_ADD(ptr, void *, void, ssize_t);
+
+MSAN_ATOMIC_FUNC_AND(32, uint32_t, uint32_t, uint32_t);
+MSAN_ATOMIC_FUNC_AND(64, uint64_t, uint64_t, uint64_t);
+MSAN_ATOMIC_FUNC_AND(uint, unsigned int, unsigned int, unsigned int);
+MSAN_ATOMIC_FUNC_AND(ulong, unsigned long, unsigned long, unsigned long);
+
+MSAN_ATOMIC_FUNC_OR(32, uint32_t, uint32_t, uint32_t);
+MSAN_ATOMIC_FUNC_OR(64, uint64_t, uint64_t, uint64_t);
+MSAN_ATOMIC_FUNC_OR(uint, unsigned int, unsigned int, unsigned int);
+MSAN_ATOMIC_FUNC_OR(ulong, unsigned long, unsigned long, unsigned long);
+
+MSAN_ATOMIC_FUNC_CAS(32, uint32_t, uint32_t, uint32_t);
+MSAN_ATOMIC_FUNC_CAS(64, uint64_t, uint64_t, uint64_t);
+MSAN_ATOMIC_FUNC_CAS(uint, unsigned int, unsigned int, unsigned int);
+MSAN_ATOMIC_FUNC_CAS(ulong, unsigned long, unsigned long, unsigned long);
+MSAN_ATOMIC_FUNC_CAS(ptr, void *, void, void *);
+
+MSAN_ATOMIC_FUNC_SWAP(32, uint32_t, uint32_t, uint32_t);
+MSAN_ATOMIC_FUNC_SWAP(64, uint64_t, uint64_t, uint64_t);
+MSAN_ATOMIC_FUNC_SWAP(uint, unsigned int, unsigned int, unsigned int);
+MSAN_ATOMIC_FUNC_SWAP(ulong, unsigned long, unsigned long, unsigned long);
+MSAN_ATOMIC_FUNC_SWAP(ptr, void *, void, void *);
+
+MSAN_ATOMIC_FUNC_DEC(32, uint32_t, uint32_t);
+MSAN_ATOMIC_FUNC_DEC(64, uint64_t, uint64_t);
+MSAN_ATOMIC_FUNC_DEC(uint, unsigned int, unsigned int);
+MSAN_ATOMIC_FUNC_DEC(ulong, unsigned long, unsigned long);
+MSAN_ATOMIC_FUNC_DEC(ptr, void *, void);
+
+MSAN_ATOMIC_FUNC_INC(32, uint32_t, uint32_t);
+MSAN_ATOMIC_FUNC_INC(64, uint64_t, uint64_t);
+MSAN_ATOMIC_FUNC_INC(uint, unsigned int, unsigned int);
+MSAN_ATOMIC_FUNC_INC(ulong, unsigned long, unsigned long);
+MSAN_ATOMIC_FUNC_INC(ptr, void *, void);
+
+/* -------------------------------------------------------------------------- */
+
+#include <sys/bus.h>
+
+#undef bus_space_read_multi_1
+#undef bus_space_read_multi_2
+#undef bus_space_read_multi_4
+#undef bus_space_read_multi_8
+#undef bus_space_read_multi_stream_1
+#undef bus_space_read_multi_stream_2
+#undef bus_space_read_multi_stream_4
+#undef bus_space_read_multi_stream_8
+#undef bus_space_read_region_1
+#undef bus_space_read_region_2
+#undef bus_space_read_region_4
+#undef bus_space_read_region_8
+#undef bus_space_read_region_stream_1
+#undef bus_space_read_region_stream_2
+#undef bus_space_read_region_stream_4
+#undef bus_space_read_region_stream_8
+
+/*
+ * MSAN_BUS_READ_FUNC(bytes, bits):
+ *	Emit kMSan wrappers for the bus_space_read_{multi,region}[_stream]
+ *	accessors of the given width. The destination buffer will be
+ *	filled with device-supplied data, so its shadow is marked
+ *	KMSAN_STATE_INITED for the full sizeof(uint<bits>_t) * count span
+ *	before the real accessor is called.
+ */
+#define MSAN_BUS_READ_FUNC(bytes, bits) \
+	void bus_space_read_multi_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+	    bus_size_t, uint##bits##_t *, bus_size_t);				\
+	void kmsan_bus_space_read_multi_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);	\
+	void kmsan_bus_space_read_multi_##bytes(bus_space_tag_t tag,		\
+	    bus_space_handle_t hnd, bus_size_t size, uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_fill(buf, KMSAN_STATE_INITED,			\
+		    sizeof(uint##bits##_t) * count);				\
+		bus_space_read_multi_##bytes(tag, hnd, size, buf, count);	\
+	}									\
+	void bus_space_read_multi_stream_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);	\
+	void kmsan_bus_space_read_multi_stream_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);	\
+	void kmsan_bus_space_read_multi_stream_##bytes(bus_space_tag_t tag,	\
+	    bus_space_handle_t hnd, bus_size_t size, uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_fill(buf, KMSAN_STATE_INITED,			\
+		    sizeof(uint##bits##_t) * count);				\
+		bus_space_read_multi_stream_##bytes(tag, hnd, size, buf, count);\
+	}									\
+	void bus_space_read_region_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+	    bus_size_t, uint##bits##_t *, bus_size_t);				\
+	void kmsan_bus_space_read_region_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);	\
+	void kmsan_bus_space_read_region_##bytes(bus_space_tag_t tag,		\
+	    bus_space_handle_t hnd, bus_size_t size, uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_fill(buf, KMSAN_STATE_INITED,			\
+		    sizeof(uint##bits##_t) * count);				\
+		bus_space_read_region_##bytes(tag, hnd, size, buf, count);	\
+	}									\
+	void bus_space_read_region_stream_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);	\
+	void kmsan_bus_space_read_region_stream_##bytes(bus_space_tag_t,	\
+	    bus_space_handle_t, bus_size_t, uint##bits##_t *, bus_size_t);	\
+	void kmsan_bus_space_read_region_stream_##bytes(bus_space_tag_t tag,	\
+	    bus_space_handle_t hnd, bus_size_t size, uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_fill(buf, KMSAN_STATE_INITED,			\
+		    sizeof(uint##bits##_t) * count);				\
+		bus_space_read_region_stream_##bytes(tag, hnd, size, buf, count);\
+	}
+
+MSAN_BUS_READ_FUNC(1, 8)
+MSAN_BUS_READ_FUNC(2, 16)
+MSAN_BUS_READ_FUNC(4, 32)
+MSAN_BUS_READ_FUNC(8, 64)
+
+#undef bus_space_write_multi_1
+#undef bus_space_write_multi_2
+#undef bus_space_write_multi_4
+#undef bus_space_write_multi_8
+#undef bus_space_write_multi_stream_1
+#undef bus_space_write_multi_stream_2
+#undef bus_space_write_multi_stream_4
+#undef bus_space_write_multi_stream_8
+#undef bus_space_write_region_1
+#undef bus_space_write_region_2
+#undef bus_space_write_region_4
+#undef bus_space_write_region_8
+#undef bus_space_write_region_stream_1
+#undef bus_space_write_region_stream_2
+#undef bus_space_write_region_stream_4
+#undef bus_space_write_region_stream_8
+
+/*
+ * MSAN_BUS_WRITE_FUNC(bytes, bits):
+ *	Emit kMSan wrappers for the bus_space_write_{multi,region}[_stream]
+ *	accessors of the given width. The source buffer is about to be
+ *	handed to the device, so its shadow is checked for uninitialized
+ *	bytes over the full sizeof(uint<bits>_t) * count span before the
+ *	real accessor is called.
+ */
+#define MSAN_BUS_WRITE_FUNC(bytes, bits) \
+	void bus_space_write_multi_##bytes(bus_space_tag_t, bus_space_handle_t,	\
+	    bus_size_t, const uint##bits##_t *, bus_size_t);			\
+	void kmsan_bus_space_write_multi_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);\
+	void kmsan_bus_space_write_multi_##bytes(bus_space_tag_t tag,		\
+	    bus_space_handle_t hnd, bus_size_t size, const uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_check(buf, sizeof(uint##bits##_t) * count,		\
+		    "bus_space_write");						\
+		bus_space_write_multi_##bytes(tag, hnd, size, buf, count);	\
+	}									\
+	void bus_space_write_multi_stream_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);\
+	void kmsan_bus_space_write_multi_stream_##bytes(bus_space_tag_t,	\
+	    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);\
+	void kmsan_bus_space_write_multi_stream_##bytes(bus_space_tag_t tag,	\
+	    bus_space_handle_t hnd, bus_size_t size, const uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_check(buf, sizeof(uint##bits##_t) * count,		\
+		    "bus_space_write");						\
+		bus_space_write_multi_stream_##bytes(tag, hnd, size, buf, count);\
+	}									\
+	void bus_space_write_region_##bytes(bus_space_tag_t, bus_space_handle_t,\
+	    bus_size_t, const uint##bits##_t *, bus_size_t);			\
+	void kmsan_bus_space_write_region_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);\
+	void kmsan_bus_space_write_region_##bytes(bus_space_tag_t tag,		\
+	    bus_space_handle_t hnd, bus_size_t size, const uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_check(buf, sizeof(uint##bits##_t) * count,		\
+		    "bus_space_write");						\
+		bus_space_write_region_##bytes(tag, hnd, size, buf, count);	\
+	}									\
+	void bus_space_write_region_stream_##bytes(bus_space_tag_t,		\
+	    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);\
+	void kmsan_bus_space_write_region_stream_##bytes(bus_space_tag_t,	\
+	    bus_space_handle_t, bus_size_t, const uint##bits##_t *, bus_size_t);\
+	void kmsan_bus_space_write_region_stream_##bytes(bus_space_tag_t tag,	\
+	    bus_space_handle_t hnd, bus_size_t size, const uint##bits##_t *buf,	\
+	    bus_size_t count)							\
+	{									\
+		kmsan_shadow_check(buf, sizeof(uint##bits##_t) * count,		\
+		    "bus_space_write");						\
+		bus_space_write_region_stream_##bytes(tag, hnd, size, buf, count);\
+	}
+
+MSAN_BUS_WRITE_FUNC(1, 8)
+MSAN_BUS_WRITE_FUNC(2, 16)
+MSAN_BUS_WRITE_FUNC(4, 32)
+MSAN_BUS_WRITE_FUNC(8, 64)
+
+/* -------------------------------------------------------------------------- */
+
+#include <sys/mbuf.h>
+
+/*
+ * Handle a DMA sync on a linear (contiguous) kernel buffer. On POSTREAD
+ * (init == true) the device has just written the window, so mark it as
+ * initialized; on PREWRITE (init == false) the window is about to be
+ * handed to the device, so check that it is fully initialized.
+ *
+ * NOTE(review): 'pc' is accepted but unused in this body -- presumably
+ * intended for origin tracking in the report; confirm or drop.
+ */
+static void
+kmsan_dma_sync_linear(uint8_t *buf, bus_addr_t offset, bus_size_t len,
+    bool init, uintptr_t pc)
+{
+	if (init) {
+		kmsan_shadow_fill(buf + offset, KMSAN_STATE_INITED, len);
+	} else {
+		kmsan_shadow_check(buf + offset, len, "dma_sync_linear");
+	}
+}
+
+/*
+ * Handle a DMA sync on an mbuf chain: skip the first 'offset' bytes of
+ * the chain, then fill (POSTREAD) or check (PREWRITE) the shadow of the
+ * next 'len' bytes, walking mbuf by mbuf. Stops early if the chain ends
+ * before 'len' bytes have been covered.
+ *
+ * NOTE(review): 'pc' is accepted but unused in this body -- presumably
+ * intended for origin tracking; confirm or drop.
+ */
+static void
+kmsan_dma_sync_mbuf(struct mbuf *m, bus_addr_t offset, bus_size_t len,
+    bool init, uintptr_t pc)
+{
+	bus_addr_t minlen;
+
+	for (; m != NULL && len != 0; m = m->m_next) {
+		/* Consume 'offset' across leading mbufs that are fully skipped. */
+		if (offset >= m->m_len) {
+			offset -= m->m_len;
+			continue;
+		}
+
+		/* Portion of this mbuf covered by the [offset, offset+len) window. */
+		minlen = MIN(len, m->m_len - offset);
+
+		if (init) {
+			kmsan_shadow_fill(mtod(m, char *) + offset,
+			    KMSAN_STATE_INITED, minlen);
+		} else {
+			kmsan_shadow_check(mtod(m, char *) + offset,
+			    minlen, "dma_sync_mbuf");
+		}
+
+		/* Only the first touched mbuf has a non-zero intra-mbuf offset. */
+		offset = 0;
+		len -= minlen;
+	}
+}
+
+/*
+ * Handle a DMA sync on a uio: fill (POSTREAD) or check (PREWRITE) the
+ * shadow of each iovec, up to uio_resid bytes total.
+ *
+ * NOTE(review): the guard below returns early whenever uio_vmspace is
+ * non-NULL. Kernel-space uios normally carry vmspace_kernel() rather
+ * than NULL, so as written this appears to skip kernel transfers too --
+ * confirm the intent is "skip user-space uios only".
+ * NOTE(review): 'offset', 'len' and 'pc' are accepted but unused in
+ * this body (unlike the linear/mbuf variants) -- confirm or drop.
+ */
+static void
+kmsan_dma_sync_uio(struct uio *uio, bus_addr_t offset, bus_size_t len,
+    bool init, uintptr_t pc)
+{
+	bus_size_t minlen, resid;
+	struct iovec *iov;
+	int i;
+
+	if (uio->uio_vmspace != NULL)
+		return;
+
+	resid = uio->uio_resid;
+	iov = uio->uio_iov;
+
+	for (i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
+		/* Clamp to the bytes remaining in the whole uio. */
+		minlen = MIN(resid, iov[i].iov_len);
+
+		if (init) {
+			kmsan_shadow_fill(iov[i].iov_base,
+			    KMSAN_STATE_INITED, minlen);
+		} else {
+			kmsan_shadow_check(iov[i].iov_base, minlen,
+			    "dma_sync_uio");
+		}
+
+		resid -= minlen;
+	}
+}
+
+/*
+ * kmsan_dma_sync(): hook invoked on bus_dmamap_sync(). Only two ops
+ * matter to kMSan:
+ *  - POSTREAD: the device has written into the buffer; mark the synced
+ *    bytes as initialized (init = true).
+ *  - PREWRITE: the buffer is about to be sent to the device; check that
+ *    the bytes are initialized.
+ * If both bits are set, POSTREAD (fill) takes precedence. The walk is
+ * dispatched on the buffer type recorded by kmsan_dma_load();
+ * KMSAN_DMA_RAW buffers are not tracked.
+ */
+void
+kmsan_dma_sync(bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops)
+{
+	bool init;
+
+	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) == 0)
+		return;
+	init = (ops & BUS_DMASYNC_POSTREAD) != 0;
+
+	switch (map->dm_buftype) {
+	case KMSAN_DMA_LINEAR:
+		kmsan_dma_sync_linear(map->dm_buf, offset, len, init,
+		    __RET_ADDR);
+		break;
+	case KMSAN_DMA_MBUF:
+		kmsan_dma_sync_mbuf(map->dm_buf, offset, len, init,
+		    __RET_ADDR);
+		break;
+	case KMSAN_DMA_UIO:
+		kmsan_dma_sync_uio(map->dm_buf, offset, len, init,
+		    __RET_ADDR);
+		break;
+	case KMSAN_DMA_RAW:
+		break;
+	default:
+		panic("%s: impossible", __func__);
+	}
+}
+
+/*
+ * kmsan_dma_load(): hook invoked on bus_dmamap_load*(). Remember which
+ * buffer (and of which kind: linear/mbuf/uio/raw) is attached to this
+ * DMA map, so that a later kmsan_dma_sync() knows how to walk it.
+ */
+void
+kmsan_dma_load(bus_dmamap_t map, void *buf, bus_size_t buflen, int type)
+{
+	map->dm_buftype = type;
+	map->dm_buflen = buflen;
+	map->dm_buf = buf;
+}

Index: src/sys/sys/msan.h
diff -u /dev/null src/sys/sys/msan.h:1.1
--- /dev/null	Thu Nov 14 16:23:54 2019
+++ src/sys/sys/msan.h	Thu Nov 14 16:23:53 2019
@@ -0,0 +1,85 @@
+/*	$NetBSD: msan.h,v 1.1 2019/11/14 16:23:53 maxv Exp $	*/
+
+/*
+ * Copyright (c) 2019 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_MSAN_H_
+#define _SYS_MSAN_H_
+
+#ifdef _KERNEL_OPT
+#include "opt_kmsan.h"
+#endif
+
+#ifdef KMSAN
+#include <sys/types.h>
+#include <sys/bus.h>
+
+#define KMSAN_STATE_UNINIT	0xFF
+#define KMSAN_STATE_INITED	0x00
+
+#define KMSAN_TYPE_STACK	0
+#define KMSAN_TYPE_KMEM		1
+#define KMSAN_TYPE_MALLOC	2
+#define KMSAN_TYPE_POOL		3
+#define KMSAN_TYPE_UVM		4
+
+#define KMSAN_DMA_LINEAR	1
+#define KMSAN_DMA_MBUF		2
+#define KMSAN_DMA_UIO		3
+#define KMSAN_DMA_RAW		4
+
+#define __RET_ADDR		(uintptr_t)__builtin_return_address(0)
+
+void kmsan_init(void *);
+void kmsan_shadow_map(void *, size_t);
+
+void kmsan_lwp_alloc(struct lwp *);
+void kmsan_lwp_free(struct lwp *);
+
+void kmsan_dma_sync(bus_dmamap_t, bus_addr_t, bus_size_t, int);
+void kmsan_dma_load(bus_dmamap_t, void *, bus_size_t, int);
+
+void kmsan_orig(void *, size_t, int, uintptr_t);
+void kmsan_mark(void *, size_t, uint8_t);
+void kmsan_check_mbuf(void *);
+void kmsan_check_buf(void *);
+#else
+#define kmsan_init(u)			__nothing
+#define kmsan_shadow_map(a, s)		__nothing
+#define kmsan_lwp_alloc(l)		__nothing
+#define kmsan_lwp_free(l)		__nothing
+#define kmsan_dma_sync(m, a, s, o)	__nothing
+#define kmsan_dma_load(m, b, s, o)	__nothing
+#define kmsan_orig(p, l, c, a)		__nothing
+#define kmsan_mark(p, l, c)		__nothing
+#define kmsan_check_mbuf(m)		__nothing
+#define kmsan_check_buf(b)		__nothing
+#endif
+
+#endif /* !_SYS_MSAN_H_ */

Reply via email to