Module Name: src Committed By: maxv Date: Sat May 2 11:37:17 UTC 2020
Modified Files: src/external/cddl/osnet/dev/fbt/x86: fbt_isa.c src/sys/arch/amd64/amd64: cpufunc.S src/sys/arch/i386/i386: cpufunc.S src/sys/arch/x86/include: cpufunc.h src/sys/arch/x86/x86: patch.c spectre.c svs.c Log Message: Modify the hotpatch mechanism, in order to make it much less ROP-friendly. Currently x86_patch_window_open is a big problem, because it is a perfect function to inject/modify executable code with ROP. - Remove x86_patch_window_open(), along with its x86_patch_window_close() counterpart. - Introduce a read-only link-set of hotpatch descriptor structures, which reference a maximum of two read-only hotpatch sources. - Modify x86_hotpatch() to open a window and call the new x86_hotpatch_apply() function in a hard-coded manner. - Modify x86_hotpatch() to take a name and a selector, and have x86_hotpatch_apply() resolve the descriptor from the name and the source from the selector, before hotpatching. - Move the error handling in a separate x86_hotpatch_cleanup() function, that gets called after we closed the window. The resulting implementation is a bit complex and non-obvious. But it gains the following properties: the code executed in the hotpatch window is strictly hard-coded (no callback and no possibility to execute your own code in the window) and the pointers this code accesses are strictly read-only (no possibility to forge pointers to hotpatch an area that was not designated as hotpatchable at compile-time, and no possibility to choose what bytes to write other than the maximum of two read-only templates that were designated as valid for the given destination at compile-time). With current CPUs this slightly improves a situation that is already pretty bad by definition on x86. Assuming CET however, this change closes a big hole and is kinda great. The only ~problem there is, is that dtrace-fbt tries to hotpatch random places with random bytes, and there is just no way to make it safe. 
However dtrace is only in a module, that is rarely used and never compiled into the kernel, so it's not a big problem; add a shitty & vulnerable independent hotpatch window in it, and leave big XXXs. It looks like fbt is going to collapse soon anyway. To generate a diff of this commit: cvs rdiff -u -r1.2 -r1.3 src/external/cddl/osnet/dev/fbt/x86/fbt_isa.c cvs rdiff -u -r1.49 -r1.50 src/sys/arch/amd64/amd64/cpufunc.S cvs rdiff -u -r1.38 -r1.39 src/sys/arch/i386/i386/cpufunc.S cvs rdiff -u -r1.38 -r1.39 src/sys/arch/x86/include/cpufunc.h cvs rdiff -u -r1.46 -r1.47 src/sys/arch/x86/x86/patch.c cvs rdiff -u -r1.34 -r1.35 src/sys/arch/x86/x86/spectre.c \ src/sys/arch/x86/x86/svs.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/external/cddl/osnet/dev/fbt/x86/fbt_isa.c diff -u src/external/cddl/osnet/dev/fbt/x86/fbt_isa.c:1.2 src/external/cddl/osnet/dev/fbt/x86/fbt_isa.c:1.3 --- src/external/cddl/osnet/dev/fbt/x86/fbt_isa.c:1.2 Wed Nov 13 10:13:41 2019 +++ src/external/cddl/osnet/dev/fbt/x86/fbt_isa.c Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: fbt_isa.c,v 1.2 2019/11/13 10:13:41 maxv Exp $ */ +/* $NetBSD: fbt_isa.c,v 1.3 2020/05/02 11:37:17 maxv Exp $ */ /* * CDDL HEADER START @@ -172,18 +172,40 @@ fbt_patch_tracepoint(fbt_probe_t *fbt, f #endif #ifdef __NetBSD__ +/* + * XXX XXX XXX This is absolutely unsafe, the mere existence of this code is a + * problem, because this function is too easily ROP-able. But this gets + * compiled as a module and never in the kernel, so we are fine "by default". + * XXX Add a #warning if it gets compiled in the kernel? + */ void fbt_patch_tracepoint(fbt_probe_t *fbt, fbt_patchval_t val) { u_long psl, cr0; - x86_patch_window_open(&psl, &cr0); + /* Disable interrupts. */ + psl = x86_read_psl(); + x86_disable_intr(); + + /* Disable write protection in supervisor mode. */ + cr0 = rcr0(); + lcr0(cr0 & ~CR0_WP); + /* XXX XXX XXX Shouldn't rely on caller-provided dst! */ + /* XXX XXX XXX Shouldn't rely on caller-provided val! */ for (; fbt != NULL; fbt = fbt->fbtp_next) { *fbt->fbtp_patchpoint = val; } - x86_patch_window_close(psl, cr0); + /* Write back and invalidate cache, flush pipelines. */ + wbinvd(); + x86_flush(); + + /* Re-enable write protection. */ + lcr0(cr0); + + /* Restore the PSL, potentially re-enabling interrupts. 
*/ + x86_write_psl(psl); } #endif Index: src/sys/arch/amd64/amd64/cpufunc.S diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.49 src/sys/arch/amd64/amd64/cpufunc.S:1.50 --- src/sys/arch/amd64/amd64/cpufunc.S:1.49 Thu Nov 21 19:23:58 2019 +++ src/sys/arch/amd64/amd64/cpufunc.S Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.S,v 1.49 2019/11/21 19:23:58 ad Exp $ */ +/* $NetBSD: cpufunc.S,v 1.50 2020/05/02 11:37:17 maxv Exp $ */ /* * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. @@ -380,3 +380,41 @@ ENTRY(outl) outl %eax, %dx ret END(outl) + +/* + * %rdi = name + * %rsi = sel + */ +ENTRY(x86_hotpatch) + /* save RFLAGS, and disable intrs */ + pushfq + cli + + /* save CR0, and disable WP */ + movq %cr0,%rcx + pushq %rcx + andq $~CR0_WP,%rcx + movq %rcx,%cr0 + + callq _C_LABEL(x86_hotpatch_apply) + + /* write back and invalidate cache */ + wbinvd + + /* restore CR0 */ + popq %rcx + movq %rcx,%cr0 + + /* flush instruction pipeline */ + pushq %rax + callq x86_flush + popq %rax + + /* clean up */ + movq %rax,%rdi + callq _C_LABEL(x86_hotpatch_cleanup) + + /* restore RFLAGS */ + popfq + ret +END(x86_hotpatch) Index: src/sys/arch/i386/i386/cpufunc.S diff -u src/sys/arch/i386/i386/cpufunc.S:1.38 src/sys/arch/i386/i386/cpufunc.S:1.39 --- src/sys/arch/i386/i386/cpufunc.S:1.38 Thu Nov 21 19:24:00 2019 +++ src/sys/arch/i386/i386/cpufunc.S Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.S,v 1.38 2019/11/21 19:24:00 ad Exp $ */ +/* $NetBSD: cpufunc.S,v 1.39 2020/05/02 11:37:17 maxv Exp $ */ /*- * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. 
@@ -38,7 +38,7 @@ #include <sys/errno.h> #include <machine/asm.h> -__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.38 2019/11/21 19:24:00 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.39 2020/05/02 11:37:17 maxv Exp $"); #include "opt_xen.h" @@ -274,3 +274,41 @@ ENTRY(outl) outl %eax, %dx ret END(outl) + +ENTRY(x86_hotpatch) + /* save EFLAGS, and disable intrs */ + pushfl + cli + + /* save CR0, and disable WP */ + movl %cr0,%ecx + pushl %ecx + andl $~CR0_WP,%ecx + movl %ecx,%cr0 + + pushl 4*4(%esp) /* arg2 */ + pushl 4*4(%esp) /* arg1 */ + call _C_LABEL(x86_hotpatch_apply) + addl $2*4,%esp + + /* write back and invalidate cache */ + wbinvd + + /* restore CR0 */ + popl %ecx + movl %ecx,%cr0 + + /* flush instruction pipeline */ + pushl %eax + call x86_flush + popl %eax + + /* clean up */ + pushl %eax + call _C_LABEL(x86_hotpatch_cleanup) + addl $4,%esp + + /* restore EFLAGS */ + popfl + ret +END(x86_hotpatch) Index: src/sys/arch/x86/include/cpufunc.h diff -u src/sys/arch/x86/include/cpufunc.h:1.38 src/sys/arch/x86/include/cpufunc.h:1.39 --- src/sys/arch/x86/include/cpufunc.h:1.38 Sat Apr 25 15:26:18 2020 +++ src/sys/arch/x86/include/cpufunc.h Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: cpufunc.h,v 1.38 2020/04/25 15:26:18 bouyer Exp $ */ +/* $NetBSD: cpufunc.h,v 1.39 2020/05/02 11:37:17 maxv Exp $ */ /* * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc. 
@@ -104,9 +104,18 @@ rdtsc(void) } #ifndef XENPV -void x86_hotpatch(uint32_t, const uint8_t *, size_t); -void x86_patch_window_open(u_long *, u_long *); -void x86_patch_window_close(u_long, u_long); +struct x86_hotpatch_source { + uint8_t *saddr; + uint8_t *eaddr; +}; + +struct x86_hotpatch_descriptor { + uint8_t name; + uint8_t nsrc; + const struct x86_hotpatch_source *srcs[]; +}; + +void x86_hotpatch(uint8_t, uint8_t); void x86_patch(bool); #endif Index: src/sys/arch/x86/x86/patch.c diff -u src/sys/arch/x86/x86/patch.c:1.46 src/sys/arch/x86/x86/patch.c:1.47 --- src/sys/arch/x86/x86/patch.c:1.46 Fri May 1 09:40:47 2020 +++ src/sys/arch/x86/x86/patch.c Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: patch.c,v 1.46 2020/05/01 09:40:47 maxv Exp $ */ +/* $NetBSD: patch.c,v 1.47 2020/05/02 11:37:17 maxv Exp $ */ /*- * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc. @@ -34,7 +34,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.46 2020/05/01 09:40:47 maxv Exp $"); +__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.47 2020/05/02 11:37:17 maxv Exp $"); #include "opt_lockdebug.h" #ifdef i386 @@ -52,12 +52,137 @@ __KERNEL_RCSID(0, "$NetBSD: patch.c,v 1. #include <x86/cpuvar.h> #include <x86/cputypes.h> -struct hotpatch { +__link_set_decl(x86_hotpatch_descriptors, struct x86_hotpatch_descriptor); + +struct x86_hotpatch_destination { uint8_t name; uint8_t size; void *addr; } __packed; +/* -------------------------------------------------------------------------- */ + +/* CLAC instruction, part of SMAP. */ +extern uint8_t hp_clac, hp_clac_end; +static const struct x86_hotpatch_source hp_clac_source = { + .saddr = &hp_clac, + .eaddr = &hp_clac_end +}; +static const struct x86_hotpatch_descriptor hp_clac_desc = { + .name = HP_NAME_CLAC, + .nsrc = 1, + .srcs = { &hp_clac_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_clac_desc); + +/* STAC instruction, part of SMAP. 
*/ +extern uint8_t hp_stac, hp_stac_end; +static const struct x86_hotpatch_source hp_stac_source = { + .saddr = &hp_stac, + .eaddr = &hp_stac_end +}; +static const struct x86_hotpatch_descriptor hp_stac_desc = { + .name = HP_NAME_STAC, + .nsrc = 1, + .srcs = { &hp_stac_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_stac_desc); + +/* Errata on certain AMD CPUs. */ +extern uint8_t hp_retfence, hp_retfence_end; +static const struct x86_hotpatch_source hp_retfence_source = { + .saddr = &hp_retfence, + .eaddr = &hp_retfence_end +}; +static const struct x86_hotpatch_descriptor hp_retfence_desc = { + .name = HP_NAME_RETFENCE, + .nsrc = 1, + .srcs = { &hp_retfence_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_retfence_desc); + +/* No lock when on a single processor. */ +extern uint8_t hp_nolock, hp_nolock_end; +static const struct x86_hotpatch_source hp_nolock_source = { + .saddr = &hp_nolock, + .eaddr = &hp_nolock_end +}; +static const struct x86_hotpatch_descriptor hp_nolock_desc = { + .name = HP_NAME_NOLOCK, + .nsrc = 1, + .srcs = { &hp_nolock_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_nolock_desc); + +/* Use LFENCE if available, part of SSE2. */ +extern uint8_t sse2_lfence, sse2_lfence_end; +static const struct x86_hotpatch_source hp_sse2_lfence_source = { + .saddr = &sse2_lfence, + .eaddr = &sse2_lfence_end +}; +static const struct x86_hotpatch_descriptor hp_sse2_lfence_desc = { + .name = HP_NAME_SSE2_LFENCE, + .nsrc = 1, + .srcs = { &hp_sse2_lfence_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_sse2_lfence_desc); + +/* Use MFENCE if available, part of SSE2. 
*/ +extern uint8_t sse2_mfence, sse2_mfence_end; +static const struct x86_hotpatch_source hp_sse2_mfence_source = { + .saddr = &sse2_mfence, + .eaddr = &sse2_mfence_end +}; +static const struct x86_hotpatch_descriptor hp_sse2_mfence_desc = { + .name = HP_NAME_SSE2_MFENCE, + .nsrc = 1, + .srcs = { &hp_sse2_mfence_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_sse2_mfence_desc); + +#ifdef i386 +/* CAS_64. */ +extern uint8_t _atomic_cas_cx8, _atomic_cas_cx8_end; +static const struct x86_hotpatch_source hp_cas_cx8_source = { + .saddr = &_atomic_cas_cx8, + .eaddr = &_atomic_cas_cx8_end +}; +static const struct x86_hotpatch_descriptor hp_cas_cx8_desc = { + .name = HP_NAME_CAS_64, + .nsrc = 1, + .srcs = { &hp_cas_cx8_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_cas_cx8_desc); + +/* SPLLOWER. */ +extern uint8_t cx8_spllower, cx8_spllower_end; +static const struct x86_hotpatch_source hp_cx8_spllower_source = { + .saddr = &cx8_spllower, + .eaddr = &cx8_spllower_end +}; +static const struct x86_hotpatch_descriptor hp_cx8_spllower_desc = { + .name = HP_NAME_SPLLOWER, + .nsrc = 1, + .srcs = { &hp_cx8_spllower_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_cx8_spllower_desc); + +/* MUTEX_EXIT. 
*/ +extern uint8_t i686_mutex_spin_exit, i686_mutex_spin_exit_end; +static const struct x86_hotpatch_source hp_i686_mutex_spin_exit_source = { + .saddr = &i686_mutex_spin_exit, + .eaddr = &i686_mutex_spin_exit_end +}; +static const struct x86_hotpatch_descriptor hp_i686_mutex_spin_exit_desc = { + .name = HP_NAME_MUTEX_EXIT, + .nsrc = 1, + .srcs = { &hp_i686_mutex_spin_exit_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_i686_mutex_spin_exit_desc); +#endif + +/* -------------------------------------------------------------------------- */ + static inline void __unused patchbytes(void *addr, const uint8_t *bytes, size_t size) { @@ -69,63 +194,86 @@ patchbytes(void *addr, const uint8_t *by } } -/* The local variables have unknown alignment to UBSan */ -__noubsan -void -x86_hotpatch(uint32_t name, const uint8_t *bytes, size_t size) +/* + * Rules: each pointer accessed in this function MUST be read-only. + * + * Called from ASM only, prototype not public. + */ +int x86_hotpatch_apply(uint8_t, uint8_t); +int +__noubsan /* the local variables have unknown alignment to UBSan */ +x86_hotpatch_apply(uint8_t name, uint8_t sel) { + struct x86_hotpatch_descriptor * const *iter; + const struct x86_hotpatch_descriptor *desc; + const struct x86_hotpatch_source *src; + const struct x86_hotpatch_destination *hps, *hpe, *hp; extern char __rodata_hotpatch_start; extern char __rodata_hotpatch_end; - struct hotpatch *hps, *hpe, *hp; + const uint8_t *bytes; + bool found = false; + size_t size; + + /* + * Find the descriptor, and perform some sanity checks. + */ + __link_set_foreach(iter, x86_hotpatch_descriptors) { + desc = *iter; + if (desc->name == name) { + found = true; + break; + } + } + if (!found) + return -1; + if (desc->nsrc > 2) + return -1; + if (sel >= desc->nsrc) + return -1; - hps = (struct hotpatch *)&__rodata_hotpatch_start; - hpe = (struct hotpatch *)&__rodata_hotpatch_end; + /* + * Get the hotpatch source. 
+ */ + src = desc->srcs[sel]; + bytes = src->saddr; + size = (size_t)src->eaddr - (size_t)src->saddr; + /* + * Apply the hotpatch on each registered destination. + */ + hps = (struct x86_hotpatch_destination *)&__rodata_hotpatch_start; + hpe = (struct x86_hotpatch_destination *)&__rodata_hotpatch_end; for (hp = hps; hp < hpe; hp++) { if (hp->name != name) { continue; } if (hp->size != size) { - panic("x86_hotpatch: incorrect size"); + return -1; } patchbytes(hp->addr, bytes, size); } -} -void -x86_patch_window_open(u_long *psl, u_long *cr0) -{ - /* Disable interrupts. */ - *psl = x86_read_psl(); - x86_disable_intr(); - - /* Disable write protection in supervisor mode. */ - *cr0 = rcr0(); - lcr0(*cr0 & ~CR0_WP); + return 0; } +/* + * Interrupts disabled here. Called from ASM only, prototype not public. + */ +void x86_hotpatch_cleanup(int); void -x86_patch_window_close(u_long psl, u_long cr0) +x86_hotpatch_cleanup(int retval) { - /* Write back and invalidate cache, flush pipelines. */ - wbinvd(); - x86_flush(); - - /* Re-enable write protection. */ - lcr0(cr0); - - /* Restore the PSL, potentially re-enabling interrupts. */ - x86_write_psl(psl); + if (retval != 0) { + panic("x86_hotpatch_apply failed"); + } } +/* -------------------------------------------------------------------------- */ + void x86_patch(bool early) { static bool first, second; - uint8_t *bytes; - size_t size; - u_long psl; - u_long cr0; if (early) { if (first) @@ -137,18 +285,12 @@ x86_patch(bool early) second = true; } - x86_patch_window_open(&psl, &cr0); - if (!early && ncpu == 1) { #ifndef LOCKDEBUG /* * Uniprocessor: kill LOCK prefixes. */ - extern uint8_t hp_nolock, hp_nolock_end; - - bytes = &hp_nolock; - size = (size_t)&hp_nolock_end - (size_t)&hp_nolock; - x86_hotpatch(HP_NAME_NOLOCK, bytes, size); + x86_hotpatch(HP_NAME_NOLOCK, 0); #endif } @@ -159,16 +301,8 @@ x86_patch(bool early) * ordinary non-temporal stores are always issued in * program order to main memory and to other CPUs. 
*/ - extern uint8_t sse2_lfence, sse2_lfence_end; - extern uint8_t sse2_mfence, sse2_mfence_end; - - bytes = &sse2_lfence; - size = (size_t)&sse2_lfence_end - (size_t)&sse2_lfence; - x86_hotpatch(HP_NAME_SSE2_LFENCE, bytes, size); - - bytes = &sse2_mfence; - size = (size_t)&sse2_mfence_end - (size_t)&sse2_mfence; - x86_hotpatch(HP_NAME_SSE2_MFENCE, bytes, size); + x86_hotpatch(HP_NAME_SSE2_LFENCE, 0); + x86_hotpatch(HP_NAME_SSE2_MFENCE, 0); } #ifdef i386 @@ -177,27 +311,15 @@ x86_patch(bool early) * may be gone. */ if ((cpu_feature[0] & CPUID_CX8) != 0) { - extern uint8_t _atomic_cas_cx8, _atomic_cas_cx8_end; - - bytes = &_atomic_cas_cx8; - size = (size_t)&_atomic_cas_cx8_end - (size_t)&_atomic_cas_cx8; - x86_hotpatch(HP_NAME_CAS_64, bytes, size); + x86_hotpatch(HP_NAME_CAS_64, 0); } #if !defined(SPLDEBUG) if (!early && (cpu_feature[0] & CPUID_CX8) != 0) { /* Faster splx(), mutex_spin_exit(). */ - extern uint8_t cx8_spllower, cx8_spllower_end; - extern uint8_t i686_mutex_spin_exit, i686_mutex_spin_exit_end; - - bytes = &cx8_spllower; - size = (size_t)&cx8_spllower_end - (size_t)&cx8_spllower; - x86_hotpatch(HP_NAME_SPLLOWER, bytes, size); - + x86_hotpatch(HP_NAME_SPLLOWER, 0); #if !defined(LOCKDEBUG) - bytes = &i686_mutex_spin_exit; - size = (size_t)&i686_mutex_spin_exit_end - (size_t)&i686_mutex_spin_exit; - x86_hotpatch(HP_NAME_MUTEX_EXIT, bytes, size); + x86_hotpatch(HP_NAME_MUTEX_EXIT, 0); #endif } #endif /* !SPLDEBUG */ @@ -212,11 +334,7 @@ x86_patch(bool early) (CPUID_TO_FAMILY(cpu_info_primary.ci_signature) == 0xe || (CPUID_TO_FAMILY(cpu_info_primary.ci_signature) == 0xf && CPUID_TO_EXTMODEL(cpu_info_primary.ci_signature) < 0x4))) { - extern uint8_t hp_retfence, hp_retfence_end; - - bytes = &hp_retfence; - size = (size_t)&hp_retfence_end - (size_t)&hp_retfence; - x86_hotpatch(HP_NAME_RETFENCE, bytes, size); + x86_hotpatch(HP_NAME_RETFENCE, 0); } /* @@ -224,19 +342,9 @@ x86_patch(bool early) * instructions. 
*/ if (!early && cpu_feature[5] & CPUID_SEF_SMAP) { - extern uint8_t hp_clac, hp_clac_end; - extern uint8_t hp_stac, hp_stac_end; - KASSERT(rcr4() & CR4_SMAP); - bytes = &hp_clac; - size = (size_t)&hp_clac_end - (size_t)&hp_clac; - x86_hotpatch(HP_NAME_CLAC, bytes, size); - - bytes = &hp_stac; - size = (size_t)&hp_stac_end - (size_t)&hp_stac; - x86_hotpatch(HP_NAME_STAC, bytes, size); + x86_hotpatch(HP_NAME_CLAC, 0); + x86_hotpatch(HP_NAME_STAC, 0); } - - x86_patch_window_close(psl, cr0); } Index: src/sys/arch/x86/x86/spectre.c diff -u src/sys/arch/x86/x86/spectre.c:1.34 src/sys/arch/x86/x86/spectre.c:1.35 --- src/sys/arch/x86/x86/spectre.c:1.34 Fri Feb 21 00:26:22 2020 +++ src/sys/arch/x86/x86/spectre.c Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: spectre.c,v 1.34 2020/02/21 00:26:22 joerg Exp $ */ +/* $NetBSD: spectre.c,v 1.35 2020/05/02 11:37:17 maxv Exp $ */ /* * Copyright (c) 2018-2019 NetBSD Foundation, Inc. @@ -34,7 +34,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.34 2020/02/21 00:26:22 joerg Exp $"); +__KERNEL_RCSID(0, "$NetBSD: spectre.c,v 1.35 2020/05/02 11:37:17 maxv Exp $"); #include "opt_spectre.h" @@ -169,48 +169,54 @@ static volatile unsigned long ibrs_cpu_b static volatile unsigned long ibrs_cpu_barrier2 __cacheline_aligned; #ifdef __x86_64__ +/* IBRS_ENTER. */ +extern uint8_t noibrs_enter, noibrs_enter_end; +extern uint8_t ibrs_enter, ibrs_enter_end; +static const struct x86_hotpatch_source hp_noibrs_enter_source = { + .saddr = &noibrs_enter, + .eaddr = &noibrs_enter_end +}; +static const struct x86_hotpatch_source hp_ibrs_enter_source = { + .saddr = &ibrs_enter, + .eaddr = &ibrs_enter_end +}; +static const struct x86_hotpatch_descriptor hp_ibrs_enter_desc = { + .name = HP_NAME_IBRS_ENTER, + .nsrc = 2, + .srcs = { &hp_noibrs_enter_source, &hp_ibrs_enter_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_ibrs_enter_desc); + +/* IBRS_LEAVE. 
*/ +extern uint8_t noibrs_leave, noibrs_leave_end; +extern uint8_t ibrs_leave, ibrs_leave_end; +static const struct x86_hotpatch_source hp_noibrs_leave_source = { + .saddr = &noibrs_leave, + .eaddr = &noibrs_leave_end +}; +static const struct x86_hotpatch_source hp_ibrs_leave_source = { + .saddr = &ibrs_leave, + .eaddr = &ibrs_leave_end +}; +static const struct x86_hotpatch_descriptor hp_ibrs_leave_desc = { + .name = HP_NAME_IBRS_LEAVE, + .nsrc = 2, + .srcs = { &hp_noibrs_leave_source, &hp_ibrs_leave_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_ibrs_leave_desc); + static void ibrs_disable_hotpatch(void) { - extern uint8_t noibrs_enter, noibrs_enter_end; - extern uint8_t noibrs_leave, noibrs_leave_end; - u_long psl, cr0; - uint8_t *bytes; - size_t size; - - x86_patch_window_open(&psl, &cr0); - - bytes = &noibrs_enter; - size = (size_t)&noibrs_enter_end - (size_t)&noibrs_enter; - x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size); - - bytes = &noibrs_leave; - size = (size_t)&noibrs_leave_end - (size_t)&noibrs_leave; - x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size); - - x86_patch_window_close(psl, cr0); + x86_hotpatch(HP_NAME_IBRS_ENTER, /* noibrs */ 0); + x86_hotpatch(HP_NAME_IBRS_LEAVE, /* noibrs */ 0); } static void ibrs_enable_hotpatch(void) { - extern uint8_t ibrs_enter, ibrs_enter_end; - extern uint8_t ibrs_leave, ibrs_leave_end; - u_long psl, cr0; - uint8_t *bytes; - size_t size; - - x86_patch_window_open(&psl, &cr0); - - bytes = &ibrs_enter; - size = (size_t)&ibrs_enter_end - (size_t)&ibrs_enter; - x86_hotpatch(HP_NAME_IBRS_ENTER, bytes, size); - - bytes = &ibrs_leave; - size = (size_t)&ibrs_leave_end - (size_t)&ibrs_leave; - x86_hotpatch(HP_NAME_IBRS_LEAVE, bytes, size); - - x86_patch_window_close(psl, cr0); + x86_hotpatch(HP_NAME_IBRS_ENTER, /* ibrs */ 1); + x86_hotpatch(HP_NAME_IBRS_LEAVE, /* ibrs */ 1); } #else /* IBRS not supported on i386 */ @@ -564,38 +570,34 @@ static volatile unsigned long mds_cpu_ba static volatile unsigned long 
mds_cpu_barrier2 __cacheline_aligned; #ifdef __x86_64__ +/* MDS_LEAVE. */ +extern uint8_t nomds_leave, nomds_leave_end; +extern uint8_t mds_leave, mds_leave_end; +static const struct x86_hotpatch_source hp_nomds_leave_source = { + .saddr = &nomds_leave, + .eaddr = &nomds_leave_end +}; +static const struct x86_hotpatch_source hp_mds_leave_source = { + .saddr = &mds_leave, + .eaddr = &mds_leave_end +}; +static const struct x86_hotpatch_descriptor hp_mds_leave_desc = { + .name = HP_NAME_MDS_LEAVE, + .nsrc = 2, + .srcs = { &hp_nomds_leave_source, &hp_mds_leave_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_mds_leave_desc); + static void mds_disable_hotpatch(void) { - extern uint8_t nomds_leave, nomds_leave_end; - u_long psl, cr0; - uint8_t *bytes; - size_t size; - - x86_patch_window_open(&psl, &cr0); - - bytes = &nomds_leave; - size = (size_t)&nomds_leave_end - (size_t)&nomds_leave; - x86_hotpatch(HP_NAME_MDS_LEAVE, bytes, size); - - x86_patch_window_close(psl, cr0); + x86_hotpatch(HP_NAME_MDS_LEAVE, /* nomds */ 0); } static void mds_enable_hotpatch(void) { - extern uint8_t mds_leave, mds_leave_end; - u_long psl, cr0; - uint8_t *bytes; - size_t size; - - x86_patch_window_open(&psl, &cr0); - - bytes = &mds_leave; - size = (size_t)&mds_leave_end - (size_t)&mds_leave; - x86_hotpatch(HP_NAME_MDS_LEAVE, bytes, size); - - x86_patch_window_close(psl, cr0); + x86_hotpatch(HP_NAME_MDS_LEAVE, /* mds */ 1); } #else /* MDS not supported on i386 */ Index: src/sys/arch/x86/x86/svs.c diff -u src/sys/arch/x86/x86/svs.c:1.34 src/sys/arch/x86/x86/svs.c:1.35 --- src/sys/arch/x86/x86/svs.c:1.34 Sat Apr 25 15:26:18 2020 +++ src/sys/arch/x86/x86/svs.c Sat May 2 11:37:17 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: svs.c,v 1.34 2020/04/25 15:26:18 bouyer Exp $ */ +/* $NetBSD: svs.c,v 1.35 2020/05/02 11:37:17 maxv Exp $ */ /* * Copyright (c) 2018-2019 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.34 2020/04/25 15:26:18 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.35 2020/05/02 11:37:17 maxv Exp $"); #include "opt_svs.h" #include "opt_user_ldt.h" @@ -226,6 +226,88 @@ __KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.34 * makes sense on GENERIC_KASLR kernels. */ +/* -------------------------------------------------------------------------- */ + +/* SVS_ENTER. */ +extern uint8_t svs_enter, svs_enter_end; +static const struct x86_hotpatch_source hp_svs_enter_source = { + .saddr = &svs_enter, + .eaddr = &svs_enter_end +}; +static const struct x86_hotpatch_descriptor hp_svs_enter_desc = { + .name = HP_NAME_SVS_ENTER, + .nsrc = 1, + .srcs = { &hp_svs_enter_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_svs_enter_desc); + +/* SVS_ENTER_ALT. */ +extern uint8_t svs_enter_altstack, svs_enter_altstack_end; +static const struct x86_hotpatch_source hp_svs_enter_altstack_source = { + .saddr = &svs_enter_altstack, + .eaddr = &svs_enter_altstack_end +}; +static const struct x86_hotpatch_descriptor hp_svs_enter_altstack_desc = { + .name = HP_NAME_SVS_ENTER_ALT, + .nsrc = 1, + .srcs = { &hp_svs_enter_altstack_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_svs_enter_altstack_desc); + +/* SVS_ENTER_NMI. */ +extern uint8_t svs_enter_nmi, svs_enter_nmi_end; +static const struct x86_hotpatch_source hp_svs_enter_nmi_source = { + .saddr = &svs_enter_nmi, + .eaddr = &svs_enter_nmi_end +}; +static const struct x86_hotpatch_descriptor hp_svs_enter_nmi_desc = { + .name = HP_NAME_SVS_ENTER_NMI, + .nsrc = 1, + .srcs = { &hp_svs_enter_nmi_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_svs_enter_nmi_desc); + +/* SVS_LEAVE. 
*/ +extern uint8_t svs_leave, svs_leave_end; +static const struct x86_hotpatch_source hp_svs_leave_source = { + .saddr = &svs_leave, + .eaddr = &svs_leave_end +}; +static const struct x86_hotpatch_descriptor hp_svs_leave_desc = { + .name = HP_NAME_SVS_LEAVE, + .nsrc = 1, + .srcs = { &hp_svs_leave_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_svs_leave_desc); + +/* SVS_LEAVE_ALT. */ +extern uint8_t svs_leave_altstack, svs_leave_altstack_end; +static const struct x86_hotpatch_source hp_svs_leave_altstack_source = { + .saddr = &svs_leave_altstack, + .eaddr = &svs_leave_altstack_end +}; +static const struct x86_hotpatch_descriptor hp_svs_leave_altstack_desc = { + .name = HP_NAME_SVS_LEAVE_ALT, + .nsrc = 1, + .srcs = { &hp_svs_leave_altstack_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_svs_leave_altstack_desc); + +/* SVS_LEAVE_NMI. */ +extern uint8_t svs_leave_nmi, svs_leave_nmi_end; +static const struct x86_hotpatch_source hp_svs_leave_nmi_source = { + .saddr = &svs_leave_nmi, + .eaddr = &svs_leave_nmi_end +}; +static const struct x86_hotpatch_descriptor hp_svs_leave_nmi_desc = { + .name = HP_NAME_SVS_LEAVE_NMI, + .nsrc = 1, + .srcs = { &hp_svs_leave_nmi_source } +}; +__link_set_add_rodata(x86_hotpatch_descriptors, hp_svs_leave_nmi_desc); + +/* -------------------------------------------------------------------------- */ + bool svs_enabled __read_mostly = false; bool svs_pcid __read_mostly = false; @@ -636,49 +718,15 @@ svs_pdir_switch(struct pmap *pmap) static void svs_enable(void) { - extern uint8_t svs_enter, svs_enter_end; - extern uint8_t svs_enter_altstack, svs_enter_altstack_end; - extern uint8_t svs_enter_nmi, svs_enter_nmi_end; - extern uint8_t svs_leave, svs_leave_end; - extern uint8_t svs_leave_altstack, svs_leave_altstack_end; - extern uint8_t svs_leave_nmi, svs_leave_nmi_end; - u_long psl, cr0; - uint8_t *bytes; - size_t size; - svs_enabled = true; - x86_patch_window_open(&psl, &cr0); - - bytes = &svs_enter; - size = 
(size_t)&svs_enter_end - (size_t)&svs_enter; - x86_hotpatch(HP_NAME_SVS_ENTER, bytes, size); - - bytes = &svs_enter_altstack; - size = (size_t)&svs_enter_altstack_end - - (size_t)&svs_enter_altstack; - x86_hotpatch(HP_NAME_SVS_ENTER_ALT, bytes, size); - - bytes = &svs_enter_nmi; - size = (size_t)&svs_enter_nmi_end - - (size_t)&svs_enter_nmi; - x86_hotpatch(HP_NAME_SVS_ENTER_NMI, bytes, size); - - bytes = &svs_leave; - size = (size_t)&svs_leave_end - (size_t)&svs_leave; - x86_hotpatch(HP_NAME_SVS_LEAVE, bytes, size); - - bytes = &svs_leave_altstack; - size = (size_t)&svs_leave_altstack_end - - (size_t)&svs_leave_altstack; - x86_hotpatch(HP_NAME_SVS_LEAVE_ALT, bytes, size); - - bytes = &svs_leave_nmi; - size = (size_t)&svs_leave_nmi_end - - (size_t)&svs_leave_nmi; - x86_hotpatch(HP_NAME_SVS_LEAVE_NMI, bytes, size); - - x86_patch_window_close(psl, cr0); + x86_hotpatch(HP_NAME_SVS_ENTER, 0); + x86_hotpatch(HP_NAME_SVS_ENTER_ALT, 0); + x86_hotpatch(HP_NAME_SVS_ENTER_NMI, 0); + + x86_hotpatch(HP_NAME_SVS_LEAVE, 0); + x86_hotpatch(HP_NAME_SVS_LEAVE_ALT, 0); + x86_hotpatch(HP_NAME_SVS_LEAVE_NMI, 0); } void