Author: tychon
Date: Fri Mar  9 19:39:08 2018
New Revision: 330704
URL: https://svnweb.freebsd.org/changeset/base/330704

Log:
  MFC r328011,329162
  
  r328011:
  
  Provide some mitigation against CVE-2017-5715 by clearing, upon return
  from the guest, those registers that are not immediately clobbered by
  the host.  This eradicates any remaining guest contents, limiting
  their usefulness in an exploit gadget.
  
  r329162:
  
  Provide further mitigation against CVE-2017-5715 by flushing the
  return stack buffer (RSB) upon returning from the guest.
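
  For context, CVE-2017-5715 is Spectre variant 2 (branch target
  injection): a guest can prime the CPU's branch and return predictors
  so that, after a vmexit, the host speculatively executes
  attacker-chosen gadgets.  Zeroing scratch registers limits what such
  a gadget has to work with, and overwriting the return stack buffer
  (RSB) keeps a later host ret from consuming a guest-primed
  prediction; a label-expanded sketch of the RSB loop follows the
  svm_support.S diff below.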

Modified:
  stable/11/sys/amd64/vmm/amd/svm_support.S
  stable/11/sys/amd64/vmm/intel/vmcs.c
  stable/11/sys/amd64/vmm/intel/vmx.h
  stable/11/sys/amd64/vmm/intel/vmx_support.S
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/vmm/amd/svm_support.S
==============================================================================
--- stable/11/sys/amd64/vmm/amd/svm_support.S	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/amd/svm_support.S	Fri Mar  9 19:39:08 2018	(r330704)
@@ -113,6 +113,23 @@ ENTRY(svm_launch)
        movq %rdi, SCTX_RDI(%rax)
        movq %rsi, SCTX_RSI(%rax)
 
+       /*
+        * To prevent malicious branch target predictions from
+        * affecting the host, overwrite all entries in the RSB upon
+        * exiting a guest.
+        */
+       mov $16, %ecx   /* 16 iterations, two calls per loop */
+       mov %rsp, %rax
+0:     call 2f         /* create an RSB entry. */
+1:     pause
+       call 1b         /* capture rogue speculation. */
+2:     call 2f         /* create an RSB entry. */
+1:     pause
+       call 1b         /* capture rogue speculation. */
+2:     sub $1, %ecx
+       jnz 0b
+       mov %rax, %rsp
+
        /* Restore host state */
        pop %r15
        pop %r14
@@ -124,8 +141,20 @@ ENTRY(svm_launch)
        pop %rdx
        mov %edx, %eax
        shr $32, %rdx
-       mov $MSR_GSBASE, %ecx
+       mov $MSR_GSBASE, %rcx
        wrmsr
+
+       /*
+        * Clobber the remaining registers holding guest contents so
+        * they can't be misused.
+        */
+       xor %rbp, %rbp
+       xor %rdi, %rdi
+       xor %rsi, %rsi
+       xor %r8, %r8
+       xor %r9, %r9
+       xor %r10, %r10
+       xor %r11, %r11
 
        VLEAVE
        ret
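
The RSB-stuffing loop above relies on GAS local numeric labels: "1b"
and "2f" resolve to the nearest definition of "1" searching backwards
and of "2" searching forwards, so reusing the same numbers is legal.
Rewritten with unique labels, purely as an illustration of how the
assembler resolves the committed code:

	mov	$16, %ecx	/* 16 iterations, two calls per loop */
	mov	%rsp, %rax	/* save %rsp; each call pushes 8 bytes */
fill:	call	push1		/* RSB entry pointing at trap1 */
trap1:	pause
	call	trap1		/* capture rogue speculation */
push1:	call	push2		/* RSB entry pointing at trap2 */
trap2:	pause
	call	trap2		/* capture rogue speculation */
push2:	sub	$1, %ecx
	jnz	fill
	mov	%rax, %rsp	/* discard the 32 * 8 = 256 bytes of
				   return addresses pushed above */

Sixteen iterations of two calls each push 32 return addresses, enough
to cover the return predictors of the day (commonly 16 or 32 entries
deep).  Every pushed address points at a pause/call self-loop, so any
speculation that consumes a stale RSB entry spins harmlessly instead
of running guest-chosen code.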

Modified: stable/11/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmcs.c	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/intel/vmcs.c	Fri Mar  9 19:39:08 2018	(r330704)
@@ -32,6 +32,7 @@
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/pcpu.h>
 
@@ -50,6 +51,12 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 #endif
 
+SYSCTL_DECL(_hw_vmm_vmx);
+
+static int no_flush_rsb;
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
+    &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
+
 static uint64_t
 vmcs_fix_regval(uint32_t encoding, uint64_t val)
 {
@@ -401,8 +408,15 @@ vmcs_init(struct vmcs *vmcs)
                goto done;
 
        /* instruction pointer */
-       if ((error = vmwrite(VMCS_HOST_RIP, (u_long)vmx_exit_guest)) != 0)
-               goto done;
+       if (no_flush_rsb) {
+               if ((error = vmwrite(VMCS_HOST_RIP,
+                   (u_long)vmx_exit_guest)) != 0)
+                       goto done;
+       } else {
+               if ((error = vmwrite(VMCS_HOST_RIP,
+                   (u_long)vmx_exit_guest_flush_rsb)) != 0)
+                       goto done;
+       }
 
        /* link pointer */
        if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)
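
The new knob is exposed as a read/write sysctl, hw.vmm.vmx.no_flush_rsb
(SYSCTL_DECL(_hw_vmm_vmx) plus OID_AUTO above).  Since vmcs_init()
consults it when a vcpu's VMCS is initialized, toggling it only affects
guests created afterwards; running guests keep whichever host RIP was
written into their VMCS.  A usage sketch, with the "old -> new" echo
being the usual sysctl(8) output:

	# sysctl hw.vmm.vmx.no_flush_rsb=1
	hw.vmm.vmx.no_flush_rsb: 0 -> 1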

Modified: stable/11/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmx.h	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/intel/vmx.h	Fri Mar  9 19:39:08 2018	(r330704)
@@ -148,5 +148,6 @@ u_long      vmx_fix_cr4(u_long cr4);
 int    vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
 
 extern char    vmx_exit_guest[];
+extern char    vmx_exit_guest_flush_rsb[];
 
 #endif

Modified: stable/11/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- stable/11/sys/amd64/vmm/intel/vmx_support.S	Fri Mar  9 19:04:06 2018	(r330703)
+++ stable/11/sys/amd64/vmm/intel/vmx_support.S	Fri Mar  9 19:39:08 2018	(r330704)
@@ -42,6 +42,29 @@
 #define VLEAVE  pop %rbp
 
 /*
+ * Save the guest context.
+ */
+#define	VMX_GUEST_SAVE							\
+       movq    %rdi,VMXCTX_GUEST_RDI(%rsp);                            \
+       movq    %rsi,VMXCTX_GUEST_RSI(%rsp);                            \
+       movq    %rdx,VMXCTX_GUEST_RDX(%rsp);                            \
+       movq    %rcx,VMXCTX_GUEST_RCX(%rsp);                            \
+       movq    %r8,VMXCTX_GUEST_R8(%rsp);                              \
+       movq    %r9,VMXCTX_GUEST_R9(%rsp);                              \
+       movq    %rax,VMXCTX_GUEST_RAX(%rsp);                            \
+       movq    %rbx,VMXCTX_GUEST_RBX(%rsp);                            \
+       movq    %rbp,VMXCTX_GUEST_RBP(%rsp);                            \
+       movq    %r10,VMXCTX_GUEST_R10(%rsp);                            \
+       movq    %r11,VMXCTX_GUEST_R11(%rsp);                            \
+       movq    %r12,VMXCTX_GUEST_R12(%rsp);                            \
+       movq    %r13,VMXCTX_GUEST_R13(%rsp);                            \
+       movq    %r14,VMXCTX_GUEST_R14(%rsp);                            \
+       movq    %r15,VMXCTX_GUEST_R15(%rsp);                            \
+       movq    %cr2,%rdi;                                              \
+       movq    %rdi,VMXCTX_GUEST_CR2(%rsp);                            \
+       movq    %rsp,%rdi;
+
+/*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
  * On "return" all registers are updated to reflect guest state. The two
@@ -72,6 +95,20 @@
        movq    VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */
 
 /*
+ * Clobber the remaining registers holding guest contents so they
+ * can't be misused.
+ */
+#define	VMX_GUEST_CLOBBER						\
+       xor     %rax, %rax;                                             \
+       xor     %rcx, %rcx;                                             \
+       xor     %rdx, %rdx;                                             \
+       xor     %rsi, %rsi;                                             \
+       xor     %r8, %r8;                                               \
+       xor     %r9, %r9;                                               \
+       xor     %r10, %r10;                                             \
+       xor     %r11, %r11;
+
+/*
  * Save and restore the host context.
  *
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
@@ -197,33 +234,57 @@ inst_error:
  * The VMCS-restored %rsp points to the struct vmxctx
  */
        ALIGN_TEXT
-       .globl  vmx_exit_guest
-vmx_exit_guest:
+       .globl  vmx_exit_guest_flush_rsb
+vmx_exit_guest_flush_rsb:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
-       movq    %rdi,VMXCTX_GUEST_RDI(%rsp)
-       movq    %rsi,VMXCTX_GUEST_RSI(%rsp)
-       movq    %rdx,VMXCTX_GUEST_RDX(%rsp)
-       movq    %rcx,VMXCTX_GUEST_RCX(%rsp)
-       movq    %r8,VMXCTX_GUEST_R8(%rsp)
-       movq    %r9,VMXCTX_GUEST_R9(%rsp)
-       movq    %rax,VMXCTX_GUEST_RAX(%rsp)
-       movq    %rbx,VMXCTX_GUEST_RBX(%rsp)
-       movq    %rbp,VMXCTX_GUEST_RBP(%rsp)
-       movq    %r10,VMXCTX_GUEST_R10(%rsp)
-       movq    %r11,VMXCTX_GUEST_R11(%rsp)
-       movq    %r12,VMXCTX_GUEST_R12(%rsp)
-       movq    %r13,VMXCTX_GUEST_R13(%rsp)
-       movq    %r14,VMXCTX_GUEST_R14(%rsp)
-       movq    %r15,VMXCTX_GUEST_R15(%rsp)
+       VMX_GUEST_SAVE
 
-       movq    %cr2,%rdi
-       movq    %rdi,VMXCTX_GUEST_CR2(%rsp)
+       /*
+        * Deactivate guest pmap from this cpu.
+        */
+       movq    VMXCTX_PMAP(%rdi), %r11
+       movl    PCPU(CPUID), %r10d
+       LK btrl %r10d, PM_ACTIVE(%r11)
 
-       movq    %rsp,%rdi
+       VMX_HOST_RESTORE
 
+       VMX_GUEST_CLOBBER
+
        /*
+        * To prevent malicious branch target predictions from
+        * affecting the host, overwrite all entries in the RSB upon
+        * exiting a guest.
+        */
+       mov     $16, %ecx       /* 16 iterations, two calls per loop */
+       mov     %rsp, %rax
+0:     call    2f              /* create an RSB entry. */
+1:     pause
+       call    1b              /* capture rogue speculation. */
+2:     call    2f              /* create an RSB entry. */
+1:     pause
+       call    1b              /* capture rogue speculation. */
+2:     sub     $1, %ecx
+       jnz     0b
+       mov     %rax, %rsp
+
+       /*
+        * This will return to the caller of 'vmx_enter_guest()' with a return
+        * value of VMX_GUEST_VMEXIT.
+        */
+       movl    $VMX_GUEST_VMEXIT, %eax
+       VLEAVE
+       ret
+
+       .globl  vmx_exit_guest
+vmx_exit_guest:
+       /*
+        * Save guest state that is not automatically saved in the vmcs.
+        */
+       VMX_GUEST_SAVE
+
+       /*
         * Deactivate guest pmap from this cpu.
         */
        movq    VMXCTX_PMAP(%rdi), %r11
@@ -231,6 +292,8 @@ vmx_exit_guest:
        LK btrl %r10d, PM_ACTIVE(%r11)
 
        VMX_HOST_RESTORE
+
+       VMX_GUEST_CLOBBER
 
        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
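
A note on what VMX_GUEST_CLOBBER leaves alone: %rbx, %rbp, %rsp and
%r12-%r15 are reloaded with host values by VMX_HOST_RESTORE (and by the
pops in svm_launch on the AMD side), so any guest bits in them are
already gone, and %rdi holds the vmxctx pointer rather than guest
state.  Only the remaining scratch registers can still carry guest
contents here, which is why they are xor-cleared.  A summary of the
register disposition at the final VLEAVE/ret, inferred from the diffs
above rather than taken from the committed sources:

	/*
	 * %rbx, %rbp, %r12-%r15, %rsp  restored host values
	 * %rdi                         vmxctx pointer (host data)
	 * %rcx, %rdx, %rsi, %r8-%r11   zeroed by VMX_GUEST_CLOBBER
	 * %rax                         zeroed, then set to VMX_GUEST_VMEXIT
	 */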