> Author: tychon
> Date: Mon Feb 12 14:45:27 2018
> New Revision: 329162
> URL: https://svnweb.freebsd.org/changeset/base/329162
> 
> Log:
>   Provide further mitigation against CVE-2017-5715 by flushing the
>   return stack buffer (RSB) upon returning from the guest.
>   
>   This was inspired by this linux commit:
>   
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/arch/x86/kvm?id=117cc7a908c83697b0b737d15ae1eb5943afe35b
>   
>   Reviewed by:        grehan
>   Sponsored by:       Dell EMC Isilon
>   Differential Revision:      https://reviews.freebsd.org/D14272

Plans to MFC this?
It would be good to have as many Meltdown/Spectre patches as possible
in the upcoming 11.2 release.
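
For anyone following along: as I understand it, the concern is that a guest
can leave attacker-chosen entries in the return stack buffer, and the
host's subsequent 'ret' instructions may then speculate through them (the
return-predictor flavour of CVE-2017-5715).  The new exit path simply
overwrites the RSB with harmless entries before any host code runs: 16
loop iterations with two calls each stuffs 32 entries, which should
comfortably cover the 16- or 32-entry return predictors on current Intel
and AMD parts.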


> Modified:
>   head/sys/amd64/vmm/amd/svm_support.S
>   head/sys/amd64/vmm/intel/vmcs.c
>   head/sys/amd64/vmm/intel/vmx.h
>   head/sys/amd64/vmm/intel/vmx_support.S
> 
> Modified: head/sys/amd64/vmm/amd/svm_support.S
> ==============================================================================
> --- head/sys/amd64/vmm/amd/svm_support.S	Mon Feb 12 14:44:21 2018	(r329161)
> +++ head/sys/amd64/vmm/amd/svm_support.S	Mon Feb 12 14:45:27 2018	(r329162)
> @@ -113,6 +113,23 @@ ENTRY(svm_launch)
>       movq %rdi, SCTX_RDI(%rax)
>       movq %rsi, SCTX_RSI(%rax)
>  
> +     /*
> +      * To prevent malicious branch target predictions from
> +      * affecting the host, overwrite all entries in the RSB upon
> +      * exiting a guest.
> +      */
> +     mov $16, %ecx   /* 16 iterations, two calls per loop */
> +     mov %rsp, %rax
> +0:   call 2f         /* create an RSB entry. */
> +1:   pause
> +     call 1b         /* capture rogue speculation. */
> +2:   call 2f         /* create an RSB entry. */
> +1:   pause
> +     call 1b         /* capture rogue speculation. */
> +2:   sub $1, %ecx
> +     jnz 0b
> +     mov %rax, %rsp
> +
>       /* Restore host state */
>       pop %r15
>       pop %r14
> 
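
The loop reads a little tersely with the reused local labels, so here is my
own rough C/inline-asm restatement of what it does -- purely illustrative,
not the committed code, and I have replaced the "call 1b" speculation traps
with plain jmp loops, which should be equivalent since that code never
executes architecturally:

    /*
     * Sketch only: stuff the RSB with 32 benign entries, then throw away
     * the return addresses the calls pushed.  The calls write below the
     * saved stack pointer, so outside a -mno-red-zone kernel build this
     * would stomp the red zone.
     */
    static inline void
    rsb_stuff_sketch(void)
    {
    	unsigned int iters = 16;	/* two calls per iteration -> 32 RSB entries */

    	__asm__ __volatile__(
    	    "	movq	%%rsp, %%r8	\n"	/* remember the real stack pointer */
    	    "1:	call	3f		\n"	/* push an RSB entry, jump over the trap */
    	    "2:	pause			\n"
    	    "	jmp	2b		\n"	/* catch any speculative 'ret' here */
    	    "3:	call	5f		\n"	/* push a second RSB entry */
    	    "4:	pause			\n"
    	    "	jmp	4b		\n"
    	    "5:	subl	$1, %0		\n"
    	    "	jnz	1b		\n"
    	    "	movq	%%r8, %%rsp	\n"	/* discard the 32 pushed return addresses */
    	    : "+r" (iters) : : "r8", "memory", "cc");
    }

Saving %rsp in a scratch register and restoring it at the end is what lets
the code get away with 32 calls that never return: the RSB ends up full of
addresses pointing at the pause loops, and the stack is put back exactly
where it was.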
> Modified: head/sys/amd64/vmm/intel/vmcs.c
> ==============================================================================
> --- head/sys/amd64/vmm/intel/vmcs.c	Mon Feb 12 14:44:21 2018	(r329161)
> +++ head/sys/amd64/vmm/intel/vmcs.c	Mon Feb 12 14:45:27 2018	(r329162)
> @@ -34,6 +34,7 @@
>  __FBSDID("$FreeBSD$");
>  
>  #include <sys/param.h>
> +#include <sys/sysctl.h>
>  #include <sys/systm.h>
>  #include <sys/pcpu.h>
>  
> @@ -52,6 +53,12 @@ __FBSDID("$FreeBSD$");
>  #include <ddb/ddb.h>
>  #endif
>  
> +SYSCTL_DECL(_hw_vmm_vmx);
> +
> +static int no_flush_rsb;
> +SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, no_flush_rsb, CTLFLAG_RW,
> +    &no_flush_rsb, 0, "Do not flush RSB upon vmexit");
> +
>  static uint64_t
>  vmcs_fix_regval(uint32_t encoding, uint64_t val)
>  {
> @@ -403,8 +410,15 @@ vmcs_init(struct vmcs *vmcs)
>               goto done;
>  
>       /* instruction pointer */
> -     if ((error = vmwrite(VMCS_HOST_RIP, (u_long)vmx_exit_guest)) != 0)
> -             goto done;
> +     if (no_flush_rsb) {
> +             if ((error = vmwrite(VMCS_HOST_RIP,
> +                 (u_long)vmx_exit_guest)) != 0)
> +                     goto done;
> +     } else {
> +             if ((error = vmwrite(VMCS_HOST_RIP,
> +                 (u_long)vmx_exit_guest_flush_rsb)) != 0)
> +                     goto done;
> +     }
>  
>       /* link pointer */
>       if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)
> 
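
Nice touch with the hw.vmm.vmx.no_flush_rsb knob, by the way.  If I am
reading the patch right, flushing is the default, and setting the sysctl
to a non-zero value (e.g. "sysctl hw.vmm.vmx.no_flush_rsb=1") makes new
guests use the old vmx_exit_guest entry point instead.  Since HOST_RIP is
written in vmcs_init(), it should only affect vcpus initialized after the
change, not guests that are already running.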
> Modified: head/sys/amd64/vmm/intel/vmx.h
> ==============================================================================
> --- head/sys/amd64/vmm/intel/vmx.h	Mon Feb 12 14:44:21 2018	(r329161)
> +++ head/sys/amd64/vmm/intel/vmx.h	Mon Feb 12 14:45:27 2018	(r329162)
> @@ -150,5 +150,6 @@ u_long    vmx_fix_cr4(u_long cr4);
>  int  vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
>  
>  extern char  vmx_exit_guest[];
> +extern char  vmx_exit_guest_flush_rsb[];
>  
>  #endif
> 
> Modified: head/sys/amd64/vmm/intel/vmx_support.S
> ==============================================================================
> --- head/sys/amd64/vmm/intel/vmx_support.S	Mon Feb 12 14:44:21 2018	(r329161)
> +++ head/sys/amd64/vmm/intel/vmx_support.S	Mon Feb 12 14:45:27 2018	(r329162)
> @@ -42,6 +42,29 @@
>  #define VLEAVE  pop %rbp
>  
>  /*
> + * Save the guest context.
> + */
> +#define	VMX_GUEST_SAVE							\
> +     movq    %rdi,VMXCTX_GUEST_RDI(%rsp);                            \
> +     movq    %rsi,VMXCTX_GUEST_RSI(%rsp);                            \
> +     movq    %rdx,VMXCTX_GUEST_RDX(%rsp);                            \
> +     movq    %rcx,VMXCTX_GUEST_RCX(%rsp);                            \
> +     movq    %r8,VMXCTX_GUEST_R8(%rsp);                              \
> +     movq    %r9,VMXCTX_GUEST_R9(%rsp);                              \
> +     movq    %rax,VMXCTX_GUEST_RAX(%rsp);                            \
> +     movq    %rbx,VMXCTX_GUEST_RBX(%rsp);                            \
> +     movq    %rbp,VMXCTX_GUEST_RBP(%rsp);                            \
> +     movq    %r10,VMXCTX_GUEST_R10(%rsp);                            \
> +     movq    %r11,VMXCTX_GUEST_R11(%rsp);                            \
> +     movq    %r12,VMXCTX_GUEST_R12(%rsp);                            \
> +     movq    %r13,VMXCTX_GUEST_R13(%rsp);                            \
> +     movq    %r14,VMXCTX_GUEST_R14(%rsp);                            \
> +     movq    %r15,VMXCTX_GUEST_R15(%rsp);                            \
> +     movq    %cr2,%rdi;                                              \
> +     movq    %rdi,VMXCTX_GUEST_CR2(%rsp);                            \
> +     movq    %rsp,%rdi;
> +
> +/*
>   * Assumes that %rdi holds a pointer to the 'vmxctx'.
>   *
>   * On "return" all registers are updated to reflect guest state. The two
> @@ -211,31 +234,55 @@ inst_error:
>   * The VMCS-restored %rsp points to the struct vmxctx
>   */
>       ALIGN_TEXT
> -     .globl  vmx_exit_guest
> -vmx_exit_guest:
> +     .globl  vmx_exit_guest_flush_rsb
> +vmx_exit_guest_flush_rsb:
>       /*
>        * Save guest state that is not automatically saved in the vmcs.
>        */
> -     movq    %rdi,VMXCTX_GUEST_RDI(%rsp)
> -     movq    %rsi,VMXCTX_GUEST_RSI(%rsp)
> -     movq    %rdx,VMXCTX_GUEST_RDX(%rsp)
> -     movq    %rcx,VMXCTX_GUEST_RCX(%rsp)
> -     movq    %r8,VMXCTX_GUEST_R8(%rsp)
> -     movq    %r9,VMXCTX_GUEST_R9(%rsp)
> -     movq    %rax,VMXCTX_GUEST_RAX(%rsp)
> -     movq    %rbx,VMXCTX_GUEST_RBX(%rsp)
> -     movq    %rbp,VMXCTX_GUEST_RBP(%rsp)
> -     movq    %r10,VMXCTX_GUEST_R10(%rsp)
> -     movq    %r11,VMXCTX_GUEST_R11(%rsp)
> -     movq    %r12,VMXCTX_GUEST_R12(%rsp)
> -     movq    %r13,VMXCTX_GUEST_R13(%rsp)
> -     movq    %r14,VMXCTX_GUEST_R14(%rsp)
> -     movq    %r15,VMXCTX_GUEST_R15(%rsp)
> +     VMX_GUEST_SAVE
>  
> -     movq    %cr2,%rdi
> -     movq    %rdi,VMXCTX_GUEST_CR2(%rsp)
> +     /*
> +      * Deactivate guest pmap from this cpu.
> +      */
> +     movq    VMXCTX_PMAP(%rdi), %r11
> +     movl    PCPU(CPUID), %r10d
> +     LK btrl %r10d, PM_ACTIVE(%r11)
>  
> -     movq    %rsp,%rdi
> +     VMX_HOST_RESTORE
> +
> +     VMX_GUEST_CLOBBER
> +
> +     /*
> +      * To prevent malicious branch target predictions from
> +      * affecting the host, overwrite all entries in the RSB upon
> +      * exiting a guest.
> +      */
> +     mov     $16, %ecx       /* 16 iterations, two calls per loop */
> +     mov     %rsp, %rax
> +0:   call    2f              /* create an RSB entry. */
> +1:   pause
> +     call    1b              /* capture rogue speculation. */
> +2:   call    2f              /* create an RSB entry. */
> +1:   pause
> +     call    1b              /* capture rogue speculation. */
> +2:   sub     $1, %ecx
> +     jnz     0b
> +     mov     %rax, %rsp
> +
> +     /*
> +      * This will return to the caller of 'vmx_enter_guest()' with a return
> +      * value of VMX_GUEST_VMEXIT.
> +      */
> +     movl    $VMX_GUEST_VMEXIT, %eax
> +     VLEAVE
> +     ret
> +
> +     .globl  vmx_exit_guest
> +vmx_exit_guest:
> +     /*
> +      * Save guest state that is not automatically saved in the vmcs.
> +      */
> +     VMX_GUEST_SAVE
>  
>       /*
>        * Deactivate guest pmap from this cpu.
> 
> 

-- 
Rod Grimes                                                 rgri...@freebsd.org