[PATCH v2 1/2] KVM: x86: use native MSR ops for SPEC_CTRL

From: Paolo Bonzini
Date: 2018-02-22
Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets.  It is also slower, so use
native_wrmsrl on the vmentry path too.

Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
Cc: x...@kernel.org
Cc: Radim Krčmář 
Cc: KarimAllah Ahmed 
Cc: David Woodhouse 
Cc: Jim Mattson 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: sta...@vger.kernel.org
Reviewed-by: Jim Mattson 
Signed-off-by: Paolo Bonzini 
---
 arch/x86/kvm/svm.c | 7 ++++---
 arch/x86/kvm/vmx.c | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)
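
For reference, the difference this patch is about can be sketched in plain C.
Under CONFIG_PARAVIRT, wrmsrl() is dispatched through a paravirt ops function
pointer, i.e. an indirect call, while native_wrmsrl() issues the WRMSR
instruction directly.  The toy program below only models that dispatch
difference; all names in it (fake_pv_wrmsrl, fake_native_wrmsrl,
FAKE_MSR_IA32_SPEC_CTRL) are made-up stand-ins, it does not touch real MSRs
and is not the kernel's actual implementation.

    /*
     * Simplified, hypothetical model of the change: a paravirt-style
     * MSR write goes through a function pointer (an indirect call),
     * the native form is a direct call.  Nothing here is kernel code.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_MSR_IA32_SPEC_CTRL 0x48	/* stand-in MSR index */

    /* Stand-in for the WRMSR instruction that native_wrmsrl() emits. */
    static void fake_native_wrmsrl(uint32_t msr, uint64_t val)
    {
    	printf("direct:   wrmsr 0x%x <- 0x%llx\n", msr,
    	       (unsigned long long)val);
    }

    /* Paravirt-style ops table: the write is reached via a pointer. */
    struct fake_pv_ops {
    	void (*write_msr)(uint32_t msr, uint64_t val);
    };

    static struct fake_pv_ops fake_pv_ops = {
    	.write_msr = fake_native_wrmsrl,
    };

    /* Model of wrmsrl() under CONFIG_PARAVIRT: an indirect branch on
     * the very path that is trying to restore protection against bogus
     * indirect branch targets. */
    static void fake_pv_wrmsrl(uint32_t msr, uint64_t val)
    {
    	fake_pv_ops.write_msr(msr, val);	/* indirect call */
    }

    int main(void)
    {
    	fake_pv_wrmsrl(FAKE_MSR_IA32_SPEC_CTRL, 1);	/* pre-patch shape */
    	fake_native_wrmsrl(FAKE_MSR_IA32_SPEC_CTRL, 1);	/* post-patch shape */
    	return 0;
    }

The patch switches the SPEC_CTRL accesses on the vmentry/vmexit paths from the
first shape to the second, which removes the indirect branch (and its
retpoline, when enabled) and is also cheaper.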

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..1598beeda11c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 * being speculatively taken.
 */
if (svm->spec_ctrl)
-   wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+   native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 * save it.
 */
if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-   rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+   svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
if (svm->spec_ctrl)
-   wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+   native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 67b028d8e726..5caeb8dc5bda 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -9453,7 +9454,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 * being speculatively taken.
 */
if (vmx->spec_ctrl)
-   wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+   native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
@@ -9589,10 +9590,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 * save it.
 */
if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-   rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+   vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
if (vmx->spec_ctrl)
-   wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+   native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
-- 
1.8.3.1