Re: [PATCH v6 5/5] KVM: SVM: Allow direct access to MSR_IA32_SPEC_CTRL
On Thu, Feb 01, 2018 at 10:59:46PM +0100, KarimAllah Ahmed wrote:
> [ Based on a patch from Paolo Bonzini ]
>
> ... basically doing exactly what we do for VMX:
>
> - Passthrough SPEC_CTRL to guests (if enabled in guest CPUID)
> - Save and restore SPEC_CTRL around VMExit and VMEntry only if the guest
>   actually used it.
>
> Cc: Asit Mallick
> Cc: Arjan Van De Ven
> Cc: Dave Hansen
> Cc: Andi Kleen
> Cc: Andrea Arcangeli
> Cc: Linus Torvalds
> Cc: Tim Chen
> Cc: Thomas Gleixner
> Cc: Dan Williams
> Cc: Jun Nakajima
> Cc: Paolo Bonzini
> Cc: David Woodhouse
> Cc: Greg KH
> Cc: Andy Lutomirski
> Cc: Ashok Raj
> Signed-off-by: KarimAllah Ahmed

Reviewed-by: Konrad Rzeszutek Wilk

> +	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },

This .always = [false|true] field keeps throwing me off. So glad:
https://www.spinics.net/lists/kvm/msg161606.html explains it better.
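For anyone else tripped up by the same field, here is a minimal sketch of how .always is consumed when the per-vCPU MSR permission bitmap is first built. This is illustrative only, not the kernel's actual svm_vcpu_init_msrpm(); the helper name init_msrpm_sketch and the struct name are made up for this example. Entries with .always = true are passed through to every guest from vCPU creation, while .always = false entries merely *can* be passed through later, e.g. on the guest's first SPEC_CTRL write as in the patch below.

/* Illustrative only; mirrors the shape of svm.c, not its exact code. */
struct svm_direct_access_msr {
	u32 index;	/* MSR number */
	bool always;	/* pass through from vCPU creation? */
};

static void init_msrpm_sketch(u32 *msrpm,
			      const struct svm_direct_access_msr *msrs,
			      int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!msrs[i].always)
			continue;	/* stays intercepted until enabled later */
		/* read = 1, write = 1: guest accesses the MSR directly */
		set_msr_interception(msrpm, msrs[i].index, 1, 1);
	}
}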
Re: [PATCH v6 5/5] KVM: SVM: Allow direct access to MSR_IA32_SPEC_CTRL
On Thu, Feb 01, 2018 at 10:59:46PM +0100, KarimAllah Ahmed wrote:

[ Based on a patch from Paolo Bonzini ]

... basically doing exactly what we do for VMX:

- Passthrough SPEC_CTRL to guests (if enabled in guest CPUID)
- Save and restore SPEC_CTRL around VMExit and VMEntry only if the guest
  actually used it.

Cc: Asit Mallick
Cc: Arjan Van De Ven
Cc: Dave Hansen
Cc: Andi Kleen
Cc: Andrea Arcangeli
Cc: Linus Torvalds
Cc: Tim Chen
Cc: Thomas Gleixner
Cc: Dan Williams
Cc: Jun Nakajima
Cc: Paolo Bonzini
Cc: David Woodhouse
Cc: Greg KH
Cc: Andy Lutomirski
Cc: Ashok Raj
Signed-off-by: KarimAllah Ahmed
Signed-off-by: David Woodhouse

Reviewed-by: Darren Kenny

---
v5:
- Add SPEC_CTRL to direct_access_msrs.
---
 arch/x86/kvm/svm.c | 59 ++
 1 file changed, 59 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 254eefb..c6ab343 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -184,6 +184,9 @@ struct vcpu_svm {
 		u64 gs_base;
 	} host;

+	u64 spec_ctrl;
+	bool save_spec_ctrl_on_exit;
+
 	u32 *msrpm;

 	ulong nmi_iret_rip;
@@ -249,6 +252,7 @@ static const struct svm_direct_access_msrs {
 	{ .index = MSR_CSTAR,				.always = true  },
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
+	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
 	{ .index = MSR_IA32_PRED_CMD,			.always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
@@ -1584,6 +1588,8 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u32 dummy;
 	u32 eax = 1;

+	svm->spec_ctrl = 0;
+
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
 					   MSR_IA32_APICBASE_ENABLE;
@@ -3605,6 +3611,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_VM_CR:
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+			return 1;
+
+		msr_info->data = svm->spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3696,6 +3709,30 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+			return 1;
+
+		/* The STIBP bit doesn't fault even if it's not advertised */
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+			return 1;
+
+		svm->spec_ctrl = data;
+
+		/*
+		 * When it's written (to non-zero) for the first time, pass
+		 * it through. This means we don't have to take the perf
+		 * hit of saving it on vmexit for the common case of guests
+		 * that don't use it.
+		 */
+		if (data && !svm->save_spec_ctrl_on_exit) {
+			svm->save_spec_ctrl_on_exit = true;
+			if (is_guest_mode(vcpu))
+				break;
+			set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+		}
+		break;
 	case MSR_IA32_PRED_CMD:
 		if (!msr->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
@@ -4964,6 +5001,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

 	local_irq_enable();

+	/*
+	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+	 * is no need to worry about the conditional branch over the wrmsr
+	 * being speculatively taken.
+	 */
+	if (svm->spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5056,6 +5102,19 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);

+	/*
+	 * We do not use IBRS in the kernel. If this vCPU has used the
+	 * SPEC_CTRL MSR it may have left it on; save the value and
+	 * turn it off. This is much more efficient than blindly adding
+	 * it to the atomic save/restore list. Especially as the former
+	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+	 */
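To restate the control flow the patch adds around the world switch, here is a condensed sketch. It is not the kernel's code: run_vmrun_asm() is a hypothetical stand-in for the real inline-asm VMRUN block, and the function name is invented for illustration.

static void svm_spec_ctrl_run_sketch(struct vcpu_svm *svm)
{
	/*
	 * Restore the guest's SPEC_CTRL only if the vCPU ever wrote a
	 * non-zero value; guests that never touch the MSR pay nothing.
	 */
	if (svm->spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

	run_vmrun_asm(svm);	/* hypothetical stand-in for the asm block */

	/*
	 * The host runs with SPEC_CTRL == 0, so read back whatever the
	 * guest left in the MSR and clear it -- but again, only on vCPUs
	 * that have actually used the MSR.
	 */
	if (svm->save_spec_ctrl_on_exit) {
		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
		if (svm->spec_ctrl)
			wrmsrl(MSR_IA32_SPEC_CTRL, 0);
	}
}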
[PATCH v6 5/5] KVM: SVM: Allow direct access to MSR_IA32_SPEC_CTRL
[ Based on a patch from Paolo Bonzini ]

... basically doing exactly what we do for VMX:

- Passthrough SPEC_CTRL to guests (if enabled in guest CPUID)
- Save and restore SPEC_CTRL around VMExit and VMEntry only if the guest
  actually used it.

Cc: Asit Mallick
Cc: Arjan Van De Ven
Cc: Dave Hansen
Cc: Andi Kleen
Cc: Andrea Arcangeli
Cc: Linus Torvalds
Cc: Tim Chen
Cc: Thomas Gleixner
Cc: Dan Williams
Cc: Jun Nakajima
Cc: Paolo Bonzini
Cc: David Woodhouse
Cc: Greg KH
Cc: Andy Lutomirski
Cc: Ashok Raj
Signed-off-by: KarimAllah Ahmed
Signed-off-by: David Woodhouse
---
v5:
- Add SPEC_CTRL to direct_access_msrs.
---
 arch/x86/kvm/svm.c | 59 ++
 1 file changed, 59 insertions(+)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 254eefb..c6ab343 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -184,6 +184,9 @@ struct vcpu_svm {
 		u64 gs_base;
 	} host;

+	u64 spec_ctrl;
+	bool save_spec_ctrl_on_exit;
+
 	u32 *msrpm;

 	ulong nmi_iret_rip;
@@ -249,6 +252,7 @@ static const struct svm_direct_access_msrs {
 	{ .index = MSR_CSTAR,				.always = true  },
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
+	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
 	{ .index = MSR_IA32_PRED_CMD,			.always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
@@ -1584,6 +1588,8 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u32 dummy;
 	u32 eax = 1;

+	svm->spec_ctrl = 0;
+
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
 					   MSR_IA32_APICBASE_ENABLE;
@@ -3605,6 +3611,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_VM_CR:
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+			return 1;
+
+		msr_info->data = svm->spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3696,6 +3709,30 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+			return 1;
+
+		/* The STIBP bit doesn't fault even if it's not advertised */
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+			return 1;
+
+		svm->spec_ctrl = data;
+
+		/*
+		 * When it's written (to non-zero) for the first time, pass
+		 * it through. This means we don't have to take the perf
+		 * hit of saving it on vmexit for the common case of guests
+		 * that don't use it.
+		 */
+		if (data && !svm->save_spec_ctrl_on_exit) {
+			svm->save_spec_ctrl_on_exit = true;
+			if (is_guest_mode(vcpu))
+				break;
+			set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+		}
+		break;
 	case MSR_IA32_PRED_CMD:
 		if (!msr->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
@@ -4964,6 +5001,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

 	local_irq_enable();

+	/*
+	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+	 * is no need to worry about the conditional branch over the wrmsr
+	 * being speculatively taken.
+	 */
+	if (svm->spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5056,6 +5102,19 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);

+	/*
+	 * We do not use IBRS in the kernel. If this vCPU has used the
+	 * SPEC_CTRL MSR it may have left it on; save the value and
+	 * turn it off. This is much more efficient than blindly adding
+	 * it to the atomic save/restore list. Especially as the former
+	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+	 */
+	if (svm->save_spec_ctrl_on_exit)
+		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
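One subtlety worth spelling out: the host_initiated check lets userspace (e.g. a VMM saving state for live migration via KVM_GET_MSRS/KVM_SET_MSRS) access SPEC_CTRL even when the guest's CPUID does not advertise IBRS, while a guest RDMSR/WRMSR in the same situation takes a #GP. A condensed sketch of just that gating, pulled out of the svm_get_msr()/svm_set_msr() hunks above; the helper name is invented and this is not a standalone kernel function:

/* Condensed from the patch; illustrative only. */
static bool spec_ctrl_access_allowed(struct kvm_vcpu *vcpu,
				     bool host_initiated)
{
	/* Userspace (host-initiated) access is always permitted. */
	if (host_initiated)
		return true;

	/* Guest access requires IBRS in the *guest's* CPUID. */
	return guest_cpuid_has(vcpu, X86_FEATURE_IBRS);
}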