Re: [PATCH 3/3] KVM: SVM: Reenable handle_fastpath_set_msr_irqoff() after complete_interrupts()

2020-09-09 Thread Vitaly Kuznetsov
Wanpeng Li  writes:

> From: Wanpeng Li 
>
> Move the call to svm_exit_handlers_fastpath() to after
> svm_complete_interrupts(), since svm_complete_interrupts() consumes rip,
> and re-enable the handle_fastpath_set_msr_irqoff() call in
> svm_exit_handlers_fastpath().
>
> Suggested-by: Sean Christopherson 
> Cc: Paul K. 
> Signed-off-by: Wanpeng Li 
> ---
>  arch/x86/kvm/svm/svm.c | 7 ++-
>  1 file changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 74bcf0a..ac819f0 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3347,6 +3347,11 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
>  
>  static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
>  {
> + if (!is_guest_mode(vcpu) &&
> + to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
> + to_svm(vcpu)->vmcb->control.exit_info_1)
> + return handle_fastpath_set_msr_irqoff(vcpu);
> +
>   return EXIT_FASTPATH_NONE;
>  }
>  
> @@ -3495,7 +3500,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
>   stgi();
>  
>   /* Any pending NMI will happen here */
> - exit_fastpath = svm_exit_handlers_fastpath(vcpu);
>  
>   if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
> kvm_after_interrupt(&svm->vcpu);
> @@ -3529,6 +3533,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
>   svm_handle_mce(svm);
>  
>   svm_complete_interrupts(svm);
> + exit_fastpath = svm_exit_handlers_fastpath(vcpu);
>  
>   vmcb_mark_all_clean(svm->vmcb);
>   return exit_fastpath;

Reviewed-by: Vitaly Kuznetsov 

-- 
Vitaly



[PATCH 3/3] KVM: SVM: Reenable handle_fastpath_set_msr_irqoff() after complete_interrupts()

2020-09-08 Thread Wanpeng Li
From: Wanpeng Li 

Move the call to svm_exit_handlers_fastpath() to after svm_complete_interrupts(),
since svm_complete_interrupts() consumes rip, and re-enable the
handle_fastpath_set_msr_irqoff() call in svm_exit_handlers_fastpath().

Suggested-by: Sean Christopherson 
Cc: Paul K. 
Signed-off-by: Wanpeng Li 
---
 arch/x86/kvm/svm/svm.c | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 74bcf0a..ac819f0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3347,6 +3347,11 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
 
 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
+   if (!is_guest_mode(vcpu) &&
+   to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+   to_svm(vcpu)->vmcb->control.exit_info_1)
+   return handle_fastpath_set_msr_irqoff(vcpu);
+
return EXIT_FASTPATH_NONE;
 }
 
@@ -3495,7 +3500,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
 
/* Any pending NMI will happen here */
-   exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@@ -3529,6 +3533,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
svm_handle_mce(svm);
 
svm_complete_interrupts(svm);
+   exit_fastpath = svm_exit_handlers_fastpath(vcpu);
 
vmcb_mark_all_clean(svm->vmcb);
return exit_fastpath;
-- 
2.7.4
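
For context, a minimal sketch of the tail of svm_vcpu_run() as it looks with
this patch applied. It is simplified from the diff above, and the comments on
RIP are an interpretation of the commit message rather than wording from the
patch: svm_complete_interrupts() reads (and, for soft exceptions such as INT3,
may rewind) the guest RIP, while handle_fastpath_set_msr_irqoff() can advance
RIP via kvm_skip_emulated_instruction() on a handled WRMSR.

	/*
	 * Simplified tail of svm_vcpu_run() after this patch; the ordering
	 * is the point, surrounding code is elided.
	 */

	/* Complete event re-injection first; this reads (and, for INT3,
	 * may rewind) the guest RIP. */
	svm_complete_interrupts(svm);

	/*
	 * Only now run the exit fastpath: on a handled WRMSR it calls
	 * kvm_skip_emulated_instruction(), which advances RIP, so it must
	 * not run before svm_complete_interrupts() has consumed RIP.
	 */
	exit_fastpath = svm_exit_handlers_fastpath(vcpu);

	vmcb_mark_all_clean(svm->vmcb);
	return exit_fastpath;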