Re: [PATCH 5/7] KVM: nSVM: fix running nested guests when npt=0

2021-02-17 Thread Maxim Levitsky
On Wed, 2021-02-17 at 16:57 +0200, Maxim Levitsky wrote:
> When the host runs with npt=0, nSVM needs the same .inject_page_fault
> tweak that VMX already has, to make sure that shadow MMU faults are
> injected as vmexits when L1 intercepts #PF.
> 
> Signed-off-by: Maxim Levitsky 
> ---
>  arch/x86/kvm/svm/nested.c | 18 ++++++++++++++++++
>  arch/x86/kvm/svm/svm.c    |  5 ++++-
>  arch/x86/kvm/svm/svm.h    |  1 +
>  3 files changed, 23 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 1bc31e2e8fe0..53b9037259b5 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -53,6 +53,23 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
>   nested_svm_vmexit(svm);
>  }
>  
> +void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
> +{
> +   struct vcpu_svm *svm = to_svm(vcpu);
> +   WARN_ON(!is_guest_mode(vcpu));
> +
> +   if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
> +       !svm->nested.nested_run_pending) {
> +   svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
> +   svm->vmcb->control.exit_code_hi = 0;
> +   svm->vmcb->control.exit_info_1 = fault->error_code;
> +   svm->vmcb->control.exit_info_2 = fault->address;
> +   nested_svm_vmexit(svm);
> +   } else {
> +   kvm_inject_page_fault(vcpu, fault);
> +   }
> +}
> +
>  static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
>  {
>   struct vcpu_svm *svm = to_svm(vcpu);
> @@ -531,6 +548,7 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
>   if (ret)
>   return ret;
>  
> +
Sorry for this whitespace change.
Best regards,
Maxim Levitsky
>   svm_set_gif(svm, true);
>  
>   return 0;
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 74a334c9902a..59e1767df030 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3915,7 +3915,10 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root,
>  
>  static void svm_complete_mmu_init(struct kvm_vcpu *vcpu)
>  {
> -
> +   if (!npt_enabled && is_guest_mode(vcpu)) {
> +           WARN_ON(mmu_is_nested(vcpu));
> +           vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
> +   }
>  }
>  
>  static int is_disabled(void)
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 7b6ca0e49a14..fda80d56c6e3 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -437,6 +437,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
>   return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
>  }
>  
> +void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault);
>  int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, struct vmcb *vmcb12);
>  void svm_leave_nested(struct vcpu_svm *svm);
>  void svm_free_nested(struct vcpu_svm *svm);
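Condensed into one place, the change above amounts to roughly the sketch
below: svm_complete_mmu_init() installs the hook when the host runs with
npt=0 and a nested guest is active, and svm_inject_page_fault_nested()
then decides between a #PF vmexit and direct injection. Names and fields
are taken from the hunks above; this is an illustration of the flow, not
the literal kernel code (WARN_ONs and context omitted).

    /* Install the hook only when the host has npt=0 and L2 is running. */
    static void svm_complete_mmu_init(struct kvm_vcpu *vcpu)
    {
            if (!npt_enabled && is_guest_mode(vcpu))
                    vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
    }

    /* Called by the shadow MMU in place of kvm_inject_page_fault(). */
    void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
    {
            struct vcpu_svm *svm = to_svm(vcpu);

            if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
                !svm->nested.nested_run_pending) {
                    /* L1 intercepts #PF: reflect the fault to L1 as a #PF vmexit. */
                    svm->vmcb->control.exit_code    = SVM_EXIT_EXCP_BASE + PF_VECTOR;
                    svm->vmcb->control.exit_code_hi = 0;
                    svm->vmcb->control.exit_info_1  = fault->error_code;
                    svm->vmcb->control.exit_info_2  = fault->address;
                    nested_svm_vmexit(svm);
            } else {
                    /* Otherwise inject the #PF directly into the guest. */
                    kvm_inject_page_fault(vcpu, fault);
            }
    }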



