On 27/06/2017 03:47, Wanpeng Li wrote:
> -static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
> +static int nested_vmx_check_exception(struct kvm_vcpu *vcpu)
>  {
>       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
> +     unsigned int nr = vcpu->arch.exception.nr;
>  
> -     if (!(vmcs12->exception_bitmap & (1u << nr)))
> +     if (!((vmcs12->exception_bitmap & (1u << nr)) ||
> +             (nr == PF_VECTOR && vcpu->arch.exception.async_page_fault)))
>               return 0;
>  
> +     if (vcpu->arch.exception.async_page_fault) {
> +             vmcs_write32(VM_EXIT_INTR_ERROR_CODE, vcpu->arch.exception.error_code);
> +             nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
> +                     PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
> +                     INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
> +                     vcpu->arch.apf.nested_apf_token);
> +             return 1;
> +     }
> +
>       nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
>                         vmcs_read32(VM_EXIT_INTR_INFO),
>                         vmcs_readl(EXIT_QUALIFICATION));
> @@ -2442,7 +2453,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
>       u32 intr_info = nr | INTR_INFO_VALID_MASK;
>  
>       if (!reinject && is_guest_mode(vcpu) &&
> -         nested_vmx_check_exception(vcpu, nr))
> +         nested_vmx_check_exception(vcpu))
>               return;
>  
>       if (has_error_code) {

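For reference, the x86.c side of the series presumably records the token roughly
like this (a sketch only, going by the field names used in the hunk above; the
actual patch may differ):

	void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
	{
		++vcpu->stat.pf_guest;
		/* Remember that the queued #PF is really an async_pf notification. */
		vcpu->arch.exception.async_page_fault = fault->async_page_fault;
		if (is_guest_mode(vcpu) && fault->async_page_fault)
			/* Stash the token so the nested vmexit can report it
			 * instead of a faulting address. */
			vcpu->arch.apf.nested_apf_token = fault->address;
		else
			vcpu->arch.cr2 = fault->address;
		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
	}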
The corresponding change for svm.c should be:

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6e3095d1bad4..b92f56b98844 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2391,15 +2391,19 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
        if (!is_guest_mode(&svm->vcpu))
                return 0;
 
+       vmexit = nested_svm_intercept(svm);
+       if (vmexit != NESTED_EXIT_DONE)
+               return 0;
+
        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;
-       svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
-
-       vmexit = nested_svm_intercept(svm);
-       if (vmexit == NESTED_EXIT_DONE)
-               svm->nested.exit_required = true;
+       if (svm->vcpu.arch.exception.nested_apf)
+               svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
+       else
+               svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
 
+       svm->nested.exit_required = true;
        return vmexit;
 }
 
@@ -2592,7 +2596,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
                        vmexit = NESTED_EXIT_DONE;
                /* async page fault always cause vmexit */
                else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
-                        svm->apf_reason != 0)
+                        svm->vcpu.arch.exception.nested_apf)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }

(where I'm already using the "nested_apf" name I proposed in the other email.)
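Concretely, the rename would amount to something like this in struct
kvm_vcpu_arch (just a sketch of the declarations; exact placement and types are
my guess, not the final patch):

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
		bool nested_apf;        /* queued #PF is really an async_pf for L2 */
	} exception;

	/* ... and in the vcpu->arch.apf struct: */
	unsigned long nested_apf_token; /* token reported to L1 instead of a CR2 */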

Paolo
