On 20/04/2013 10:52, Jan Kiszka wrote:
> As we may emulate the loading of EFER on VM-entry and VM-exit, implement
> the checks that VMX performs on the guest and host values on vmlaunch/
> vmresume. Factor out kvm_valid_efer for this purpose; it checks for
> reserved bits that are set.
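
A side note for context, not part of the patch: the reserved-bit half of
kvm_valid_efer() builds on the efer_reserved_bits mask that x86.c already
maintains and that vendor modules open up via kvm_enable_efer_bits() (for
example for NX when the host supports it). Below is a standalone sketch of
that scheme; all names and the exact initial mask are local to the example
rather than copied from the kernel:

    /*
     * Illustration only: a mask starts out with the optional feature bits
     * treated as reserved, and setup code clears the ones the host supports.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE (1ULL << 0)   /* syscall enable */
    #define EFER_LME (1ULL << 8)   /* long mode enable */
    #define EFER_LMA (1ULL << 10)  /* long mode active */
    #define EFER_NX  (1ULL << 11)  /* no-execute enable */

    /* Everything except the always-architectural bits starts out reserved. */
    static uint64_t efer_reserved_bits = ~(EFER_SCE | EFER_LME | EFER_LMA);

    static void enable_efer_bits(uint64_t mask)
    {
            efer_reserved_bits &= ~mask;    /* opt a feature bit in */
    }

    static bool efer_reserved_ok(uint64_t efer)
    {
            return !(efer & efer_reserved_bits);
    }

    int main(void)
    {
            printf("%d\n", efer_reserved_ok(EFER_LME | EFER_NX)); /* 0: NX still reserved */
            enable_efer_bits(EFER_NX);                            /* host supports NX */
            printf("%d\n", efer_reserved_ok(EFER_LME | EFER_NX)); /* 1 */
            return 0;
    }
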
> 
> Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>
> ---
> 
> Changes in v2:
>  - refactored if clauses as requested by Paolo
>  - fixed typo in comment found by Marcelo
> 
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/vmx.c              |   40 +++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.c              |   29 ++++++++++++++++++---------
>  3 files changed, 60 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 599f98b..18635ae 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -809,6 +809,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
>  }
>  
>  void kvm_enable_efer_bits(u64);
> +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
>  int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
>  int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 19aebc7..e3b951f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -7327,6 +7327,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>       struct vcpu_vmx *vmx = to_vmx(vcpu);
>       int cpu;
>       struct loaded_vmcs *vmcs02;
> +     bool ia32e;
>  
>       if (!nested_vmx_check_permission(vcpu) ||
>           !nested_vmx_check_vmcs12(vcpu))
> @@ -7415,6 +7416,45 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>       }
>  
>       /*
> +      * If the "load IA32_EFER" VM-entry control is 1, the following checks
> +      * are performed on the field for the IA32_EFER MSR:
> +      * - Bits reserved in the IA32_EFER MSR must be 0.
> +      * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
> +      *   the IA-32e mode guest VM-exit control. It must also be identical
> +      *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
> +      *   CR0.PG) is 1.
> +      */
> +     if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
> +             ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
> +             if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
> +                 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
> +                 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
> +                  ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
> +                     nested_vmx_entry_failure(vcpu, vmcs12,
> +                             EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
> +                     return 1;
> +             }
> +     }
> +
> +     /*
> +      * If the load IA32_EFER VM-exit control is 1, bits reserved in the
> +      * IA32_EFER MSR must be 0 in the field for that register. In addition,
> +      * the values of the LMA and LME bits in the field must each be that of
> +      * the host address-space size VM-exit control.
> +      */
> +     if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
> +             ia32e = (vmcs12->vm_exit_controls &
> +                      VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
> +             if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
> +                 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
> +                 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
> +                     nested_vmx_entry_failure(vcpu, vmcs12,
> +                             EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
> +                     return 1;
> +             }
> +     }

Looks good; it's difficult to do better given C's operator precedence rules.
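
Purely as an illustration of the semantics, not a suggested rewrite: the two
SDM rules quoted in the comments above boil down to the consistency checks
below. The macros and helpers are local to this standalone sketch; the
reserved-bit half of each rule is what kvm_valid_efer() supplies and is left
out here:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME (1ULL << 8)   /* long mode enable */
    #define EFER_LMA (1ULL << 10)  /* long mode active */

    /* Guest side: LMA must match the IA-32e mode entry control, and LME
     * must match it as well whenever CR0.PG is set in the guest state. */
    static bool guest_efer_consistent(uint64_t efer, bool ia32e, bool cr0_pg)
    {
            if (ia32e != !!(efer & EFER_LMA))
                    return false;
            if (cr0_pg && ia32e != !!(efer & EFER_LME))
                    return false;
            return true;
    }

    /* Host side: both LMA and LME must equal the host address-space size
     * VM-exit control. */
    static bool host_efer_consistent(uint64_t efer, bool host_64bit)
    {
            return host_64bit == !!(efer & EFER_LMA) &&
                   host_64bit == !!(efer & EFER_LME);
    }

    int main(void)
    {
            /* 64-bit guest with paging on: LMA and LME must both be set. */
            printf("%d\n", guest_efer_consistent(EFER_LMA | EFER_LME, true, true)); /* 1 */
            /* IA-32e entry control set but EFER.LMA clear: rejected. */
            printf("%d\n", guest_efer_consistent(EFER_LME, true, true));            /* 0 */
            /* 64-bit host needs both bits set. */
            printf("%d\n", host_efer_consistent(EFER_LMA | EFER_LME, true));        /* 1 */
            return 0;
    }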

Paolo

> +     /*
>        * We're finally done with prerequisite checking, and can start with
>        * the nested entry.
>        */
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 50e2e10..482784d 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -845,23 +845,17 @@ static const u32 emulated_msrs[] = {
>       MSR_IA32_MCG_CTL,
>  };
>  
> -static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
>  {
> -     u64 old_efer = vcpu->arch.efer;
> -
>       if (efer & efer_reserved_bits)
> -             return 1;
> -
> -     if (is_paging(vcpu)
> -         && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
> -             return 1;
> +             return false;
>  
>       if (efer & EFER_FFXSR) {
>               struct kvm_cpuid_entry2 *feat;
>  
>               feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
>               if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
> -                     return 1;
> +                     return false;
>       }
>  
>       if (efer & EFER_SVME) {
> @@ -869,9 +863,24 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
>  
>               feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
>               if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
> -                     return 1;
> +                     return false;
>       }
>  
> +     return true;
> +}
> +EXPORT_SYMBOL_GPL(kvm_valid_efer);
> +
> +static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +{
> +     u64 old_efer = vcpu->arch.efer;
> +
> +     if (!kvm_valid_efer(vcpu, efer))
> +             return 1;
> +
> +     if (is_paging(vcpu)
> +         && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
> +             return 1;
> +
>       efer &= ~EFER_LMA;
>       efer |= vcpu->arch.efer & EFER_LMA;
>  
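
One last aside on the tail of set_efer(), which the patch leaves in place:
the value written by the guest has LMA masked out and the currently active
LMA merged back in, so a WRMSR to EFER cannot flip long-mode-active
directly; KVM updates LMA elsewhere, together with the CR0/paging state.
A tiny standalone illustration, with the names local to this example:

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LMA (1ULL << 10)  /* long mode active */

    /* Mirrors the unchanged "efer &= ~EFER_LMA; efer |= old & EFER_LMA;" tail. */
    static uint64_t apply_guest_efer_write(uint64_t current_efer, uint64_t written)
    {
            written &= ~EFER_LMA;                /* drop whatever LMA value was written */
            written |= current_efer & EFER_LMA;  /* keep the currently active LMA state */
            return written;
    }

    int main(void)
    {
            /* Guest writes LMA=1 while long mode is inactive: the bit stays clear. */
            printf("%d\n", !!(apply_guest_efer_write(0, EFER_LMA) & EFER_LMA)); /* 0 */
            return 0;
    }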
> 
