On Sun, Apr 14, 2013 at 12:44:09PM +0200, Jan Kiszka wrote:
> From: Jan Kiszka <[email protected]>
> 
> As we may emulate the loading of EFER on VM-entry and VM-exit, implement
> the checks that VMX performs on the guest and host values on vmlaunch/
> vmresume. Factor out kvm_valid_efer for this purpose which checks for
> set reserved bits.
> 
> Signed-off-by: Jan Kiszka <[email protected]>
Applied v2 of 1/2 and 2/2.

> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/vmx.c              |   38 ++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.c              |   29 +++++++++++++++++++----------
>  3 files changed, 58 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b2c7263..28a458f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -805,6 +805,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
>  }
>  
>  void kvm_enable_efer_bits(u64);
> +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
>  int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
>  int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index cc2ba55..0d13b29 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -7257,6 +7257,44 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
>       }
>  
>       /*
> +      * If the "load IA32_EFER" VM-entry control is 1, the following checks
> +      * are performed on the field for the IA32_EFER MSR:
> +      * - Bits reserved in the IA32_EFER MSR must be 0.
> +      * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
> +      *   the IA-32e mode guest VM-exit control. It must also be identical
> +      *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
> +      *   CR0.PG) is 1.
> +      */
> +     if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER &&
> +         (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
> +          !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) !=
> +          !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
> +          (vmcs12->guest_cr0 & X86_CR0_PG &&
> +           !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) !=
> +           !!(vmcs12->guest_ia32_efer & EFER_LME)))) {
> +             nested_vmx_entry_failure(vcpu, vmcs12,
> +                     EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
> +             return 1;
> +     }
> +
> +     /*
> +      * If the load IA32_EFER VM-exit control is 1, bits reserved in the
> +      * IA32_EFER MSR must be 0 in the field for that register. In addition,
> +      * the values of the LMA and LME bits in the field must each be that of
> +      * the host address-space size VM-exit control.
> +      */
> +     if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER &&
> +         (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
> +          !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
> +          !!(vmcs12->host_ia32_efer & EFER_LMA) ||
> +          !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
> +          !!(vmcs12->host_ia32_efer & EFER_LME))) {
> +             nested_vmx_entry_failure(vcpu, vmcs12,
> +                     EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
> +             return 1;
> +     }
> +
> +     /*
>        * We're finally done with prerequisite checking, and can start with
>        * the nested entry.
>        */
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index eb9927e..f248a3a 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -845,23 +845,17 @@ static const u32 emulated_msrs[] = {
>       MSR_IA32_MCG_CTL,
>  };
>  
> -static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
>  {
> -     u64 old_efer = vcpu->arch.efer;
> -
>       if (efer & efer_reserved_bits)
> -             return 1;
> -
> -     if (is_paging(vcpu)
> -         && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
> -             return 1;
> +             return false;
>  
>       if (efer & EFER_FFXSR) {
>               struct kvm_cpuid_entry2 *feat;
>  
>               feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
>               if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
> -                     return 1;
> +                     return false;
>       }
>  
>       if (efer & EFER_SVME) {
> @@ -869,9 +863,24 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
>  
>               feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
>               if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
> -                     return 1;
> +                     return false;
>       }
>  
> +     return true;
> +}
> +EXPORT_SYMBOL_GPL(kvm_valid_efer);
> +
> +static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +{
> +     u64 old_efer = vcpu->arch.efer;
> +
> +     if (!kvm_valid_efer(vcpu, efer))
> +             return 1;
> +
> +     if (is_paging(vcpu)
> +         && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
> +             return 1;
> +
>       efer &= ~EFER_LMA;
>       efer |= vcpu->arch.efer & EFER_LMA;
>  
> -- 
> 1.7.3.4

--
                        Gleb.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to