On Tue, May 11, 2010 at 01:30:07PM +0800, Sheng Yang wrote:
> Only modifying some bits of CR0/CR4 needs paging mode switch.
> 
> Add update_rsvd_bits_mask() to address EFER.NX bit updating for reserved bits.
> 
> Signed-off-by: Sheng Yang <sh...@linux.intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/mmu.c              |   17 ++++++++++++++---
>  arch/x86/kvm/x86.c              |   14 ++++++++++++--
>  3 files changed, 27 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index ed48904..c8c8a03 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -553,6 +553,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, 
> int slot);
>  void kvm_mmu_zap_all(struct kvm *kvm);
>  unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
>  void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int 
> kvm_nr_mmu_pages);
> +void update_rsvd_bits_mask(struct kvm_vcpu *vcpu);
>  
>  int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
>  
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 5412185..98abdcf 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2335,6 +2335,19 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu 
> *vcpu, int level)
>       }
>  }
>  
> +void update_rsvd_bits_mask(struct kvm_vcpu *vcpu)
> +{
> +     if (!is_paging(vcpu))
> +             return;
> +     if (is_long_mode(vcpu))
> +             reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
> +     else if (is_pae(vcpu))
> +             reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
> +     else
> +             reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
> +}
> +EXPORT_SYMBOL_GPL(update_rsvd_bits_mask);
> +
>  static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
>  {
>       struct kvm_mmu *context = &vcpu->arch.mmu;
> @@ -2400,18 +2413,16 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
>               context->gva_to_gpa = nonpaging_gva_to_gpa;
>               context->root_level = 0;
>       } else if (is_long_mode(vcpu)) {
> -             reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
>               context->gva_to_gpa = paging64_gva_to_gpa;
>               context->root_level = PT64_ROOT_LEVEL;
>       } else if (is_pae(vcpu)) {
> -             reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
>               context->gva_to_gpa = paging64_gva_to_gpa;
>               context->root_level = PT32E_ROOT_LEVEL;
>       } else {
> -             reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
>               context->gva_to_gpa = paging32_gva_to_gpa;
>               context->root_level = PT32_ROOT_LEVEL;
>       }
> +     update_rsvd_bits_mask(vcpu);
>  
>       return 0;
>  }
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b59fc67..1c76e08 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -416,6 +416,9 @@ out:
>  
>  static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
>  {
> +     unsigned long old_cr0 = kvm_read_cr0(vcpu);
> +     unsigned long update_bits = X86_CR0_PG | X86_CR0_PE;

If PAE paging would be in use following an execution of MOV to CR0 or
MOV to CR4 (see Section 4.1.1) and the instruction is modifying any of
CR0.CD, CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, or CR4.PSE; then the PDPTEs
are loaded from the address in CR3.

If the PDPTEs changed, the MMU must be reloaded.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to