On Mon, Sep 14, 2020 at 03:15:37PM -0500, Tom Lendacky wrote:
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6f5988c305e1..5e5f1e8fed3a 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1033,6 +1033,26 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>  }
>  EXPORT_SYMBOL_GPL(kvm_set_cr4);
>  
> +int kvm_track_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> +{
> +     unsigned long old_cr4 = kvm_read_cr4(vcpu);
> +     unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
> +                                X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
> +
> +     if (kvm_x86_ops.set_cr4(vcpu, cr4))
> +             return 1;

Pretty much all the same comments as EFER and CR0, e.g. call svm_set_cr4()
directly instead of bouncing through kvm_x86_ops.  And with that, this can
be called __kvm_set_cr4() to be consistent with __kvm_set_cr0().
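
Something along these lines, perhaps (untested sketch; the exact shape is
not spelled out here, e.g. passing old_cr4 explicitly is just one option,
chosen so the helper doesn't read a stale value after svm_set_cr4() has
already updated vcpu->arch.cr4):

	/*
	 * Common post-set bookkeeping, sans the vendor hook.  Takes the
	 * old CR4 value as a parameter since the vendor code has already
	 * committed the new value by the time this runs.
	 */
	void __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4,
			   unsigned long cr4)
	{
		unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
					   X86_CR4_PAE | X86_CR4_SMEP |
					   X86_CR4_SMAP | X86_CR4_PKE;

		/* Toggling a PDPTR bit or clearing CR4.PCIDE invalidates
		 * the current MMU context. */
		if (((cr4 ^ old_cr4) & pdptr_bits) ||
		    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
			kvm_mmu_reset_context(vcpu);

		if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
			kvm_update_cpuid_runtime(vcpu);
	}

with the SVM side invoking the vendor code directly:

	unsigned long old_cr4 = kvm_read_cr4(vcpu);

	if (svm_set_cr4(vcpu, cr4))
		return 1;
	__kvm_set_cr4(vcpu, old_cr4, cr4);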

> +
> +     if (((cr4 ^ old_cr4) & pdptr_bits) ||
> +         (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
> +             kvm_mmu_reset_context(vcpu);
> +
> +     if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
> +             kvm_update_cpuid_runtime(vcpu);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL_GPL(kvm_track_cr4);
> +
>  int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>  {
>       bool skip_tlb_flush = false;
> -- 
> 2.28.0
> 
