Sean Christopherson writes:
> Replace the kvm_x86_ops pointer in common x86 with an instance of the
> struct to save one pointer dereference when invoking functions. Copy the
> struct by value to set the ops during kvm_init().
>
> Arbitrarily use kvm_x86_ops.hardware_enable to track whether or not the
> ops have been initialized, i.e. a vendor KVM module has been loaded.
>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
> arch/x86/include/asm/kvm_host.h |  18 +-
> arch/x86/kvm/cpuid.c            |   4 +-
> arch/x86/kvm/hyperv.c           |   8 +-
> arch/x86/kvm/kvm_cache_regs.h   |  10 +-
> arch/x86/kvm/lapic.c            |  30 +--
> arch/x86/kvm/mmu.h              |   8 +-
> arch/x86/kvm/mmu/mmu.c          |  32 +--
> arch/x86/kvm/pmu.c              |  30 +--
> arch/x86/kvm/pmu.h              |   2 +-
> arch/x86/kvm/svm.c              |   2 +-
> arch/x86/kvm/trace.h            |   4 +-
> arch/x86/kvm/vmx/nested.c       |   2 +-
> arch/x86/kvm/vmx/vmx.c          |   4 +-
> arch/x86/kvm/x86.c              | 356 ++++++++++++++++++++----------------
> arch/x86/kvm/x86.h              |   4 +-
> 15 files changed, 257 insertions(+), 257 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index f4c5b49299ff..54f991244fae 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1274,13 +1274,13 @@ struct kvm_arch_async_pf {
>
> extern u64 __read_mostly host_efer;
>
> -extern struct kvm_x86_ops *kvm_x86_ops;
> +extern struct kvm_x86_ops kvm_x86_ops;
> extern struct kmem_cache *x86_fpu_cache;
>
> #define __KVM_HAVE_ARCH_VM_ALLOC
> static inline struct kvm *kvm_arch_alloc_vm(void)
> {
> - return __vmalloc(kvm_x86_ops->vm_size,
> + return __vmalloc(kvm_x86_ops.vm_size,
>GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
> }
> void kvm_arch_free_vm(struct kvm *kvm);
> @@ -1288,8 +1288,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
> #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
> static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
> {
> - if (kvm_x86_ops->tlb_remote_flush &&
> - !kvm_x86_ops->tlb_remote_flush(kvm))
> + if (kvm_x86_ops.tlb_remote_flush &&
> + !kvm_x86_ops.tlb_remote_flush(kvm))
> return 0;
> else
> return -ENOTSUPP;
> @@ -1375,7 +1375,7 @@ extern u64 kvm_mce_cap_supported;
> *
> * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
> * decode the instruction length. For use *only* by
> - * kvm_x86_ops->skip_emulated_instruction() implementations.
> + * kvm_x86_ops.skip_emulated_instruction() implementations.
> *
> * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
> *retry native execution under certain conditions,
> @@ -1669,14 +1669,14 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
>
> static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
> {
> - if (kvm_x86_ops->vcpu_blocking)
> - kvm_x86_ops->vcpu_blocking(vcpu);
> + if (kvm_x86_ops.vcpu_blocking)
> + kvm_x86_ops.vcpu_blocking(vcpu);
> }
>
> static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
> {
> - if (kvm_x86_ops->vcpu_unblocking)
> - kvm_x86_ops->vcpu_unblocking(vcpu);
> + if (kvm_x86_ops.vcpu_unblocking)
> + kvm_x86_ops.vcpu_unblocking(vcpu);
> }
>
> static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 435a7da07d5f..0aefa9acae10 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -209,7 +209,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
> vcpu->arch.cpuid_nent = cpuid->nent;
> cpuid_fix_nx_cap(vcpu);
> kvm_apic_set_version(vcpu);
> - kvm_x86_ops->cpuid_update(vcpu);
> + kvm_x86_ops.cpuid_update(vcpu);
> r = kvm_update_cpuid(vcpu);
>
> out:
> @@ -232,7 +232,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
> goto out;
> vcpu->arch.cpuid_nent = cpuid->nent;
> kvm_apic_set_version(vcpu);
> - kvm_x86_ops->cpuid_update(vcpu);
> + kvm_x86_ops.cpuid_update(vcpu);
> r = kvm_update_cpuid(vcpu);
> out:
> return r;
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index a86fda7a1d03..bcefa9d4e57e 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -1022,7 +1022,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
> addr = gfn_to_hva(kvm, gfn);
> if (kvm_is_error_hva(addr))
> return 1;
> - kvm_x86_ops->patch_hypercall(vcpu, instructions);
> + kvm_x86_ops.patch_hypercall(vcpu, instructions);
> ((unsigned char *)instructions)[3] = 0xc3; /* ret */