Currently, after a VM boots with APICv enabled, it can be in one of
the following states:

  * activated = VM is running with APICv
  * suspended = VM has deactivated APICv temporarily
  * disabled  = VM has deactivated APICv permanently

Introduce a KVM APICv state enum to keep track of these states, along
with a new field, struct kvm_arch.apicv_state, to store the current
state of each VM, and a mutex, kvm_arch.apicv_lock, to serialize access
to apicv_state, since it can be accessed concurrently by any vcpu.
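
For illustration, a later change is expected to transition apicv_state
only while holding apicv_lock. A minimal sketch of such a transition
(the helper name kvm_vm_set_apicv_state is hypothetical and not part of
this patch):

    /*
     * Hypothetical helper (illustration only): update the VM-wide
     * APICv state under apicv_lock, and never leave the permanently
     * disabled state once it has been entered.
     */
    static void kvm_vm_set_apicv_state(struct kvm *kvm,
                                       enum kvm_apicv_state state)
    {
            mutex_lock(&kvm->arch.apicv_lock);
            if (kvm->arch.apicv_state != APICV_DISABLED)
                    kvm->arch.apicv_state = state;
            mutex_unlock(&kvm->arch.apicv_lock);
    }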

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
---
 arch/x86/include/asm/kvm_host.h | 11 +++++++++++
 arch/x86/kvm/x86.c              | 14 +++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 277f06f..562bfbd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -851,6 +851,15 @@ enum kvm_irqchip_mode {
        KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+/*
+ * KVM assumes all vcpus in a VM operate in the same mode.
+ */
+enum kvm_apicv_state {
+       APICV_DISABLED,         /* Disabled (e.g. for the Hyper-V case) */
+       APICV_SUSPENDED,        /* Deactivated temporarily */
+       APICV_ACTIVATED,        /* Default state when APICv is enabled */
+};
+
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
        unsigned long n_requested_mmu_pages;
@@ -879,6 +888,8 @@ struct kvm_arch {
        struct kvm_apic_map *apic_map;
 
        bool apic_access_page_done;
+       struct mutex apicv_lock;
+       enum kvm_apicv_state apicv_state;
 
        gpa_t wall_clock;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 01f5a56..64d275e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4630,6 +4630,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
                r = 0;
+               if (kvm_x86_ops->get_enable_apicv(kvm))
+                       kvm->arch.apicv_state = APICV_ACTIVATED;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -4749,6 +4751,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+               if (kvm_x86_ops->get_enable_apicv(kvm))
+                       kvm->arch.apicv_state = APICV_ACTIVATED;
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -9209,13 +9213,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_pio_data;
 
        if (irqchip_in_kernel(vcpu->kvm)) {
-               vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
                r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
                if (r < 0)
                        goto fail_mmu_destroy;
        } else
                static_key_slow_inc(&kvm_no_apic_vcpu);
 
+       mutex_lock(&vcpu->kvm->arch.apicv_lock);
+       if (irqchip_in_kernel(vcpu->kvm) &&
+           vcpu->kvm->arch.apicv_state == APICV_ACTIVATED)
+               vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
+       mutex_unlock(&vcpu->kvm->arch.apicv_lock);
+
        vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
                                       GFP_KERNEL_ACCOUNT);
        if (!vcpu->arch.mce_banks) {
@@ -9314,6 +9323,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);
 
+       /* APICv initialization */
+       mutex_init(&kvm->arch.apicv_lock);
+
        if (kvm_x86_ops->vm_init)
                return kvm_x86_ops->vm_init(kvm);
 
-- 
1.8.3.1
