There were three call sites:
 - recalculate_apic_map and kvm_apic_match_physical_addr, where it would
   only complicate the implementation of x2APIC hotplug;
 - in apic_debug, where it was still somewhat preserved, but keeping the
   old function just for apic_debug was not worth it (a short usage
   sketch of the replacement helpers follows below).
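
For reference, a minimal sketch of how a caller is now expected to pick
the ID space explicitly (resolve_phys_dest() is a hypothetical wrapper
for illustration only, it is not part of this patch):

  /* Hypothetical example: look up the target LAPIC in the physical map
   * using the ID space that matches the current APIC mode, mirroring
   * how recalculate_apic_map fills phys_map below. */
  static struct kvm_lapic *resolve_phys_dest(struct kvm_apic_map *map,
                                             struct kvm_lapic *apic)
  {
          u32 id = apic_x2apic_mode(apic) ? kvm_x2apic_id(apic)
                                          : kvm_xapic_id(apic);

          return id <= map->max_apic_id ? map->phys_map[id] : NULL;
  }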

Signed-off-by: Radim Krčmář <rkrc...@redhat.com>
---
 arch/x86/kvm/lapic.c | 41 ++++++++++++++++++++++++++++++-----------
 arch/x86/kvm/lapic.h | 11 -----------
 2 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 09edd32b8e42..e645b4bc6437 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -115,6 +115,16 @@ static inline int apic_enabled(struct kvm_lapic *apic)
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
+static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
+{
+       return apic->vcpu->vcpu_id;
+}
+
 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
        switch (map->mode) {
@@ -159,13 +169,13 @@ static void recalculate_apic_map(struct kvm *kvm)
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        int i;
-       u32 max_id = 255;
+       u32 max_id = 256; /* enough space for any xAPIC ID */
 
        mutex_lock(&kvm->arch.apic_map_lock);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
-                       max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));
+                       max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
 
        new = kvm_kvzalloc(sizeof(struct kvm_apic_map) +
                           sizeof(struct kvm_lapic *) * ((u64)max_id + 1));
@@ -179,16 +189,23 @@ static void recalculate_apic_map(struct kvm *kvm)
                struct kvm_lapic *apic = vcpu->arch.apic;
                struct kvm_lapic **cluster;
                u16 mask;
-               u32 ldr, aid;
+               u32 ldr;
+               u8 xapic_id;
+               u32 x2apic_id;
 
                if (!kvm_apic_present(vcpu))
                        continue;
 
-               aid = kvm_apic_id(apic);
-               ldr = kvm_lapic_get_reg(apic, APIC_LDR);
+               xapic_id = kvm_xapic_id(apic);
+               x2apic_id = kvm_x2apic_id(apic);
 
-               if (aid <= new->max_apic_id)
-                       new->phys_map[aid] = apic;
+               if (apic_x2apic_mode(apic) &&
+                               x2apic_id <= new->max_apic_id)
+                       new->phys_map[x2apic_id] = apic;
+               else if (!apic_x2apic_mode(apic))
+                       new->phys_map[xapic_id] = apic;
+
+               ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
                if (apic_x2apic_mode(apic)) {
                        new->mode |= KVM_APIC_MODE_X2APIC;
@@ -250,6 +267,8 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
        u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
 
+       WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
+
        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        recalculate_apic_map(apic->vcpu->kvm);
@@ -591,9 +610,9 @@ static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
                return true;
 
        if (apic_x2apic_mode(apic))
-               return mda == kvm_apic_id(apic);
+               return mda == kvm_x2apic_id(apic);
 
-       return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
+       return mda == SET_APIC_DEST_FIELD(kvm_xapic_id(apic));
 }
 
 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
@@ -1907,9 +1926,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        vcpu->arch.apic_arb_prio = 0;
        vcpu->arch.apic_attention = 0;
 
-       apic_debug("%s: vcpu=%p, id=%d, base_msr="
+       apic_debug("%s: vcpu=%p, id=0x%x, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
-                  vcpu, kvm_apic_id(apic),
+                  vcpu, kvm_lapic_get_reg(apic, APIC_ID),
                   vcpu->arch.apic_base, apic->base_address);
 }
 
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index e0c80233b3e1..cb16e6fd2330 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -202,17 +202,6 @@ static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
        return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
 }
 
-static inline u32 kvm_apic_id(struct kvm_lapic *apic)
-{
-       /* To avoid a race between apic_base and following APIC_ID update when
-        * switching to x2apic_mode, the x2apic mode returns initial x2apic id.
-        */
-       if (apic_x2apic_mode(apic))
-               return apic->vcpu->vcpu_id;
-
-       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
 
 void wait_lapic_expire(struct kvm_vcpu *vcpu);
-- 
2.10.2
