We had 10 page-sized bitmaps that were being allocated and freed one
by one, when we could just use a loop; the MSR bitmap code also had a
lot of useless duplication lying around.

This patch
 * enumerates vmx bitmaps and uses an array to store them
 * replaces vmx_enable_intercept_msr_read_x2apic() with a condition
 * joins vmx_msr_disable_intercept_msr_{read,write}_x2apic()
 * renames x2apic_apicv_inactive msr_bitmaps to x2apic and original
   x2apic bitmaps to x2apic_apicv

Signed-off-by: Radim Krčmář <rkrc...@redhat.com>
---
 arch/x86/kvm/vmx.c | 297 +++++++++++++++++------------------------------------
 1 file changed, 92 insertions(+), 205 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1cca146f4341..dfbcd45fcb2b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -921,16 +921,20 @@ static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
 
-static unsigned long *vmx_io_bitmap_a;
-static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
-static unsigned long *vmx_vmread_bitmap;
-static unsigned long *vmx_vmwrite_bitmap;
+enum vmx_bitmap {
+       vmx_io_bitmap_a,
+       vmx_io_bitmap_b,
+       vmx_msr_bitmap_legacy,
+       vmx_msr_bitmap_legacy_x2apic,
+       vmx_msr_bitmap_legacy_x2apic_apicv,
+       vmx_msr_bitmap_longmode,
+       vmx_msr_bitmap_longmode_x2apic,
+       vmx_msr_bitmap_longmode_x2apic_apicv,
+       vmx_vmread_bitmap,
+       vmx_vmwrite_bitmap,
+       VMX_BITMAP_NR
+};
+static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
 
 static bool cpu_has_load_ia32_efer;
 static bool cpu_has_load_perf_global_ctrl;
@@ -2519,23 +2523,26 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, 
int to)
 
 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 {
-       unsigned long *msr_bitmap;
+       enum vmx_bitmap msr_bitmap;
 
-       if (is_guest_mode(vcpu))
-               msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
-       else if (cpu_has_secondary_exec_ctrls() &&
-                (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
-                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
+       if (is_guest_mode(vcpu)) {
+               vmcs_write64(MSR_BITMAP, __pa(to_vmx(vcpu)->nested.msr_bitmap));
+               return;
+       }
+
+       if (cpu_has_secondary_exec_ctrls() &&
+           (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+           SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
                if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
                        if (is_long_mode(vcpu))
+                               msr_bitmap = 
vmx_msr_bitmap_longmode_x2apic_apicv;
+                       else
+                               msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
+               } else {
+                       if (is_long_mode(vcpu))
                                msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
                        else
                                msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
-               } else {
-                       if (is_long_mode(vcpu))
-                               msr_bitmap = 
vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
-                       else
-                               msr_bitmap = 
vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
                }
        } else {
                if (is_long_mode(vcpu))
@@ -2544,7 +2551,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
                        msr_bitmap = vmx_msr_bitmap_legacy;
        }
 
-       vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
+       vmcs_write64(MSR_BITMAP, __pa(vmx_bitmap[msr_bitmap]));
 }
 
 /*
@@ -3600,13 +3607,13 @@ static void init_vmcs_shadow_fields(void)
        /* shadowed fields guest access without vmexit */
        for (i = 0; i < max_shadow_read_write_fields; i++) {
                clear_bit(shadow_read_write_fields[i],
-                         vmx_vmwrite_bitmap);
+                         vmx_bitmap[vmx_vmwrite_bitmap]);
                clear_bit(shadow_read_write_fields[i],
-                         vmx_vmread_bitmap);
+                         vmx_bitmap[vmx_vmread_bitmap]);
        }
        for (i = 0; i < max_shadow_read_only_fields; i++)
                clear_bit(shadow_read_only_fields[i],
-                         vmx_vmread_bitmap);
+                         vmx_bitmap[vmx_vmread_bitmap]);
 }
 
 static __init int alloc_kvm_area(void)
@@ -4601,41 +4608,6 @@ static void __vmx_disable_intercept_for_msr(unsigned 
long *msr_bitmap,
        }
 }
 
-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
-                                               u32 msr, int type)
-{
-       int f = sizeof(unsigned long);
-
-       if (!cpu_has_vmx_msr_bitmap())
-               return;
-
-       /*
-        * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
-        * have the write-low and read-high bitmap offsets the wrong way round.
-        * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
-        */
-       if (msr <= 0x1fff) {
-               if (type & MSR_TYPE_R)
-                       /* read-low */
-                       __set_bit(msr, msr_bitmap + 0x000 / f);
-
-               if (type & MSR_TYPE_W)
-                       /* write-low */
-                       __set_bit(msr, msr_bitmap + 0x800 / f);
-
-       } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
-               msr &= 0x1fff;
-               if (type & MSR_TYPE_R)
-                       /* read-high */
-                       __set_bit(msr, msr_bitmap + 0x400 / f);
-
-               if (type & MSR_TYPE_W)
-                       /* write-high */
-                       __set_bit(msr, msr_bitmap + 0xc00 / f);
-
-       }
-}
-
 /*
  * If a msr is allowed by L0, we should check whether it is allowed by L1.
  * The corresponding bit will be cleared unless both of L0 and L1 allow it.
@@ -4684,55 +4656,31 @@ static void 
nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
 
 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 {
-       if (!longmode_only)
-               __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
-                                               msr, MSR_TYPE_R | MSR_TYPE_W);
-       __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
-                                               msr, MSR_TYPE_R | MSR_TYPE_W);
+       __vmx_disable_intercept_for_msr(vmx_bitmap[vmx_msr_bitmap_longmode],
+                       msr, MSR_TYPE_R | MSR_TYPE_W);
+
+       if (longmode_only)
+               return;
+       __vmx_disable_intercept_for_msr(vmx_bitmap[vmx_msr_bitmap_legacy],
+                       msr, MSR_TYPE_R | MSR_TYPE_W);
 }
 
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
+static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool 
apicv_active)
 {
        if (apicv_active) {
-               __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-                               msr, MSR_TYPE_R);
-               __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-                               msr, MSR_TYPE_R);
+               __vmx_disable_intercept_for_msr(
+                               vmx_bitmap[vmx_msr_bitmap_legacy_x2apic_apicv],
+                               msr, type);
+               __vmx_disable_intercept_for_msr(
+                               
vmx_bitmap[vmx_msr_bitmap_longmode_x2apic_apicv],
+                               msr, type);
        } else {
-               
__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-                               msr, MSR_TYPE_R);
-               
__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-                               msr, MSR_TYPE_R);
-       }
-}
-
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
-{
-       if (apicv_active) {
-               __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-                               msr, MSR_TYPE_R);
-               __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-                               msr, MSR_TYPE_R);
-       } else {
-               
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-                               msr, MSR_TYPE_R);
-               
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-                               msr, MSR_TYPE_R);
-       }
-}
-
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
-{
-       if (apicv_active) {
-               __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-                               msr, MSR_TYPE_W);
-               __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-                               msr, MSR_TYPE_W);
-       } else {
-               
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-                               msr, MSR_TYPE_W);
-               
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-                               msr, MSR_TYPE_W);
+               __vmx_disable_intercept_for_msr(
+                               vmx_bitmap[vmx_msr_bitmap_legacy_x2apic],
+                               msr, type);
+               __vmx_disable_intercept_for_msr(
+                               vmx_bitmap[vmx_msr_bitmap_longmode_x2apic],
+                               msr, type);
        }
 }
 
@@ -5034,12 +4982,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        int i;
 
        /* I/O */
-       vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
-       vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
+       vmcs_write64(IO_BITMAP_A, __pa(vmx_bitmap[vmx_io_bitmap_a]));
+       vmcs_write64(IO_BITMAP_B, __pa(vmx_bitmap[vmx_io_bitmap_b]));
 
        if (enable_shadow_vmcs) {
-               vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
-               vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
+               vmcs_write64(VMREAD_BITMAP, 
__pa(vmx_bitmap[vmx_vmread_bitmap]));
+               vmcs_write64(VMWRITE_BITMAP, 
__pa(vmx_bitmap[vmx_vmwrite_bitmap]));
        }
        if (cpu_has_vmx_msr_bitmap())
                vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
@@ -6377,68 +6325,30 @@ static __init int hardware_setup(void)
        for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
                kvm_define_shared_msr(i, vmx_msr_index[i]);
 
-       vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_io_bitmap_a)
-               return r;
+       for (i = 0; i < ARRAY_SIZE(vmx_bitmap); i++) {
+               vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
+               if (!vmx_bitmap[i])
+                       goto out;
+       }
 
-       vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_io_bitmap_b)
-               goto out;
-
-       vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_legacy)
-               goto out1;
-
-       vmx_msr_bitmap_legacy_x2apic =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_legacy_x2apic)
-               goto out2;
-
-       vmx_msr_bitmap_legacy_x2apic_apicv_inactive =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive)
-               goto out3;
-
-       vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_longmode)
-               goto out4;
-
-       vmx_msr_bitmap_longmode_x2apic =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_longmode_x2apic)
-               goto out5;
-
-       vmx_msr_bitmap_longmode_x2apic_apicv_inactive =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive)
-               goto out6;
-
-       vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_vmread_bitmap)
-               goto out7;
-
-       vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_vmwrite_bitmap)
-               goto out8;
-
-       memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
-       memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
+       memset(vmx_bitmap[vmx_vmread_bitmap], 0xff, PAGE_SIZE);
+       memset(vmx_bitmap[vmx_vmwrite_bitmap], 0xff, PAGE_SIZE);
 
        /*
         * Allow direct access to the PC debug port (it is often used for I/O
         * delays, but the vmexits simply slow things down).
         */
-       memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-       clear_bit(0x80, vmx_io_bitmap_a);
+       memset(vmx_bitmap[vmx_io_bitmap_a], 0xff, PAGE_SIZE);
+       clear_bit(0x80, vmx_bitmap[vmx_io_bitmap_a]);
 
-       memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
+       memset(vmx_bitmap[vmx_io_bitmap_b], 0xff, PAGE_SIZE);
 
-       memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
-       memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
+       memset(vmx_bitmap[vmx_msr_bitmap_legacy], 0xff, PAGE_SIZE);
+       memset(vmx_bitmap[vmx_msr_bitmap_longmode], 0xff, PAGE_SIZE);
 
        if (setup_vmcs_config(&vmcs_config) < 0) {
                r = -EIO;
-               goto out9;
+               goto out;
        }
 
        if (boot_cpu_has(X86_FEATURE_NX))
@@ -6501,39 +6411,39 @@ static __init int hardware_setup(void)
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
        vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
-       memcpy(vmx_msr_bitmap_legacy_x2apic,
-                       vmx_msr_bitmap_legacy, PAGE_SIZE);
-       memcpy(vmx_msr_bitmap_longmode_x2apic,
-                       vmx_msr_bitmap_longmode, PAGE_SIZE);
-       memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-                       vmx_msr_bitmap_legacy, PAGE_SIZE);
-       memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-                       vmx_msr_bitmap_longmode, PAGE_SIZE);
+       memcpy(vmx_bitmap[vmx_msr_bitmap_legacy_x2apic],
+              vmx_bitmap[vmx_msr_bitmap_legacy], PAGE_SIZE);
+       memcpy(vmx_bitmap[vmx_msr_bitmap_legacy_x2apic_apicv],
+              vmx_bitmap[vmx_msr_bitmap_legacy], PAGE_SIZE);
+       memcpy(vmx_bitmap[vmx_msr_bitmap_longmode_x2apic],
+              vmx_bitmap[vmx_msr_bitmap_longmode], PAGE_SIZE);
+       memcpy(vmx_bitmap[vmx_msr_bitmap_longmode_x2apic_apicv],
+              vmx_bitmap[vmx_msr_bitmap_longmode], PAGE_SIZE);
 
        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
        /*
         * enable_apicv && kvm_vcpu_apicv_active()
         */
-       for (msr = 0x800; msr <= 0x8ff; msr++)
-               vmx_disable_intercept_msr_read_x2apic(msr, true);
+       for (msr = 0x800; msr <= 0x8ff; msr++) {
+               if (msr == 0x839 /* TMCCT */)
+                       continue;
+               vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true);
+       }
 
-       /* TMCCT */
-       vmx_enable_intercept_msr_read_x2apic(0x839, true);
        /* TPR */
-       vmx_disable_intercept_msr_write_x2apic(0x808, true);
+       vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true);
        /* EOI */
-       vmx_disable_intercept_msr_write_x2apic(0x80b, true);
+       vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true);
        /* SELF-IPI */
-       vmx_disable_intercept_msr_write_x2apic(0x83f, true);
+       vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true);
 
        /*
         * (enable_apicv && !kvm_vcpu_apicv_active()) ||
         *      !enable_apicv
         */
        /* TPR */
-       vmx_disable_intercept_msr_read_x2apic(0x808, false);
-       vmx_disable_intercept_msr_write_x2apic(0x808, false);
+       vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false);
 
        if (enable_ept) {
                kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
@@ -6580,42 +6490,19 @@ static __init int hardware_setup(void)
 
        return alloc_kvm_area();
 
-out9:
-       free_page((unsigned long)vmx_vmwrite_bitmap);
-out8:
-       free_page((unsigned long)vmx_vmread_bitmap);
-out7:
-       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-out6:
-       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out5:
-       free_page((unsigned long)vmx_msr_bitmap_longmode);
-out4:
-       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-out3:
-       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
-       free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
-       free_page((unsigned long)vmx_io_bitmap_b);
 out:
-       free_page((unsigned long)vmx_io_bitmap_a);
+       for (i = 0; i < ARRAY_SIZE(vmx_bitmap); i++)
+               free_page((unsigned long)vmx_bitmap[i]);
 
     return r;
 }
 
 static __exit void hardware_unsetup(void)
 {
-       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-       free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-       free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-       free_page((unsigned long)vmx_msr_bitmap_legacy);
-       free_page((unsigned long)vmx_msr_bitmap_longmode);
-       free_page((unsigned long)vmx_io_bitmap_b);
-       free_page((unsigned long)vmx_io_bitmap_a);
-       free_page((unsigned long)vmx_vmwrite_bitmap);
-       free_page((unsigned long)vmx_vmread_bitmap);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(vmx_bitmap); i++)
+               free_page((unsigned long)vmx_bitmap[i]);
 
        free_kvm_area();
 }
-- 
2.10.0

Reply via email to