Use a separate msr_bitmap for each vcpu, in preparation for guest PAT support.
Signed-off-by: Sheng Yang <[EMAIL PROTECTED]>
---
arch/x86/kvm/vmx.c | 53 +++++++++++++++++++++++++++++----------------------
1 files changed, 30 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4556cc3..8c44e37 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -90,6 +90,7 @@ struct vcpu_vmx {
} rmode;
int vpid;
bool emulation_required;
+ struct page *msr_bitmap;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -106,7 +107,6 @@ static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
-static struct page *vmx_msr_bitmap;
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -2082,6 +2082,25 @@ static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
kunmap(msr_bitmap);
}
+static int setup_msr_bitmap(struct page *msr_bitmap)
+{
+ void *va;
+
+ va = kmap(msr_bitmap);
+ if (!va)
+ return -EINVAL;
+ memset(va, 0xff, PAGE_SIZE);
+ kunmap(msr_bitmap);
+
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP);
+
+ return 0;
+}
+
/*
* Sets up the vmcs for emulated real mode.
*/
@@ -2099,8 +2118,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
- if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+ if (cpu_has_vmx_msr_bitmap()) {
+ setup_msr_bitmap(vmx->msr_bitmap);
+ vmcs_write64(MSR_BITMAP, page_to_phys(vmx->msr_bitmap));
+ }
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
@@ -3368,6 +3389,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
vmx_free_vmcs(vcpu);
kfree(vmx->host_msrs);
kfree(vmx->guest_msrs);
+ __free_page(vmx->msr_bitmap);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
@@ -3403,6 +3425,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmcs_clear(vmx->vmcs);
+ vmx->msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx->msr_bitmap)
+ goto free_vmcs;
+
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
err = vmx_vcpu_setup(vmx);
@@ -3524,12 +3550,6 @@ static int __init vmx_init(void)
goto out;
}
- vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!vmx_msr_bitmap) {
- r = -ENOMEM;
- goto out1;
- }
-
/*
* Allow direct access to the PC debug port (it is often used for I/O
* delays, but the vmexits simply slow things down).
@@ -3543,21 +3563,11 @@ static int __init vmx_init(void)
memset(va, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
- va = kmap(vmx_msr_bitmap);
- memset(va, 0xff, PAGE_SIZE);
- kunmap(vmx_msr_bitmap);
-
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
- goto out2;
-
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
- vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
+ goto out1;
if (vm_need_ept()) {
bypass_guest_pf = 0;
@@ -3577,8 +3587,6 @@ static int __init vmx_init(void)
return 0;
-out2:
- __free_page(vmx_msr_bitmap);
out1:
__free_page(vmx_io_bitmap_b);
out:
@@ -3588,7 +3596,6 @@ out:
static void __exit vmx_exit(void)
{
- __free_page(vmx_msr_bitmap);
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
--
1.5.4.5
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html