We need to provide locking around the current_vmcs/VMCS interactions to
protect against race conditions.
Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>
---
drivers/kvm/vmx.c | 77 ++++++++++++++++++++++++++++++++++++++++++++---------
1 files changed, 64 insertions(+), 13 deletions(-)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 5f0a7fd..6b697f8 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -188,6 +188,20 @@ static struct kvm_msr_entry *find_msr_entry(struct
kvm_vcpu *vcpu, u32 msr)
return NULL;
}
+static void vmcs_load(struct vmcs *vmcs)
+{
+ u64 phys_addr = __pa(vmcs);
+ u8 error;
+
+ asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
+ : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+ : "cc");
+
+ if (error)
+ printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+ vmcs, phys_addr);
+}
+
static void vmcs_clear(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
@@ -205,11 +219,40 @@ static void __vcpu_clear(void *arg)
{
struct kvm_vcpu *vcpu = arg;
int cpu = raw_smp_processor_id();
+ unsigned long flags;
- if (vcpu->cpu == cpu)
+ local_irq_save(flags);
+
+ if (vcpu->cpu != -1) {
+ /*
+ * We should *never* try to __vcpu_clear a remote VMCS. This
+ * would have been addressed at a higher layer already
+ */
+ BUG_ON(vcpu->cpu != cpu);
+
+ /*
+ * Execute the VMCLEAR operation regardless of whether the
+ * VMCS is currently active on this CPU or not (it doesn't
+ * necessarily have to be)
+ */
vmcs_clear(vmx(vcpu)->vmcs);
- if (per_cpu(current_vmcs, cpu) == vmx(vcpu)->vmcs)
- per_cpu(current_vmcs, cpu) = NULL;
+
+ /*
+ * And finally, if this VMCS *was* currently active on this
+ * CPU, mark the CPU as available again
+ */
+ if (per_cpu(current_vmcs, cpu) == vmx(vcpu)->vmcs)
+ per_cpu(current_vmcs, cpu) = NULL;
+ } else
+ /*
+ * If vcpu->cpu thinks we are not installed anywhere,
+ * but this CPU thinks we are currently active, something is
+ * wrong.
+ */
+ BUG_ON(per_cpu(current_vmcs, cpu) == vmx(vcpu)->vmcs);
+
+ local_irq_restore(flags);
+
rdtscll(vcpu->host_tsc);
}
@@ -220,6 +263,7 @@ static void vcpu_clear(struct kvm_vcpu *vcpu)
else
__vcpu_clear(vcpu);
vmx(vcpu)->launched = 0;
+ vcpu->cpu = -1;
}
static unsigned long vmcs_readl(unsigned long field)
@@ -423,26 +467,33 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
*/
static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
- u64 phys_addr = __pa(vmx(vcpu)->vmcs);
int cpu;
u64 tsc_this, delta;
+ unsigned long flags;
cpu = get_cpu();
if (vcpu->cpu != cpu)
vcpu_clear(vcpu);
- if (per_cpu(current_vmcs, cpu) != vmx(vcpu)->vmcs) {
- u8 error;
+ /*
+ * By the time we get here, we know that either our VMCS was previously
+ * loaded on the current CPU, or that it's not loaded on any logical CPU
+ * in the system at all due to the vcpu_clear() operation above.
+ * Either way, we must atomically make sure we are the currently
+ * loaded pointer
+ */
+ local_irq_save(flags);
+ if (per_cpu(current_vmcs, cpu) != vmx(vcpu)->vmcs) {
+ /*
+ * Re-establish ourselves as the current VMCS in an unlaunched
+ * state
+ */
+ vmcs_load(vmx(vcpu)->vmcs);
+ per_cpu(current_vmcs, cpu) = vmx(vcpu)->vmcs;
- per_cpu(current_vmcs, cpu) = vmx(vcpu)->vmcs;
- asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
- : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
- : "cc");
- if (error)
- printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
- vmx(vcpu)->vmcs, phys_addr);
}
+ local_irq_restore(flags);
if (vcpu->cpu != cpu) {
struct descriptor_table dt;
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
_______________________________________________
kvm-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/kvm-devel