From: Marcelo Tosatti <[email protected]>

If a migrated vcpu matches the asid_generation value of the target pcpu,
there will be no TLB flush via TLB_CONTROL_FLUSH_ALL_ASID.

The vcpu.cpu check in pre_svm_run is meaningless, since svm_vcpu_load
has already updated it on schedule-in.

Such a vcpu will VMRUN with stale TLB entries.
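
A minimal sketch of the intended flow after this change (illustrative
only, using the field and function names from the diff below):

	/* svm_vcpu_load(), migration path: the vcpu moves to a new pcpu. */
	if (unlikely(cpu != vcpu->cpu)) {
		...
		vcpu->cpu = cpu;
		/*
		 * Force new_asid() on the next pre_svm_run(): 0 never
		 * matches a live per-cpu asid_generation, which starts
		 * at 1 and only grows.
		 */
		svm->asid_generation = 0;
	}

	/* pre_svm_run(): only the generation comparison remains. */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->asid_generation != svm_data->asid_generation)
		/*
		 * Assigns a fresh ASID to the migrated vcpu (and requests
		 * TLB_CONTROL_FLUSH_ALL_ASID if the pcpu's ASID space
		 * wrapped), so no stale translations can be used.
		 */
		new_asid(svm, svm_data);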

Based on an original patch from Joerg Roedel
(http://patchwork.kernel.org/patch/10021/)

Signed-off-by: Marcelo Tosatti <[email protected]>
Acked-by: Joerg Roedel <[email protected]>

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 18085d3..b720b02 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -739,6 +739,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                svm->vmcb->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
+               svm->asid_generation = 0;
        }
 
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
@@ -1071,7 +1072,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }
 
-       svm->vcpu.cpu = svm_data->cpu;
        svm->asid_generation = svm_data->asid_generation;
        svm->vmcb->control.asid = svm_data->next_asid++;
 }
@@ -2320,8 +2320,8 @@ static void pre_svm_run(struct vcpu_svm *svm)
        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
 
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
-       if (svm->vcpu.cpu != cpu ||
-           svm->asid_generation != svm_data->asid_generation)
+       /* FIXME: handle wraparound of asid_generation */
+       if (svm->asid_generation != svm_data->asid_generation)
                new_asid(svm, svm_data);
 }
 
--