From: Avi Kivity <[EMAIL PROTECTED]>

Most Intel hosts have a stable tsc, and playing with the offset only
reduces accuracy.  By limiting tsc offset adjustment to forward updates
only (i.e. only when the new cpu's tsc has fallen behind the value saved
on the previous cpu), we effectively disable tsc offset adjustment on
these hosts.

Signed-off-by: Avi Kivity <[EMAIL PROTECTED]>

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 712fd73..0b0bb0b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -522,7 +522,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 phys_addr = __pa(vmx->vmcs);
-       u64 tsc_this, delta;
+       u64 tsc_this, delta, new_offset;
 
        if (vcpu->cpu != cpu) {
                vcpu_clear(vmx);
@@ -562,8 +562,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * Make sure the time stamp counter is monotonous.
                 */
                rdtscll(tsc_this);
-               delta = vcpu->arch.host_tsc - tsc_this;
-               vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+               if (tsc_this < vcpu->arch.host_tsc) {
+                       delta = vcpu->arch.host_tsc - tsc_this;
+                       new_offset = vmcs_read64(TSC_OFFSET) + delta;
+                       vmcs_write64(TSC_OFFSET, new_offset);
+               }
        }
 }
 

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2008.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
_______________________________________________
kvm-commits mailing list
kvm-commits@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-commits

Reply via email to