In the unlikely case that L1 does not trap MSR_IA32_TSC writes, L0 needs to
emulate such an MSR write by L2 by modifying vmcs02.tsc_offset. We also need
to set vmcs12.tsc_offset so that this change survives the next nested entry
(see prepare_vmcs02()).
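
For reference, a minimal sketch of the relationship this patch relies on. The
helper name below is hypothetical and the body is illustrative only; the real
composition of the offsets is done in prepare_vmcs02(), which is not shown
here:

	/*
	 * Illustrative sketch, not the actual prepare_vmcs02() code: while
	 * L2 runs, its effective TSC offset relative to L0 is assumed to be
	 * L1's own offset (saved in vmcs01_fields) plus the offset L1 set up
	 * for L2 (vmcs12). This is why vmx_write_tsc_offset() stores
	 * "offset - vmcs01_fields->tsc_offset" into vmcs12.tsc_offset.
	 */
	static inline u64 nested_vmx_l2_tsc_offset(struct kvm_vcpu *vcpu)
	{
		return to_vmx(vcpu)->nested.vmcs01_fields->tsc_offset +
		       get_vmcs12_fields(vcpu)->tsc_offset;
	}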

Signed-off-by: Nadav Har'El <[email protected]>
---
 arch/x86/kvm/vmx.c |   11 +++++++++++
 1 file changed, 11 insertions(+)

--- .before/arch/x86/kvm/vmx.c  2010-10-17 11:52:03.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c   2010-10-17 11:52:03.000000000 +0200
@@ -1674,12 +1674,23 @@ static u64 guest_read_tsc(void)
 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        vmcs_write64(TSC_OFFSET, offset);
+       if (to_vmx(vcpu)->nested.nested_mode)
+               /*
+                * We only get here, changing TSC_OFFSET while L2 is running,
+                * because for some reason L1 chose not to trap the TSC MSR.
+                * Since prepare_vmcs12() does not copy tsc_offset, we also
+                * need to set the vmcs12 field here.
+                */
+               get_vmcs12_fields(vcpu)->tsc_offset = offset -
+                       to_vmx(vcpu)->nested.vmcs01_fields->tsc_offset;
 }
 
 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 {
        u64 offset = vmcs_read64(TSC_OFFSET);
        vmcs_write64(TSC_OFFSET, offset + adjustment);
+       if (to_vmx(vcpu)->nested.nested_mode)
+               get_vmcs12_fields(vcpu)->tsc_offset += adjustment;
 }
 
 /*
--