VMX and SVM calculate the TSC scaling ratio using similar logic, so this
patch generalizes it into a common TSC scaling function.

Signed-off-by: Haozhong Zhang <haozhong.zh...@intel.com>
---
 arch/x86/kvm/svm.c       | 48 +++------------------------------
 arch/x86/kvm/x86.c       | 70 ++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/kvm_host.h |  4 ++-
 3 files changed, 77 insertions(+), 45 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a3186e2..1a333bd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -209,7 +209,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
-static u64 __scale_tsc(u64 ratio, u64 tsc);
 
 enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
@@ -947,21 +946,7 @@ static __init int svm_hardware_setup(void)
                kvm_enable_efer_bits(EFER_FFXSR);
 
        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-               u64 max;
-
                kvm_has_tsc_control = true;
-
-               /*
-                * Make sure the user can only configure tsc_khz values that
-                * fit into a signed integer.
-                * A min value is not calculated needed because it will always
-                * be 1 on all machines and a value of 0 is used to disable
-                * tsc-scaling for the vcpu.
-                */
-               max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
-
-               kvm_max_guest_tsc_khz = max;
-
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
                kvm_tsc_scaling_ratio_rsvd = TSC_RATIO_RSVD;
@@ -1030,31 +1015,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
 
-static u64 __scale_tsc(u64 ratio, u64 tsc)
-{
-       u64 mult, frac, _tsc;
-
-       mult  = ratio >> 32;
-       frac  = ratio & ((1ULL << 32) - 1);
-
-       _tsc  = tsc;
-       _tsc *= mult;
-       _tsc += (tsc >> 32) * frac;
-       _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
-
-       return _tsc;
-}
-
-static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
-{
-       u64 _tsc = tsc;
-
-       if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-               _tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc);
-
-       return _tsc;
-}
-
 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
        u64 ratio;
@@ -1123,7 +1083,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
        if (host) {
                if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
                        WARN_ON(adjustment < 0);
-               adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+               adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
        }
 
        svm->vmcb->control.tsc_offset += adjustment;
@@ -1141,7 +1101,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
        u64 tsc;
 
-       tsc = svm_scale_tsc(vcpu, rdtsc());
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
 
        return target_tsc - tsc;
 }
@@ -3166,7 +3126,7 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
        return vmcb->control.tsc_offset +
-               svm_scale_tsc(vcpu, host_tsc);
+               kvm_scale_tsc(vcpu, host_tsc);
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3176,7 +3136,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
-                       svm_scale_tsc(vcpu, rdtsc());
+                       kvm_scale_tsc(vcpu, rdtsc());
 
                break;
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4a521b4..920c302 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1321,6 +1321,64 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
        vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
+/*
+ * Multiply tsc by a fixed point number represented by ratio.
+ *
+ * The most significant 64-N bits (mult) of ratio represent the
+ * integral part of the fixed point number; the remaining N bits
+ * (frac) represent the fractional part, i.e. ratio represents a fixed
+ * point number (mult + frac * 2^(-N)).
+ *
+ * N.B.: we always assume that not all 64 bits of ratio are used for
+ * the fractional part and that the ratio has at least 1 bit for the
+ * fractional part, i.e. 0 < N < 64.
+ *
+ * N equals kvm_tsc_scaling_ratio_frac_bits.
+ */
+static u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+       u64 mult, frac, mask, _tsc;
+       int width, nr;
+
+       BUG_ON(kvm_tsc_scaling_ratio_frac_bits >= 64 ||
+              kvm_tsc_scaling_ratio_frac_bits == 0);
+
+       mult  = ratio >> kvm_tsc_scaling_ratio_frac_bits;
+       mask  = (1ULL << kvm_tsc_scaling_ratio_frac_bits) - 1;
+       frac  = ratio & mask;
+
+       width = 64 - kvm_tsc_scaling_ratio_frac_bits;
+       mask  = (1ULL << width) - 1;
+       nr    = kvm_tsc_scaling_ratio_frac_bits;
+
+       _tsc  = tsc;
+       _tsc *= mult;
+       _tsc += (tsc >> kvm_tsc_scaling_ratio_frac_bits) * frac;
+
+       while (nr >= width) {
+               _tsc += (((tsc >> (nr - width)) & mask) * frac) >> (64 - nr);
+               nr   -= width;
+       }
+
+       if (nr > 0)
+               _tsc += ((tsc & ((1ULL << nr) - 1)) * frac) >>
+                       kvm_tsc_scaling_ratio_frac_bits;
+
+       return _tsc;
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+       u64 _tsc = tsc;
+       u64 ratio = vcpu->arch.tsc_scaling_ratio;
+
+       if (ratio != kvm_default_tsc_scaling_ratio)
+               _tsc = __scale_tsc(ratio, tsc);
+
+       return _tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_scale_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -7290,6 +7348,18 @@ int kvm_arch_hardware_setup(void)
        if (r != 0)
                return r;
 
+       /*
+        * Make sure the user can only configure tsc_khz values that
+        * fit into a signed integer.
+        * A min value is not needed because it will always
+        * be 1 on all machines.
+        */
+       if (kvm_has_tsc_control) {
+               u64 max = min(0x7fffffffULL,
+                             __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
+               kvm_max_guest_tsc_khz = max;
+       }
+
        kvm_init_msr_list();
        return 0;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1bef9e2..3c43e3e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1144,5 +1144,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 {
 }
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
-#endif
 
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+
+#endif
-- 
2.4.8

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to