Turns out mult_precise() wasn't very precise after all: it needs to operate on 64-bit values, so split the multiply into 32-bit halves on 32-bit hosts.
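
For reference, this is the identity the 32-bit path relies on (a sketch, assuming 0 <= shift <= 32; lo and hi denote the low and high 32-bit halves of val):

    (val * mult) >> shift == ((lo * mult) >> shift)
                           + ((hi * mult) << (32 - shift))

Splitting the shift this way is exact: the low 32 bits of (hi * mult) << 32 are zero, so no carry is lost by shifting the two partial products separately.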

Signed-off-by: Zachary Amsden <[email protected]>
---
 arch/x86/kvm/x86.c |   18 +++++++++++++++---
 1 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7e2ba3e..792c895 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -803,15 +803,27 @@ static void compute_best_multiplier(unsigned long a, unsigned long b,
        *s = shift;
 }
 
-static inline unsigned long mult_precise(unsigned long val, unsigned long mult,
-       int shift)
+static inline u64 mult_precise(u64 val, unsigned long mult, int shift)
 {
+#if (BITS_PER_LONG == 64)
        unsigned long top, bot;
 
-       __asm__ ( "mul %3; shrd %1, %0" :
+       __asm__ ( "mulq %3; shrdq %1, %0" :
                 "=&a" (bot), "=&d" (top) :
                 "0" (mult), "rm" (val), "c" (shift));
        return bot;
+#else
+       u32 lo = (u32)val, hi = (u32)(val >> 32);
+       u64 plo = (u64)lo * mult;
+       u64 phi = (u64)hi * mult;
+
+       /*
+        * val * mult == plo + (phi << 32), and the low 32 bits of
+        * (phi << 32) are zero, so the two halves can be shifted
+        * independently without losing a carry.  Assumes
+        * 0 <= shift <= 32, which a 32-bit multiplier implies.
+        */
+       return (plo >> shift) + (phi << (32 - shift));
+#endif
 }
 
 static inline u64 compute_ref_tsc(void)
-- 
1.6.5.2
