The commit is pushed to "branch-rh7-3.10.0-327.36.1.vz7.19.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.36.1.vz7.19.3
------>
commit 692d327368aaf386c36ad50695cc75b5c5069853
Author: Denis Plotnikov <[email protected]>
Date:   Mon Oct 24 16:07:09 2016 +0400

    ms/math64: add function to multiply two u64 numbers with overflow handling
    
    The function is added as part of the Hyper-V TSC reference page clock porting
    https://jira.sw.ru/browse/PSBM-46245
    
    It was initially introduced in commit
    35181e86df97e4223f4a28fb33e2bcf3b73de141
    by Haozhong Zhang <[email protected]>
    
    Signed-off-by: Denis Plotnikov <[email protected]>
    Reviewed-by: Roman Kagan <[email protected]>
    Signed-off-by: "Denis V. Lunev" <[email protected]>
---
 include/linux/math64.h | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
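
A minimal usage sketch, not part of the patch (the names tsc, scale, offset
and time below are illustrative, not taken from this series):
mul_u64_u64_shr(a, b, shift) returns bits shift..shift+63 of the full
128-bit product a * b, so the Hyper-V TSC reference page clock read this is
ported for can scale a raw TSC by a 64.64 fixed-point factor roughly like:

	u64 tsc, scale, offset, time;

	/* integer part of the 64.64 fixed-point product tsc * scale */
	time = mul_u64_u64_shr(tsc, scale, 64) + offset;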

diff --git a/include/linux/math64.h b/include/linux/math64.h
index c45c089..44282ec 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+       return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
 #else
 
 #ifndef mul_u64_u32_shr
@@ -161,6 +168,50 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+       union {
+               u64 ll;
+               struct {
+#ifdef __BIG_ENDIAN
+                       u32 high, low;
+#else
+                       u32 low, high;
+#endif
+               } l;
+       } rl, rm, rn, rh, a0, b0;
+       u64 c;
+
+       a0.ll = a;
+       b0.ll = b;
+
+       rl.ll = (u64)a0.l.low * b0.l.low;
+       rm.ll = (u64)a0.l.low * b0.l.high;
+       rn.ll = (u64)a0.l.high * b0.l.low;
+       rh.ll = (u64)a0.l.high * b0.l.high;
+
+       /*
+        * Each of these lines computes a 64-bit intermediate result into "c",
+        * starting at bits 32-95.  The low 32-bits go into the result of the
+        * multiplication, the high 32-bits are carried into the next step.
+        */
+       rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+       rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+       rh.l.high = (c >> 32) + rh.l.high;
+
+       /*
+        * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+        * shift it right and throw away the high part of the result.
+        */
+       if (shift == 0)
+               return rl.ll;
+       if (shift < 64)
+               return (rl.ll >> shift) | (rh.ll << (64 - shift));
+       return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
+
 #endif
 
 #endif /* _LINUX_MATH64_H */
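
For reference, not part of the patch: the generic fallback is the usual
schoolbook decomposition. Splitting each operand into 32-bit halves,
a = 2^32 * ah + al and b = 2^32 * bh + bl, the 128-bit product is

	a * b = 2^64 * (ah * bh) + 2^32 * (ah * bl + al * bh) + al * bl

and the three carry-propagation lines fold the middle partial products
(rm, rn) into rl.ll (low 64 bits) and rh.ll (high 64 bits). A quick
self-check against the __int128 path, assuming a compiler with 128-bit
support (the helper name is hypothetical):

	/* sketch: compare the generic fallback against the compiler's
	 * native 128-bit arithmetic */
	static int check_mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
	{
		u64 want = (u64)(((unsigned __int128)a * b) >> shift);

		return mul_u64_u64_shr(a, b, shift) == want;
	}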