Kernel timekeeping is designed to keep the change in cycles (since the last
timer interrupt) below max_cycles, which prevents multiplication overflow
when converting cycles to nanoseconds. However, if timer interrupts stop,
the calculation will eventually overflow.
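
As a rough illustration (the mult/shift values below are assumptions for a
1 GHz-class clocksource, not taken from this patch), a stand-alone user-space
sketch of where the 64-bit product delta * mult overflows:

  /* Hypothetical sketch, not kernel code. */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t mult  = 1 << 22;          /* assumed clocksource mult */
          uint32_t shift = 22;               /* assumed clocksource shift */
          /* Largest delta whose 64-bit product delta * mult still fits. */
          uint64_t limit = UINT64_MAX / mult;

          printf("delta * mult overflows beyond %llu cycles\n",
                 (unsigned long long)limit);
          /* At ~1 cycle per ns, that is only a bit over an hour of ticks. */
          printf("~%llu seconds without a timer interrupt\n",
                 (unsigned long long)(((limit * mult) >> shift) / 1000000000ULL));
          return 0;
  }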

Add protection against that. In the timekeeping_cycles_to_ns() calculation,
check the delta against max_cycles and fall back to a slower, higher-precision
calculation when it is exceeded. In timekeeping_forward_now(), process the
delta in chunks of at most max_cycles.
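
For reference, a minimal user-space sketch of the chunking idea (assumed
parameters again; unlike the kernel code, which carries the remainder in
xtime_nsec, this drops the sub-nanosecond part of each chunk):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Accumulate a possibly huge delta in chunks small enough that each
   * 64-bit partial product incr * mult cannot overflow.
   */
  static uint64_t cycles_to_ns_chunked(uint64_t delta, uint32_t mult,
                                       uint32_t shift, uint64_t max_cycles)
  {
          uint64_t nsec = 0;

          while (delta > 0) {
                  uint64_t incr = delta < max_cycles ? delta : max_cycles;

                  nsec += (incr * mult) >> shift;
                  delta -= incr;
          }
          return nsec;
  }

  int main(void)
  {
          uint32_t mult = 1 << 22, shift = 22;      /* assumed parameters */
          uint64_t max_cycles = UINT64_MAX / mult;  /* simplified cap, no margin */

          printf("%llu ns\n", (unsigned long long)
                 cycles_to_ns_chunked(10 * max_cycles, mult, shift, max_cycles));
          return 0;
  }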

Suggested-by: Thomas Gleixner <t...@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hun...@intel.com>
---
 kernel/time/timekeeping.c | 40 ++++++++++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 11 deletions(-)

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d17484082e2c..111dfdbd488f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -364,19 +364,32 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 }
 
 /* Timekeeper helper functions. */
+static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
+{
+       return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
+}
+
 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
        /* Calculate the delta since the last update_wall_time() */
        u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 
-       if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
-               /*
-                * Handle clocksource inconsistency between CPUs to prevent
-                * time from going backwards by checking for the MSB of the
-                * mask being set in the delta.
-                */
-               if (unlikely(delta & ~(mask >> 1)))
-                       return tkr->xtime_nsec >> tkr->shift;
+       /*
+        * This detects the case where the delta overflows the multiplication
+        * with tkr->mult.
+        */
+       if (unlikely(delta > tkr->clock->max_cycles)) {
+               if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
+                       /*
+                        * Handle clocksource inconsistency between CPUs to prevent
+                        * time from going backwards by checking for the MSB of the
+                        * mask being set in the delta.
+                        */
+                       if (unlikely(delta & ~(mask >> 1)))
+                               return tkr->xtime_nsec >> tkr->shift;
+               }
+
+               return delta_to_ns_safe(tkr, delta);
        }
 
        return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
@@ -789,10 +802,15 @@ static void timekeeping_forward_now(struct timekeeper *tk)
        tk->tkr_mono.cycle_last = cycle_now;
        tk->tkr_raw.cycle_last  = cycle_now;
 
-       tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
-       tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+       while (delta > 0) {
+               u64 max = tk->tkr_mono.clock->max_cycles;
+               u64 incr = delta < max ? delta : max;
 
-       tk_normalize_xtime(tk);
+               tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
+               tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
+               tk_normalize_xtime(tk);
+               delta -= incr;
+       }
 }
 
 /**
-- 
2.34.1
