There is no real point in switching to dyntick-idle cputime accounting
mode if the tick is not actually stopped. This just adds overhead,
notably fetching the GTOD, on each idle exit and each idle IRQ entry for
no reason during short idle trips.

Signed-off-by: Frederic Weisbecker <[email protected]>
Tested-by: Shrikanth Hegde <[email protected]>
---
 kernel/time/tick-sched.c | 50 +++++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index fa03cf7b3cec..c1ee0b256445 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1157,8 +1157,10 @@ void tick_nohz_idle_stop_tick(void)
                ts->idle_sleeps++;
                ts->idle_expires = expires;
 
-               if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED))
+               if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
+                       kcpustat_dyntick_start(ts->idle_entrytime);
                        nohz_balance_enter_idle(cpu);
+               }
        } else {
                tick_nohz_retain_tick(ts);
        }
@@ -1200,7 +1202,6 @@ void tick_nohz_idle_enter(void)
        WARN_ON_ONCE(ts->timer_expires_base);
        tick_sched_flag_set(ts, TS_FLAG_INIDLE);
        ts->idle_entrytime = ktime_get();
-       kcpustat_dyntick_start(ts->idle_entrytime);
        tick_nohz_clock_sleep(ts);
 
        local_irq_enable();
@@ -1230,9 +1231,10 @@ void tick_nohz_irq_exit(void)
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
        if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) {
-               ts->idle_entrytime = ktime_get();
-               kcpustat_irq_exit(ts->idle_entrytime);
                tick_nohz_clock_sleep(ts);
+               ts->idle_entrytime = ktime_get();
+               if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
+                       kcpustat_irq_exit(ts->idle_entrytime);
        } else {
                tick_nohz_full_update_tick(ts);
        }
@@ -1333,8 +1335,17 @@ void tick_nohz_idle_restart_tick(void)
 {
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
-       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
-               tick_nohz_restart_sched_tick(ts, ktime_get());
+       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
+               /*
+                * Update entrytime here in case the tick restart is due to temporary
+                * polling on forced broadcast. The tick may be stopped again later within
+                * the same idle trip. The idle_entrytime was updated recently but make sure
+                * no tiny amount of idle time is accounted twice.
+                */
+               ts->idle_entrytime = ktime_get();
+               kcpustat_dyntick_stop(ts->idle_entrytime);
+               tick_nohz_restart_sched_tick(ts, ts->idle_entrytime);
+       }
 }
 
 static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
@@ -1364,7 +1375,6 @@ static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
 void tick_nohz_idle_exit(void)
 {
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-       bool idle_active, tick_stopped;
        ktime_t now;
 
        local_irq_disable();
@@ -1373,18 +1383,13 @@ void tick_nohz_idle_exit(void)
        WARN_ON_ONCE(ts->timer_expires_base);
 
        tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
-       idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE);
-       tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
+       tick_nohz_clock_wakeup(ts);
 
-       if (idle_active || tick_stopped)
+       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
                now = ktime_get();
-
-       if (idle_active)
-               tick_nohz_clock_wakeup(ts);
-
-       if (tick_stopped)
+               kcpustat_dyntick_stop(now);
                tick_nohz_idle_update_tick(ts, now);
-       kcpustat_dyntick_stop(now);
+       }
 
        local_irq_enable();
 }
@@ -1439,15 +1444,13 @@ static inline void tick_nohz_irq_enter(void)
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
        ktime_t now;
 
-       if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
+       tick_nohz_clock_wakeup(ts);
+
+       if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED))
                return;
 
        now = ktime_get();
-
-       if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) {
-               tick_nohz_clock_wakeup(ts);
-               kcpustat_irq_enter(now);
-       }
+       kcpustat_irq_enter(now);
 
        /*
         * If all CPUs are idle we may need to update a stale jiffies value.
@@ -1456,8 +1459,7 @@ static inline void tick_nohz_irq_enter(void)
         * rare case (typically stop machine). So we must make sure we have a
         * last resort.
         */
-       if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
-               tick_nohz_update_jiffies(now);
+       tick_nohz_update_jiffies(now);
 }
 
 #else
-- 
2.53.0


Reply via email to