Update last_blocked_load_update_tick at the start of update_blocked_averages()
to reduce the window in which another CPU can start the same update again.

Signed-off-by: Vincent Guittot <[email protected]>
---
 kernel/sched/fair.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)
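
Not part of the patch, just a sketch of the intent in plain C (all names
below are illustrative, not the real rq fields): the updater publishes its
timestamp under the lock before doing the heavy refresh, so peers that poll
the timestamp locklessly see a recent value and skip kicking off the same
update.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct shared_stats {
	pthread_mutex_t lock;
	_Atomic long last_update;	/* plays the role of last_blocked_load_update_tick */
	bool has_blocked;
};

static struct shared_stats stats = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static void update_averages(struct shared_stats *s)
{
	pthread_mutex_lock(&s->lock);
	/* Publish the timestamp before the expensive refresh, so peers
	 * doing the lockless check below already see a fresh value and
	 * do not start a duplicate update. */
	atomic_store_explicit(&s->last_update, (long)time(NULL),
			      memory_order_relaxed);

	/* ... decay and refresh the blocked load here ... */

	pthread_mutex_unlock(&s->lock);
}

static bool stats_are_fresh(struct shared_stats *s, long now)
{
	/* Lockless read, analogous to the READ_ONCE() added in
	 * update_nohz_stats(): if the timestamp is recent, bail out. */
	return atomic_load_explicit(&s->last_update,
				    memory_order_relaxed) >= now;
}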

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3d2ab28d5736..968808c2c022 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7852,17 +7852,9 @@ static inline bool others_have_blocked(struct rq *rq)
        return false;
 }
 
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
-{
-       rq->last_blocked_load_update_tick = jiffies;
-
-       if (!has_blocked)
-               rq->has_blocked_load = 0;
-}
 #else
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
 static bool __update_blocked_others(struct rq *rq, bool *done)
@@ -8022,12 +8014,16 @@ static void update_blocked_averages(int cpu)
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
+       WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
+
        update_rq_clock(rq);
 
        decayed |= __update_blocked_others(rq, &done);
        decayed |= __update_blocked_fair(rq, &done);
 
-       update_blocked_load_status(rq, !done);
+       if (done)
+               rq->has_blocked_load = 0;
+
        if (decayed)
                cpufreq_update_util(rq, 0);
        rq_unlock_irqrestore(rq, &rf);
@@ -8363,7 +8359,7 @@ static bool update_nohz_stats(struct rq *rq)
        if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
                return false;
 
-       if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+       if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
                return true;
 
        update_blocked_averages(cpu);
-- 
2.17.1
