update_nohz_stats() is only called by _nohz_idle_balance(), which is itself compiled only under CONFIG_NO_HZ_COMMON, so move the function into the existing CONFIG_NO_HZ_COMMON block and drop the #ifdef/#else from its body.

Signed-off-by: Kefeng Wang <wangkefeng.w...@huawei.com>
---
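For context, the sole call site already sits inside the CONFIG_NO_HZ_COMMON
region, which is what makes the in-function #ifdef redundant. A trimmed
sketch of that call site (names follow the upstream _nohz_idle_balance();
unrelated loop logic elided):

	/* In _nohz_idle_balance(), compiled only under CONFIG_NO_HZ_COMMON: */
	for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
		...
		rq = cpu_rq(balance_cpu);

		/*
		 * update_nohz_stats() reports whether blocked load
		 * remains to be decayed on this rq later.
		 */
		has_blocked_load |= update_nohz_stats(rq);
		...
	}
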
 kernel/sched/fair.c | 40 ++++++++++++++++++----------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6d73bdbb2d40..2a20ada83cbb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8395,28 +8395,6 @@ group_type group_classify(unsigned int imbalance_pct,
        return group_has_spare;
 }
 
-static bool update_nohz_stats(struct rq *rq)
-{
-#ifdef CONFIG_NO_HZ_COMMON
-       unsigned int cpu = rq->cpu;
-
-       if (!rq->has_blocked_load)
-               return false;
-
-       if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
-               return false;
-
-       if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
-               return true;
-
-       update_blocked_averages(cpu);
-
-       return rq->has_blocked_load;
-#else
-       return false;
-#endif
-}
-
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
@@ -10380,6 +10358,24 @@ void nohz_balance_enter_idle(int cpu)
        WRITE_ONCE(nohz.has_blocked, 1);
 }
 
+static bool update_nohz_stats(struct rq *rq)
+{
+       unsigned int cpu = rq->cpu;
+
+       if (!rq->has_blocked_load)
+               return false;
+
+       if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+               return false;
+
+       if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
+               return true;
+
+       update_blocked_averages(cpu);
+
+       return rq->has_blocked_load;
+}
+
 /*
  * Internal function that runs load balance for all idle cpus. The load balance
  * can be a simple update of blocked load or a complete load balance with
-- 
2.26.2
