Move the update of idle_stamp from idle_balance to the call site in
pick_next_task_fair, to prepare for a future patch that adds work to
pick_next_task_fair which must be included in the idle_stamp interval.
No functional change.

Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
---
 kernel/sched/fair.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9031d39..da368ed 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3725,6 +3725,8 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
        rq->misfit_task_load = task_h_load(p);
 }
 
+#define IF_SMP(statement)      statement
+
 static void overload_clear(struct rq *rq)
 {
        struct sparsemask *overload_cpus;
@@ -3770,6 +3772,8 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
        return 0;
 }
 
+#define IF_SMP(statement)      /* empty */
+
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
 
@@ -6764,8 +6768,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 idle:
        update_misfit_status(NULL, rq);
+
+       /*
+        * We must set idle_stamp _before_ calling idle_balance(), such that we
+        * measure the duration of idle_balance() as idle time.
+        */
+       IF_SMP(rq->idle_stamp = rq_clock(rq);)
+
        new_tasks = idle_balance(rq, rf);
 
+       if (new_tasks)
+               IF_SMP(rq->idle_stamp = 0;)
+
        /*
         * Because idle_balance() releases (and re-acquires) rq->lock, it is
         * possible for any higher priority task to appear. In that case we
@@ -9611,12 +9625,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
        u64 curr_cost = 0;
 
        /*
-        * We must set idle_stamp _before_ calling idle_balance(), such that we
-        * measure the duration of idle_balance() as idle time.
-        */
-       this_rq->idle_stamp = rq_clock(this_rq);
-
-       /*
         * Do not pull tasks towards !active CPUs...
         */
        if (!cpu_active(this_cpu))
@@ -9707,9 +9715,6 @@ static int idle_balance(struct rq *this_rq, struct 
rq_flags *rf)
        if (this_rq->nr_running != this_rq->cfs.h_nr_running)
                pulled_task = -1;
 
-       if (pulled_task)
-               this_rq->idle_stamp = 0;
-
        rq_repin_lock(this_rq, rf);
 
        return pulled_task;
-- 
1.8.3.1

Reply via email to