[PATCH v4 05/10] sched/fair: Hoist idle_stamp up from idle_balance

From: Steve Sistare
Date: 2018-12-06
Move the update of idle_stamp from idle_balance() to the call site in
pick_next_task_fair(), to prepare for a future patch that adds work to
pick_next_task_fair() that must be included in the idle_stamp interval.
No functional change.
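
For reference, a minimal sketch of the intended shape of the idle path
in pick_next_task_fair() after this patch (the hunk below is
authoritative). The placeholder comment is an assumption based on the
paragraph above, marking where the future patch's work would land so
that it falls inside the idle_stamp interval:

	idle:
		update_misfit_status(NULL, rq);

		/* Stamp first so everything below counts as idle time. */
		rq_idle_stamp_update(rq);

		/* Assumed: the future patch's extra work lands here. */

		new_tasks = idle_balance(rq, rf);

		/* A successful pull means the CPU does not go idle after all. */
		if (new_tasks)
			rq_idle_stamp_clear(rq);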

Signed-off-by: Steve Sistare 
---
 kernel/sched/fair.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4e105db..8a33ad9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3725,6 +3725,16 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
rq->misfit_task_load = task_h_load(p);
 }
 
+static inline void rq_idle_stamp_update(struct rq *rq)
+{
+   rq->idle_stamp = rq_clock(rq);
+}
+
+static inline void rq_idle_stamp_clear(struct rq *rq)
+{
+   rq->idle_stamp = 0;
+}
+
 static void overload_clear(struct rq *rq)
 {
struct sparsemask *overload_cpus;
@@ -3770,6 +3780,8 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
return 0;
 }
 
+static inline void rq_idle_stamp_update(struct rq *rq) {}
+static inline void rq_idle_stamp_clear(struct rq *rq) {}
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
 
@@ -6764,8 +6776,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 idle:
update_misfit_status(NULL, rq);
+
+   /*
+* We must set idle_stamp _before_ calling idle_balance(), such that we
+* measure the duration of idle_balance() as idle time.
+*/
+   rq_idle_stamp_update(rq);
+
new_tasks = idle_balance(rq, rf);
 
+   if (new_tasks)
+   rq_idle_stamp_clear(rq);
+
/*
 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 * possible for any higher priority task to appear. In that case we
@@ -9611,12 +9633,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
u64 curr_cost = 0;
 
/*
-* We must set idle_stamp _before_ calling idle_balance(), such that we
-* measure the duration of idle_balance() as idle time.
-*/
-   this_rq->idle_stamp = rq_clock(this_rq);
-
-   /*
 * Do not pull tasks towards !active CPUs...
 */
if (!cpu_active(this_cpu))
@@ -9707,9 +9723,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
if (this_rq->nr_running != this_rq->cfs.h_nr_running)
pulled_task = -1;
 
-   if (pulled_task)
-   this_rq->idle_stamp = 0;
-
rq_repin_lock(this_rq, rf);
 
return pulled_task;
-- 
1.8.3.1


