Commit-ID:  1a3d027c5a6847e5d349c8527f99aada47e5467a
Gitweb:     http://git.kernel.org/tip/1a3d027c5a6847e5d349c8527f99aada47e5467a
Author:     Josh Poimboeuf <[email protected]>
AuthorDate: Fri, 17 Jun 2016 12:43:23 -0500
Committer:  Ingo Molnar <[email protected]>
CommitDate: Mon, 5 Sep 2016 13:29:45 +0200

sched/debug: Rename and move enqueue_sleeper()

enqueue_sleeper() doesn't actually enqueue anything; it just updates
some statistics and tracepoints.  Rename it to
update_stats_enqueue_sleeper() and call it from update_stats_enqueue().
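
In outline, the resulting call path looks like this (a simplified
excerpt of the change below, not standalone code):

  static inline void
  update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
  {
          if (se != cfs_rq->curr)
                  update_stats_wait_start(cfs_rq, se);

          /* sleeper stats are now updated here, behind the wakeup flag */
          if (flags & ENQUEUE_WAKEUP)
                  update_stats_enqueue_sleeper(cfs_rq, se);
  }

enqueue_entity() now passes its flags straight through, so the existing
schedstat_enabled() check there covers the sleeper statistics as well.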

Signed-off-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matt Fleming <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Srikar Dronamraju <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link:       http://lkml.kernel.org/r/fb20b7159dc4d028c406c0e8d5f8c439b741615b.1466184592.git.jpoim...@redhat.com
Signed-off-by: Ingo Molnar <[email protected]>
---
 kernel/sched/fair.c | 142 +++++++++++++++++++++++++++-------------------------
 1 file changed, 73 insertions(+), 69 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6011bfe..479639f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -862,11 +862,72 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
        se->statistics.wait_start = 0;
 }
 
+static void
+update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       struct task_struct *tsk = NULL;
+
+       if (entity_is_task(se))
+               tsk = task_of(se);
+
+       if (se->statistics.sleep_start) {
+               u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
+
+               if ((s64)delta < 0)
+                       delta = 0;
+
+               if (unlikely(delta > se->statistics.sleep_max))
+                       se->statistics.sleep_max = delta;
+
+               se->statistics.sleep_start = 0;
+               se->statistics.sum_sleep_runtime += delta;
+
+               if (tsk) {
+                       account_scheduler_latency(tsk, delta >> 10, 1);
+                       trace_sched_stat_sleep(tsk, delta);
+               }
+       }
+       if (se->statistics.block_start) {
+               u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
+
+               if ((s64)delta < 0)
+                       delta = 0;
+
+               if (unlikely(delta > se->statistics.block_max))
+                       se->statistics.block_max = delta;
+
+               se->statistics.block_start = 0;
+               se->statistics.sum_sleep_runtime += delta;
+
+               if (tsk) {
+                       if (tsk->in_iowait) {
+                               se->statistics.iowait_sum += delta;
+                               se->statistics.iowait_count++;
+                               trace_sched_stat_iowait(tsk, delta);
+                       }
+
+                       trace_sched_stat_blocked(tsk, delta);
+
+                       /*
+                        * Blocking time is in units of nanosecs, so shift by
+                        * 20 to get a milliseconds-range estimation of the
+                        * amount of time that the task spent sleeping:
+                        */
+                       if (unlikely(prof_on == SLEEP_PROFILING)) {
+                               profile_hits(SLEEP_PROFILING,
+                                               (void *)get_wchan(tsk),
+                                               delta >> 20);
+                       }
+                       account_scheduler_latency(tsk, delta >> 10, 0);
+               }
+       }
+}
+
 /*
  * Task is being enqueued - update stats:
  */
 static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
        /*
         * Are we enqueueing a waiting task? (for current tasks
@@ -874,6 +935,9 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
+
+       if (flags & ENQUEUE_WAKEUP)
+               update_stats_enqueue_sleeper(cfs_rq, se);
 }
 
 static inline void
@@ -910,7 +974,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 }
 
@@ -3197,68 +3266,6 @@ static inline int idle_balance(struct rq *rq)
 
 #endif /* CONFIG_SMP */
 
-static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-#ifdef CONFIG_SCHEDSTATS
-       struct task_struct *tsk = NULL;
-
-       if (entity_is_task(se))
-               tsk = task_of(se);
-
-       if (se->statistics.sleep_start) {
-               u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
-
-               if ((s64)delta < 0)
-                       delta = 0;
-
-               if (unlikely(delta > se->statistics.sleep_max))
-                       se->statistics.sleep_max = delta;
-
-               se->statistics.sleep_start = 0;
-               se->statistics.sum_sleep_runtime += delta;
-
-               if (tsk) {
-                       account_scheduler_latency(tsk, delta >> 10, 1);
-                       trace_sched_stat_sleep(tsk, delta);
-               }
-       }
-       if (se->statistics.block_start) {
-               u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
-
-               if ((s64)delta < 0)
-                       delta = 0;
-
-               if (unlikely(delta > se->statistics.block_max))
-                       se->statistics.block_max = delta;
-
-               se->statistics.block_start = 0;
-               se->statistics.sum_sleep_runtime += delta;
-
-               if (tsk) {
-                       if (tsk->in_iowait) {
-                               se->statistics.iowait_sum += delta;
-                               se->statistics.iowait_count++;
-                               trace_sched_stat_iowait(tsk, delta);
-                       }
-
-                       trace_sched_stat_blocked(tsk, delta);
-
-                       /*
-                        * Blocking time is in units of nanosecs, so shift by
-                        * 20 to get a milliseconds-range estimation of the
-                        * amount of time that the task spent sleeping:
-                        */
-                       if (unlikely(prof_on == SLEEP_PROFILING)) {
-                               profile_hits(SLEEP_PROFILING,
-                                               (void *)get_wchan(tsk),
-                                               delta >> 20);
-                       }
-                       account_scheduler_latency(tsk, delta >> 10, 0);
-               }
-       }
-#endif
-}
-
 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHED_DEBUG
@@ -3385,15 +3392,12 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        account_entity_enqueue(cfs_rq, se);
        update_cfs_shares(cfs_rq);
 
-       if (flags & ENQUEUE_WAKEUP) {
+       if (flags & ENQUEUE_WAKEUP)
                place_entity(cfs_rq, se, 0);
-               if (schedstat_enabled())
-                       enqueue_sleeper(cfs_rq, se);
-       }
 
        check_schedstat_required();
        if (schedstat_enabled()) {
-               update_stats_enqueue(cfs_rq, se);
+               update_stats_enqueue(cfs_rq, se, flags);
                check_spread(cfs_rq, se);
        }
        if (!curr)
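
As an aside on the unit conversions above: delta is in nanoseconds, so
the shifts are cheap power-of-two approximations of division; >> 10
divides by 1024 (roughly microseconds, as passed to
account_scheduler_latency()), and >> 20 divides by 1048576 (roughly
milliseconds, as passed to profile_hits()).  A minimal userspace
sketch, just to illustrate the arithmetic (not part of the patch):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint64_t delta = 1500000000ULL;  /* 1.5s of blocking, in ns */

          printf("~us: %llu\n", (unsigned long long)(delta >> 10));  /* 1464843 (exact: 1500000) */
          printf("~ms: %llu\n", (unsigned long long)(delta >> 20));  /* 1430 (exact: 1500) */
          return 0;
  }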
