Clean up the sched code by removing several of the CONFIG_SCHEDSTATS
guards, using schedstat_*() macros where needed.
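
For reference, the schedstat_*() accessors compile away when
CONFIG_SCHEDSTATS is disabled, so the converted call sites add no overhead
to the !CONFIG_SCHEDSTATS build (the identical "before"/"after" text sizes
below confirm this).  A rough sketch of the pattern, illustrative only and
not the exact kernel/sched/stats.h definitions:

  #ifdef CONFIG_SCHEDSTATS
  /*
   * sched_schedstats is the static key behind the runtime schedstats
   * toggle; the write accessors only touch the fields when it is on.
   */
  #define schedstat_enabled()           static_branch_unlikely(&sched_schedstats)
  #define schedstat_inc(var)            do { if (schedstat_enabled()) { var++; } } while (0)
  #define schedstat_add(var, amt)       do { if (schedstat_enabled()) { var += (amt); } } while (0)
  #define schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
  #define schedstat_val(var)            (var)
  #else /* !CONFIG_SCHEDSTATS */
  #define schedstat_enabled()           0
  #define schedstat_inc(var)            do { } while (0)
  #define schedstat_add(var, amt)       do { } while (0)
  #define schedstat_set(var, val)       do { } while (0)
  #define schedstat_val(var)            0
  #endif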

Code size:

  !CONFIG_SCHEDSTATS defconfig:

      text         data     bss      dec            hex filename
  10209818      4368184 1105920 15683922         ef5152 vmlinux.before.nostats
  10209818      4368184 1105920 15683922         ef5152 vmlinux.after.nostats

  CONFIG_SCHEDSTATS defconfig:

      text         data     bss      dec            hex filename
  10214210      4370040 1105920 15690170         ef69ba vmlinux.before.stats
  10214210      4370680 1105920 15690810         ef6c3a vmlinux.after.stats

Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
---
 kernel/sched/core.c  |  29 +++++-----
 kernel/sched/debug.c |  99 ++++++++++++++++++----------------
 kernel/sched/fair.c  | 148 ++++++++++++++++++++++++---------------------------
 3 files changed, 136 insertions(+), 140 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 017a4f5..0aee5dd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1626,10 +1626,15 @@ static inline int __set_cpus_allowed_ptr(struct task_struct *p,
 static void
 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 {
-#ifdef CONFIG_SCHEDSTATS
-       struct rq *rq = this_rq();
+       struct rq *rq;
+
+       if (!schedstat_enabled())
+               return;
+
+       rq = this_rq();
 
 #ifdef CONFIG_SMP
+{
        int this_cpu = smp_processor_id();
 
        if (cpu == this_cpu) {
@@ -1651,7 +1656,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 
        if (wake_flags & WF_MIGRATED)
                schedstat_inc(p->se.statistics.nr_wakeups_migrate);
-
+}
 #endif /* CONFIG_SMP */
 
        schedstat_inc(rq->ttwu_count);
@@ -1659,8 +1664,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 
        if (wake_flags & WF_SYNC)
                schedstat_inc(p->se.statistics.nr_wakeups_sync);
-
-#endif /* CONFIG_SCHEDSTATS */
 }
 
 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
@@ -2059,8 +2062,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        ttwu_queue(p, cpu, wake_flags);
 stat:
-       if (schedstat_enabled())
-               ttwu_stat(p, cpu, wake_flags);
+       ttwu_stat(p, cpu, wake_flags);
 out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -2108,8 +2110,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0, cookie);
-       if (schedstat_enabled())
-               ttwu_stat(p, smp_processor_id(), 0);
+       ttwu_stat(p, smp_processor_id(), 0);
 out:
        raw_spin_unlock(&p->pi_lock);
 }
@@ -7588,12 +7589,10 @@ void normalize_rt_tasks(void)
                if (p->flags & PF_KTHREAD)
                        continue;
 
-               p->se.exec_start                = 0;
-#ifdef CONFIG_SCHEDSTATS
-               p->se.statistics.wait_start     = 0;
-               p->se.statistics.sleep_start    = 0;
-               p->se.statistics.block_start    = 0;
-#endif
+               p->se.exec_start = 0;
+               schedstat_set(p->se.statistics.wait_start,  0);
+               schedstat_set(p->se.statistics.sleep_start, 0);
+               schedstat_set(p->se.statistics.block_start, 0);
 
                if (!dl_task(p) && !rt_task(p)) {
                        /*
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 63ffcaa..1393588 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -369,8 +369,12 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 
 #define P(F) \
        SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
+#define P_SCHEDSTAT(F) \
+       SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
 #define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN_SCHEDSTAT(F) \
+       SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
 
        if (!se)
                return;
@@ -378,26 +382,27 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
-#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
-               PN(se->statistics.wait_start);
-               PN(se->statistics.sleep_start);
-               PN(se->statistics.block_start);
-               PN(se->statistics.sleep_max);
-               PN(se->statistics.block_max);
-               PN(se->statistics.exec_max);
-               PN(se->statistics.slice_max);
-               PN(se->statistics.wait_max);
-               PN(se->statistics.wait_sum);
-               P(se->statistics.wait_count);
+               PN_SCHEDSTAT(se->statistics.wait_start);
+               PN_SCHEDSTAT(se->statistics.sleep_start);
+               PN_SCHEDSTAT(se->statistics.block_start);
+               PN_SCHEDSTAT(se->statistics.sleep_max);
+               PN_SCHEDSTAT(se->statistics.block_max);
+               PN_SCHEDSTAT(se->statistics.exec_max);
+               PN_SCHEDSTAT(se->statistics.slice_max);
+               PN_SCHEDSTAT(se->statistics.wait_max);
+               PN_SCHEDSTAT(se->statistics.wait_sum);
+               P_SCHEDSTAT(se->statistics.wait_count);
        }
-#endif
        P(se->load.weight);
 #ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
 #endif
+
+#undef PN_SCHEDSTAT
 #undef PN
+#undef P_SCHEDSTAT
 #undef P
 }
 #endif
@@ -626,9 +631,7 @@ do {                                                        \
 #undef P64
 #endif
 
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
-
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
@@ -636,9 +639,8 @@ do {                                                        \
                P(ttwu_count);
                P(ttwu_local);
        }
-
 #undef P
-#endif
+
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
@@ -868,10 +870,14 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
 #define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
+#define P_SCHEDSTAT(F) \
+       SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
 #define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
 #define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
+#define PN_SCHEDSTAT(F) \
+       SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
 
        PN(se.exec_start);
        PN(se.vruntime);
@@ -881,37 +887,36 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
        P(se.nr_migrations);
 
-#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;
 
-               PN(se.statistics.sum_sleep_runtime);
-               PN(se.statistics.wait_start);
-               PN(se.statistics.sleep_start);
-               PN(se.statistics.block_start);
-               PN(se.statistics.sleep_max);
-               PN(se.statistics.block_max);
-               PN(se.statistics.exec_max);
-               PN(se.statistics.slice_max);
-               PN(se.statistics.wait_max);
-               PN(se.statistics.wait_sum);
-               P(se.statistics.wait_count);
-               PN(se.statistics.iowait_sum);
-               P(se.statistics.iowait_count);
-               P(se.statistics.nr_migrations_cold);
-               P(se.statistics.nr_failed_migrations_affine);
-               P(se.statistics.nr_failed_migrations_running);
-               P(se.statistics.nr_failed_migrations_hot);
-               P(se.statistics.nr_forced_migrations);
-               P(se.statistics.nr_wakeups);
-               P(se.statistics.nr_wakeups_sync);
-               P(se.statistics.nr_wakeups_migrate);
-               P(se.statistics.nr_wakeups_local);
-               P(se.statistics.nr_wakeups_remote);
-               P(se.statistics.nr_wakeups_affine);
-               P(se.statistics.nr_wakeups_affine_attempts);
-               P(se.statistics.nr_wakeups_passive);
-               P(se.statistics.nr_wakeups_idle);
+               PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
+               PN_SCHEDSTAT(se.statistics.wait_start);
+               PN_SCHEDSTAT(se.statistics.sleep_start);
+               PN_SCHEDSTAT(se.statistics.block_start);
+               PN_SCHEDSTAT(se.statistics.sleep_max);
+               PN_SCHEDSTAT(se.statistics.block_max);
+               PN_SCHEDSTAT(se.statistics.exec_max);
+               PN_SCHEDSTAT(se.statistics.slice_max);
+               PN_SCHEDSTAT(se.statistics.wait_max);
+               PN_SCHEDSTAT(se.statistics.wait_sum);
+               P_SCHEDSTAT(se.statistics.wait_count);
+               PN_SCHEDSTAT(se.statistics.iowait_sum);
+               P_SCHEDSTAT(se.statistics.iowait_count);
+               P_SCHEDSTAT(se.statistics.nr_migrations_cold);
+               P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
+               P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
+               P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
+               P_SCHEDSTAT(se.statistics.nr_forced_migrations);
+               P_SCHEDSTAT(se.statistics.nr_wakeups);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_local);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
+               P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
 
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
@@ -930,7 +935,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
-#endif
+
        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
@@ -947,8 +952,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #endif
        P(policy);
        P(prio);
+#undef PN_SCHEDSTAT
 #undef PN
 #undef __PN
+#undef P_SCHEDSTAT
 #undef P
 #undef __P
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7257066..ae70600 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -789,26 +789,34 @@ static void update_curr_fair(struct rq *rq)
        update_curr(cfs_rq_of(&rq->curr->se));
 }
 
-#ifdef CONFIG_SCHEDSTATS
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       u64 wait_start = rq_clock(rq_of(cfs_rq));
+       u64 wait_start, prev_wait_start;
+
+       if (!schedstat_enabled())
+               return;
+
+       wait_start = rq_clock(rq_of(cfs_rq));
+       prev_wait_start = schedstat_val(se->statistics.wait_start);
 
        if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
-           likely(wait_start > se->statistics.wait_start))
-               wait_start -= se->statistics.wait_start;
+           likely(wait_start > prev_wait_start))
+               wait_start -= prev_wait_start;
 
-       se->statistics.wait_start = wait_start;
+       schedstat_set(se->statistics.wait_start, wait_start);
 }
 
-static void
+static inline void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        struct task_struct *p;
        u64 delta;
 
-       delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+       if (!schedstat_enabled())
+               return;
+
+       delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
 
        if (entity_is_task(se)) {
                p = task_of(se);
@@ -818,59 +826,67 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
                         * time stamp can be adjusted to accumulate wait time
                         * prior to migration.
                         */
-                       se->statistics.wait_start = delta;
+                       schedstat_set(se->statistics.wait_start, delta);
                        return;
                }
                trace_sched_stat_wait(p, delta);
        }
 
-       se->statistics.wait_max = max(se->statistics.wait_max, delta);
-       se->statistics.wait_count++;
-       se->statistics.wait_sum += delta;
-       se->statistics.wait_start = 0;
+       schedstat_set(se->statistics.wait_max,
+                     max(schedstat_val(se->statistics.wait_max), delta));
+       schedstat_inc(se->statistics.wait_count);
+       schedstat_add(se->statistics.wait_sum, delta);
+       schedstat_set(se->statistics.wait_start, 0);
 }
 
-static void
+static inline void
 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        struct task_struct *tsk = NULL;
+       u64 sleep_start, block_start;
+
+       if (!schedstat_enabled())
+               return;
+
+       sleep_start = schedstat_val(se->statistics.sleep_start);
+       block_start = schedstat_val(se->statistics.block_start);
 
        if (entity_is_task(se))
                tsk = task_of(se);
 
-       if (se->statistics.sleep_start) {
-               u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
+       if (sleep_start) {
+               u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
 
                if ((s64)delta < 0)
                        delta = 0;
 
-               if (unlikely(delta > se->statistics.sleep_max))
-                       se->statistics.sleep_max = delta;
+               if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
+                       schedstat_set(se->statistics.sleep_max, delta);
 
-               se->statistics.sleep_start = 0;
-               se->statistics.sum_sleep_runtime += delta;
+               schedstat_set(se->statistics.sleep_start, 0);
+               schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
                        trace_sched_stat_sleep(tsk, delta);
                }
        }
-       if (se->statistics.block_start) {
-               u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
+       if (block_start) {
+               u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
 
                if ((s64)delta < 0)
                        delta = 0;
 
-               if (unlikely(delta > se->statistics.block_max))
-                       se->statistics.block_max = delta;
+               if (unlikely(delta > schedstat_val(se->statistics.block_max)))
+                       schedstat_set(se->statistics.block_max, delta);
 
-               se->statistics.block_start = 0;
-               se->statistics.sum_sleep_runtime += delta;
+               schedstat_set(se->statistics.block_start, 0);
+               schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
                if (tsk) {
                        if (tsk->in_iowait) {
-                               se->statistics.iowait_sum += delta;
-                               se->statistics.iowait_count++;
+                               schedstat_add(se->statistics.iowait_sum, delta);
+                               schedstat_inc(se->statistics.iowait_count);
                                trace_sched_stat_iowait(tsk, delta);
                        }
 
@@ -897,6 +913,9 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+       if (!schedstat_enabled())
+               return;
+
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
@@ -911,6 +930,10 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+
+       if (!schedstat_enabled())
+               return;
+
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
@@ -918,45 +941,18 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
 
-       if (flags & DEQUEUE_SLEEP) {
-               if (entity_is_task(se)) {
-                       struct task_struct *tsk = task_of(se);
+       if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
+               struct task_struct *tsk = task_of(se);
 
-                       if (tsk->state & TASK_INTERRUPTIBLE)
-                               se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
-                       if (tsk->state & TASK_UNINTERRUPTIBLE)
-                               se->statistics.block_start = rq_clock(rq_of(cfs_rq));
-               }
+               if (tsk->state & TASK_INTERRUPTIBLE)
+                       schedstat_set(se->statistics.sleep_start,
+                                     rq_clock(rq_of(cfs_rq)));
+               if (tsk->state & TASK_UNINTERRUPTIBLE)
+                       schedstat_set(se->statistics.block_start,
+                                     rq_clock(rq_of(cfs_rq)));
        }
-
-}
-#else
-static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
 }
 
-static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-{
-}
-
-static inline void
-update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-{
-}
-#endif
-
 /*
  * We are picking a new current task - update its stats:
  */
@@ -3298,10 +3294,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                place_entity(cfs_rq, se, 0);
 
        check_schedstat_required();
-       if (schedstat_enabled()) {
-               update_stats_enqueue(cfs_rq, se, flags);
-               check_spread(cfs_rq, se);
-       }
+       update_stats_enqueue(cfs_rq, se, flags);
+       check_spread(cfs_rq, se);
        if (!curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;
@@ -3368,8 +3362,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        update_curr(cfs_rq);
        dequeue_entity_load_avg(cfs_rq, se);
 
-       if (schedstat_enabled())
-               update_stats_dequeue(cfs_rq, se, flags);
+       update_stats_dequeue(cfs_rq, se, flags);
 
        clear_buddies(cfs_rq, se);
 
@@ -3443,25 +3436,25 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
-               if (schedstat_enabled())
-                       update_stats_wait_end(cfs_rq, se);
+               update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
                update_load_avg(se, 1);
        }
 
        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
-#ifdef CONFIG_SCHEDSTATS
+
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. dont track it
         * when there are only lesser-weight tasks around):
         */
        if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-               se->statistics.slice_max = max(se->statistics.slice_max,
-                       se->sum_exec_runtime - se->prev_sum_exec_runtime);
+               schedstat_set(se->statistics.slice_max,
+                       max((u64)schedstat_val(se->statistics.slice_max),
+                           se->sum_exec_runtime - se->prev_sum_exec_runtime));
        }
-#endif
+
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
@@ -3540,13 +3533,10 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
        /* throttle cfs_rqs exceeding runtime */
        check_cfs_rq_runtime(cfs_rq);
 
-       if (schedstat_enabled()) {
-               check_spread(cfs_rq, prev);
-               if (prev->on_rq)
-                       update_stats_wait_start(cfs_rq, prev);
-       }
+       check_spread(cfs_rq, prev);
 
        if (prev->on_rq) {
+               update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
                /* in !on_rq case, update occurred at dequeue */
-- 
2.4.11
