Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=6cfb0d5d06bea2b8791f32145eae539d524e5f6c
Commit:     6cfb0d5d06bea2b8791f32145eae539d524e5f6c
Parent:     8179ca23d513717cc5e3dc81a1ffe01af0955468
Author:     Ingo Molnar <[EMAIL PROTECTED]>
AuthorDate: Thu Aug 2 17:41:40 2007 +0200
Committer:  Ingo Molnar <[EMAIL PROTECTED]>
CommitDate: Thu Aug 2 17:41:40 2007 +0200

    [PATCH] sched: reduce debug code
    
    move the rest of the debugging/instrumentation code under
    CONFIG_SCHEDSTATS as well. This reduces code size and speeds up the code:
    
        text    data     bss     dec     hex filename
       33044    4122      28   37194    914a sched.o.before
       32708    4122      28   36858    8ffa sched.o.after
    
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
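
    (The text/data/bss columns above are the standard output format of the
    binutils size(1) utility, presumably run on the built scheduler object
    file before and after the change, e.g. "size kernel/sched.o"; dec is the
    decimal total of text + data + bss, and hex is that same total in
    hexadecimal.)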
---
 kernel/sched.c       |   28 ++++++++++++++++++----------
 kernel/sched_debug.c |   22 ++++++++++++++++------
 kernel/sched_fair.c  |    4 ++--
 kernel/sched_rt.c    |    4 ++--
 4 files changed, 38 insertions(+), 20 deletions(-)
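
For reference, the schedstat_set() helper used in the sched_fair.c and
sched_rt.c hunks below is conventionally defined so that the assignment
disappears entirely when CONFIG_SCHEDSTATS is disabled. A minimal sketch of
such a helper (paraphrased, not the verbatim kernel definition):

    #ifdef CONFIG_SCHEDSTATS
    /* schedstats enabled: perform the assignment */
    # define schedstat_set(var, val)   do { (var) = (val); } while (0)
    #else
    /* schedstats disabled: expands to a no-op, so neither "var" nor "val"
     * is ever evaluated */
    # define schedstat_set(var, val)   do { } while (0)
    #endif

Because the disabled variant never evaluates its arguments, call sites such
as update_stats_wait_start() need no #ifdef of their own; together with the
new #ifdef CONFIG_SCHEDSTATS blocks in sched.c, this is presumably what
produces the smaller sched.o in the size comparison above.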

diff --git a/kernel/sched.c b/kernel/sched.c
index a9d3740..72bb948 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -983,18 +983,21 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        u64 clock_offset, fair_clock_offset;
 
        clock_offset = old_rq->clock - new_rq->clock;
-       fair_clock_offset = old_rq->cfs.fair_clock -
-                                                new_rq->cfs.fair_clock;
-       if (p->se.wait_start)
-               p->se.wait_start -= clock_offset;
+       fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
+
        if (p->se.wait_start_fair)
                p->se.wait_start_fair -= fair_clock_offset;
+       if (p->se.sleep_start_fair)
+               p->se.sleep_start_fair -= fair_clock_offset;
+
+#ifdef CONFIG_SCHEDSTATS
+       if (p->se.wait_start)
+               p->se.wait_start -= clock_offset;
        if (p->se.sleep_start)
                p->se.sleep_start -= clock_offset;
        if (p->se.block_start)
                p->se.block_start -= clock_offset;
-       if (p->se.sleep_start_fair)
-               p->se.sleep_start_fair -= fair_clock_offset;
+#endif
 
        __set_task_cpu(p, new_cpu);
 }
@@ -1555,17 +1558,19 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
 static void __sched_fork(struct task_struct *p)
 {
        p->se.wait_start_fair           = 0;
-       p->se.wait_start                = 0;
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
        p->se.delta_exec                = 0;
        p->se.delta_fair_run            = 0;
        p->se.delta_fair_sleep          = 0;
        p->se.wait_runtime              = 0;
+       p->se.sleep_start_fair          = 0;
+
+#ifdef CONFIG_SCHEDSTATS
+       p->se.wait_start                = 0;
        p->se.sum_wait_runtime          = 0;
        p->se.sum_sleep_runtime         = 0;
        p->se.sleep_start               = 0;
-       p->se.sleep_start_fair          = 0;
        p->se.block_start               = 0;
        p->se.sleep_max                 = 0;
        p->se.block_max                 = 0;
@@ -1573,6 +1578,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.wait_max                  = 0;
        p->se.wait_runtime_overruns     = 0;
        p->se.wait_runtime_underruns    = 0;
+#endif
 
        INIT_LIST_HEAD(&p->run_list);
        p->se.on_rq = 0;
@@ -6579,12 +6585,14 @@ void normalize_rt_tasks(void)
        do_each_thread(g, p) {
                p->se.fair_key                  = 0;
                p->se.wait_runtime              = 0;
+               p->se.exec_start                = 0;
                p->se.wait_start_fair           = 0;
+               p->se.sleep_start_fair          = 0;
+#ifdef CONFIG_SCHEDSTATS
                p->se.wait_start                = 0;
-               p->se.exec_start                = 0;
                p->se.sleep_start               = 0;
-               p->se.sleep_start_fair          = 0;
                p->se.block_start               = 0;
+#endif
                task_rq(p)->cfs.fair_clock      = 0;
                task_rq(p)->clock               = 0;
 
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 0eca442..1c61e53 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -44,11 +44,16 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now)
                (long long)p->se.wait_runtime,
                (long long)(p->nvcsw + p->nivcsw),
                p->prio,
+#ifdef CONFIG_SCHEDSTATS
                (long long)p->se.sum_exec_runtime,
                (long long)p->se.sum_wait_runtime,
                (long long)p->se.sum_sleep_runtime,
                (long long)p->se.wait_runtime_overruns,
-               (long long)p->se.wait_runtime_underruns);
+               (long long)p->se.wait_runtime_underruns
+#else
+               0LL, 0LL, 0LL, 0LL, 0LL
+#endif
+       );
 }
 
 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now)
@@ -171,7 +176,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;
 
-       SEQ_printf(m, "Sched Debug Version: v0.05, %s %.*s\n",
+       SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
@@ -235,21 +240,24 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #define P(F) \
        SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
 
-       P(se.wait_start);
+       P(se.wait_runtime);
        P(se.wait_start_fair);
        P(se.exec_start);
-       P(se.sleep_start);
        P(se.sleep_start_fair);
+       P(se.sum_exec_runtime);
+
+#ifdef CONFIG_SCHEDSTATS
+       P(se.wait_start);
+       P(se.sleep_start);
        P(se.block_start);
        P(se.sleep_max);
        P(se.block_max);
        P(se.exec_max);
        P(se.wait_max);
-       P(se.wait_runtime);
        P(se.wait_runtime_overruns);
        P(se.wait_runtime_underruns);
        P(se.sum_wait_runtime);
-       P(se.sum_exec_runtime);
+#endif
        SEQ_printf(m, "%-25s:%20Ld\n",
                   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
        P(se.load.weight);
@@ -269,7 +277,9 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
 void proc_sched_set_task(struct task_struct *p)
 {
+#ifdef CONFIG_SCHEDSTATS
        p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
        p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
+#endif
        p->se.sum_exec_runtime = 0;
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5bf7285..6f579ff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -349,7 +349,7 @@ static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
        se->wait_start_fair = cfs_rq->fair_clock;
-       se->wait_start = now;
+       schedstat_set(se->wait_start, now);
 }
 
 /*
@@ -447,7 +447,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
        }
 
        se->wait_start_fair = 0;
-       se->wait_start = 0;
+       schedstat_set(se->wait_start, 0);
 }
 
 static inline void
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ade20dc..002fcf8 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -18,8 +18,8 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;
-       if (unlikely(delta_exec > curr->se.exec_max))
-               curr->se.exec_max = delta_exec;
+
+       schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = now;
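
The sched_rt.c hunk above also folds the open-coded exec_max update into a
single schedstat_set() call. Below is a standalone, compile-and-run sketch of
the resulting behaviour; the stub struct, the local max() macro and the
CONFIG_SCHEDSTATS toggle are illustrative scaffolding, not kernel code:

    /* Sketch: how the exec_max update in update_curr_rt() behaves with and
     * without schedstats. Only the field and macro names taken from the
     * patch are real; everything else is hypothetical. */
    #include <stdio.h>

    #define CONFIG_SCHEDSTATS 1     /* set to 0 to model !CONFIG_SCHEDSTATS */

    #if CONFIG_SCHEDSTATS
    # define schedstat_set(var, val)   do { (var) = (val); } while (0)
    #else
    # define schedstat_set(var, val)   do { } while (0)  /* args never evaluated */
    #endif

    #define max(a, b) ((a) > (b) ? (a) : (b))

    struct sched_entity_stub {
            unsigned long long exec_max;          /* schedstats-only field */
            unsigned long long sum_exec_runtime;  /* always maintained */
    };

    int main(void)
    {
            struct sched_entity_stub se = { .exec_max = 100, .sum_exec_runtime = 0 };
            unsigned long long delta_exec = 250;

            /* replaces: if (delta_exec > exec_max) exec_max = delta_exec; */
            schedstat_set(se.exec_max, max(se.exec_max, delta_exec));
            se.sum_exec_runtime += delta_exec;

            printf("exec_max=%llu sum_exec_runtime=%llu\n",
                   se.exec_max, se.sum_exec_runtime);
            return 0;
    }

With CONFIG_SCHEDSTATS set to 0, the schedstat_set() line compiles to
nothing, so neither exec_max nor the max() expression is evaluated, while the
sum_exec_runtime accounting the scheduler always needs is left untouched.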