Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b2be5e96dc0b5a179cf4cb98e65cfb605752ca26
Commit:     b2be5e96dc0b5a179cf4cb98e65cfb605752ca26
Parent:     2cb8600e6be4281e381d39e44de4359e46333e23
Author:     Peter Zijlstra <[EMAIL PROTECTED]>
AuthorDate: Fri Nov 9 22:39:37 2007 +0100
Committer:  Ingo Molnar <[EMAIL PROTECTED]>
CommitDate: Fri Nov 9 22:39:37 2007 +0100

    sched: reintroduce the sched_min_granularity tunable
    
    We lost the sched_min_granularity tunable to a clever optimization
    that uses the sched_latency/min_granularity ratio - but the ratio
    is quite unintuitive to users and can also crash the kernel if it
    is set to 0. So reintroduce the min_granularity tunable, while
    keeping the ratio maintained internally.
    
    No functional change.
    
    [ [EMAIL PROTECTED]: some fixlets. ]
    
    Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
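
For reference, the ratio kept internally is just the latency target divided
by the minimum granularity, rounded up. The following stand-alone sketch is
not kernel code; the div_round_up() helper only mirrors the kernel's
DIV_ROUND_UP macro, and the values are the defaults used in the patch:

/* Sketch only: the same arithmetic the patch uses to derive
 * sched_nr_latency from the two tunables, done in user space. */
#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_UP macro. */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        unsigned int sysctl_sched_latency = 20000000;        /* 20 ms, in ns */
        unsigned int sysctl_sched_min_granularity = 1000000; /*  1 ms, in ns */

        unsigned int sched_nr_latency =
                div_round_up(sysctl_sched_latency, sysctl_sched_min_granularity);

        printf("sched_nr_latency = %u\n", sched_nr_latency); /* prints 20 */
        return 0;
}

With the 20 ms / 1 ms defaults this yields 20, matching the sched_nr_latency
value the patch keeps.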
---
 include/linux/sched.h |    6 +++++-
 kernel/sched_debug.c  |    2 +-
 kernel/sched_fair.c   |   35 ++++++++++++++++++++++++++++-------
 kernel/sysctl.c       |   11 +++++++----
 4 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 155d743..5457b62 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1460,12 +1460,16 @@ extern void sched_idle_next(void);
 
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_nr_latency;
+extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
+
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+               struct file *file, void __user *buffer, size_t *length,
+               loff_t *ppos);
 #endif
 
 extern unsigned int sysctl_sched_compat_yield;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 415e5c3..ca198a7 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -211,7 +211,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
-       PN(sysctl_sched_nr_latency);
+       PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        PN(sysctl_sched_batch_wakeup_granularity);
        PN(sysctl_sched_child_runs_first);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8763bee..c495dcf 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,16 +35,21 @@
 const_debug unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
+ * Minimal preemption granularity for CPU-bound tasks:
+ * (default: 1 msec, units: nanoseconds)
  */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+const_debug unsigned int sysctl_sched_min_granularity = 1000000ULL;
 
 /*
- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-const_debug unsigned int sysctl_sched_nr_latency = 20;
+const_debug unsigned int sched_nr_latency = 20;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * sys_sched_yield() compat mode
@@ -212,6 +217,22 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+#ifdef CONFIG_SCHED_DEBUG
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+               struct file *filp, void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+       if (ret || !write)
+               return ret;
+
+       sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
+                                       sysctl_sched_min_granularity);
+
+       return 0;
+}
+#endif
 
 /*
  * The idea is to set a period in which each task runs once.
@@ -224,7 +245,7 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 static u64 __sched_period(unsigned long nr_running)
 {
        u64 period = sysctl_sched_latency;
-       unsigned long nr_latency = sysctl_sched_nr_latency;
+       unsigned long nr_latency = sched_nr_latency;
 
        if (unlikely(nr_running > nr_latency)) {
                period *= nr_running;
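
To make the arithmetic above concrete, here is a rough user-space rendering
of the period calculation (the tail of __sched_period() is outside the hunk;
the sketch assumes it divides the scaled period by nr_latency, which is what
keeps each task's slice at the minimum granularity):

/* Sketch of the period calculation above; assumes the elided tail of
 * __sched_period() divides the scaled period by nr_latency. */
#include <stdio.h>

static unsigned long long sched_period(unsigned long nr_running)
{
        unsigned long long period = 20000000ULL; /* sysctl_sched_latency: 20 ms */
        unsigned long nr_latency = 20;           /* default sched_nr_latency */

        if (nr_running > nr_latency) {
                period *= nr_running;
                period /= nr_latency;            /* stands in for do_div() */
        }
        return period;
}

int main(void)
{
        printf("10 tasks: %llu ns\n", sched_period(10)); /* 20 ms, unchanged */
        printf("40 tasks: %llu ns\n", sched_period(40)); /* 40 ms, 1 ms per task */
        return 0;
}

With the defaults, 10 runnable tasks share the 20 ms latency period unchanged,
while 40 tasks stretch it to 40 ms so each still gets the 1 ms minimum
granularity.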
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3b4efbe..6e3b63c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -235,11 +235,14 @@ static struct ctl_table kern_table[] = {
 #ifdef CONFIG_SCHED_DEBUG
        {
                .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_nr_latency",
-               .data           = &sysctl_sched_nr_latency,
+               .procname       = "sched_min_granularity_ns",
+               .data           = &sysctl_sched_min_granularity,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
+               .proc_handler   = &sched_nr_latency_handler,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &min_sched_granularity_ns,
+               .extra2         = &max_sched_granularity_ns,
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
@@ -247,7 +250,7 @@ static struct ctl_table kern_table[] = {
                .data           = &sysctl_sched_latency,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
+               .proc_handler   = &sched_nr_latency_handler,
                .strategy       = &sysctl_intvec,
                .extra1         = &min_sched_granularity_ns,
                .extra2         = &max_sched_granularity_ns,
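
Both tunables sit under /proc/sys/kernel/ via the kern_table above, and writes
to either one now go through sched_nr_latency_handler(), so the internal ratio
is refreshed whichever knob is touched. A small illustrative snippet follows
(the 2 ms value is arbitrary and root privileges are assumed):

/* Illustrative only: bump the minimum granularity to 2 ms at run time.
 * The file name matches the procname added above; needs root. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/sched_min_granularity_ns", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Nanoseconds; the write handler recomputes sched_nr_latency. */
        fprintf(f, "%u\n", 2000000u);
        return fclose(f) ? 1 : 0;
}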