Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=f6cf891c4d7128f9f91243fc0b9ce99e10fa1586
Commit:     f6cf891c4d7128f9f91243fc0b9ce99e10fa1586
Parent:     5f01d519e60a6ca1a7d9be9f2d73c5f521383992
Author:     Ingo Molnar <[EMAIL PROTECTED]>
AuthorDate: Tue Aug 28 12:53:24 2007 +0200
Committer:  Ingo Molnar <[EMAIL PROTECTED]>
CommitDate: Tue Aug 28 12:53:24 2007 +0200

    sched: make the scheduler converge to the ideal latency
    
    de-HZ-ification of the granularity defaults unearthed a pre-existing
    property of CFS: while it correctly converges to the granularity goal,
    it does not prevent run-time fluctuations in the range of
    [-gran ... 0 ... +gran].
    
    With the increase of the granularity due to the removal of HZ
    dependencies, this becomes visible in chew-max output (with 5 tasks
    running):
    
     out:  28 . 27. 32 | flu:  0 .  0 | ran:    9 .   13 | per:   37 .   40
     out:  27 . 27. 32 | flu:  0 .  0 | ran:   17 .   13 | per:   44 .   40
     out:  27 . 27. 32 | flu:  0 .  0 | ran:    9 .   13 | per:   36 .   40
     out:  29 . 27. 32 | flu:  2 .  0 | ran:   17 .   13 | per:   46 .   40
     out:  28 . 27. 32 | flu:  0 .  0 | ran:    9 .   13 | per:   37 .   40
     out:  29 . 27. 32 | flu:  0 .  0 | ran:   18 .   13 | per:   47 .   40
     out:  28 . 27. 32 | flu:  0 .  0 | ran:    9 .   13 | per:   37 .   40
    
    the average slice is the ideal 13 msecs and the period is a
    picture-perfect 40 msecs. But the 'ran' field fluctuates around
    13.33 msecs, and there is no mechanism in CFS to keep that from
    happening: it is a perfectly valid solution that CFS finds.
    
    to fix this we add a granularity/preemption rule that knows about
    the "target latency": tasks that run longer than the ideal latency
    are made to run a bit less. The simplest approach is to decrease the
    preemption granularity when a task overruns its ideal latency. For
    this we have to track how much the task has executed since its last
    preemption.
    
    ( this adds a new field to task_struct, but we can eliminate that
      overhead in 2.6.24 by putting all the scheduler timestamps into an
      anonymous union. )
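
    ( to make the rule concrete, below is a stand-alone user-space
      sketch of it. The sum_exec_runtime/prev_sum_exec_runtime fields
      mirror the ones this patch touches, but tick_granularity() and
      all the numbers are hypothetical, purely illustrative; the real
      entity_tick() code in the patch below additionally scales the
      ideal slice by the nice level via niced_granularity() and clamps
      it to sysctl_sched_min_granularity: )

     #include <stdio.h>

     struct sched_entity {
             unsigned long long sum_exec_runtime;      /* total ns run */
             unsigned long long prev_sum_exec_runtime; /* at preemption */
     };

     /*
      * granularity to use at the next preemption check: if the task
      * already ran past its ideal slice, drop the granularity to 0 so
      * the task is preempted right away and overruns cannot accumulate.
      */
     static unsigned long long
     tick_granularity(const struct sched_entity *curr,
                      unsigned long long gran, unsigned long long ideal)
     {
             unsigned long long delta_exec = curr->sum_exec_runtime -
                                             curr->prev_sum_exec_runtime;

             return delta_exec > ideal ? 0 : gran;
     }

     int main(void)
     {
             /* e.g. 40 msec latency / 5 tasks = 8 msec ideal slice */
             unsigned long long ideal = 8000000ULL, gran = 2000000ULL;
             struct sched_entity se = {
                     .sum_exec_runtime      = 18000000ULL, /* 14 msecs */
                     .prev_sum_exec_runtime =  4000000ULL,
             };

             /* prints 0: the overrun forces immediate preemption */
             printf("gran: %llu ns\n",
                    tick_granularity(&se, gran, ideal));
             return 0;
     }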
    
    with this change in place, chew-max output is fluctuation-less all
    around:
    
     out:  28 . 27. 39 | flu:  0 .  2 | ran:   13 .   13 | per:   41 .   40
     out:  28 . 27. 39 | flu:  0 .  2 | ran:   13 .   13 | per:   41 .   40
     out:  28 . 27. 39 | flu:  0 .  2 | ran:   13 .   13 | per:   41 .   40
     out:  28 . 27. 39 | flu:  0 .  2 | ran:   13 .   13 | per:   41 .   40
     out:  28 . 27. 39 | flu:  0 .  1 | ran:   13 .   13 | per:   41 .   40
     out:  28 . 27. 39 | flu:  0 .  1 | ran:   13 .   13 | per:   41 .   40
    
    this patch has no impact on any fastpath or on any globally observable
    scheduling property. (unless you have sharp enough eyes to see
    millisecond-level wrinkles in glxgears smoothness :-)
    
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
    Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
    Signed-off-by: Mike Galbraith <[EMAIL PROTECTED]>
---
 include/linux/sched.h |    1 +
 kernel/sched.c        |    1 +
 kernel/sched_fair.c   |   26 ++++++++++++++++++++++----
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index bd6a032..f4e324e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -904,6 +904,7 @@ struct sched_entity {
 
        u64                     exec_start;
        u64                     sum_exec_runtime;
+       u64                     prev_sum_exec_runtime;
        u64                     wait_start_fair;
        u64                     sleep_start_fair;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 9fe473a..b533d6d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1587,6 +1587,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.wait_start_fair           = 0;
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
+       p->se.prev_sum_exec_runtime     = 0;
        p->se.delta_exec                = 0;
        p->se.delta_fair_run            = 0;
        p->se.delta_fair_sleep          = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9f53d49..721fe77 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -668,7 +668,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void
+static int
 __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          struct sched_entity *curr, unsigned long granularity)
 {
@@ -679,8 +679,11 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
         * preempt the current task unless the best task has
         * a larger than sched_granularity fairness advantage:
         */
-       if (__delta > niced_granularity(curr, granularity))
+       if (__delta > niced_granularity(curr, granularity)) {
                resched_task(rq_of(cfs_rq)->curr);
+               return 1;
+       }
+       return 0;
 }
 
 static inline void
@@ -725,6 +728,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
+       unsigned long gran, ideal_runtime, delta_exec;
        struct sched_entity *next;
 
        /*
@@ -741,8 +745,22 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        if (next == curr)
                return;
 
-       __check_preempt_curr_fair(cfs_rq, next, curr,
-                       sched_granularity(cfs_rq));
+       gran = sched_granularity(cfs_rq);
+       ideal_runtime = niced_granularity(curr,
+               max(sysctl_sched_latency / cfs_rq->nr_running,
+                   (unsigned long)sysctl_sched_min_granularity));
+       /*
+        * If we executed more than what the latency constraint suggests,
+        * reduce the rescheduling granularity. This way the total latency
+        * of how much a task is not scheduled converges to
+        * sysctl_sched_latency:
+        */
+       delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+       if (delta_exec > ideal_runtime)
+               gran = 0;
+
+       if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
+               curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
 }
 
 /**************************************************