Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=647e7cac2d215fb8890f79252d7eaee3d6743d66
Commit:     647e7cac2d215fb8890f79252d7eaee3d6743d66
Parent:     3a2520157234d58abce89526756a32c272824f3f
Author:     Ingo Molnar <[EMAIL PROTECTED]>
AuthorDate: Mon Oct 15 17:00:13 2007 +0200
Committer:  Ingo Molnar <[EMAIL PROTECTED]>
CommitDate: Mon Oct 15 17:00:13 2007 +0200

    sched: vslice fixups for non-0 nice levels
    
    Make vslice accurate wrt nice levels, and add some comments
    while we're at it.
    
    Signed-off-by: Peter Zijlstra <[EMAIL PROTECTED]>
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
    Reviewed-by: Thomas Gleixner <[EMAIL PROTECTED]>
---
 kernel/sched_fair.c |   53 ++++++++++++++++++++++++++++++++++++++------------
 1 files changed, 40 insertions(+), 13 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 32fd976..1f14b56 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -217,6 +217,15 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+
+/*
+ * The idea is to set a period in which each task runs once.
+ *
+ * When there are more tasks than sysctl_sched_nr_latency we have to stretch
+ * this period, because otherwise the slices get too small.
+ *
+ * p = (nr <= nl) ? l : l*nr/nl
+ */
 static u64 __sched_period(unsigned long nr_running)
 {
        u64 period = sysctl_sched_latency;
@@ -230,27 +239,45 @@ static u64 __sched_period(unsigned long nr_running)
        return period;
 }
 
+/*
+ * We calculate the wall-time slice from the period by taking a part
+ * proportional to the weight.
+ *
+ * s = p*w/rw
+ */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       u64 period = __sched_period(cfs_rq->nr_running);
+       u64 slice = __sched_period(cfs_rq->nr_running);
 
-       period *= se->load.weight;
-       do_div(period, cfs_rq->load.weight);
+       slice *= se->load.weight;
+       do_div(slice, cfs_rq->load.weight);
 
-       return period;
+       return slice;
 }
 
-static u64 __sched_vslice(unsigned long nr_running)
+/*
+ * We calculate the vruntime slice.
+ *
+ * vs = s/w = p/rw
+ */
+static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 {
-       unsigned long period = sysctl_sched_latency;
-       unsigned long nr_latency = sysctl_sched_nr_latency;
+       u64 vslice = __sched_period(nr_running);
 
-       if (unlikely(nr_running > nr_latency))
-               nr_running = nr_latency;
+       do_div(vslice, rq_weight);
 
-       period /= nr_running;
+       return vslice;
+}
 
-       return (u64)period;
+static u64 sched_vslice(struct cfs_rq *cfs_rq)
+{
+       return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+}
+
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       return __sched_vslice(cfs_rq->load.weight + se->load.weight,
+                       cfs_rq->nr_running + 1);
 }
 
 /*
@@ -469,10 +496,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                        vruntime >>= 1;
                }
        } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-               vruntime += __sched_vslice(cfs_rq->nr_running)/2;
+               vruntime += sched_vslice(cfs_rq)/2;
 
        if (initial && sched_feat(START_DEBIT))
-               vruntime += __sched_vslice(cfs_rq->nr_running + 1);
+               vruntime += sched_vslice_add(cfs_rq, se);
 
        if (!initial) {
                if (sched_feat(NEW_FAIR_SLEEPERS))
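
For readers who want to check the arithmetic, below is a minimal user-space
sketch of the three formulas introduced in the comments above (p = l*nr/nl,
s = p*w/rw, vs = p/rw). It is illustrative only, not kernel code: the latency
constants and the nice-level weights (1024 for nice 0, 335 for nice 5) are
assumed example values standing in for sysctl_sched_latency,
sysctl_sched_nr_latency and the per-entity load weights.

    /* Standalone sketch of the slice/vslice math; values are assumptions. */
    #include <stdio.h>

    #define LATENCY_NS  20000000ULL  /* assumed sysctl_sched_latency (20ms) */
    #define NR_LATENCY  5            /* assumed sysctl_sched_nr_latency */

    /* p = (nr <= nl) ? l : l*nr/nl */
    static unsigned long long period(unsigned long nr_running)
    {
            unsigned long long p = LATENCY_NS;

            if (nr_running > NR_LATENCY) {
                    p *= nr_running;
                    p /= NR_LATENCY;
            }
            return p;
    }

    /* s = p*w/rw: wall-time slice, proportional to the entity's weight */
    static unsigned long long slice(unsigned long w, unsigned long rw,
                                    unsigned long nr_running)
    {
            return period(nr_running) * w / rw;
    }

    /* vs = s/w = p/rw: the entity weight cancels out entirely */
    static unsigned long long vslice(unsigned long rw,
                                     unsigned long nr_running)
    {
            return period(nr_running) / rw;
    }

    int main(void)
    {
            /* two tasks: nice 0 (weight 1024) and nice 5 (weight 335) */
            unsigned long rw = 1024 + 335;

            printf("slice(nice 0) = %llu ns\n", slice(1024, rw, 2));
            printf("slice(nice 5) = %llu ns\n", slice(335, rw, 2));
            printf("vslice (both) = %llu ns\n", vslice(rw, 2));
            return 0;
    }

Note how the weight cancels in vs = s/w = p/rw: the wall-time slice s scales
with an entity's weight, while every entity on the queue advances vruntime by
the same amount per period. That cancellation is what makes vslice accurate
across nice levels, where the old __sched_vslice() simply divided the latency
by nr_running and ignored weights altogether.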