Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=a4ec24b48ddef1e93f7578be53270f0b95ad666c
Commit:     a4ec24b48ddef1e93f7578be53270f0b95ad666c
Parent:     a9957449b08ab561a33e1e038df06843b8d8dd9f
Author:     Dmitry Adamushko <[EMAIL PROTECTED]>
AuthorDate: Mon Oct 15 17:00:13 2007 +0200
Committer:  Ingo Molnar <[EMAIL PROTECTED]>
CommitDate: Mon Oct 15 17:00:13 2007 +0200

    sched: tidy up SCHED_RR
    
    - make timeslices of SCHED_RR tasks constant and not
    dependent on a task's static_prio [1];
    - remove obsolete code (timeslice-related bits);
    - make sched_rr_get_interval() return something more
    meaningful [2] for SCHED_OTHER tasks.
    
    [1] according to the following link, the old behaviour is not compliant
    with SUSv3 (though I'm not sure which standard is the reference for us :-)
    http://lkml.org/lkml/2007/3/7/656
    
    [2] the interval is dynamic and can be described as follows: "should a
    task be one of the runnable tasks at this particular moment, it would
    expect to run for this interval of time before being rescheduled by the
    scheduler tick".
    (i.e. the value is most precise when the task is actually runnable at
    the moment of the call)
    
    yeah, this requires task_rq_lock()/unlock(), but it is not a hot
    path.
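    
    the rr_interval test program used in the results below is not part of
    this patch; a minimal equivalent (just a sketch, not the exact tool)
    that queries sched_rr_get_interval() for the calling task could look
    like this:
    
        #include <sched.h>
        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>
        
        int main(void)
        {
                struct timespec ts;
        
                /* ask the kernel for the caller's timeslice/interval */
                if (sched_rr_get_interval(getpid(), &ts)) {
                        perror("sched_rr_get_interval");
                        return 1;
                }
        
                /* prints "time_slice: <sec> : <nsec>" as in the output below */
                printf("time_slice: %ld : %ld\n", (long)ts.tv_sec, ts.tv_nsec);
                return 0;
        }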
    
    results:
    
    (SCHED_FIFO)
    
    [EMAIL PROTECTED]:~/storage/prog$ sudo chrt -f 10 ./rr_interval
    time_slice: 0 : 0
    
    (SCHED_RR)
    
    [EMAIL PROTECTED]:~/storage/prog$ sudo chrt 10 ./rr_interval
    time_slice: 0 : 99984800
    
    (SCHED_NORMAL)
    
    [EMAIL PROTECTED]:~/storage/prog$ ./rr_interval
    time_slice: 0 : 19996960
    
    (SCHED_NORMAL + a cpu_hog of similar 'weight' on the same CPU --- so it
    should be half of the previous result)
    
    [EMAIL PROTECTED]:~/storage/prog$ taskset 1 ./rr_interval
    time_slice: 0 : 9998480
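    
    (for reference: this box apparently runs with HZ=1000 and a ~999848 ns
    tick, so the values above are whole jiffies converted by
    jiffies_to_timespec(): 100 * 999848 = 99984800 for SCHED_RR,
    20 * 999848 = 19996960 for the lone SCHED_NORMAL task, and
    10 * 999848 = 9998480 once the cpu hog shares the CPU)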
    
    Signed-off-by: Dmitry Adamushko <[EMAIL PROTECTED]>
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
---
 kernel/sched.c    |   41 +++++++++++++++++------------------------
 kernel/sched_rt.c |    2 +-
 2 files changed, 18 insertions(+), 25 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index ce9bb7a..f370f10 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -96,7 +96,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * Some helpers for converting nanosecond timing to jiffy resolution
  */
-#define NS_TO_JIFFIES(TIME)    ((TIME) / (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME)    ((unsigned long)(TIME) / (1000000000 / HZ))
 #define JIFFIES_TO_NS(TIME)    ((TIME) * (1000000000 / HZ))
 
 #define NICE_0_LOAD            SCHED_LOAD_SCALE
@@ -105,11 +105,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
  * Timeslices get refilled after they expire.
  */
-#define MIN_TIMESLICE          max(5 * HZ / 1000, 1)
 #define DEF_TIMESLICE          (100 * HZ / 1000)
 
 #ifdef CONFIG_SMP
@@ -133,24 +131,6 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 }
 #endif
 
-#define SCALE_PRIO(x, prio) \
-       max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-
-/*
- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- */
-static unsigned int static_prio_timeslice(int static_prio)
-{
-       if (static_prio == NICE_TO_PRIO(19))
-               return 1;
-
-       if (static_prio < NICE_TO_PRIO(0))
-               return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
-       else
-               return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
-
 static inline int rt_policy(int policy)
 {
        if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
@@ -4746,6 +4726,7 @@ asmlinkage
 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
        struct task_struct *p;
+       unsigned int time_slice;
        int retval = -EINVAL;
        struct timespec t;
 
@@ -4762,9 +4743,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        if (retval)
                goto out_unlock;
 
-       jiffies_to_timespec(p->policy == SCHED_FIFO ?
-                               0 : static_prio_timeslice(p->static_prio), &t);
+       if (p->policy == SCHED_FIFO)
+               time_slice = 0;
+       else if (p->policy == SCHED_RR)
+               time_slice = DEF_TIMESLICE;
+       else {
+               struct sched_entity *se = &p->se;
+               unsigned long flags;
+               struct rq *rq;
+
+               rq = task_rq_lock(p, &flags);
+               time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+               task_rq_unlock(rq, &flags);
+       }
        read_unlock(&tasklist_lock);
+       jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
 out_nounlock:
        return retval;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2f26c3d..d0097a0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -206,7 +206,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
        if (--p->time_slice)
                return;
 
-       p->time_slice = static_prio_timeslice(p->static_prio);
+       p->time_slice = DEF_TIMESLICE;
 
        /*
         * Requeue to the end of queue if we are not the only element