Author: sparky                       Date: Mon Nov  6 20:39:56 2006 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- up to patch-2.6.18-ck1

---- Files affected:
SOURCES:
   kernel-desktop-ck.patch (1.3 -> 1.4) 

---- Diffs:

================================================================
Index: SOURCES/kernel-desktop-ck.patch
diff -u SOURCES/kernel-desktop-ck.patch:1.3 SOURCES/kernel-desktop-ck.patch:1.4
--- SOURCES/kernel-desktop-ck.patch:1.3 Sat Jul  1 00:26:13 2006
+++ SOURCES/kernel-desktop-ck.patch     Mon Nov  6 21:39:50 2006
@@ -1,32 +1,59 @@
-Index: linux-2.6.17-ck1/include/linux/sched.h
+Index: linux-2.6.18-ck1/fs/proc/array.c
 ===================================================================
---- linux-2.6.17-ck1.orig/include/linux/sched.h        2006-06-18 15:32:49.000000000 +1000
-+++ linux-2.6.17-ck1/include/linux/sched.h     2006-06-18 15:34:37.000000000 +1000
-@@ -102,6 +102,7 @@ extern unsigned long nr_running(void);
- extern unsigned long nr_uninterruptible(void);
- extern unsigned long nr_active(void);
- extern unsigned long nr_iowait(void);
-+extern unsigned long weighted_cpuload(const int cpu);
- 
- #include <linux/time.h>
- #include <linux/param.h>
-@@ -163,6 +164,15 @@ extern unsigned long nr_iowait(void);
+--- linux-2.6.18-ck1.orig/fs/proc/array.c      2006-09-21 19:54:55.000000000 +1000
++++ linux-2.6.18-ck1/fs/proc/array.c   2006-09-21 20:00:33.000000000 +1000
+@@ -165,7 +165,7 @@ static inline char * task_state(struct t
+       read_lock(&tasklist_lock);
+       buffer += sprintf(buffer,
+               "State:\t%s\n"
+-              "SleepAVG:\t%lu%%\n"
++              "Bonus:\t%d\n"
+               "Tgid:\t%d\n"
+               "Pid:\t%d\n"
+               "PPid:\t%d\n"
+@@ -173,7 +173,7 @@ static inline char * task_state(struct t
+               "Uid:\t%d\t%d\t%d\t%d\n"
+               "Gid:\t%d\t%d\t%d\t%d\n",
+               get_task_state(p),
+-              (p->sleep_avg/1024)*100/(1020000000/1024),
++              p->bonus,
+               p->tgid,
+               p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
+               pid_alive(p) && p->ptrace ? p->parent->pid : 0,
+Index: linux-2.6.18-ck1/kernel/exit.c
+===================================================================
+--- linux-2.6.18-ck1.orig/kernel/exit.c        2006-09-21 19:55:00.000000000 +1000
++++ linux-2.6.18-ck1/kernel/exit.c     2006-09-21 20:00:33.000000000 +1000
+@@ -166,7 +166,6 @@ repeat:
+               zap_leader = (leader->exit_signal == -1);
+       }
+ 
+-      sched_exit(p);
+       write_unlock_irq(&tasklist_lock);
+       proc_flush_task(p);
+       release_thread(p);
+Index: linux-2.6.18-ck1/include/linux/sched.h
+===================================================================
+--- linux-2.6.18-ck1.orig/include/linux/sched.h        2006-09-21 19:54:58.000000000 +1000
++++ linux-2.6.18-ck1/include/linux/sched.h     2006-09-21 20:05:32.000000000 +1000
+@@ -32,9 +32,16 @@
  #define SCHED_FIFO            1
  #define SCHED_RR              2
  #define SCHED_BATCH           3
 +#define SCHED_ISO             4
 +#define SCHED_IDLEPRIO                5
-+
-+#define SCHED_MIN             0
-+#define SCHED_MAX             5
-+
+ 
+ #ifdef __KERNEL__
+ 
++#define SCHED_MAX             SCHED_IDLEPRIO
 +#define SCHED_RANGE(policy)   ((policy) <= SCHED_MAX)
 +#define SCHED_RT(policy)      ((policy) == SCHED_FIFO || \
 +                                      (policy) == SCHED_RR)
- 
++
  struct sched_param {
        int sched_priority;
-@@ -201,6 +211,7 @@ extern void show_stack(struct task_struc
+ };
+@@ -204,6 +211,7 @@ extern void show_stack(struct task_struc
  
  void io_schedule(void);
  long io_schedule_timeout(long timeout);
@@ -34,7 +61,7 @@
  
  extern void cpu_init (void);
  extern void trap_init(void);
-@@ -480,11 +491,16 @@ struct signal_struct {
+@@ -498,14 +506,18 @@ struct signal_struct {
  
  #define MAX_USER_RT_PRIO      100
  #define MAX_RT_PRIO           MAX_USER_RT_PRIO
@@ -45,34 +72,18 @@
 +#define MIN_USER_PRIO         (MAX_PRIO - 2)
 +#define IDLEPRIO_PRIO         (MAX_PRIO - 1)
  
--#define rt_task(p)            (unlikely((p)->prio < MAX_RT_PRIO))
-+#define rt_task(p)            (unlikely(SCHED_RT((p)->policy)))
+ #define rt_prio(prio)         unlikely((prio) < MAX_RT_PRIO)
+ #define rt_task(p)            rt_prio((p)->prio)
  #define batch_task(p)         (unlikely((p)->policy == SCHED_BATCH))
+-#define has_rt_policy(p) \
+-      unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
++#define has_rt_policy(p)      unlikely(SCHED_RT((p)->policy))
 +#define iso_task(p)           (unlikely((p)->policy == SCHED_ISO))
 +#define idleprio_task(p)      (unlikely((p)->policy == SCHED_IDLEPRIO))
  
  /*
   * Some day this will be a full-fledged user tracking system..
-@@ -517,7 +533,6 @@ extern struct user_struct *find_user(uid
- extern struct user_struct root_user;
- #define INIT_USER (&root_user)
- 
--typedef struct prio_array prio_array_t;
- struct backing_dev_info;
- struct reclaim_state;
- 
-@@ -547,9 +562,9 @@ enum idle_type
- /*
-  * sched-domains (multiprocessor balancing) declarations:
-  */
--#ifdef CONFIG_SMP
- #define SCHED_LOAD_SCALE      128UL   /* increase resolution of load */
- 
-+#ifdef CONFIG_SMP
- #define SD_LOAD_BALANCE               1       /* Do load balancing on this domain. */
- #define SD_BALANCE_NEWIDLE    2       /* Balance when about to become idle */
- #define SD_BALANCE_EXEC               4       /* Balance on exec */
-@@ -638,6 +653,22 @@ extern unsigned int max_cache_size;
+@@ -707,6 +719,22 @@ extern unsigned int max_cache_size;
  
  #endif        /* CONFIG_SMP */
  
@@ -95,7 +106,7 @@
  
  struct io_context;                    /* See blkdev.h */
  void exit_io_context(void);
-@@ -686,13 +717,6 @@ struct audit_context;             /* See audit.c */
+@@ -755,15 +783,6 @@ struct audit_context;             /* See audit.c */
  struct mempolicy;
  struct pipe_inode_info;
  
@@ -106,23 +117,16 @@
 -      SLEEP_INTERRUPTED,
 -};
 -
+-struct prio_array;
+-
  struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        struct thread_info *thread_info;
-@@ -702,24 +726,26 @@ struct task_struct {
- 
-       int lock_depth;         /* BKL lock depth */
- 
--#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-+#ifdef CONFIG_SMP
-+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       int oncpu;
- #endif
-+#endif
-+      int load_weight;        /* for niceness load balancing purposes */
-       int prio, static_prio;
+@@ -781,19 +800,18 @@ struct task_struct {
+       int load_weight;        /* for niceness load balancing purposes */
+       int prio, static_prio, normal_prio;
        struct list_head run_list;
--      prio_array_t *array;
+-      struct prio_array *array;
  
        unsigned short ioprio;
        unsigned int btrace_seq;
@@ -140,49 +144,52 @@
        cpumask_t cpus_allowed;
 -      unsigned int time_slice, first_time_slice;
  
- #ifdef CONFIG_SCHEDSTATS
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
-@@ -838,6 +864,7 @@ struct task_struct {
-       /* mutex deadlock detection */
-       struct mutex_waiter *blocked_on;
+@@ -942,6 +960,7 @@ struct task_struct {
+       struct held_lock held_locks[MAX_LOCK_DEPTH];
+       unsigned int lockdep_recursion;
  #endif
 +      unsigned long mutexes_held;
  
  /* journalling filesystem info */
        void *journal_info;
-@@ -948,6 +975,9 @@ static inline void put_task_struct(struc
- #define PF_SPREAD_PAGE        0x04000000      /* Spread page cache over cpuset */
- #define PF_SPREAD_SLAB        0x08000000      /* Spread some slab caches over cpuset */
+@@ -1054,8 +1073,11 @@ static inline void put_task_struct(struc
+ #define PF_SWAPWRITE  0x00800000      /* Allowed to write to swap */
+ #define PF_SPREAD_PAGE        0x01000000      /* Spread page cache over cpuset */
+ #define PF_SPREAD_SLAB        0x02000000      /* Spread some slab caches over cpuset */
++#define PF_ISOREF     0x04000000      /* SCHED_ISO task has used up quota */
  #define PF_MEMPOLICY  0x10000000      /* Non-default NUMA mempolicy */
-+#define PF_NONSLEEP   0x20000000      /* Waiting on in kernel activity */
-+#define PF_FORKED     0x40000000      /* Task just forked another process */
-+#define PF_ISOREF     0x80000000      /* SCHED_ISO task has used up quota */
+ #define PF_MUTEX_TESTER       0x20000000      /* Thread belongs to the rt mutex tester */
++#define PF_NONSLEEP   0x40000000      /* Waiting on in kernel activity */
++#define PF_FORKED     0x80000000      /* Task just forked another process */
  
  /*
   * Only the _current_ task can read/write to tsk->flags, but other
-@@ -1069,7 +1099,6 @@ extern void FASTCALL(wake_up_new_task(st
+@@ -1191,7 +1213,6 @@ extern void FASTCALL(wake_up_new_task(st
   static inline void kick_process(struct task_struct *tsk) { }
  #endif
- extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
--extern void FASTCALL(sched_exit(task_t * p));
+ extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+-extern void FASTCALL(sched_exit(struct task_struct * p));
  
  extern int in_group_p(gid_t);
  extern int in_egroup_p(gid_t);
-Index: linux-2.6.17-ck1/kernel/sched.c
+Index: linux-2.6.18-ck1/kernel/sched.c
 ===================================================================
---- linux-2.6.17-ck1.orig/kernel/sched.c       2006-06-18 15:32:49.000000000 +1000
-+++ linux-2.6.17-ck1/kernel/sched.c    2006-06-18 15:34:34.000000000 +1000
-@@ -16,6 +16,9 @@
+--- linux-2.6.18-ck1.orig/kernel/sched.c       2006-09-21 19:55:00.000000000 +1000
++++ linux-2.6.18-ck1/kernel/sched.c    2006-09-21 20:05:30.000000000 +1000
+@@ -16,6 +16,10 @@
   *            by Davide Libenzi, preemptible kernel bits by Robert Love.
   *  2003-09-03        Interactivity tuning by Con Kolivas.
   *  2004-04-02        Scheduler domains code by Nick Piggin
-+ *  2006-06-18        Staircase scheduling policy by Con Kolivas with help
-+ *            from William Lee Irwin III, Zwane Mwaikambo & Peter Williams.
-+ *            Staircase v16
++ *  2006-09-21        Staircase scheduling policy by Con Kolivas with help
++ *            from William Lee Irwin III, Zwane Mwaikambo, Peter Williams
++ *            and Andreas Mohr.
++ *            Staircase v16.2
   */
  
  #include <linux/mm.h>
-@@ -55,6 +58,25 @@
+@@ -57,6 +61,25 @@
  #include <asm/unistd.h>
  
  /*
@@ -208,7 +215,7 @@
   * Convert user-nice values [ -20 ... 0 ... 19 ]
   * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
   * and back.
-@@ -75,125 +97,28 @@
+@@ -77,124 +100,26 @@
  /*
   * Some helpers for converting nanosecond timing to jiffy resolution
   */
@@ -291,10 +298,10 @@
 -#define INTERACTIVE_SLEEP(p) \
 -      (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
 -              (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
--
 +#define NSJIFFY                       (1000000000 / HZ)       /* One jiffy in ns */
 +#define NS_TO_JIFFIES(TIME)   ((TIME) / NSJIFFY)
 +#define JIFFIES_TO_NS(TIME)   ((TIME) * NSJIFFY)
+ 
  #define TASK_PREEMPTS_CURR(p, rq) \
        ((p)->prio < (rq)->curr->prio)
  
@@ -313,45 +320,35 @@
 +#define DEF_TIMESLICE         (RR_INTERVAL * 19)
  
 -#define SCALE_PRIO(x, prio) \
--      max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+-      max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 -
--static unsigned int task_timeslice(task_t *p)
+-static unsigned int static_prio_timeslice(int static_prio)
 -{
--      if (p->static_prio < NICE_TO_PRIO(0))
--              return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
+-      if (static_prio < NICE_TO_PRIO(0))
+-              return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
 -      else
--              return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+-              return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+-}
+-
+-static inline unsigned int task_timeslice(struct task_struct *p)
+-{
+-      return static_prio_timeslice(p->static_prio);
 -}
--#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)     \
-+#define task_hot(p, now, sd) ((long long) ((now) - (p)->timestamp)    \
-                               < (long long) (sd)->cache_hot_time)
  
  /*
   * These are the runqueue data structures:
   */
--
--#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
--
- typedef struct runqueue runqueue_t;
  
 -struct prio_array {
 -      unsigned int nr_active;
--      unsigned long bitmap[BITMAP_SIZE];
+-      DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
 -      struct list_head queue[MAX_PRIO];
 -};
 -
  /*
   * This is the main, per-CPU runqueue data structure.
   *
-@@ -209,6 +134,7 @@ struct runqueue {
-        * remote CPUs use both these fields when doing load calculation.
-        */
-       unsigned long nr_running;
-+      unsigned long raw_weighted_load;
- #ifdef CONFIG_SMP
-       unsigned long cpu_load[3];
- #endif
-@@ -222,12 +148,15 @@ struct runqueue {
+@@ -224,12 +149,14 @@ struct rq {
         */
        unsigned long nr_uninterruptible;
  
@@ -360,19 +357,18 @@
 +      unsigned short cache_ticks, preempted;
 +      unsigned long iso_ticks;
 +      unsigned short iso_refractory;
-+
-       task_t *curr, *idle;
+       struct task_struct *curr, *idle;
        struct mm_struct *prev_mm;
--      prio_array_t *active, *expired, arrays[2];
+-      struct prio_array *active, *expired, arrays[2];
 -      int best_expired_prio;
 +      unsigned long bitmap[BITS_TO_LONGS(MAX_PRIO + 1)];
 +      struct list_head queue[MAX_PRIO];
        atomic_t nr_iowait;
  
  #ifdef CONFIG_SMP
-@@ -492,13 +421,7 @@ static inline runqueue_t *this_rq_lock(v
+@@ -553,13 +480,7 @@ static inline struct rq *this_rq_lock(vo
  
- #ifdef CONFIG_SCHEDSTATS
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  /*
 - * Called when a process is dequeued from the active array and given
 - * the cpu.  We should note that with the exception of interactive
@@ -385,7 +381,7 @@
   *
   * This function is only called from sched_info_arrive(), rather than
   * dequeue_task(). Even though a task may be queued and dequeued multiple
-@@ -536,13 +459,11 @@ static void sched_info_arrive(task_t *t)
+@@ -592,13 +513,11 @@ static void sched_info_arrive(struct tas
  }
  
  /*
@@ -396,7 +392,7 @@
 - * and requeuing any tasks, we are interested in queuing to either. It
 - * is unusual but not impossible for tasks to be dequeued and immediately
 - * requeued in the same or another array: this can happen in sched_yield(),
-+ * Called when a process is queued
++ * Called when a process is queued.
 + * The time is noted and later used to determine how long we had to wait for
 + * us to reach the cpu.
 + * It is unusual but not impossible for tasks to be dequeued and immediately
@@ -404,10 +400,14 @@
   * set_user_nice(), and even load_balance() as it moves tasks from runqueue
   * to runqueue.
   *
-@@ -596,85 +517,155 @@ static inline void sched_info_switch(tas
+@@ -657,73 +576,81 @@ sched_info_switch(struct task_struct *pr
  #define sched_info_switch(t, next)    do { } while (0)
- #endif /* CONFIG_SCHEDSTATS */
+ #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
  
+-/*
+- * Adding/removing a task to/from a priority array:
+- */
+-static void dequeue_task(struct task_struct *p, struct prio_array *array)
 +#if BITS_PER_LONG < 64
 +static inline void longlimit(unsigned long long *longlong)
 +{
@@ -422,7 +422,11 @@
 +
 +/* Get nanosecond clock difference without overflowing unsigned long. */
 +static unsigned long ns_diff(unsigned long long v1, unsigned long long v2)
-+{
+ {
+-      array->nr_active--;
+-      list_del(&p->run_list);
+-      if (list_empty(array->queue + p->prio))
+-              __clear_bit(p->prio, array->bitmap);
 +      unsigned long long vdiff;
 +      if (likely(v1 >= v2)) {
 +              vdiff = v1 - v2;
@@ -435,75 +439,48 @@
 +              vdiff = 1;
 +      }
 +      return (unsigned long)vdiff;
-+}
-+
-+static inline int task_queued(const task_t *task)
-+{
-+      return !list_empty(&task->run_list);
-+}
-+
- /*
-- * Adding/removing a task to/from a priority array:
-+ * Adding/removing a task to/from a runqueue:
-  */
--static void dequeue_task(struct task_struct *p, prio_array_t *array)
-+static void dequeue_task(task_t *p, runqueue_t *rq)
- {
--      array->nr_active--;
--      list_del(&p->run_list);
--      if (list_empty(array->queue + p->prio))
--              __clear_bit(p->prio, array->bitmap);
-+      list_del_init(&p->run_list);
-+      if (list_empty(rq->queue + p->prio))
-+              __clear_bit(p->prio, rq->bitmap);
-+      p->ns_debit = 0;
  }
  
--static void enqueue_task(struct task_struct *p, prio_array_t *array)
-+static void enqueue_task(task_t *p, runqueue_t *rq)
+-static void enqueue_task(struct task_struct *p, struct prio_array *array)
++static inline int task_queued(struct task_struct *task)
  {
 -      sched_info_queued(p);
 -      list_add_tail(&p->run_list, array->queue + p->prio);
 -      __set_bit(p->prio, array->bitmap);
 -      array->nr_active++;
 -      p->array = array;
-+      list_add_tail(&p->run_list, rq->queue + p->prio);
-+      __set_bit(p->prio, rq->bitmap);
++      return !list_empty(&task->run_list);
  }
  
  /*
-  * Put task to the end of the run list without the overhead of dequeue
-  * followed by enqueue.
+- * Put task to the end of the run list without the overhead of dequeue
+- * followed by enqueue.
++ * Adding/removing a task to/from a runqueue:
   */
--static void requeue_task(struct task_struct *p, prio_array_t *array)
-+static void requeue_task(task_t *p, runqueue_t *rq, const int prio)
+-static void requeue_task(struct task_struct *p, struct prio_array *array)
++static void dequeue_task(struct task_struct *p, struct rq *rq)
  {
 -      list_move_tail(&p->run_list, array->queue + p->prio);
-+      list_move_tail(&p->run_list, rq->queue + prio);
-+      if (p->prio != prio) {
-+              if (list_empty(rq->queue + p->prio))
-+                      __clear_bit(p->prio, rq->bitmap);
-+              p->prio = prio;
-+              __set_bit(prio, rq->bitmap);
-+      }
++      list_del_init(&p->run_list);
++      if (list_empty(rq->queue + p->prio))
++              __clear_bit(p->prio, rq->bitmap);
 +      p->ns_debit = 0;
  }
  
--static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
-+static inline void enqueue_task_head(task_t *p, runqueue_t *rq)
+-static inline void
+-enqueue_task_head(struct task_struct *p, struct prio_array *array)
++static void enqueue_task(struct task_struct *p, struct rq *rq)
  {
 -      list_add(&p->run_list, array->queue + p->prio);
 -      __set_bit(p->prio, array->bitmap);
 -      array->nr_active++;
 -      p->array = array;
-+      list_add(&p->run_list, rq->queue + p->prio);
++      list_add_tail(&p->run_list, rq->queue + p->prio);
 +      __set_bit(p->prio, rq->bitmap);
  }
  
-+static unsigned int slice(const task_t *p);
-+
  /*
-- * effective_prio - return the priority that is based on the static
+- * __normal_prio - return the priority that is based on the static
 - * priority but is modified by bonuses/penalties.
 - *
 - * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
@@ -515,63 +492,24 @@
 - * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
 - *
 - * Both properties are important to certain workloads.
-+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
-+ * of tasks with abnormal "nice" values across CPUs the contribution that
-+ * each task makes to its run queue's load is weighted according to its
-+ * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
-+ * scaled version of the new time slice allocation that they receive on time
-+ * slice expiry etc.
++ * Put task to the end of the run list without the overhead of dequeue
++ * followed by enqueue.
   */
--static int effective_prio(task_t *p)
-+
-+/*
-+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
-+ * If static_prio_timeslice() is ever changed to break this assumption then
-+ * this code will need modification
-+ */
-+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
-+#define LOAD_WEIGHT(lp) \
-+      (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
-+#define TASK_LOAD_WEIGHT(p)   LOAD_WEIGHT(slice(p))
-+#define RTPRIO_TO_LOAD_WEIGHT(rp)     \
-+      (LOAD_WEIGHT((RR_INTERVAL + 20 + (rp))))
-+
-+static void set_load_weight(task_t *p)
+-
+-static inline int __normal_prio(struct task_struct *p)
++static void requeue_task(struct task_struct *p, struct rq *rq, const int prio)
  {
 -      int bonus, prio;
-+      if (rt_task(p)) {
-+#ifdef CONFIG_SMP
-+              if (p == task_rq(p)->migration_thread)
-+                      /*
-+                       * The migration thread does the actual balancing.
-+                       * Giving its load any weight will skew balancing
-+                       * adversely.
-+                       */
-+                      p->load_weight = 0;
-+              else
-+#endif
-+                      p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
-+      } else if (idleprio_task(p)) {
-+              /*
-+               * We want idleprio_tasks to have a presence on weighting but
-+               * as small as possible
-+               */
-+              p->load_weight = 1;
-+      } else
-+              p->load_weight = TASK_LOAD_WEIGHT(p);
-+}
- 
--      if (rt_task(p))
--              return p->prio;
-+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-+{
-+      rq->raw_weighted_load += p->load_weight;
-+}
- 
+-
 -      bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-+{
-+      rq->raw_weighted_load -= p->load_weight;
++      list_move_tail(&p->run_list, rq->queue + prio);
++      if (p->prio != prio) {
++              if (list_empty(rq->queue + p->prio))
++                      __clear_bit(p->prio, rq->bitmap);
++              p->prio = prio;
++              __set_bit(prio, rq->bitmap);
++      }
++      p->ns_debit = 0;
 +}
  
 -      prio = p->static_prio - bonus;
@@ -580,110 +518,144 @@
 -      if (prio > MAX_PRIO-1)
 -              prio = MAX_PRIO-1;
 -      return prio;
-+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-+{
-+      rq->nr_running++;
-+      inc_raw_weighted_load(rq, p);
-+}
-+
-+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
++static inline void enqueue_task_head(struct task_struct *p, struct rq *rq)
 +{
-+      rq->nr_running--;
-+      dec_raw_weighted_load(rq, p);
++      list_add(&p->run_list, rq->queue + p->prio);
++      __set_bit(p->prio, rq->bitmap);
  }
  
++static unsigned int slice(const struct task_struct *p);
++
  /*
-  * __activate_task - move a task to the runqueue.
-  */
--static void __activate_task(task_t *p, runqueue_t *rq)
-+static inline void __activate_task(task_t *p, runqueue_t *rq)
+  * To aid in avoiding the subversion of "niceness" due to uneven distribution
+  * of tasks with abnormal "nice" values across CPUs the contribution that
+@@ -741,10 +668,9 @@ static inline int __normal_prio(struct t
+ #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
<<Diff was trimmed, longer than 597 lines>>
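
---- Example (illustration only, not part of the patch):
For readers who only skim the trimmed diff above, here is a minimal
user-space C sketch of two pieces it introduces: the SCHED_ISO /
SCHED_IDLEPRIO policy helpers added to include/linux/sched.h, and the
load-weight scaling used for nice-aware load balancing in kernel/sched.c.
The macros are copied from the hunks above; the value of RR_INTERVAL is an
assumption, since its definition lies outside the visible excerpt.

#include <stdio.h>

#define SCHED_NORMAL    0
#define SCHED_FIFO      1
#define SCHED_RR        2
#define SCHED_BATCH     3
#define SCHED_ISO       4
#define SCHED_IDLEPRIO  5

#define SCHED_MAX               SCHED_IDLEPRIO
#define SCHED_RANGE(policy)     ((policy) <= SCHED_MAX)
#define SCHED_RT(policy)        ((policy) == SCHED_FIFO || (policy) == SCHED_RR)

#define SCHED_LOAD_SCALE        128UL
#define RR_INTERVAL             8       /* assumed value, not shown in this diff */
#define DEF_TIMESLICE           (RR_INTERVAL * 19)
#define TIME_SLICE_NICE_ZERO    DEF_TIMESLICE

/* A task's load contribution is its timeslice scaled so that a nice-0
 * task contributes exactly SCHED_LOAD_SCALE to raw_weighted_load. */
#define LOAD_WEIGHT(lp)         (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define RTPRIO_TO_LOAD_WEIGHT(rp) (LOAD_WEIGHT((RR_INTERVAL + 20 + (rp))))

int main(void)
{
        /* SCHED_ISO is a valid policy but not a real-time one. */
        printf("SCHED_ISO: rt=%d in_range=%d\n",
               SCHED_RT(SCHED_ISO), SCHED_RANGE(SCHED_ISO));
        /* A nice-0 task with the default timeslice weighs SCHED_LOAD_SCALE. */
        printf("nice-0 weight  : %lu\n", LOAD_WEIGHT(DEF_TIMESLICE));
        /* Real-time tasks are weighted by rt_priority, not by timeslice. */
        printf("rtprio-1 weight: %lu\n", RTPRIO_TO_LOAD_WEIGHT(1));
        return 0;
}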

---- CVS-web:
    http://cvs.pld-linux.org/SOURCES/kernel-desktop-ck.patch?r1=1.3&r2=1.4&f=u
