Author: czarny                       Date: Mon Jul 30 15:45:31 2007 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- up to 2.6.22-ck1

---- Files affected:
SOURCES:
   kernel-desktop-ck.patch (1.8 -> 1.9) 

---- Diffs:

================================================================
Index: SOURCES/kernel-desktop-ck.patch
diff -u SOURCES/kernel-desktop-ck.patch:1.8 SOURCES/kernel-desktop-ck.patch:1.9
--- SOURCES/kernel-desktop-ck.patch:1.8 Wed Mar 28 00:48:43 2007
+++ SOURCES/kernel-desktop-ck.patch     Mon Jul 30 17:45:26 2007
@@ -1,41 +1,7 @@
-Index: linux-2.6.20-ck1/fs/proc/array.c
+Index: linux-2.6.22-ck1/include/linux/sched.h
 ===================================================================
---- linux-2.6.20-ck1.orig/fs/proc/array.c      2007-02-05 22:52:03.000000000 +1100
-+++ linux-2.6.20-ck1/fs/proc/array.c   2007-02-16 19:01:30.000000000 +1100
-@@ -165,7 +165,7 @@ static inline char * task_state(struct t
-       rcu_read_lock();
-       buffer += sprintf(buffer,
-               "State:\t%s\n"
--              "SleepAVG:\t%lu%%\n"
-+              "Bonus:\t%d\n"
-               "Tgid:\t%d\n"
-               "Pid:\t%d\n"
-               "PPid:\t%d\n"
-@@ -173,7 +173,7 @@ static inline char * task_state(struct t
-               "Uid:\t%d\t%d\t%d\t%d\n"
-               "Gid:\t%d\t%d\t%d\t%d\n",
-               get_task_state(p),
--              (p->sleep_avg/1024)*100/(1020000000/1024),
-+              p->bonus,
-               p->tgid, p->pid,
-               pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
-               pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
-Index: linux-2.6.20-ck1/kernel/exit.c
-===================================================================
---- linux-2.6.20-ck1.orig/kernel/exit.c        2007-02-05 22:52:04.000000000 +1100
-+++ linux-2.6.20-ck1/kernel/exit.c     2007-02-16 19:01:30.000000000 +1100
-@@ -170,7 +170,6 @@ repeat:
-               zap_leader = (leader->exit_signal == -1);
-       }
- 
--      sched_exit(p);
-       write_unlock_irq(&tasklist_lock);
-       proc_flush_task(p);
-       release_thread(p);
-Index: linux-2.6.20-ck1/include/linux/sched.h
-===================================================================
---- linux-2.6.20-ck1.orig/include/linux/sched.h        2007-02-05 22:52:04.000000000 +1100
-+++ linux-2.6.20-ck1/include/linux/sched.h     2007-02-16 19:01:33.000000000 +1100
+--- linux-2.6.22-ck1.orig/include/linux/sched.h        2007-07-10 14:55:00.000000000 +1000
++++ linux-2.6.22-ck1/include/linux/sched.h     2007-07-10 14:55:21.000000000 +1000
 @@ -34,9 +34,14 @@
  #define SCHED_FIFO            1
  #define SCHED_RR              2
@@ -51,24 +17,34 @@
  struct sched_param {
        int sched_priority;
  };
-@@ -216,6 +221,7 @@ extern void show_stack(struct task_struc
+@@ -129,7 +134,7 @@
+ extern unsigned long nr_active(void);
+ extern unsigned long nr_iowait(void);
+ extern unsigned long weighted_cpuload(const int cpu);
+-
++extern int above_background_load(void);
  
- void io_schedule(void);
- long io_schedule_timeout(long timeout);
-+extern int sched_interactive, sched_compute, sched_iso_cpu;
- 
- extern void cpu_init (void);
- extern void trap_init(void);
-@@ -522,14 +528,20 @@ struct signal_struct {
+ /*
+  * Task state bitmask. NOTE! These bits are also
+@@ -150,8 +155,7 @@
+ #define EXIT_ZOMBIE           16
+ #define EXIT_DEAD             32
+ /* in tsk->state again */
+-#define TASK_NONINTERACTIVE   64
+-#define TASK_DEAD             128
++#define TASK_DEAD             64
+ 
+ #define __set_task_state(tsk, state_value)            \
+       do { (tsk)->state = (state_value); } while (0)
+@@ -537,14 +541,19 @@
  
  #define MAX_USER_RT_PRIO      100
  #define MAX_RT_PRIO           MAX_USER_RT_PRIO
++#define PRIO_RANGE            (40)
 +#define ISO_PRIO              (MAX_RT_PRIO - 1)
  
 -#define MAX_PRIO              (MAX_RT_PRIO + 40)
-+#define MAX_PRIO              (MAX_RT_PRIO + 41)
-+#define MIN_USER_PRIO         (MAX_PRIO - 2)
-+#define IDLEPRIO_PRIO         (MAX_PRIO - 1)
++#define MAX_PRIO              (MAX_RT_PRIO + PRIO_RANGE)
  
 -#define rt_prio(prio)         unlikely((prio) < MAX_RT_PRIO)
 +#define rt_prio(prio)         unlikely((prio) < ISO_PRIO)
@@ -78,35 +54,12 @@
 +#define is_rt_policy(policy)  ((policy) == SCHED_FIFO || \
 +                                      (policy) == SCHED_RR)
  #define has_rt_policy(p)      unlikely(is_rt_policy((p)->policy))
-+#define iso_task(p)           (unlikely((p)->policy == SCHED_ISO))
-+#define idleprio_task(p)      (unlikely((p)->policy == SCHED_IDLEPRIO))
++#define iso_task(p)           unlikely((p)->policy == SCHED_ISO)
++#define idleprio_task(p)      unlikely((p)->policy == SCHED_IDLEPRIO)
  
  /*
   * Some day this will be a full-fledged user tracking system..
-@@ -741,6 +753,22 @@ extern unsigned int max_cache_size;
- 
- #endif        /* CONFIG_SMP */
- 
-+/*
-+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
-+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
-+ * task of nice 0 or enough lower priority tasks to bring up the
-+ * weighted_cpuload
-+ */
-+static inline int above_background_load(void)
-+{
-+      unsigned long cpu;
-+
-+      for_each_online_cpu(cpu) {
-+              if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-+                      return 1;
-+      }
-+      return 0;
-+}
- 
- struct io_context;                    /* See blkdev.h */
- struct cpuset;
-@@ -789,15 +817,6 @@ struct mempolicy;
+@@ -809,13 +818,6 @@
  struct pipe_inode_info;
  struct uts_namespace;
  
@@ -117,16 +70,21 @@
 -      SLEEP_INTERRUPTED,
 -};
 -
--struct prio_array;
--
+ struct prio_array;
+ 
  struct task_struct {
-       volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
-       struct thread_info *thread_info;
-@@ -815,20 +834,19 @@ struct task_struct {
+@@ -835,20 +837,33 @@
        int load_weight;        /* for niceness load balancing purposes */
        int prio, static_prio, normal_prio;
        struct list_head run_list;
--      struct prio_array *array;
++      /*
++       * This bitmap shows what priorities this task has received quota
++       * from for this major priority rotation on its current runqueue.
++       */
++      DECLARE_BITMAP(bitmap, PRIO_RANGE + 1);
+       struct prio_array *array;
++      /* Which major runqueue rotation did this task run */
++      unsigned long rotation;
  
        unsigned short ioprio;
  #ifdef CONFIG_BLK_DEV_IO_TRACE
@@ -134,19 +92,25 @@
  #endif
 -      unsigned long sleep_avg;
        unsigned long long timestamp, last_ran;
-+      unsigned long runtime, totalrun, ns_debit, systime;
-+      unsigned int bonus;
-+      unsigned int slice, time_slice;
        unsigned long long sched_time; /* sched_clock time spent running */
 -      enum sleep_type sleep_type;
  
-       unsigned long policy;
+       unsigned int policy;
        cpumask_t cpus_allowed;
 -      unsigned int time_slice, first_time_slice;
++      /*
++       * How much this task is entitled to run at the current priority
++       * before being requeued at a lower priority.
++       */
++      int time_slice;
++      /* Is this the very first time_slice this task has ever run. */
++      unsigned int first_time_slice;
++      /* How much this task receives at each priority level */
++      int quota;
  
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
-@@ -993,6 +1011,7 @@ struct task_struct {
+@@ -1013,6 +1028,7 @@
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        unsigned int lockdep_recursion;
  #endif
@@ -154,73 +118,62 @@
  
  /* journalling filesystem info */
        void *journal_info;
-@@ -1155,8 +1174,11 @@ static inline void put_task_struct(struc
+@@ -1181,9 +1197,11 @@
  #define PF_SWAPWRITE  0x00800000      /* Allowed to write to swap */
 #define PF_SPREAD_PAGE        0x01000000      /* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB        0x02000000      /* Spread some slab caches over cpuset */
 +#define PF_ISOREF     0x04000000      /* SCHED_ISO task has used up quota */
  #define PF_MEMPOLICY  0x10000000      /* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER       0x20000000      /* Thread belongs to the rt mutex tester */
-+#define PF_NONSLEEP   0x40000000      /* Waiting on in kernel activity */
-+#define PF_FORKED     0x80000000      /* Task just forked another process */
+ #define PF_FREEZER_SKIP       0x40000000      /* Freezer should not count it as freezeable */
++#define PF_NONSLEEP   0x80000000      /* Waiting on in-kernel activity */
  
  /*
   * Only the _current_ task can read/write to tsk->flags, but other
-@@ -1291,7 +1313,6 @@ extern void FASTCALL(wake_up_new_task(st
-  static inline void kick_process(struct task_struct *tsk) { }
+@@ -1253,7 +1271,7 @@
  #endif
- extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
--extern void FASTCALL(sched_exit(struct task_struct * p));
  
- extern int in_group_p(gid_t);
- extern int in_egroup_p(gid_t);
-Index: linux-2.6.20-ck1/kernel/sched.c
-===================================================================
---- linux-2.6.20-ck1.orig/kernel/sched.c       2007-02-05 22:52:04.000000000 +1100
-+++ linux-2.6.20-ck1/kernel/sched.c    2007-02-16 19:01:31.000000000 +1100
-@@ -16,6 +16,10 @@
+ extern void set_user_nice(struct task_struct *p, long nice);
+-extern int task_prio(const struct task_struct *p);
++extern int task_prio(struct task_struct *p);
+ extern int task_nice(const struct task_struct *p);
+ extern int can_nice(const struct task_struct *p, const int nice);
+ extern int task_curr(const struct task_struct *p);
+Index: linux-2.6.22-ck1/kernel/sched.c
+===================================================================
+--- linux-2.6.22-ck1.orig/kernel/sched.c       2007-07-10 14:55:00.000000000 +1000
++++ linux-2.6.22-ck1/kernel/sched.c    2007-07-10 14:55:24.000000000 +1000
+@@ -16,6 +16,7 @@
   *            by Davide Libenzi, preemptible kernel bits by Robert Love.
   *  2003-09-03        Interactivity tuning by Con Kolivas.
   *  2004-04-02        Scheduler domains code by Nick Piggin
-+ *  2007-02-14        Staircase scheduling policy by Con Kolivas with help
-+ *            from William Lee Irwin III, Zwane Mwaikambo, Peter Williams
-+ *            and Andreas Mohr.
-+ *            Staircase v17
++ *  2007-03-02        Staircase deadline scheduling policy by Con Kolivas
   */
  
  #include <linux/mm.h>
-@@ -57,6 +61,25 @@
+@@ -53,8 +54,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/delayacct.h>
+ #include <linux/reciprocal_div.h>
+-
++#include <linux/log2.h>
+ #include <asm/tlb.h>
++
  #include <asm/unistd.h>
  
  /*
-+ * sched_interactive - sysctl which allows interactive tasks to have bonus
-+ * raise its priority.
-+ * sched_compute - sysctl which enables long timeslices and delayed preemption
-+ * for compute server usage.
-+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
-+ * are allowed to run (over ISO_PERIOD seconds) as real time tasks.
-+ */
-+int sched_interactive __read_mostly = 1;
-+int sched_compute __read_mostly;
-+int sched_iso_cpu __read_mostly = 80;
-+
-+#define ISO_PERIOD            (5 * HZ)
-+/*
-+ * CACHE_DELAY is the time preemption is delayed in sched_compute mode
-+ * and is set to a nominal 10ms.
-+ */
-+#define CACHE_DELAY   (10 * (HZ) / 1001 + 1)
-+
-+/*
-  * Convert user-nice values [ -20 ... 0 ... 19 ]
-  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
-  * and back.
-@@ -77,123 +100,20 @@
- /*
-  * Some helpers for converting nanosecond timing to jiffy resolution
-  */
+@@ -84,147 +86,85 @@
+ #define USER_PRIO(p)          ((p)-MAX_RT_PRIO)
+ #define TASK_USER_PRIO(p)     USER_PRIO((p)->static_prio)
+ #define MAX_USER_PRIO         (USER_PRIO(MAX_PRIO))
++#define SCHED_PRIO(p)         ((p)+MAX_RT_PRIO)
+ 
+-/*
+- * Some helpers for converting nanosecond timing to jiffy resolution
+- */
 -#define NS_TO_JIFFIES(TIME)   ((TIME) / (1000000000 / HZ))
--#define JIFFIES_TO_NS(TIME)   ((TIME) * (1000000000 / HZ))
++/* Some helpers for converting to/from various scales.*/
+ #define JIFFIES_TO_NS(TIME)   ((TIME) * (1000000000 / HZ))
 -
 -/*
 - * These are the 'tuning knobs' of the scheduler:
@@ -298,13 +251,10 @@
 -#define INTERACTIVE_SLEEP(p) \
 -      (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
 -              (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
-+#define NSJIFFY                       (1000000000 / HZ)       /* One jiffy in ns */
-+#define NS_TO_JIFFIES(TIME)   ((TIME) / NSJIFFY)
-+#define JIFFIES_TO_NS(TIME)   ((TIME) * NSJIFFY)
- 
- #define TASK_PREEMPTS_CURR(p, rq) \
-       ((p)->prio < (rq)->curr->prio)
- 
+-
+-#define TASK_PREEMPTS_CURR(p, rq) \
+-      ((p)->prio < (rq)->curr->prio)
+-
 -#define SCALE_PRIO(x, prio) \
 -      max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 -
@@ -316,170 +266,327 @@
 -              return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 -}
 -
+-#ifdef CONFIG_SMP
+-/*
+- * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
+- * Since cpu_power is a 'constant', we can use a reciprocal divide.
++#define MS_TO_NS(TIME)                ((TIME) * 1000000)
++#define MS_TO_US(TIME)                ((TIME) * 1000)
++#define US_TO_MS(TIME)                ((TIME) / 1000)
++
++#define TASK_PREEMPTS_CURR(p, curr)   ((p)->prio < (curr)->prio)
++
++/*
++ * This is the time all tasks within the same priority round robin.
++ * Value is in ms and set to a minimum of 10ms. Scales with number of cpus.
++ * Tunable via /proc interface.
++ */
++int rr_interval __read_mostly = 6;
++int sched_interactive __read_mostly = 1;
++
++/*
++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
++ * are allowed to run (over ISO_PERIOD seconds) as real time tasks.
++ * sched_iso_period - sysctl which determines the number of seconds over
++ * which cpu usage of SCHED_ISO tasks is averaged to determine if they are
++ * exceeding their allowable bandwidth.
++*/
++int sched_iso_cpu __read_mostly = 80;
++int sched_iso_period __read_mostly = 5;
++
++#define ISO_PERIOD    ((sched_iso_period * HZ) + 1)
++
++/*
++ * This contains a bitmap for each dynamic priority level with empty slots
++ * for the valid priorities each different nice level can have. It allows
++ * us to stagger the slots where differing priorities run in a way that
++ * keeps latency differences between different nice levels at a minimum.
++ * The purpose of a pre-generated matrix is for rapid lookup of next slot in
++ * O(1) time without having to recalculate every time priority gets demoted.
++ * All nice levels use priority slot 39 as this allows less niced tasks to
++ * get all priority slots better than that before expiration is forced.
++ * ie, where 0 means a slot for that priority, priority running from left to
++ * right is from prio 0 to prio 39:
++ * nice -20 0000000000000000000000000000000000000000
++ * nice -10 1000100010001000100010001000100010010000
++ * nice   0 1010101010101010101010101010101010101010
++ * nice   5 1011010110110101101101011011010110110110
++ * nice  10 1110111011101110111011101110111011101110
++ * nice  15 1111111011111110111111101111111011111110
++ * nice  19 1111111111111111111111111111111111111110
+  */
+-static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
+-{
+-      return reciprocal_divide(load, sg->reciprocal_cpu_power);
+-}
++static unsigned long prio_matrix[PRIO_RANGE][BITS_TO_LONGS(PRIO_RANGE)]
++                               __read_mostly;
+ 
 -/*
+- * Each time a sched group cpu_power is changed,
+- * we must compute its reciprocal value
+- */
+-static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
+-{
+-      sg->__cpu_power += val;
+-      sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
+-}
+-#endif
++struct rq;
+ 
+ /*
 - * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
 - * to time slice values: [800ms ... 100ms ... 5ms]
 - *
 - * The higher a thread's priority, the bigger timeslices
 - * it gets during one round of execution. But even the lowest
 - * priority thread gets MIN_TIMESLICE worth of execution time.
-- */
--
++ * These are the runqueue data structures:
+  */
++struct prio_array {
++      /* Tasks queued at each priority */
++      struct list_head queue[MAX_PRIO + 1];
+ 
 -static inline unsigned int task_timeslice(struct task_struct *p)
 -{
 -      return static_prio_timeslice(p->static_prio);
 -}
--
- /*
++      /*
++       * The bitmap of priorities queued for this array. While the expired
++       * array will never have realtime tasks on it, it is simpler to have
++       * equal sized bitmaps for a cheap array swap. Include 1 bit for
++       * delimiter.
++       */
++      DECLARE_BITMAP(prio_bitmap, MAX_PRIO + 1);
+ 
+-/*
 - * These are the runqueue data structures:
-+ * This is the time all tasks within the same priority round robin.
-+ * Set to a minimum of 6ms. It is 10 times longer in compute mode.
-  */
--
+- */
++      /*
++       * The best static priority (of the dynamic priority tasks) queued
++       * this array.
++       */
++      int best_static_prio;
+ 
 -struct prio_array {
 -      unsigned int nr_active;
 -      DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
 -      struct list_head queue[MAX_PRIO];
--};
-+#define _RR_INTERVAL          ((6 * HZ / 1001) + 1)
-+#define RR_INTERVAL           (_RR_INTERVAL * (1 + 9 * sched_compute))
-+#define DEF_TIMESLICE         (RR_INTERVAL * 19)
++#ifdef CONFIG_SMP
++      /* For convenience looks back at rq */
++      struct rq *rq;
++#endif
+ };
  
  /*
-  * This is the main, per-CPU runqueue data structure.
-@@ -224,14 +144,16 @@ struct rq {
+@@ -260,14 +200,28 @@
         */
        unsigned long nr_uninterruptible;
  
 -      unsigned long expired_timestamp;
        /* Cached timestamp set by update_cpu_clock() */
        unsigned long long most_recent_timestamp;
-+      unsigned short cache_ticks, preempted;
-+      unsigned long iso_ticks;
-+      unsigned short iso_refractory;
        struct task_struct *curr, *idle;
        unsigned long next_balance;
        struct mm_struct *prev_mm;
 -      struct prio_array *active, *expired, arrays[2];
 -      int best_expired_prio;
-+      unsigned long bitmap[BITS_TO_LONGS(MAX_PRIO + 1)];
-+      struct list_head queue[MAX_PRIO];
++
++      struct prio_array *active, *expired, *idleprio, arrays[2];
++      unsigned long *dyn_bitmap, *exp_bitmap;
++
++      /*
++       * The current dynamic priority level this runqueue is at per static
++       * priority level.
++       */
++      int prio_level[PRIO_RANGE];
++
++      /* How many times we have rotated the priority queue */
++      unsigned long prio_rotation;
++      unsigned long iso_ticks;
++      unsigned short iso_refractory;
++
++      /* Number of idleprio tasks running */
++      unsigned long nr_idleprio;
        atomic_t nr_iowait;
  
  #ifdef CONFIG_SMP
-@@ -568,13 +490,7 @@ static inline struct rq *this_rq_lock(vo
- 
+@@ -606,12 +560,9 @@
  #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  /*
-- * Called when a process is dequeued from the active array and given
+  * Called when a process is dequeued from the active array and given
 - * the cpu.  We should note that with the exception of interactive
 - * tasks, the expired queue will become the active queue after the active
 - * queue is empty, without explicitly dequeuing and requeuing tasks in the
 - * expired queue.  (Interactive tasks may be requeued directly to the
 - * active queue, thus delaying tasks in the expired queue from running;
 - * see scheduler_tick()).
-+ * Called when a process is dequeued and given the cpu.
++ * the cpu.  We should note that the expired queue will become the active
++ * queue after the active queue is empty, without explicitly dequeuing and
++ * requeuing tasks in the expired queue.
   *
   * This function is only called from sched_info_arrive(), rather than
   * dequeue_task(). Even though a task may be queued and dequeued multiple
-@@ -607,13 +523,11 @@ static void sched_info_arrive(struct tas
- }
- 
- /*
-- * Called when a process is queued into either the active or expired
-- * array.  The time is noted and later used to determine how long we
-- * had to wait for us to reach the cpu.  Since the expired queue will
-- * become the active queue after active queue is empty, without dequeuing
-- * and requeuing any tasks, we are interested in queuing to either. It
-- * is unusual but not impossible for tasks to be dequeued and immediately
-- * requeued in the same or another array: this can happen in sched_yield(),
-+ * Called when a process is queued.
-+ * The time is noted and later used to determine how long we had to wait for
-+ * us to reach the cpu.
-+ * It is unusual but not impossible for tasks to be dequeued and immediately
-+ * requeued: this can happen in sched_yield(),
-  * set_user_nice(), and even load_balance() as it moves tasks from runqueue
-  * to runqueue.
-  *
-@@ -672,73 +586,81 @@ sched_info_switch(struct task_struct *pr
+@@ -709,71 +660,304 @@
  #define sched_info_switch(t, next)    do { } while (0)
  #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
  
--/*
-- * Adding/removing a task to/from a priority array:
-- */
--static void dequeue_task(struct task_struct *p, struct prio_array *array)
-+#if BITS_PER_LONG < 64
-+static inline void longlimit(unsigned long long *longlong)
++static int idleprio_suitable(struct task_struct *p)
 +{
-+      if (*longlong > (1 << 31))
-+              *longlong = 1 << 31;
++      return (!p->mutexes_held && !freezing(p) && !signal_pending(p) &&
++              !(p->flags & (PF_NONSLEEP | PF_EXITING)));
 +}
-+#else
-+static inline void longlimit(unsigned long long *__unused)
++
++static int idleprio(const struct task_struct *p)
 +{
++      return (p->prio == MAX_PRIO);
++}
++
++static inline int task_queued(struct task_struct *task)
++{
++      return !list_empty(&task->run_list);
++}
++
++static inline void set_dynamic_bit(struct task_struct *p, struct rq *rq)
++{
++      __set_bit(p->prio, p->array->prio_bitmap);
 +}
-+#endif
 +
-+/* Get nanosecond clock difference without overflowing unsigned long. */
-+static unsigned long ns_diff(unsigned long long v1, unsigned long long v2)
+ /*
+- * Adding/removing a task to/from a priority array:
++ * Removing from a runqueue.
+  */
+-static void dequeue_task(struct task_struct *p, struct prio_array *array)
++static void dequeue_task(struct task_struct *p, struct rq *rq)
  {
 -      array->nr_active--;
 -      list_del(&p->run_list);
 -      if (list_empty(array->queue + p->prio))
 -              __clear_bit(p->prio, array->bitmap);
-+      unsigned long long vdiff;
-+      if (likely(v1 >= v2)) {
-+              vdiff = v1 - v2;
-+              longlimit(&vdiff);
-+      } else {
-+              /*
-+               * Rarely the clock appears to go backwards. There should
-+               * always be a positive difference so return 1.
-+               */
-+              vdiff = 1;
-+      }
-+      return (unsigned long)vdiff;
++      list_del_init(&p->run_list);
++      if (idleprio_task(p) && idleprio(p))
++              rq->nr_idleprio--;
++      else if (list_empty(p->array->queue + p->prio))
++              __clear_bit(p->prio, p->array->prio_bitmap);
  }
  
<<Diff was trimmed, longer than 597 lines>>
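The prio_matrix comment block in the sched.c hunk above lists one row per
nice level, where '0' marks a priority slot that level may draw quota from.
Two properties follow directly from the quoted table: each row holds
20 - nice zero slots (40 for nice -20, 20 for nice 0, 1 for nice 19), and
every row shares slot 39. Below is a minimal standalone sketch of generating
such an evenly staggered table. The real generator in 2.6.22-ck1 falls
outside this trimmed diff, so the distribution step used here (integer error
diffusion anchored at slot 39) is an assumption; it reproduces the
staggering idea, though a few quoted rows (e.g. nice -10) differ slightly in
the tail.

/*
 * Illustrative sketch only: prints a stagger table in the same format as
 * the prio_matrix comment. The "20 - nice" slot count comes from the
 * quoted table; the error-diffusion spread is an assumption.
 */
#include <stdio.h>

#define PRIO_RANGE 40

int main(void)
{
	int nice;

	for (nice = -20; nice <= 19; nice++) {
		int slots = 20 - nice;		/* usable slots for this level */
		int err = PRIO_RANGE - 1;	/* bias so slot 39 is always used */
		char row[PRIO_RANGE + 1];
		int i;

		/* Walk right to left, marking a usable slot ('0') whenever
		 * the accumulated error wraps past PRIO_RANGE. */
		for (i = PRIO_RANGE - 1; i >= 0; i--) {
			err += slots;
			if (err >= PRIO_RANGE) {
				err -= PRIO_RANGE;
				row[i] = '0';
			} else
				row[i] = '1';
		}
		row[PRIO_RANGE] = '\0';
		printf("nice %3d %s\n", nice, row);
	}
	return 0;
}

Run on its own this reproduces the quoted rows for nice -20, 0, 10, 15 and
19 exactly, which is enough to see how a pre-generated bitmap lets the
scheduler find a demoted task's next slot in O(1) without recalculating.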

---- CVS-web:
    http://cvs.pld-linux.org/SOURCES/kernel-desktop-ck.patch?r1=1.8&r2=1.9&f=u

