Author: lmasko                       Date: Sat Sep 25 19:26:58 2010 GMT
Module: packages                      Tag: Titanium
---- Log message:
- BFS up to v0.350

---- Files affected:
packages/kernel-desktop:
   kernel-desktop-sched-bfs.patch (1.1.2.20 -> 1.1.2.21) 

---- Diffs:

================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.20 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.21
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.20     Mon Aug 30 23:56:49 2010
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch      Sat Sep 25 21:26:51 2010
@@ -1,4 +1,4 @@
-The Brain Fuck Scheduler v0.330 by Con Kolivas.
+The Brain Fuck Scheduler v0.350 by Con Kolivas.
 
 A single shared runqueue O(n) strict fairness earliest deadline first design.
 
@@ -38,17 +38,17 @@
  kernel/kthread.c                          |    2 
  kernel/posix-cpu-timers.c                 |   14 
  kernel/sched.c                            |    4 
- kernel/sched_bfs.c                        | 6874 ++++++++++++++++++++++++++++++
+ kernel/sched_bfs.c                        | 6984 ++++++++++++++++++++++++++++++
  kernel/slow-work.c                        |    1 
  kernel/sysctl.c                           |   31 
  lib/Kconfig.debug                         |    2 
  mm/oom_kill.c                             |    2 
- 19 files changed, 7481 insertions(+), 28 deletions(-)
+ 19 files changed, 7591 insertions(+), 28 deletions(-)
 
-Index: linux-2.6.35-bfs/Documentation/sysctl/kernel.txt
+Index: linux-2.6.35.5-ck1/Documentation/sysctl/kernel.txt
 ===================================================================
---- linux-2.6.35-bfs.orig/Documentation/sysctl/kernel.txt      2010-02-25 21:51:46.000000000 +1100
-+++ linux-2.6.35-bfs/Documentation/sysctl/kernel.txt   2010-08-13 08:39:05.070929446 +1000
+--- linux-2.6.35.5-ck1.orig/Documentation/sysctl/kernel.txt    2010-02-25 21:51:46.000000000 +1100
++++ linux-2.6.35.5-ck1/Documentation/sysctl/kernel.txt 2010-09-25 01:17:57.872918484 +1000
 @@ -31,6 +31,7 @@ show up in /proc/sys/kernel:
  - domainname
  - hostname
@@ -103,10 +103,10 @@
  rtsig-max & rtsig-nr:
  
  The file rtsig-max can be used to tune the maximum number
-Index: linux-2.6.35-bfs/include/linux/init_task.h
+Index: linux-2.6.35.5-ck1/include/linux/init_task.h
 ===================================================================
---- linux-2.6.35-bfs.orig/include/linux/init_task.h    2010-08-02 11:12:25.000000000 +1000
-+++ linux-2.6.35-bfs/include/linux/init_task.h 2010-08-13 08:39:05.070929446 +1000
+--- linux-2.6.35.5-ck1.orig/include/linux/init_task.h  2010-08-02 11:12:25.000000000 +1000
++++ linux-2.6.35.5-ck1/include/linux/init_task.h       2010-09-25 01:17:57.873918535 +1000
 @@ -106,6 +106,69 @@ extern struct cred init_cred;
   *  INIT_TASK is used to set up the first task table, touch at
   * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -186,10 +186,10 @@
  
  #define INIT_CPU_TIMERS(cpu_timers)                                   \
  {                                                                     \
-Index: linux-2.6.35-bfs/include/linux/sched.h
+Index: linux-2.6.35.5-ck1/include/linux/sched.h
 ===================================================================
---- linux-2.6.35-bfs.orig/include/linux/sched.h        2010-08-02 11:12:25.000000000 +1000
-+++ linux-2.6.35-bfs/include/linux/sched.h     2010-08-29 09:24:24.689454445 +1000
+--- linux-2.6.35.5-ck1.orig/include/linux/sched.h      2010-09-25 01:17:40.364012559 +1000
++++ linux-2.6.35.5-ck1/include/linux/sched.h   2010-09-25 01:21:34.406120878 +1000
 @@ -36,8 +36,15 @@
  #define SCHED_FIFO            1
  #define SCHED_RR              2
@@ -216,7 +216,7 @@
  extern cpumask_var_t nohz_cpu_mask;
  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
  extern int select_nohz_load_balancer(int cpu);
-@@ -1179,17 +1184,31 @@ struct task_struct {
+@@ -1173,17 +1178,31 @@ struct task_struct {
  
        int lock_depth;         /* BKL lock depth */
  
@@ -233,7 +233,7 @@
        int prio, static_prio, normal_prio;
        unsigned int rt_priority;
 +#ifdef CONFIG_SCHED_BFS
-+      int time_slice, first_time_slice;
++      int time_slice;
 +      u64 deadline;
 +      struct list_head run_list;
 +      u64 last_ran;
@@ -248,7 +248,7 @@
  
  #ifdef CONFIG_PREEMPT_NOTIFIERS
        /* list of struct preempt_notifier: */
-@@ -1284,6 +1303,9 @@ struct task_struct {
+@@ -1278,6 +1297,9 @@ struct task_struct {
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
  
        cputime_t utime, stime, utimescaled, stimescaled;
@@ -258,7 +258,7 @@
        cputime_t gtime;
  #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        cputime_t prev_utime, prev_stime;
-@@ -1506,6 +1528,67 @@ struct task_struct {
+@@ -1500,6 +1522,67 @@ struct task_struct {
  #endif
  };
  
@@ -281,7 +281,7 @@
 +
 +static inline void print_scheduler_version(void)
 +{
-+      printk(KERN_INFO"BFS CPU scheduler v0.330 by Con Kolivas.\n");
++      printk(KERN_INFO"BFS CPU scheduler v0.350 by Con Kolivas.\n");
 +}
 +
 +static inline int iso_task(struct task_struct *p)
@@ -326,7 +326,7 @@
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
-@@ -1524,9 +1607,19 @@ struct task_struct {
+@@ -1518,9 +1601,19 @@ struct task_struct {
  
  #define MAX_USER_RT_PRIO      100
  #define MAX_RT_PRIO           MAX_USER_RT_PRIO
@@ -347,7 +347,7 @@
  
  static inline int rt_prio(int prio)
  {
-@@ -1835,7 +1928,7 @@ task_sched_runtime(struct task_struct *t
+@@ -1829,7 +1922,7 @@ task_sched_runtime(struct task_struct *t
  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
  
  /* sched_exec is called by processes performing an exec */
@@ -356,7 +356,7 @@
  extern void sched_exec(void);
  #else
  #define sched_exec()   {}
-@@ -1999,6 +2092,9 @@ extern void wake_up_new_task(struct task
+@@ -1993,6 +2086,9 @@ extern void wake_up_new_task(struct task
   static inline void kick_process(struct task_struct *tsk) { }
  #endif
  extern void sched_fork(struct task_struct *p, int clone_flags);
@@ -366,10 +366,10 @@
  extern void sched_dead(struct task_struct *p);
  
  extern void proc_caches_init(void);
-Index: linux-2.6.35-bfs/kernel/sysctl.c
+Index: linux-2.6.35.5-ck1/kernel/sysctl.c
 ===================================================================
---- linux-2.6.35-bfs.orig/kernel/sysctl.c      2010-08-02 11:12:25.000000000 +1000
-+++ linux-2.6.35-bfs/kernel/sysctl.c   2010-08-13 08:39:05.071929295 +1000
+--- linux-2.6.35.5-ck1.orig/kernel/sysctl.c    2010-08-02 11:12:25.000000000 +1000
++++ linux-2.6.35.5-ck1/kernel/sysctl.c 2010-09-25 01:19:19.941498189 +1000
 @@ -115,7 +115,12 @@ static int zero;
  static int __maybe_unused one = 1;
  static int __maybe_unused two = 2;
@@ -379,7 +379,7 @@
 +#ifdef CONFIG_SCHED_BFS
 +extern int rr_interval;
 +extern int sched_iso_cpu;
-+static int __read_mostly five_thousand = 5000;
++static int __read_mostly one_thousand = 1000;
 +#endif
  #ifdef CONFIG_PRINTK
  static int ten_thousand = 10000;
@@ -421,7 +421,7 @@
 +              .mode           = 0644,
 +              .proc_handler   = &proc_dointvec_minmax,
 +              .extra1         = &one,
-+              .extra2         = &five_thousand,
++              .extra2         = &one_thousand,
 +      },
 +      {
 +              .procname       = "iso_cpu",
@@ -436,11 +436,11 @@
  #if defined(CONFIG_S390) && defined(CONFIG_SMP)
        {
                .procname       = "spin_retry",
-Index: linux-2.6.35-bfs/kernel/sched_bfs.c
+Index: linux-2.6.35.5-ck1/kernel/sched_bfs.c
 ===================================================================
 --- /dev/null  1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.35-bfs/kernel/sched_bfs.c        2010-08-29 10:02:43.030955931 +1000
-@@ -0,0 +1,6874 @@
++++ linux-2.6.35.5-ck1/kernel/sched_bfs.c      2010-09-25 01:21:48.281964938 +1000
+@@ -0,0 +1,6984 @@
 +/*
 + *  kernel/sched_bfs.c, was sched.c
 + *
@@ -549,10 +549,19 @@
 +#define MAX_USER_PRIO         (USER_PRIO(MAX_PRIO))
 +#define SCHED_PRIO(p)         ((p)+MAX_RT_PRIO)
 +
-+/* Some helpers for converting to/from various scales.*/
++/*
++ * Some helpers for converting to/from various scales. Use shifts to get
++ * approximate multiples of ten for less overhead.
++ */
 +#define JIFFIES_TO_NS(TIME)   ((TIME) * (1000000000 / HZ))
-+#define MS_TO_NS(TIME)                ((TIME) * 1000000)
-+#define MS_TO_US(TIME)                ((TIME) * 1000)
++#define HALF_JIFFY_NS         (1000000000 / HZ / 2)
++#define HALF_JIFFY_US         (1000000 / HZ / 2)
++#define MS_TO_NS(TIME)                ((TIME) << 20)
++#define MS_TO_US(TIME)                ((TIME) << 10)
++#define NS_TO_MS(TIME)                ((TIME) >> 20)
++#define NS_TO_US(TIME)                ((TIME) >> 10)
++
++#define RESCHED_US    (100) /* Reschedule if less than this many us left */
 +
 +/*
 + * This is the time all tasks within the same priority round robin.
@@ -583,8 +592,9 @@
 +}
 +
 +/*
-+ * The global runqueue data that all CPUs work off. All data is protected
-+ * by grq.lock.
++ * The global runqueue data that all CPUs work off. Data is protected either
++ * by the global grq lock, or the discrete lock that precedes the data in this
++ * struct.
 + */
 +struct global_rq {
 +      raw_spinlock_t lock;
@@ -593,17 +603,17 @@
 +      unsigned long long nr_switches;
 +      struct list_head queue[PRIO_LIMIT];
 +      DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
-+      int iso_ticks;
-+      int iso_refractory;
 +#ifdef CONFIG_SMP
 +      unsigned long qnr; /* queued not running */
 +      cpumask_t cpu_idle_map;
 +      int idle_cpus;
 +#endif
-+#if BITS_PER_LONG < 64
-+      unsigned long jiffies;
-+      u64 jiffies_64;
-+#endif
++      /* Nanosecond jiffies */
++      u64 niffies;
++
++      raw_spinlock_t iso_lock;
++      int iso_ticks;
++      int iso_refractory;
 +};
 +
 +/* There can be only one */
@@ -619,8 +629,8 @@
 +      u64 nohz_stamp;
 +      unsigned char in_nohz_recently;
 +#endif
++      struct task_struct *last_task;
 +#endif
-+      unsigned int skip_clock_update;
 +
 +      struct task_struct *curr, *idle;
 +      struct mm_struct *prev_mm;
@@ -656,9 +666,11 @@
 +      /* See if all cache siblings are idle */
 +      cpumask_t cache_siblings;
 +#endif
++      u64 last_niffy; /* Last time this RQ updated grq.niffies */
 +#endif
++      u64 clock, old_clock, last_tick;
++      int dither;
 +
-+      u64 clock;
 +#ifdef CONFIG_SCHEDSTATS
 +
 +      /* latency stats */
@@ -729,15 +741,6 @@
 +static struct root_domain def_root_domain;
 +#endif
 +
-+static inline int cpu_of(struct rq *rq)
-+{
-+#ifdef CONFIG_SMP
-+      return rq->cpu;
-+#else
-+      return 0;
-+#endif
-+}
-+
 +#define rcu_dereference_check_sched_domain(p) \
 +      rcu_dereference_check((p), \
 +                            rcu_read_lock_sched_held() || \
@@ -753,17 +756,65 @@
 +#define for_each_domain(cpu, __sd) \
 +      for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 +
++static inline void update_rq_clock(struct rq *rq);
++
 +#ifdef CONFIG_SMP
 +#define cpu_rq(cpu)           (&per_cpu(runqueues, (cpu)))
 +#define this_rq()             (&__get_cpu_var(runqueues))
 +#define task_rq(p)            cpu_rq(task_cpu(p))
 +#define cpu_curr(cpu)         (cpu_rq(cpu)->curr)
++static inline int cpu_of(struct rq *rq)
++{
++      return rq->cpu;
++}
++
++/*
++ * Niffies are a globally increasing nanosecond counter. Whenever a runqueue
++ * clock is updated with the grq.lock held, it is an opportunity to update the
++ * niffies value. Any CPU can update it by adding how much its clock has
++ * increased since it last updated niffies, minus any added niffies by other
++ * CPUs.
++ */
++static inline void update_clocks(struct rq *rq)
++{
++      s64 ndiff;
++
++      update_rq_clock(rq);
++      ndiff = rq->clock - rq->old_clock;
++      /* old_clock is only updated when we are updating niffies */
++      rq->old_clock = rq->clock;
++      ndiff -= grq.niffies - rq->last_niffy;
++      /*
++       * Sanity check should sched_clock return bogus values or be limited to
++       * just jiffy resolution. Some time will always have passed.
++       */
++      if (unlikely(ndiff < 1 || ndiff > MS_TO_NS(rr_interval)))
++              ndiff = 1;
++      grq.niffies += ndiff;
++      rq->last_niffy = grq.niffies;
++}
 +#else /* CONFIG_SMP */
 +static struct rq *uprq;
 +#define cpu_rq(cpu)   (uprq)
 +#define this_rq()     (uprq)
 +#define task_rq(p)    (uprq)
 +#define cpu_curr(cpu) ((uprq)->curr)
++static inline int cpu_of(struct rq *rq)
++{
++      return 0;
++}
++
++static inline void update_clocks(struct rq *rq)
++{
++      s64 ndiff;
++
++      update_rq_clock(rq);
++      ndiff = rq->clock - rq->old_clock;
++      rq->old_clock = rq->clock;
++      if (unlikely(ndiff < 1 || ndiff > MS_TO_US(rr_interval)))
++              ndiff = 1;
++      grq.niffies += ndiff;
++}
 +#endif
 +#define raw_rq()      (&__raw_get_cpu_var(runqueues))
 +
@@ -778,13 +829,13 @@
 +
 +/*
 + * All common locking functions performed on grq.lock. rq->clock is local to
-+ * the cpu accessing it so it can be modified just with interrupts disabled,
-+ * but looking up task_rq must be done under grq.lock to be safe.
++ * the CPU accessing it so it can be modified just with interrupts disabled
++ * when we're not updating niffies.
++ * Looking up task_rq must be done under grq.lock to be safe.
 + */
-+inline void update_rq_clock(struct rq *rq)
++static inline void update_rq_clock(struct rq *rq)
 +{
-+      if (!rq->skip_clock_update)
-+              rq->clock = sched_clock_cpu(cpu_of(rq));
++      rq->clock = sched_clock_cpu(cpu_of(rq));
 +}
 +
 +static inline int task_running(struct task_struct *p)
@@ -813,8 +864,8 @@
 +static inline void time_lock_grq(struct rq *rq)
 +      __acquires(grq.lock)
 +{
-+      update_rq_clock(rq);
 +      grq_lock();
++      update_clocks(rq);
 +}
 +
 +static inline void grq_unlock_irq(void)
@@ -848,7 +899,7 @@
 +      __acquires(grq.lock)
 +{
 +      struct rq *rq = task_grq_lock(p, flags);
-+      update_rq_clock(rq);
++      update_clocks(rq);
 +      return rq;
 +}
 +
@@ -863,7 +914,7 @@
 +      __acquires(grq.lock)
 +{
 +      struct rq *rq = task_grq_lock_irq(p);
-+      update_rq_clock(rq);
++      update_clocks(rq);
 +}
 +
 +static inline void task_grq_unlock_irq(void)
@@ -958,33 +1009,6 @@
 +}
 +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 +
-+/*
-+ * In order to have a monotonic clock that does not wrap we have a 64 bit
-+ * unsigned long that's protected by grq.lock used in place of jiffies on
-+ * 32 bit builds.
-+ */
-+#if BITS_PER_LONG < 64
-+static inline void update_gjiffies(void)
-+{
-+      if (grq.jiffies != jiffies) {
-+              grq_lock();
-+              grq.jiffies = jiffies;
-+              grq.jiffies_64++;
-+              grq_unlock();
-+      }
-+}
-+
-+#define gjiffies (grq.jiffies_64)
-+
-+#else /* BITS_PER_LONG < 64 */
-+static inline void update_gjiffies(void)
-+{
-+}
-+
-+#define gjiffies jiffies
-+
-+#endif /* BITS_PER_LONG < 64 */
-+
 +static inline int deadline_before(u64 deadline, u64 time)
 +{
 +      return (deadline < time);
@@ -1017,17 +1041,6 @@
 +}
 +
 +/*
-+ * When a task is freshly forked, the first_time_slice flag is set to say
-+ * it has taken time_slice from its parent and if it exits on this first
-+ * time_slice it can return its time_slice back to the parent.
-+ */
-+static inline void reset_first_time_slice(struct task_struct *p)
-+{
-+      if (unlikely(p->first_time_slice))
-+              p->first_time_slice = 0;
-+}
-+
-+/*
 + * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
 + * an idle task, we ensure none of the following conditions are met.
 + */
@@ -1089,11 +1102,11 @@
 +/*
 + * task_timeslice - all tasks of all priorities get the exact same timeslice
 + * length. CPU distribution is handled by giving different deadlines to
-+ * tasks of different priorities.
++ * tasks of different priorities. Use 128 as the base value for fast shifts.
 + */
 +static inline int task_timeslice(struct task_struct *p)
 +{
-+      return (rr_interval * task_prio_ratio(p) / 100);
++      return (rr_interval * task_prio_ratio(p) / 128);
 +}
 +
 +#ifdef CONFIG_SMP
@@ -1145,6 +1158,15 @@
 +
 +static void resched_task(struct task_struct *p);
 +
++/*
++ * last_task stores the last non-idle task scheduled on the local rq for
++ * cache warmth testing.
++ */
++static inline void set_last_task(struct rq *rq, struct task_struct *p)
++{
++      rq->last_task = p;
++}
++
 +#define CPUIDLE_CACHE_BUSY    (1)
 +#define CPUIDLE_DIFF_CPU      (2)
 +#define CPUIDLE_THREAD_BUSY   (4)
@@ -1167,6 +1189,9 @@
 + * Other node, other CPU, idle cache, idle threads.
 + * Other node, other CPU, busy cache, idle threads.
 + * Other node, other CPU, busy threads.
++ *
++ * If p was the last task running on this rq, then regardless of where
++ * it has been running since then, it is cache warm on this rq.
 + */
 +static void resched_best_idle(struct task_struct *p)
 +{
@@ -1199,11 +1224,14 @@
 +              tmp_rq = cpu_rq(cpu_tmp);
 +
 +              if (rq->cpu_locality[cpu_tmp]) {
++                      /* Check rq->last_task hasn't been dereferenced */
++                      if (rq->last_task && p != rq->last_task) {
 +#ifdef CONFIG_NUMA
-+                      if (rq->cpu_locality[cpu_tmp] > 1)
-+                              ranking |= CPUIDLE_DIFF_NODE;
++                              if (rq->cpu_locality[cpu_tmp] > 1)
++                                      ranking |= CPUIDLE_DIFF_NODE;
 +#endif
-+                      ranking |= CPUIDLE_DIFF_CPU;
++                              ranking |= CPUIDLE_DIFF_CPU;
++                      }
 +              }
 +#ifdef CONFIG_SCHED_MC
 +              if (!(tmp_rq->cache_idle(cpu_tmp)))
@@ -1245,6 +1273,11 @@
 +static inline int
 +cache_distance(struct rq *task_rq, struct rq *rq, struct task_struct *p)
 +{
++      /* Check rq->last_task hasn't been dereferenced */
++      if (likely(rq->last_task)) {
++              if (rq->last_task == p)
++                      return 0;
++      }
 +      return rq->cpu_locality[cpu_of(task_rq)] * task_timeslice(p);
 +}
 +#else /* CONFIG_SMP */
@@ -1283,6 +1316,10 @@
 +{
 +      return 0;
 +}
++
++static inline void set_last_task(struct rq *rq, struct task_struct *p)
++{
++}
 +#endif /* CONFIG_SMP */
 +
 +/*
@@ -1330,7 +1367,7 @@
 + */
 +static void activate_task(struct task_struct *p, struct rq *rq)
 +{
-+      update_rq_clock(rq);
++      update_clocks(rq);
 +
 +      /*
 +       * Sleep time is in units of nanosecs, so shift by 20 to get a
@@ -1600,8 +1637,28 @@
 +#endif
 +
 +#define rq_idle(rq)   ((rq)->rq_prio == PRIO_LIMIT)
-+#define task_idle(p)  ((p)->prio == PRIO_LIMIT)
 +
++/*
++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or
++ * between themselves, they cooperatively multitask. An idle rq scores as
++ * prio PRIO_LIMIT so it is always preempted.
++ */
++static inline int
++can_preempt(struct task_struct *p, int prio, unsigned long deadline,
++          unsigned int policy)
++{
++      /* Better static priority RT task or better policy preemption */
++      if (p->prio < prio)
++              return 1;
++      if (p->prio > prio)
++              return 0;
++      /* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */
++      if (!deadline_before(p->deadline, deadline))
++              return 0;
++      return 1;
++}
++#ifdef CONFIG_SMP
 +#ifdef CONFIG_HOTPLUG_CPU
 +/*
 + * Check to see if there is a task that is affined only to offline CPUs but
@@ -1621,14 +1678,20 @@
 +#endif
 +
 +/*
-+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
-+ * basis of earlier deadlines. SCHED_BATCH, ISO and IDLEPRIO don't preempt
-+ * between themselves, they cooperatively multitask. An idle rq scores as
-+ * prio PRIO_LIMIT so it is always preempted. latest_deadline and
-+ * highest_prio_rq are initialised only to silence the compiler. When
-+ * all else is equal, still prefer this_rq.
++ * Check to see if p can run on cpu, and if not, whether there are any online
++ * CPUs it can run on instead.
++ */
++static inline int needs_other_cpu(struct task_struct *p, int cpu)
++{
++      if (unlikely(!cpu_isset(cpu, p->cpus_allowed) && online_cpus(p)))
++              return 1;
++      return 0;
++}
++
++/*
++ * latest_deadline and highest_prio_rq are initialised only to silence the
++ * compiler. When all else is equal, still prefer this_rq.
 + */
-+#ifdef CONFIG_SMP
 +static void try_preempt(struct task_struct *p, struct rq *this_rq)
 +{
 +      struct rq *highest_prio_rq = this_rq;
@@ -1636,6 +1699,10 @@
 +      int highest_prio;
 +      cpumask_t tmp;
 +
++      /* IDLEPRIO tasks never preempt anything */
<<Diff was trimmed, longer than 597 lines>>
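
The trimmed remainder is the bulk of the new kernel/sched_bfs.c. As a reading aid only, the design the patch preamble describes - one shared run list scanned O(n), with CPU share enforced by giving lower-priority tasks later virtual deadlines relative to grq.niffies - can be sketched in userspace roughly as below. This is not code from the patch: struct demo_task, pick_next_edf(), RR_INTERVAL_MS and the exact deadline formula are assumptions invented for illustration, loosely modelled on the prio-ratio (base 128), task_timeslice() and niffies pieces visible in the hunks above.

/*
 * Illustrative userspace sketch of the idea described in the patch preamble:
 * one shared run list, O(n) scan, earliest deadline first. NOT the kernel
 * code from sched_bfs.c; names and the deadline formula are assumptions.
 * Build: cc -std=c99 edf_demo.c
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define RR_INTERVAL_MS  6                       /* assumed round-robin quantum */
#define MS_TO_NS(ms)    ((uint64_t)(ms) << 20)  /* shift approximation, as in the patch */

struct demo_task {
	const char *name;
	int prio_ratio;       /* 128 = nice 0; larger = lower priority (assumption) */
	uint64_t deadline;    /* ns, relative to a global "niffies" counter */
	struct demo_task *next;
};

static uint64_t niffies;  /* stand-in for grq.niffies */

/* Hand a task a new deadline: later for lower priority, so higher-priority
 * tasks reach the front of the deadline order proportionally more often. */
static void set_deadline(struct demo_task *t)
{
	t->deadline = niffies + MS_TO_NS(RR_INTERVAL_MS) * t->prio_ratio / 128;
}

/* O(n) scan of the single shared list for the earliest deadline. */
static struct demo_task *pick_next_edf(struct demo_task *head)
{
	struct demo_task *best = head;

	for (struct demo_task *t = head; t; t = t->next)
		if (t->deadline < best->deadline)
			best = t;
	return best;
}

int main(void)
{
	struct demo_task c = { "batch",   256, 0, NULL };
	struct demo_task b = { "normal",  128, 0, &c };
	struct demo_task a = { "boosted",  64, 0, &b };

	for (struct demo_task *t = &a; t; t = t->next)
		set_deadline(t);

	for (int tick = 0; tick < 6; tick++) {
		struct demo_task *t = pick_next_edf(&a);

		printf("run %-8s (deadline %llu)\n", t->name,
		       (unsigned long long)t->deadline);
		niffies += MS_TO_NS(RR_INTERVAL_MS);  /* pretend a full slice ran */
		set_deadline(t);                      /* slice expired: new deadline */
	}
	return 0;
}

Over a longer run the task with the smallest prio ratio is picked the most often, which is the proportional CPU distribution the preamble refers to; in the patch itself the corresponding lookup lives in the trimmed body of sched_bfs.c.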

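A small aside on the conversion macros changed above: the shifts by 20 and 10 are deliberate approximations (1 ms is treated as 2^20 ns, and as 2^10 us), as the new comment says. A quick standalone check of the size of that error follows; this is plain arithmetic for illustration, not part of the patch.

/* The patch's shift-based conversions versus exact decimal scaling.
 * 1 ms becomes 1 << 20 = 1,048,576 ns instead of 1,000,000 ns, i.e. about
 * 4.9% too large; NS_TO_MS()/NS_TO_US() round the other way (down).
 */
#include <stdio.h>
#include <stdint.h>

#define MS_TO_NS(t)  ((uint64_t)(t) << 20)   /* as added by the patch */
#define NS_TO_MS(t)  ((uint64_t)(t) >> 20)

int main(void)
{
	uint64_t six_ms = MS_TO_NS(6);

	printf("6 ms -> %llu ns (exact: 6000000 ns, +%.1f%%)\n",
	       (unsigned long long)six_ms,
	       (six_ms - 6000000) * 100.0 / 6000000);
	printf("6000000 ns -> %llu ms (exact: 6 ms)\n",
	       (unsigned long long)NS_TO_MS(6000000));
	return 0;
}

For scheduler bookkeeping such as deadlines and timeslices the skew is harmless, which is the "less overhead" trade-off the new comment in the patch refers to.
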
---- CVS-web:
    
http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.20&r2=1.1.2.21&f=u

_______________________________________________
pld-cvs-commit mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
