Author: lmasko                       Date: Mon Dec 28 17:53:35 2009 GMT
Module: packages                      Tag: Titanium
---- Log message:
- BFS up to 313.

---- Files affected:
packages/kernel-desktop:
   kernel-desktop.spec (1.204.2.83 -> 1.204.2.84) , kernel-desktop-sched-bfs.patch (1.1.2.14 -> 1.1.2.15)

---- Diffs:

================================================================
Index: packages/kernel-desktop/kernel-desktop.spec
diff -u packages/kernel-desktop/kernel-desktop.spec:1.204.2.83 packages/kernel-desktop/kernel-desktop.spec:1.204.2.84
--- packages/kernel-desktop/kernel-desktop.spec:1.204.2.83      Sat Dec 26 15:09:30 2009
+++ packages/kernel-desktop/kernel-desktop.spec Mon Dec 28 18:53:29 2009
@@ -537,6 +537,9 @@
 echo "CONFIG_OPEN_TRACER=y" >> %{defconfig}
 %endif
 
+%if %{with bfs}
+echo "CONFIG_SCHED_BFS=y" >> %{defconfig}
+%endif
 }
 
 BuildKernel() {
@@ -977,6 +980,9 @@
 All persons listed below can be reached at <cvs_login>@pld-linux.org
 
 $Log$
+Revision 1.204.2.84  2009/12/28 17:53:29  lmasko
+- BFS up to 313.
+
 Revision 1.204.2.83  2009/12/26 14:09:30  shadzik
 - I2O_EXT_ADAPTEC_DMA64=y for pae kernel
 

================================================================
Index: packages/kernel-desktop/kernel-desktop-sched-bfs.patch
diff -u packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.14 packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.15
--- packages/kernel-desktop/kernel-desktop-sched-bfs.patch:1.1.2.14     Fri Oct 16 16:05:57 2009
+++ packages/kernel-desktop/kernel-desktop-sched-bfs.patch      Mon Dec 28 18:53:29 2009
@@ -1,14 +1,16 @@
-The Brain Fuck Scheduler v0.304 by Con Kolivas.
+The Brain Fuck Scheduler v0.313 by Con Kolivas.
 
 A single shared runqueue O(n) strict fairness earliest deadline first design.
 
-Ultra low latency and excellent desktop performance.
+Ultra low latency and excellent desktop performance for 1 to many CPUs.
 Not recommended for 4096 cpus.
 
 Scalability is optimal when your workload is equal to the number of CPUs on
 bfs. ie you should ONLY do make -j4 on quad core, -j2 on dual core and so on.
 
 Features SCHED_IDLEPRIO and SCHED_ISO scheduling policies as well.
+You do NOT need to use these policies for good performance, they are purely
+optional.
 
 To run something idleprio, use schedtool like so:
 
@@ -22,35 +24,31 @@
 cpu usage may be very different.
 
 ---
- Documentation/scheduler/sched-BFS.txt |  335 +
- Documentation/sysctl/kernel.txt       |   26 
- Makefile                              |    4 
- fs/pipe.c                             |    4 
- fs/proc/base.c                        |    2 
- include/linux/init_task.h             |   15 
- include/linux/ioprio.h                |    2 
- include/linux/sched.h                 |  193 -
- init/Kconfig                          |   61 
- init/main.c                           |    2 
- kernel/Makefile                       |    4 
- kernel/delayacct.c                    |    2 
- kernel/exit.c                         |    7 
- kernel/fork.c                         |    1 
- kernel/kthread.c                      |    3 
- kernel/posix-cpu-timers.c             |   14 
- kernel/sched_bfs.c                    | 6336 ++++++++++++++++++++++++++++++++++
- kernel/sysctl.c                       |  156 
- kernel/timer.c                        |    3 
- kernel/trace/trace.c                  |    4 
- kernel/workqueue.c                    |    2 
- mm/oom_kill.c                         |    2 
- 22 files changed, 6780 insertions(+), 398 deletions(-)
+ Documentation/scheduler/sched-BFS.txt     |  356 +
+ Documentation/sysctl/kernel.txt           |   26 
+ arch/powerpc/platforms/cell/spufs/sched.c |    5 
+ fs/proc/base.c                            |    2 
+ include/linux/init_task.h                 |   65 
+ include/linux/ioprio.h                    |    2 
+ include/linux/sched.h                     |  107 
+ init/Kconfig                              |   20 
+ init/main.c                               |    2 
+ kernel/delayacct.c                        |    2 
+ kernel/exit.c                             |    2 
+ kernel/fork.c                             |    2 
+ kernel/posix-cpu-timers.c                 |   14 
+ kernel/sched.c                            |    4 
+ kernel/sched_bfs.c                        | 6653 ++++++++++++++++++++++++++++++
+ kernel/sysctl.c                           |   35 
+ lib/Kconfig.debug                         |    2 
+ mm/oom_kill.c                             |    2 
+ 18 files changed, 7272 insertions(+), 29 deletions(-)
 
-Index: linux-2.6.31-bfs/Documentation/sysctl/kernel.txt
+Index: linux-2.6.32-bfs/Documentation/sysctl/kernel.txt
 ===================================================================
---- linux-2.6.31-bfs.orig/Documentation/sysctl/kernel.txt      2009-10-06 21:06:26.175820508 +1100
-+++ linux-2.6.31-bfs/Documentation/sysctl/kernel.txt   2009-10-06 21:06:48.532821648 +1100
-@@ -27,6 +27,7 @@ show up in /proc/sys/kernel:
+--- linux-2.6.32-bfs.orig/Documentation/sysctl/kernel.txt      2009-12-03 21:39:54.000000000 +1100
++++ linux-2.6.32-bfs/Documentation/sysctl/kernel.txt   2009-12-19 00:21:06.935377895 +1100
+@@ -29,6 +29,7 @@ show up in /proc/sys/kernel:
  - domainname
  - hostname
  - hotplug
@@ -58,7 +56,7 @@
  - java-appletviewer           [ binfmt_java, obsolete ]
  - java-interpreter            [ binfmt_java, obsolete ]
  - kstack_depth_to_print       [ X86 only ]
-@@ -49,6 +50,7 @@ show up in /proc/sys/kernel:
+@@ -51,6 +52,7 @@ show up in /proc/sys/kernel:
  - randomize_va_space
  - real-root-dev               ==> Documentation/initrd.txt
  - reboot-cmd                  [ SPARC only ]
@@ -66,11 +64,11 @@
  - rtsig-max
  - rtsig-nr
  - sem
-@@ -171,6 +173,16 @@ Default value is "/sbin/hotplug".
+@@ -209,6 +211,16 @@ Default value is "/sbin/hotplug".
  
  ==============================================================
  
-+iso_cpu:
++iso_cpu: (BFS CPU scheduler only).
 +
 +This sets the percentage cpu that the unprivileged SCHED_ISO tasks can
 +run effectively at realtime priority, averaged over a rolling five
@@ -83,11 +81,11 @@
  l2cr: (PPC only)
  
  This flag controls the L2 cache of G3 processor boards. If
-@@ -333,6 +345,20 @@ rebooting. ???
+@@ -383,6 +395,20 @@ rebooting. ???
  
  ==============================================================
  
-+rr_interval:
++rr_interval: (BFS CPU scheduler only)
 +
 +This is the smallest duration that any cpu process scheduling unit
 +will run for. Increasing this value can increase throughput of cpu
@@ -104,502 +102,325 @@
  rtsig-max & rtsig-nr:
  
  The file rtsig-max can be used to tune the maximum number
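
Both new tunables appear as ordinary files under /proc/sys/kernel (the list this hunk extends), so besides sysctl(8) they can be read, or simply probed for, from a program. A minimal sketch, illustration only and not part of the patch:

/* Illustrative sketch (not part of the patch): print the BFS sysctls
 * documented above.  Their presence under /proc/sys/kernel also serves
 * as a quick runtime check that the kernel was built with
 * CONFIG_SCHED_BFS; writing new values requires root. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char *tunables[] = {
	"/proc/sys/kernel/iso_cpu",	/* %cpu an unprivileged SCHED_ISO task may use */
	"/proc/sys/kernel/rr_interval",	/* smallest scheduling quantum */
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(tunables) / sizeof(tunables[0]); i++) {
		char buf[64];
		FILE *f = fopen(tunables[i], "r");

		if (!f) {
			fprintf(stderr, "%s: %s (BFS not enabled?)\n",
				tunables[i], strerror(errno));
			continue;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", tunables[i], buf);
		fclose(f);
	}

	/* Changing a value is just a write to the same file, e.g.
	 *   f = fopen("/proc/sys/kernel/iso_cpu", "w");
	 *   if (f) { fprintf(f, "%d\n", new_percentage); fclose(f); }
	 * which is equivalent to setting kernel.iso_cpu via sysctl(8). */
	return 0;
}
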
-Index: linux-2.6.31-bfs/fs/pipe.c
+Index: linux-2.6.32-bfs/include/linux/init_task.h
 ===================================================================
---- linux-2.6.31-bfs.orig/fs/pipe.c    2009-10-06 21:06:26.150821027 +1100
-+++ linux-2.6.31-bfs/fs/pipe.c 2009-10-06 21:06:48.533821285 +1100
-@@ -78,10 +78,6 @@ void pipe_wait(struct pipe_inode_info *p
- {
-       DEFINE_WAIT(wait);
- 
--      /*
--       * Pipes are system-local resources, so sleeping on them
--       * is considered a noninteractive wait:
--       */
-       prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
-       pipe_unlock(pipe);
-       schedule();
-Index: linux-2.6.31-bfs/include/linux/init_task.h
-===================================================================
---- linux-2.6.31-bfs.orig/include/linux/init_task.h    2009-10-06 21:06:26.181821043 +1100
-+++ linux-2.6.31-bfs/include/linux/init_task.h 2009-10-06 21:06:48.562821138 +1100
-@@ -116,21 +116,16 @@ extern struct cred init_cred;
-       .usage          = ATOMIC_INIT(2),                               \
-       .flags          = PF_KTHREAD,                                   \
-       .lock_depth     = -1,                                           \
--      .prio           = MAX_PRIO-20,                                  \
+--- linux-2.6.32-bfs.orig/include/linux/init_task.h    2009-12-03 21:40:09.000000000 +1100
++++ linux-2.6.32-bfs/include/linux/init_task.h 2009-12-19 00:21:06.936211740 +1100
+@@ -119,6 +119,69 @@ extern struct cred init_cred;
+  *  INIT_TASK is used to set up the first task table, touch at
+  * your own risk!. Base=0, limit=0x1fffff (=2MB)
+  */
++#ifdef CONFIG_SCHED_BFS
++#define INIT_TASK(tsk)        \
++{                                                                     \
++      .state          = 0,                                            \
++      .stack          = &init_thread_info,                            \
++      .usage          = ATOMIC_INIT(2),                               \
++      .flags          = PF_KTHREAD,                                   \
++      .lock_depth     = -1,                                           \
 +      .prio           = NORMAL_PRIO,                                  \
-       .static_prio    = MAX_PRIO-20,                                  \
--      .normal_prio    = MAX_PRIO-20,                                  \
++      .static_prio    = MAX_PRIO-20,                                  \
 +      .normal_prio    = NORMAL_PRIO,                                  \
 +      .deadline       = 0,                                            \
-       .policy         = SCHED_NORMAL,                                 \
-       .cpus_allowed   = CPU_MASK_ALL,                                 \
-       .mm             = NULL,                                         \
-       .active_mm      = &init_mm,                                     \
--      .se             = {                                             \
--              .group_node     = LIST_HEAD_INIT(tsk.se.group_node),    \
--      },                                                              \
--      .rt             = {                                             \
--              .run_list       = LIST_HEAD_INIT(tsk.rt.run_list),      \
--              .time_slice     = HZ,                                   \
--              .nr_cpus_allowed = NR_CPUS,                             \
--      },                                                              \
++      .policy         = SCHED_NORMAL,                                 \
++      .cpus_allowed   = CPU_MASK_ALL,                                 \
++      .mm             = NULL,                                         \
++      .active_mm      = &init_mm,                                     \
 +      .run_list       = LIST_HEAD_INIT(tsk.run_list),                 \
 +      .time_slice     = HZ,                                   \
-       .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
-       .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
-       .ptraced        = LIST_HEAD_INIT(tsk.ptraced),                  \
-Index: linux-2.6.31-bfs/include/linux/sched.h
++      .tasks          = LIST_HEAD_INIT(tsk.tasks),                    \
++      .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
++      .ptraced        = LIST_HEAD_INIT(tsk.ptraced),                  \
++      .ptrace_entry   = LIST_HEAD_INIT(tsk.ptrace_entry),             \
++      .real_parent    = &tsk,                                         \
++      .parent         = &tsk,                                         \
++      .children       = LIST_HEAD_INIT(tsk.children),                 \
++      .sibling        = LIST_HEAD_INIT(tsk.sibling),                  \
++      .group_leader   = &tsk,                                         \
++      .real_cred      = &init_cred,                                   \
++      .cred           = &init_cred,                                   \
++      .cred_guard_mutex =                                             \
++               __MUTEX_INITIALIZER(tsk.cred_guard_mutex),             \
++      .comm           = "swapper",                                    \
++      .thread         = INIT_THREAD,                                  \
++      .fs             = &init_fs,                                     \
++      .files          = &init_files,                                  \
++      .signal         = &init_signals,                                \
++      .sighand        = &init_sighand,                                \
++      .nsproxy        = &init_nsproxy,                                \
++      .pending        = {                                             \
++              .list = LIST_HEAD_INIT(tsk.pending.list),               \
++              .signal = {{0}}},                                       \
++      .blocked        = {{0}},                                        \
++      .alloc_lock     = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),         \
++      .journal_info   = NULL,                                         \
++      .cpu_timers     = INIT_CPU_TIMERS(tsk.cpu_timers),              \
++      .fs_excl        = ATOMIC_INIT(0),                               \
++      .pi_lock        = __SPIN_LOCK_UNLOCKED(tsk.pi_lock),            \
++      .timer_slack_ns = 50000, /* 50 usec default slack */            \
++      .pids = {                                                       \
++              [PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),            \
++              [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),           \
++              [PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),            \
++      },                                                              \
++      .dirties = INIT_PROP_LOCAL_SINGLE(dirties),                     \
++      INIT_IDS                                                        \
++      INIT_PERF_EVENTS(tsk)                                           \
++      INIT_TRACE_IRQFLAGS                                             \
++      INIT_LOCKDEP                                                    \
++      INIT_FTRACE_GRAPH                                               \
++      INIT_TRACE_RECURSION                                            \
++      INIT_TASK_RCU_PREEMPT(tsk)                                      \
++}
++#else /* CONFIG_SCHED_BFS */
+ #define INIT_TASK(tsk)        \
+ {                                                                     \
+       .state          = 0,                                            \
+@@ -185,7 +248,7 @@ extern struct cred init_cred;
+       INIT_TRACE_RECURSION                                            \
+       INIT_TASK_RCU_PREEMPT(tsk)                                      \
+ }
+-
++#endif /* CONFIG_SCHED_BFS */
+ 
+ #define INIT_CPU_TIMERS(cpu_timers)                                   \
+ {                                                                     \
+Index: linux-2.6.32-bfs/include/linux/sched.h
 ===================================================================
---- linux-2.6.31-bfs.orig/include/linux/sched.h        2009-10-06 21:06:26.192821918 +1100
-+++ linux-2.6.31-bfs/include/linux/sched.h     2009-10-08 22:59:46.191157813 +1100
-@@ -36,8 +36,11 @@
+--- linux-2.6.32-bfs.orig/include/linux/sched.h        2009-12-03 21:40:09.000000000 +1100
++++ linux-2.6.32-bfs/include/linux/sched.h     2009-12-28 03:03:44.025251129 +1100
+@@ -36,8 +36,15 @@
  #define SCHED_FIFO            1
  #define SCHED_RR              2
  #define SCHED_BATCH           3
 -/* SCHED_ISO: reserved but not implemented yet */
--#define SCHED_IDLE            5
++/* SCHED_ISO: Implemented on BFS only */
+ #define SCHED_IDLE            5
++#ifdef CONFIG_SCHED_BFS
 +#define SCHED_ISO             4
-+#define SCHED_IDLEPRIO                5
-+
++#define SCHED_IDLEPRIO                SCHED_IDLE
 +#define SCHED_MAX             (SCHED_IDLEPRIO)
 +#define SCHED_RANGE(policy)   ((policy) <= SCHED_MAX)
++#endif
++
+ /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+ #define SCHED_RESET_ON_FORK     0x40000000
  
- #ifdef __KERNEL__
- 
-@@ -144,13 +147,10 @@ extern u64 cpu_nr_migrations(int cpu);
- extern unsigned long get_parent_ip(unsigned long addr);
- 
- struct seq_file;
--struct cfs_rq;
- struct task_group;
- #ifdef CONFIG_SCHED_DEBUG
- extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
- extern void proc_sched_set_task(struct task_struct *p);
--extern void
--print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
- #else
- static inline void
- proc_sched_show_task(struct task_struct *p, struct seq_file *m)
-@@ -159,10 +159,6 @@ proc_sched_show_task(struct task_struct 
- static inline void proc_sched_set_task(struct task_struct *p)
- {
- }
--static inline void
--print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
--{
--}
- #endif
- 
- extern unsigned long long time_sync_thresh;
-@@ -254,8 +250,8 @@ extern asmlinkage void schedule_tail(str
+@@ -261,9 +268,6 @@ extern asmlinkage void schedule_tail(str
  extern void init_idle(struct task_struct *idle, int cpu);
  extern void init_idle_bootup_task(struct task_struct *idle);
  
--extern int runqueue_is_locked(void);
+-extern int runqueue_is_locked(int cpu);
 -extern void task_rq_unlock_wait(struct task_struct *p);
-+extern int grunqueue_is_locked(void);
-+extern void grq_unlock_wait(void);
- 
+-
  extern cpumask_var_t nohz_cpu_mask;
  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-@@ -1021,148 +1017,6 @@ struct uts_namespace;
- struct rq;
- struct sched_domain;
- 
--struct sched_class {
--      const struct sched_class *next;
--
--      void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
--      void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
--      void (*yield_task) (struct rq *rq);
--
--      void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
--
--      struct task_struct * (*pick_next_task) (struct rq *rq);
--      void (*put_prev_task) (struct rq *rq, struct task_struct *p);
--
--#ifdef CONFIG_SMP
--      int  (*select_task_rq)(struct task_struct *p, int sync);
--
--      unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
--                      struct rq *busiest, unsigned long max_load_move,
--                      struct sched_domain *sd, enum cpu_idle_type idle,
--                      int *all_pinned, int *this_best_prio);
--
--      int (*move_one_task) (struct rq *this_rq, int this_cpu,
--                            struct rq *busiest, struct sched_domain *sd,
--                            enum cpu_idle_type idle);
--      void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
--      int (*needs_post_schedule) (struct rq *this_rq);
--      void (*post_schedule) (struct rq *this_rq);
--      void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
--
--      void (*set_cpus_allowed)(struct task_struct *p,
--                               const struct cpumask *newmask);
--
--      void (*rq_online)(struct rq *rq);
--      void (*rq_offline)(struct rq *rq);
--#endif
--
--      void (*set_curr_task) (struct rq *rq);
--      void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
--      void (*task_new) (struct rq *rq, struct task_struct *p);
--
--      void (*switched_from) (struct rq *this_rq, struct task_struct *task,
--                             int running);
--      void (*switched_to) (struct rq *this_rq, struct task_struct *task,
--                           int running);
--      void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
--                           int oldprio, int running);
--
--#ifdef CONFIG_FAIR_GROUP_SCHED
--      void (*moved_group) (struct task_struct *p);
--#endif
--};
--
--struct load_weight {
--      unsigned long weight, inv_weight;
--};
--
--/*
-- * CFS stats for a schedulable entity (task, task-group etc)
-- *
-- * Current field usage histogram:
-- *
-- *     4 se->block_start
-- *     4 se->run_node
-- *     4 se->sleep_start
-- *     6 se->load.weight
-- */
--struct sched_entity {
--      struct load_weight      load;           /* for load-balancing */
--      struct rb_node          run_node;
--      struct list_head        group_node;
--      unsigned int            on_rq;
--
--      u64                     exec_start;
--      u64                     sum_exec_runtime;
--      u64                     vruntime;
--      u64                     prev_sum_exec_runtime;
--
--      u64                     last_wakeup;
--      u64                     avg_overlap;
--
--      u64                     nr_migrations;
--
--      u64                     start_runtime;
--      u64                     avg_wakeup;
--
--#ifdef CONFIG_SCHEDSTATS
--      u64                     wait_start;
--      u64                     wait_max;
--      u64                     wait_count;
--      u64                     wait_sum;
--
--      u64                     sleep_start;
--      u64                     sleep_max;
--      s64                     sum_sleep_runtime;
--
--      u64                     block_start;
--      u64                     block_max;
--      u64                     exec_max;
--      u64                     slice_max;
--
--      u64                     nr_migrations_cold;
--      u64                     nr_failed_migrations_affine;
--      u64                     nr_failed_migrations_running;
--      u64                     nr_failed_migrations_hot;
--      u64                     nr_forced_migrations;
--      u64                     nr_forced2_migrations;
--
--      u64                     nr_wakeups;
--      u64                     nr_wakeups_sync;
--      u64                     nr_wakeups_migrate;
--      u64                     nr_wakeups_local;
--      u64                     nr_wakeups_remote;
--      u64                     nr_wakeups_affine;
--      u64                     nr_wakeups_affine_attempts;
--      u64                     nr_wakeups_passive;
--      u64                     nr_wakeups_idle;
--#endif
--
--#ifdef CONFIG_FAIR_GROUP_SCHED
--      struct sched_entity     *parent;
--      /* rq on which this entity is (to be) queued: */
--      struct cfs_rq           *cfs_rq;
--      /* rq "owned" by this entity/group: */
--      struct cfs_rq           *my_q;
--#endif
--};
--
--struct sched_rt_entity {
--      struct list_head run_list;
--      unsigned long timeout;
--      unsigned int time_slice;
--      int nr_cpus_allowed;
--
--      struct sched_rt_entity *back;
--#ifdef CONFIG_RT_GROUP_SCHED
--      struct sched_rt_entity  *parent;
--      /* rq on which this entity is (to be) queued: */
--      struct rt_rq            *rt_rq;
--      /* rq "owned" by this entity/group: */
--      struct rt_rq            *my_q;
--#endif
--};
--
- struct task_struct {
-       volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
-       void *stack;
-@@ -1172,17 +1026,16 @@ struct task_struct {
+ extern int select_nohz_load_balancer(int cpu);
+@@ -1221,17 +1225,31 @@ struct task_struct {
  
        int lock_depth;         /* BKL lock depth */
  
--#ifdef CONFIG_SMP
--#ifdef __ARCH_WANT_UNLOCKED_CTXSW
++#ifndef CONFIG_SCHED_BFS
+ #ifdef CONFIG_SMP
+ #ifdef __ARCH_WANT_UNLOCKED_CTXSW
        int oncpu;
--#endif
--#endif
--
+ #endif
+ #endif
++#else /* CONFIG_SCHED_BFS */
++      int oncpu;
++#endif
+ 
        int prio, static_prio, normal_prio;
+       unsigned int rt_priority;
++#ifdef CONFIG_SCHED_BFS
 +      int time_slice, first_time_slice;
 +      unsigned long deadline;
 +      struct list_head run_list;
-       unsigned int rt_priority;
--      const struct sched_class *sched_class;
--      struct sched_entity se;
--      struct sched_rt_entity rt;
 +      u64 last_ran;
 +      u64 sched_time; /* sched_clock time spent running */
 +
 +      unsigned long rt_timeout;
++#else /* CONFIG_SCHED_BFS */
+       const struct sched_class *sched_class;
+       struct sched_entity se;
+       struct sched_rt_entity rt;
++#endif
  
  #ifdef CONFIG_PREEMPT_NOTIFIERS
        /* list of struct preempt_notifier: */
-@@ -1205,6 +1058,9 @@ struct task_struct {
+@@ -1253,6 +1271,9 @@ struct task_struct {
  
        unsigned int policy;
        cpumask_t cpus_allowed;
-+#ifdef CONFIG_HOTPLUG_CPU
++#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_SCHED_BFS)
 +      cpumask_t unplugged_mask;
 +#endif
  
- #ifdef CONFIG_PREEMPT_RCU
+ #ifdef CONFIG_TREE_PREEMPT_RCU
        int rcu_read_lock_nesting;
-@@ -1273,6 +1129,7 @@ struct task_struct {
+@@ -1330,6 +1351,9 @@ struct task_struct {
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
  
        cputime_t utime, stime, utimescaled, stimescaled;
++#ifdef CONFIG_SCHED_BFS
 +      unsigned long utime_pc, stime_pc;
++#endif
        cputime_t gtime;
        cputime_t prev_utime, prev_stime;
        unsigned long nvcsw, nivcsw; /* context switch counts */
-@@ -1497,11 +1354,14 @@ struct task_struct {
-  * priority to a value higher than any user task. Note:
-  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
-  */
--
-+#define PRIO_RANGE            (40)
+@@ -1541,6 +1565,64 @@ struct task_struct {
+       unsigned long stack_start;
+ };
+ 
++#ifdef CONFIG_SCHED_BFS
++extern int grunqueue_is_locked(void);
++extern void grq_unlock_wait(void);
++#define tsk_seruntime(t)              ((t)->sched_time)
++#define tsk_rttimeout(t)              ((t)->rt_timeout)
++#define task_rq_unlock_wait(tsk)      grq_unlock_wait()
++
++static inline void set_oom_timeslice(struct task_struct *p)
++{
++      p->time_slice = HZ;
++}
++
++static inline void tsk_cpus_current(struct task_struct *p)
++{
++}
++
++#define runqueue_is_locked(cpu)       grunqueue_is_locked()
++
++static inline void print_scheduler_version(void)
++{
++      printk(KERN_INFO"BFS CPU scheduler v0.313 by Con Kolivas.\n");
++}
++
++static inline int iso_task(struct task_struct *p)
++{
++      return (p->policy == SCHED_ISO);
++}
++#else
++extern int runqueue_is_locked(int cpu);
++extern void task_rq_unlock_wait(struct task_struct *p);
<<Diff was trimmed, longer than 597 lines>>

---- CVS-web:
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop.spec?r1=1.204.2.83&r2=1.204.2.84&f=u
    http://cvs.pld-linux.org/cgi-bin/cvsweb.cgi/packages/kernel-desktop/kernel-desktop-sched-bfs.patch?r1=1.1.2.14&r2=1.1.2.15&f=u

_______________________________________________
pld-cvs-commit mailing list
[email protected]
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit
