Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=c7a1e46aa9782a947cf2ed506245d43396dbf991
Commit:     c7a1e46aa9782a947cf2ed506245d43396dbf991
Parent:     4642dafdf93dc7d66ee33437b93a5e6b8cea20d2
Author:     Steven Rostedt <[EMAIL PROTECTED]>
AuthorDate: Fri Jan 25 21:08:07 2008 +0100
Committer:  Ingo Molnar <[EMAIL PROTECTED]>
CommitDate: Fri Jan 25 21:08:07 2008 +0100

    sched: disable standard balancer for RT tasks
    
    Since we now take an active approach to load balancing, we don't need to
    balance RT tasks via the normal task balancer. In fact, this code was
    found to pull RT tasks away from CPUs that the active balancing had just
    migrated them to, resulting in large latencies.
    
    Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
    Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
---
 kernel/sched_rt.c |   95 ++--------------------------------------------------
 1 file changed, 4 insertions(+), 91 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d38a8a5..c492fd2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -567,109 +567,22 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
                push_rt_tasks(rq);
 }
 
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *load_balance_start_rt(void *arg)
-{
-       struct rq *rq = arg;
-       struct rt_prio_array *array = &rq->rt.active;
-       struct list_head *head, *curr;
-       struct task_struct *p;
-       int idx;
-
-       idx = sched_find_first_bit(array->bitmap);
-       if (idx >= MAX_RT_PRIO)
-               return NULL;
-
-       head = array->queue + idx;
-       curr = head->prev;
-
-       p = list_entry(curr, struct task_struct, run_list);
-
-       curr = curr->prev;
-
-       rq->rt.rt_load_balance_idx = idx;
-       rq->rt.rt_load_balance_head = head;
-       rq->rt.rt_load_balance_curr = curr;
-
-       return p;
-}
-
-static struct task_struct *load_balance_next_rt(void *arg)
-{
-       struct rq *rq = arg;
-       struct rt_prio_array *array = &rq->rt.active;
-       struct list_head *head, *curr;
-       struct task_struct *p;
-       int idx;
-
-       idx = rq->rt.rt_load_balance_idx;
-       head = rq->rt.rt_load_balance_head;
-       curr = rq->rt.rt_load_balance_curr;
-
-       /*
-        * If we arrived back to the head again then
-        * iterate to the next queue (if any):
-        */
-       if (unlikely(head == curr)) {
-               int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-
-               if (next_idx >= MAX_RT_PRIO)
-                       return NULL;
-
-               idx = next_idx;
-               head = array->queue + idx;
-               curr = head->prev;
-
-               rq->rt.rt_load_balance_idx = idx;
-               rq->rt.rt_load_balance_head = head;
-       }
-
-       p = list_entry(curr, struct task_struct, run_list);
-
-       curr = curr->prev;
-
-       rq->rt.rt_load_balance_curr = curr;
-
-       return p;
-}
-
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
 {
-       struct rq_iterator rt_rq_iterator;
-
-       rt_rq_iterator.start = load_balance_start_rt;
-       rt_rq_iterator.next = load_balance_next_rt;
-       /* pass 'busiest' rq argument into
-        * load_balance_[start|next]_rt iterators
-        */
-       rt_rq_iterator.arg = busiest;
-
-       return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
-                            idle, all_pinned, this_best_prio, &rt_rq_iterator);
+       /* don't touch RT tasks */
+       return 0;
 }
 
 static int
 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
 {
-       struct rq_iterator rt_rq_iterator;
-
-       rt_rq_iterator.start = load_balance_start_rt;
-       rt_rq_iterator.next = load_balance_next_rt;
-       rt_rq_iterator.arg = busiest;
-
-       return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
-                                 &rt_rq_iterator);
+       /* don't touch RT tasks */
+       return 0;
 }
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq)  do { } while (0)
-
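
The "active approach" the commit message refers to is the RT push/pull
machinery (push_rt_tasks() is visible in the hunk context above): when a
runqueue holds more runnable RT tasks than it can execute, it immediately
pushes the excess task toward a CPU running at lower priority, rather than
waiting for the periodic balancer. A minimal user-space sketch of the push
idea follows; higher number means higher priority here, and everything in
it is illustrative, not the kernel's implementation:

#include <stdio.h>

#define NR_CPUS         4
#define IDLE_PRIO       -1

/* Priority of the task currently running on each CPU. */
static int cpu_prio[NR_CPUS] = { 50, IDLE_PRIO, 10, 99 };

/* Find the CPU running at the lowest priority below 'prio', if any. */
static int find_lowest_cpu(int prio)
{
        int cpu, best = -1, best_prio = prio;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_prio[cpu] < best_prio) {
                        best_prio = cpu_prio[cpu];
                        best = cpu;
                }
        }
        return best;
}

/* Actively push a woken RT task of priority 'prio' off an overloaded CPU. */
static void push_rt_task(int src_cpu, int prio)
{
        int dst = find_lowest_cpu(prio);

        if (dst < 0) {
                printf("prio %d task stays queued on CPU %d\n", prio, src_cpu);
                return;
        }
        cpu_prio[dst] = prio;   /* the pushed task preempts the target CPU */
        printf("pushed prio %d task from CPU %d to CPU %d\n",
               prio, src_cpu, dst);
}

int main(void)
{
        /* CPU 0 wakes a second RT task (prio 70) that it cannot run now. */
        push_rt_task(0, 70);
        return 0;
}

Because the push happens right at the wakeup/schedule boundary, the
placement is immediate; a periodic balancer pass that also moved RT tasks
could only undo it, which is the latency problem this commit removes.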
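
The removed load_balance_start_rt()/load_balance_next_rt() pair is an
instance of a dequeue-safe list walk: the runqueue lock is held across the
whole iteration, but the task just handed back may be dequeued, so the
cursor is advanced before the current element is returned. A stand-alone
sketch of that pattern, on a circular doubly-linked list in the style of
the kernel's list_head (types and helpers simplified, not the kernel's):

#include <stdio.h>

struct node {
        struct node *prev, *next;
        int val;
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_add(struct node *head, struct node *n)
{
        n->next = head->next;           /* insert at the front */
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

/*
 * 'curr' always points one element behind the node most recently
 * returned, so the caller may delete the returned node without
 * invalidating the walk.
 */
struct iter {
        struct node *head, *curr;
};

static struct node *iter_start(struct iter *it, struct node *head)
{
        struct node *p;

        it->head = head;
        it->curr = head->prev;          /* walk from the tail, like the RT code */
        if (it->curr == head)
                return NULL;            /* empty list */
        p = it->curr;
        it->curr = it->curr->prev;      /* pre-iterate before returning */
        return p;
}

static struct node *iter_next(struct iter *it)
{
        struct node *p;

        if (it->curr == it->head)       /* wrapped around: done */
                return NULL;
        p = it->curr;
        it->curr = it->curr->prev;      /* pre-iterate again */
        return p;
}

int main(void)
{
        struct node head, nodes[3];
        struct iter it;
        struct node *p;
        int i;

        list_init(&head);
        for (i = 0; i < 3; i++) {
                nodes[i].val = i;
                list_add(&head, &nodes[i]);
        }

        /* Deleting the node we were just given is safe mid-walk. */
        for (p = iter_start(&it, &head); p; p = iter_next(&it)) {
                printf("visiting %d\n", p->val);
                list_del(p);
        }
        return 0;
}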