Author: sparky                       Date: Mon Jun 12 16:56:12 2006 GMT
Module: SOURCES                       Tag: HEAD
---- Log message:
- updated to rt4

---- Files affected:
SOURCES:
   kernel-desktop-preempt-rt.patch (1.7 -> 1.8) 

---- Diffs:

================================================================
Index: SOURCES/kernel-desktop-preempt-rt.patch
diff -u SOURCES/kernel-desktop-preempt-rt.patch:1.7 SOURCES/kernel-desktop-preempt-rt.patch:1.8
--- SOURCES/kernel-desktop-preempt-rt.patch:1.7 Sun Jun 11 22:18:45 2006
+++ SOURCES/kernel-desktop-preempt-rt.patch     Mon Jun 12 18:56:07 2006
@@ -5638,7 +5638,7 @@
 +
 +      if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 +                      trigger == IOAPIC_LEVEL)
-+#ifdef CONFIG_PREMMPT_HARDIRQS
++#ifdef CONFIG_PREEMPT_HARDIRQS
 +              set_irq_chip_and_handler(idx, &ioapic_chip,
 +                                       handle_level_irq);
 +#else
@@ -37560,7 +37560,7 @@
 ===================================================================
 --- /dev/null
 +++ linux/kernel/irq/chip.c
-@@ -0,0 +1,549 @@
+@@ -0,0 +1,562 @@
 +/*
 + * linux/kernel/irq/chip.c
 + *
@@ -37771,8 +37771,15 @@
 +
 +      spin_lock(&desc->lock);
 +
-+      if (unlikely(desc->status & IRQ_INPROGRESS))
++      if (unlikely(desc->status & IRQ_INPROGRESS)) {
++              static int once = 0;
++              if (!once) {
++                      once = 1;
++                      printk(KERN_WARNING "handle_simple_irq reentered while "
++                             "processing irq %d\n", irq);
++              }
 +              goto out_unlock;
++      }
 +      desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 +      kstat_cpu(cpu).irqs[irq]++;
 +
@@ -37819,8 +37826,15 @@
 +      spin_lock(&desc->lock);
 +      mask_ack_irq(desc, irq);
 +
-+      if (unlikely(desc->status & IRQ_INPROGRESS))
-+              goto out_unlock;
++      if (unlikely(desc->status & IRQ_INPROGRESS)) {
++              static int once = 0;
++              if (!once) {
++                      once = 1;
++                      printk(KERN_WARNING "handle_level_irq reentered while "
++                             "processing irq %d\n", irq);
++              }
++              goto out;
++      }
 +      desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 +      kstat_cpu(cpu).irqs[irq]++;
 +
@@ -37830,7 +37844,7 @@
 +       */
 +      action = desc->action;
 +      if (unlikely(!action || (desc->status & IRQ_DISABLED)))
-+              goto out_unlock;
++              goto out;
 +
 +      desc->status |= IRQ_INPROGRESS;
 +
@@ -37838,7 +37852,7 @@
 +       * hardirq redirection to the irqd process context:
 +       */
 +      if (redirect_hardirq(desc))
-+              goto out_unlock;
++              goto out;
 +
 +      spin_unlock(&desc->lock);
 +
@@ -37848,10 +37862,9 @@
 +
 +      spin_lock(&desc->lock);
 +      desc->status &= ~IRQ_INPROGRESS;
-+
 +      if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 +              desc->chip->unmask(irq);
-+out_unlock:
++out:
 +      spin_unlock(&desc->lock);
 +}
 +
@@ -38161,7 +38174,7 @@
   * controller. Thus drivers need not be aware of the
   * interrupt-controller.
   *
-@@ -28,41 +48,53 @@
+@@ -28,41 +48,58 @@
   *
   * Controller mappings for all interrupt sources:
   */
@@ -38181,27 +38194,43 @@
        }
  };
 +EXPORT_SYMBOL_GPL(irq_desc);
++
++/*
++ * What should we do if we get a hw irq event on an illegal vector?
++ * Each architecture has to answer this themself.
++ */
++static void ack_bad(unsigned int irq)
++{
++      struct irq_desc *desc = irq_desc + irq;
++
++      if (desc->handle_irq == handle_bad_irq) {
++              print_irq_desc(irq, desc);
++              ack_bad_irq(irq);
++      }
++}
  
  /*
 - * Generic 'no controller' code
-+ * What should we do if we get a hw irq event on an illegal vector?
-+ * Each architecture has to answer this themself.
++ * NOP functions
   */
 -static void end_none(unsigned int irq) { }
 -static void enable_none(unsigned int irq) { }
 -static void disable_none(unsigned int irq) { }
 -static void shutdown_none(unsigned int irq) { }
 -static unsigned int startup_none(unsigned int irq) { return 0; }
--
++static void noop(unsigned int irq)
++{
++}
+ 
 -static void ack_none(unsigned int irq)
-+static void ack_bad(unsigned int irq)
++static unsigned int noop_ret(unsigned int irq)
  {
 -      /*
 -       * 'what should we do if we get a hw irq event on an illegal vector'.
 -       * each architecture has to answer this themself.
 -       */
-+      print_irq_desc(irq, irq_desc + irq);
-       ack_bad_irq(irq);
+-      ack_bad_irq(irq);
++      return 0;
  }
  
 -struct hw_interrupt_type no_irq_type = {
@@ -38214,18 +38243,6 @@
 -      .end =          end_none,
 -      .set_affinity = NULL
 +/*
-+ * NOP functions
-+ */
-+static void noop(unsigned int irq)
-+{
-+}
-+
-+static unsigned int noop_ret(unsigned int irq)
-+{
-+      return 0;
-+}
-+
-+/*
 + * Generic no controller implementation
 + */
 +struct irq_chip no_irq_chip = {
@@ -38235,11 +38252,12 @@
 +      .enable         = noop,
 +      .disable        = noop,
 +      .ack            = ack_bad,
++      .unmask         = noop,
 +      .end            = noop,
  };
  
  /*
-@@ -73,43 +105,118 @@ irqreturn_t no_action(int cpl, void *dev
+@@ -73,43 +110,118 @@ irqreturn_t no_action(int cpl, void *dev
        return IRQ_NONE;
  }
  
@@ -38368,7 +38386,7 @@
        kstat_this_cpu.irqs[irq]++;
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;
-@@ -117,16 +224,16 @@ fastcall unsigned int __do_IRQ(unsigned 
+@@ -117,16 +229,16 @@ fastcall unsigned int __do_IRQ(unsigned 
                /*
                 * No locking required for CPU-local interrupts:
                 */
@@ -38390,7 +38408,7 @@
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
-@@ -156,6 +263,12 @@ fastcall unsigned int __do_IRQ(unsigned 
+@@ -156,6 +268,12 @@ fastcall unsigned int __do_IRQ(unsigned 
                goto out;
  
        /*
@@ -38403,7 +38421,7 @@
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
-@@ -186,7 +299,8 @@ out:
+@@ -186,7 +304,8 @@ out:
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
@@ -38686,7 +38704,7 @@
                return -EINVAL;
  
 -      if (desc->handler == &no_irq_type)
-+      if (desc->chip == &no_irq_chip)
++      if (desc->handle_irq == &handle_bad_irq)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
@@ -39256,7 +39274,7 @@
                return -EIO;
  
        err = cpumask_parse(buffer, count, new_value);
-@@ -81,37 +78,6 @@ static int irq_affinity_write_proc(struc
+@@ -81,60 +78,28 @@ static int irq_affinity_write_proc(struc
  
  #endif
  
@@ -39294,14 +39312,14 @@
  #define MAX_NAMELEN 10
  
  void register_irq_proc(unsigned int irq)
-@@ -119,22 +85,22 @@ void register_irq_proc(unsigned int irq)
+ {
        char name [MAX_NAMELEN];
  
-       if (!root_irq_dir ||
+-      if (!root_irq_dir ||
 -              (irq_desc[irq].handler == &no_irq_type) ||
 -                      irq_dir[irq])
-+              (irq_desc[irq].chip == &no_irq_chip) ||
-+                      irq_desc[irq].dir)
++      if (!root_irq_dir || (irq_desc[irq].handle_irq == &handle_bad_irq) ||
++          irq_desc[irq].dir)
                return;
  
        memset(name, 0, MAX_NAMELEN);
@@ -39321,7 +39339,7 @@
  
                if (entry) {
                        entry->nlink = 1;
-@@ -142,7 +108,6 @@ void register_irq_proc(unsigned int irq)
+@@ -142,7 +107,6 @@ void register_irq_proc(unsigned int irq)
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }
@@ -39329,7 +39347,7 @@
        }
  #endif
  }
-@@ -151,10 +116,104 @@ void register_irq_proc(unsigned int irq)
+@@ -151,10 +115,104 @@ void register_irq_proc(unsigned int irq)
  
  void unregister_handler_proc(unsigned int irq, struct irqaction *action)
  {
@@ -39435,7 +39453,7 @@
  void init_irq_proc(void)
  {
        int i;
-@@ -164,6 +223,9 @@ void init_irq_proc(void)
+@@ -164,6 +222,9 @@ void init_irq_proc(void)
        if (!root_irq_dir)
                return;
  
@@ -42600,6 +42618,24 @@
        }
  
        if (error)
+@@ -562,7 +564,7 @@ static void arm_timer(struct k_itimer *t
+               p->cpu_timers : p->signal->cpu_timers);
+       head += CPUCLOCK_WHICH(timer->it_clock);
+ 
+-      BUG_ON(!irqs_disabled());
++      BUG_ON_NONRT(!irqs_disabled());
+       spin_lock(&p->sighand->siglock);
+ 
+       listpos = head;
+@@ -719,7 +721,7 @@ int posix_cpu_timer_set(struct k_itimer 
+       /*
+        * Disarm any old timer after extracting its expiry time.
+        */
+-      BUG_ON(!irqs_disabled());
++      BUG_ON_NONRT(!irqs_disabled());
+ 
+       ret = 0;
+       spin_lock(&p->sighand->siglock);
 @@ -1196,7 +1198,7 @@ static void check_process_timers(struct 
  
                        do {
@@ -47105,43 +47141,41 @@
        rq = task_rq_lock(p, &flags);
        old_state = p->state;
        if (!(old_state & state))
-@@ -1270,12 +1588,45 @@ out_set_cpu:
- 
+@@ -1271,11 +1589,43 @@ out_set_cpu:
                this_cpu = smp_processor_id();
                cpu = task_cpu(p);
-+      } else {
+       }
++      /*
++       * If a newly woken up RT task cannot preempt the
++       * current (RT) task (on a target runqueue) then try
++       * to find another CPU it can preempt:
++       */
++      if (rt_task(p) && !TASK_PREEMPTS_CURR(p, rq)) {
++              this_rq = cpu_rq(this_cpu);
 +              /*
-+               * If a newly woken up RT task cannot preempt the
-+               * current (RT) task (on a target runqueue) then try
-+               * to find another CPU it can preempt:
++               * Special-case: the task on this CPU can be
++               * preempted. In that case there's no need to
++               * trigger reschedules on other CPUs, we can
++               * mark the current task for reschedule.
++               *
++               * (Note that it's safe to access this_rq without
++               * extra locking in this particular case, because
++               * we are on the current CPU.)
 +               */
-+              if (rt_task(p) && !TASK_PREEMPTS_CURR(p, rq)) {
-+                      this_rq = cpu_rq(this_cpu);
++              if (TASK_PREEMPTS_CURR(p, this_rq))
++                      set_tsk_need_resched(this_rq->curr);
++              else
 +                      /*
-+                       * Special-case: the task on this CPU can be
-+                       * preempted. In that case there's no need to
-+                       * trigger reschedules on other CPUs, we can
-+                       * mark the current task for reschedule.
-+                       *
-+                       * (Note that it's safe to access this_rq without
-+                       * extra locking in this particular case, because
-+                       * we are on the current CPU.)
++                       * Neither the intended target runqueue
++                       * nor the current CPU can take this task.
++                       * Trigger a reschedule on all other CPUs
++                       * nevertheless, maybe one of them can take
++                       * this task:
 +                       */
-+                      if (TASK_PREEMPTS_CURR(p, this_rq))
-+                              set_tsk_need_resched(this_rq->curr);
-+                      else
-+                              /*
-+                               * Neither the intended target runqueue
-+                               * nor the current CPU can take this task.
-+                               * Trigger a reschedule on all other CPUs
-+                               * nevertheless, maybe one of them can take
-+                               * this task:
-+                               */
-+                              smp_send_reschedule_allbutself();
++                      smp_send_reschedule_allbutself();
 +
-+                      schedstat_inc(this_rq, rto_wakeup);
-+              }
-       }
++              schedstat_inc(this_rq, rto_wakeup);
++      }
  
  out_activate:
  #endif /* CONFIG_SMP */
@@ -47151,7 +47185,7 @@
                /*
                 * Tasks on involuntary sleep don't earn
                 * sleep_avg beyond just interactive state.
-@@ -1292,7 +1643,6 @@ out_activate:
+@@ -1292,7 +1642,6 @@ out_activate:
                        p->sleep_type = SLEEP_NONINTERACTIVE;
  
  
@@ -47159,7 +47193,7 @@
        /*
         * Sync wakeups (i.e. those types of wakeups where the waker
         * has indicated that it will leave the CPU in short order)
-@@ -1302,13 +1652,32 @@ out_activate:
+@@ -1302,13 +1651,32 @@ out_activate:
         * to be considered on this CPU.)
         */
        if (!sync || cpu != this_cpu) {
@@ -47194,7 +47228,7 @@
  out:
        task_rq_unlock(rq, &flags);
  
-@@ -1317,15 +1686,58 @@ out:
+@@ -1317,15 +1685,58 @@ out:
  
  int fastcall wake_up_process(task_t *p)
  {
@@ -47256,7 +47290,7 @@
  }
  
  /*
-@@ -1348,6 +1760,12 @@ void fastcall sched_fork(task_t *p, int 
+@@ -1348,6 +1759,12 @@ void fastcall sched_fork(task_t *p, int 
         * event cannot wake it up and insert it on the runqueue either.
         */
        p->state = TASK_RUNNING;
@@ -47269,7 +47303,7 @@
        INIT_LIST_HEAD(&p->run_list);
        p->array = NULL;
  #ifdef CONFIG_SCHEDSTATS
-@@ -1427,15 +1845,17 @@ void fastcall wake_up_new_task(task_t *p
+@@ -1427,15 +1844,17 @@ void fastcall wake_up_new_task(task_t *p
                                __activate_task(p, rq);
                        else {
                                p->prio = current->prio;
@@ -47292,7 +47326,7 @@
                /*
                 * We skip the following code due to cpu == this_cpu
                 *
-@@ -1551,11 +1971,26 @@ static inline void finish_task_switch(ru
+@@ -1551,11 +1970,26 @@ static inline void finish_task_switch(ru
         * be dropped twice.
         *              Manfred Spraul <[EMAIL PROTECTED]>
         */
@@ -47321,7 +47355,7 @@
        if (unlikely(prev_task_flags & PF_DEAD)) {
                /*
                 * Remove function-return probe instances associated with this
-@@ -1573,12 +2008,17 @@ static inline void finish_task_switch(ru
+@@ -1573,12 +2007,17 @@ static inline void finish_task_switch(ru
  asmlinkage void schedule_tail(task_t *prev)
        __releases(rq->lock)
  {
@@ -47341,7 +47375,7 @@
        if (current->set_child_tid)
                put_user(current->pid, current->set_child_tid);
  }
-@@ -1606,6 +2046,13 @@ task_t * context_switch(runqueue_t *rq, 
+@@ -1606,6 +2045,13 @@ task_t * context_switch(runqueue_t *rq, 
                rq->prev_mm = oldmm;
        }
  
@@ -47355,7 +47389,7 @@
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
  
-@@ -1646,6 +2093,43 @@ unsigned long nr_uninterruptible(void)
+@@ -1646,6 +2092,43 @@ unsigned long nr_uninterruptible(void)
        return sum;
  }
  
@@ -47399,7 +47433,7 @@
  unsigned long long nr_context_switches(void)
  {
        unsigned long long i, sum = 0;
-@@ -1730,7 +2214,7 @@ static void double_rq_unlock(runqueue_t 
+@@ -1730,7 +2213,7 @@ static void double_rq_unlock(runqueue_t 
  /*
   * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
   */
@@ -47408,7 +47442,7 @@
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
-@@ -1740,9 +2224,12 @@ static void double_lock_balance(runqueue
+@@ -1740,9 +2223,12 @@ static void double_lock_balance(runqueue
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
                        spin_lock(&this_rq->lock);
@@ -47421,7 +47455,7 @@
  }
  
  /*
-@@ -2482,6 +2969,7 @@ unsigned long long current_sched_time(co
+@@ -2482,6 +2968,7 @@ unsigned long long current_sched_time(co
  {
        unsigned long long ns;
        unsigned long flags;
@@ -47429,7 +47463,7 @@
        local_irq_save(flags);
        ns = max(tsk->timestamp, task_rq(tsk)->timestamp_last_tick);
        ns = tsk->sched_time + (sched_clock() - ns);
-@@ -2520,7 +3008,9 @@ void account_user_time(struct task_struc
+@@ -2520,7 +3007,9 @@ void account_user_time(struct task_struc
  
        /* Add user time to cpustat. */
        tmp = cputime_to_cputime64(cputime);
@@ -47440,7 +47474,7 @@
                cpustat->nice = cputime64_add(cpustat->nice, tmp);
        else
                cpustat->user = cputime64_add(cpustat->user, tmp);
-@@ -2547,6 +3037,8 @@ void account_system_time(struct task_str
+@@ -2547,6 +3036,8 @@ void account_system_time(struct task_str
                cpustat->irq = cputime64_add(cpustat->irq, tmp);
        else if (softirq_count())
                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
@@ -47449,7 +47483,7 @@
        else if (p != rq->idle)
                cpustat->system = cputime64_add(cpustat->system, tmp);
        else if (atomic_read(&rq->nr_iowait) > 0)
-@@ -2592,6 +3084,8 @@ void scheduler_tick(void)
+@@ -2592,6 +3083,8 @@ void scheduler_tick(void)
        task_t *p = current;
        unsigned long long now = sched_clock();
  
@@ -47458,7 +47492,7 @@
        update_cpu_clock(p, rq, now);
  
        rq->timestamp_last_tick = now;
-@@ -2615,11 +3109,17 @@ void scheduler_tick(void)
+@@ -2615,11 +3108,17 @@ void scheduler_tick(void)
         * priority until it either goes to sleep or uses up its
         * timeslice. This makes it possible for interactive tasks
         * to use up their timeslices at their highest priority levels.
@@ -47476,7 +47510,7 @@
                 */
                if ((p->policy == SCHED_RR) && !--p->time_slice) {
                        p->time_slice = task_timeslice(p);
-@@ -2745,8 +3245,8 @@ static int dependent_sleeper(int this_cp
+@@ -2745,8 +3244,8 @@ static int dependent_sleeper(int this_cp
        struct sched_domain *tmp, *sd = NULL;
        cpumask_t sibling_map;
        prio_array_t *array;
@@ -47487,7 +47521,7 @@
  
        for_each_domain(this_cpu, tmp)
                if (tmp->flags & SD_SHARE_CPUPOWER)
-@@ -2808,6 +3308,12 @@ static int dependent_sleeper(int this_cp
+@@ -2808,6 +3307,12 @@ static int dependent_sleeper(int this_cp
                                !TASK_PREEMPTS_CURR(p, smt_rq) &&
                                smt_slice(smt_curr, sd) > task_timeslice(p))
                                        ret = 1;
@@ -47500,7 +47534,7 @@
  
  check_smt_task:
                if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
-@@ -2838,6 +3344,8 @@ check_smt_task:
+@@ -2838,6 +3343,8 @@ check_smt_task:
  out_unlock:
        for_each_cpu_mask(i, sibling_map)
                spin_unlock(&cpu_rq(i)->lock);
@@ -47509,7 +47543,7 @@
        return ret;
  }
  #else
-@@ -2851,35 +3359,41 @@ static inline int dependent_sleeper(int 
+@@ -2851,35 +3358,41 @@ static inline int dependent_sleeper(int 
  }
  #endif
  
@@ -47574,7 +47608,7 @@
  
  #endif
  
-@@ -2890,9 +3404,9 @@ static inline int interactive_sleep(enum
+@@ -2890,9 +3403,9 @@ static inline int interactive_sleep(enum
  }
  
  /*
@@ -47586,7 +47620,7 @@
  {
        long *switch_count;
        task_t *prev, *next;
-@@ -2903,12 +3417,15 @@ asmlinkage void __sched schedule(void)
+@@ -2903,12 +3416,15 @@ asmlinkage void __sched schedule(void)
        unsigned long run_time;
        int cpu, idx, new_prio;
  
@@ -47602,7 +47636,7 @@
                printk(KERN_ERR "BUG: scheduling while atomic: "
                        "%s/0x%08x/%d\n",
                        current->comm, preempt_count(), current->pid);
-@@ -2916,11 +3433,9 @@ asmlinkage void __sched schedule(void)
+@@ -2916,11 +3432,9 @@ asmlinkage void __sched schedule(void)
        }
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  
@@ -47615,7 +47649,7 @@
        rq = this_rq();
  
        /*
-@@ -2928,7 +3443,7 @@ need_resched_nonpreemptible:
+@@ -2928,7 +3442,7 @@ need_resched_nonpreemptible:
         * Remove this check after it has been exercised a bit.
         */
        if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
@@ -47624,7 +47658,7 @@
                dump_stack();
        }
  
-@@ -2947,25 +3462,42 @@ need_resched_nonpreemptible:
+@@ -2947,25 +3461,42 @@ need_resched_nonpreemptible:
         */
        run_time /= (CURRENT_BONUS(prev) ? : 1);
  
@@ -47674,7 +47708,7 @@
        if (unlikely(!rq->nr_running)) {
  go_idle:
                idle_balance(cpu, rq);
<<Diff was trimmed, longer than 597 lines>>
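
A small userspace sketch of the one-shot warning pattern added to the new
handle_simple_irq()/handle_level_irq() hunks above (a static "once" flag so
only the first reentry is reported). This is a simplified, hypothetical model
for illustration only: the kernel code runs under desc->lock, while here a
plain flag is enough for a single-threaded demo.

#include <stdio.h>

static void report_reentry(int irq)
{
        static int once;

        if (!once) {
                once = 1;
                fprintf(stderr,
                        "handler reentered while processing irq %d\n", irq);
        }
}

int main(void)
{
        report_reentry(7);      /* prints the warning the first time */
        report_reentry(7);      /* silent: already reported once */
        return 0;
}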
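
The sched.c hunk above adds a fallback for a freshly woken RT task that cannot
preempt its target runqueue: try the current CPU, else broadcast a reschedule.
The following is a standalone toy model of that decision only; struct task,
struct runqueue, preempts() and place_rt_wakeup() are made-up stand-ins, not
the kernel structures or API, and the real code additionally handles locking
and schedstats.

#include <stdbool.h>
#include <stdio.h>

struct task { int prio; bool rt; };
struct runqueue { struct task *curr; };

/* Lower numeric prio means higher priority, as in the kernel. */
static bool preempts(const struct task *p, const struct runqueue *rq)
{
        return p->prio < rq->curr->prio;
}

static const char *place_rt_wakeup(const struct task *p,
                                   const struct runqueue *target,
                                   const struct runqueue *this_rq)
{
        if (!p->rt || preempts(p, target))
                return "reschedule the target runqueue";   /* normal path */
        if (preempts(p, this_rq))
                return "reschedule the current CPU";        /* cheap local case */
        return "broadcast a reschedule to all other CPUs";  /* last resort */
}

int main(void)
{
        struct task woken   = { .prio = 10,  .rt = true };
        struct task busy_rt = { .prio = 5,   .rt = true };
        struct task normal  = { .prio = 120, .rt = false };
        struct runqueue target = { .curr = &busy_rt };
        struct runqueue local  = { .curr = &normal };

        puts(place_rt_wakeup(&woken, &target, &local));
        return 0;
}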

---- CVS-web:
    http://cvs.pld-linux.org/SOURCES/kernel-desktop-preempt-rt.patch?r1=1.7&r2=1.8&f=u

