Module: xenomai-2.5
Branch: master
Commit: 033a9ad253acafbe64a8ca3174862b2c7d858a2a
URL:    http://git.xenomai.org/?p=xenomai-2.5.git;a=commit;h=033a9ad253acafbe64a8ca3174862b2c7d858a2a
Author: Philippe Gerum <[email protected]> Date: Sun Oct 31 12:04:07 2010 +0100 powerpc: upgrade I-pipe support to 2.6.35.7-powerpc-2.12-00 --- ... => adeos-ipipe-2.6.35.7-powerpc-2.12-00.patch} | 376 +++++++++----------- 1 files changed, 167 insertions(+), 209 deletions(-) diff --git a/ksrc/arch/powerpc/patches/adeos-ipipe-2.6.35.7-powerpc-2.11-02.patch b/ksrc/arch/powerpc/patches/adeos-ipipe-2.6.35.7-powerpc-2.12-00.patch similarity index 98% rename from ksrc/arch/powerpc/patches/adeos-ipipe-2.6.35.7-powerpc-2.11-02.patch rename to ksrc/arch/powerpc/patches/adeos-ipipe-2.6.35.7-powerpc-2.12-00.patch index 60e9a5f..68b4458 100644 --- a/ksrc/arch/powerpc/patches/adeos-ipipe-2.6.35.7-powerpc-2.11-02.patch +++ b/ksrc/arch/powerpc/patches/adeos-ipipe-2.6.35.7-powerpc-2.12-00.patch @@ -265,10 +265,10 @@ index bd100fc..8fa1901 100644 * or should we not care like we do now ? --BenH. diff --git a/arch/powerpc/include/asm/ipipe.h b/arch/powerpc/include/asm/ipipe.h new file mode 100644 -index 0000000..2e7f178 +index 0000000..d06ec57 --- /dev/null +++ b/arch/powerpc/include/asm/ipipe.h -@@ -0,0 +1,277 @@ +@@ -0,0 +1,248 @@ +/* + * include/asm-powerpc/ipipe.h + * @@ -316,10 +316,10 @@ index 0000000..2e7f178 +#include <asm/paca.h> +#endif + -+#define IPIPE_ARCH_STRING "2.11-02" ++#define IPIPE_ARCH_STRING "2.12-00" +#define IPIPE_MAJOR_NUMBER 2 -+#define IPIPE_MINOR_NUMBER 11 -+#define IPIPE_PATCH_NUMBER 2 ++#define IPIPE_MINOR_NUMBER 12 ++#define IPIPE_PATCH_NUMBER 0 + +#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH + @@ -331,12 +331,7 @@ index 0000000..2e7f178 + +#define task_hijacked(p) \ + ({ \ -+ unsigned long __flags__; \ -+ int __x__; \ -+ local_irq_save_hw_smp(__flags__); \ -+ __x__ = __ipipe_root_domain_p; \ -+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ -+ local_irq_restore_hw_smp(__flags__); \ ++ int __x__ = ipipe_root_domain_p; \ + !__x__; \ + }) + @@ -351,8 +346,9 @@ index 0000000..2e7f178 +#define task_hijacked(p) \ + ({ \ + int __x__ = __ipipe_root_domain_p; \ -+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ -+ if (__x__) local_irq_enable_hw(); !__x__; \ ++ if (__x__) \ ++ local_irq_enable_hw(); \ ++ !__x__; \ + }) + +#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */ @@ -496,31 +492,6 @@ index 0000000..2e7f178 +#endif +} + -+/* -+ * When running handlers, enable hw interrupts for all domains but the -+ * one heading the pipeline, so that IRQs can never be significantly -+ * deferred for the latter. 
-+ */ -+#define __ipipe_run_isr(ipd, irq) \ -+do { \ -+ if (!__ipipe_pipeline_head_p(ipd)) \ -+ local_irq_enable_hw(); \ -+ if (ipd == ipipe_root_domain) \ -+ if (likely(!ipipe_virtual_irq_p(irq))) \ -+ ipd->irqs[irq].handler(irq, NULL); \ -+ else { \ -+ irq_enter(); \ -+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);\ -+ irq_exit(); \ -+ } \ -+ else { \ -+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ -+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ -+ __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ -+ } \ -+ local_irq_disable_hw(); \ -+} while(0) -+ +#define __ipipe_syscall_watched_p(p, sc) \ + (ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls) + @@ -5407,60 +5378,31 @@ index b5043a9..52c56ea 100644 #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h -index d5b3876..010aa8b 100644 +index d5b3876..92b7abe 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h -@@ -207,24 +207,28 @@ extern void irq_enter(void); - */ - extern void irq_exit(void); - --#define nmi_enter() \ -- do { \ -- ftrace_nmi_enter(); \ -- BUG_ON(in_nmi()); \ -- add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -- lockdep_off(); \ -- rcu_nmi_enter(); \ -- trace_hardirq_enter(); \ -+#define nmi_enter() \ -+ do { \ -+ if (likely(!ipipe_test_foreign_stack())) { \ -+ ftrace_nmi_enter(); \ -+ BUG_ON(in_nmi()); \ -+ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -+ lockdep_off(); \ -+ rcu_nmi_enter(); \ -+ trace_hardirq_enter(); \ -+ } \ - } while (0) +@@ -209,6 +209,7 @@ extern void irq_exit(void); --#define nmi_exit() \ -- do { \ -- trace_hardirq_exit(); \ -- rcu_nmi_exit(); \ -- lockdep_on(); \ -- BUG_ON(!in_nmi()); \ -- sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -- ftrace_nmi_exit(); \ -+#define nmi_exit() \ -+ do { \ -+ if (likely(!ipipe_test_foreign_stack())) { \ -+ trace_hardirq_exit(); \ -+ rcu_nmi_exit(); \ -+ lockdep_on(); \ -+ BUG_ON(!in_nmi()); \ -+ sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -+ ftrace_nmi_exit(); \ -+ } \ + #define nmi_enter() \ + do { \ ++ ipipe_nmi_enter(); \ + ftrace_nmi_enter(); \ + BUG_ON(in_nmi()); \ + add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ +@@ -225,6 +226,7 @@ extern void irq_exit(void); + BUG_ON(!in_nmi()); \ + sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ ++ ipipe_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/ipipe.h b/include/linux/ipipe.h new file mode 100644 -index 0000000..8d82852 +index 0000000..4e04013 --- /dev/null +++ b/include/linux/ipipe.h -@@ -0,0 +1,769 @@ +@@ -0,0 +1,780 @@ +/* -*- linux-c -*- + * include/linux/ipipe.h + * @@ -5728,6 +5670,16 @@ index 0000000..8d82852 +#define __ipipe_sync_pipeline() __ipipe_sync_stage() +#endif + ++#ifndef __ipipe_do_root_xirq ++#define __ipipe_do_root_xirq(ipd, irq) \ ++ (ipd)->irqs[irq].handler(irq, (ipd)->irqs[irq].cookie) ++#endif ++ ++#ifndef __ipipe_do_root_virq ++#define __ipipe_do_root_virq(ipd, irq) \ ++ (ipd)->irqs[irq].handler(irq, (ipd)->irqs[irq].cookie) ++#endif ++ +#ifndef __ipipe_run_irqtail +#define __ipipe_run_irqtail() do { } while(0) +#endif @@ -5888,7 +5840,8 @@ index 0000000..8d82852 + ipipe_irq_ackfn_t acknowledge, + unsigned modemask); + -+int ipipe_control_irq(unsigned irq, ++int ipipe_control_irq(struct ipipe_domain *ipd, ++ unsigned int irq, + unsigned clrmask, + unsigned setmask); + @@ -6232,10 +6185,10 @@ index 0000000..8d82852 +#endif /* !__LINUX_IPIPE_H */ diff --git a/include/linux/ipipe_base.h 
b/include/linux/ipipe_base.h new file mode 100644 -index 0000000..5260e8b +index 0000000..3f43ba5 --- /dev/null +++ b/include/linux/ipipe_base.h -@@ -0,0 +1,135 @@ +@@ -0,0 +1,134 @@ +/* -*- linux-c -*- + * include/linux/ipipe_base.h + * @@ -6291,11 +6244,9 @@ index 0000000..5260e8b + +/* Per-cpu pipeline status */ +#define IPIPE_STALL_FLAG 0 /* Stalls a pipeline stage -- guaranteed at bit #0 */ -+#define IPIPE_SYNC_FLAG 1 /* The interrupt syncer is running for the domain */ -+#define IPIPE_NOSTACK_FLAG 2 /* Domain currently runs on a foreign stack */ ++#define IPIPE_NOSTACK_FLAG 1 /* Domain currently runs on a foreign stack */ + +#define IPIPE_STALL_MASK (1L << IPIPE_STALL_FLAG) -+#define IPIPE_SYNC_MASK (1L << IPIPE_SYNC_FLAG) +#define IPIPE_NOSTACK_MASK (1L << IPIPE_NOSTACK_FLAG) + +typedef void (*ipipe_irq_handler_t)(unsigned int irq, @@ -6349,6 +6300,7 @@ index 0000000..5260e8b +#endif +#define __IPIPE_FEATURE_PREPARE_PANIC 1 +#define __IPIPE_FEATURE_ROOT_PREEMPT_NOTIFIER 1 ++#define __IPIPE_FEATURE_CONTROL_IRQ 1 + +#else /* !CONFIG_IPIPE */ + @@ -6373,10 +6325,10 @@ index 0000000..5260e8b +#endif /* !__LINUX_IPIPE_BASE_H */ diff --git a/include/linux/ipipe_lock.h b/include/linux/ipipe_lock.h new file mode 100644 -index 0000000..032080f +index 0000000..75bf0e8 --- /dev/null +++ b/include/linux/ipipe_lock.h -@@ -0,0 +1,230 @@ +@@ -0,0 +1,240 @@ +/* -*- linux-c -*- + * include/linux/ipipe_lock.h + * @@ -6459,10 +6411,13 @@ index 0000000..032080f + do { \ + if (ipipe_spinlock_p(lock)) \ + __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \ -+ else if (std_spinlock_raw_p(lock)) \ -+ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \ -+ else if (std_spinlock_p(lock)) \ -+ __real_raw_spin_unlock_irqrestore(&std_spinlock(lock)->rlock, flags); \ ++ else { \ ++ __ipipe_spin_unlock_debug(flags); \ ++ if (std_spinlock_raw_p(lock)) \ ++ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \ ++ else if (std_spinlock_p(lock)) \ ++ __real_raw_spin_unlock_irqrestore(&std_spinlock(lock)->rlock, flags); \ ++ } \ + } while (0) + +#define PICK_SPINOP(op, lock) \ @@ -6566,6 +6521,12 @@ index 0000000..032080f + +void __ipipe_spin_unlock_irqcomplete(unsigned long x); + ++#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP) ++void __ipipe_spin_unlock_debug(unsigned long flags); ++#else ++#define __ipipe_spin_unlock_debug(flags) do { } while (0) ++#endif ++ +#define ipipe_rwlock_t __ipipe_rwlock_t +#define IPIPE_DEFINE_RWLOCK(x) ipipe_rwlock_t x = IPIPE_RW_LOCK_UNLOCKED +#define IPIPE_DECLARE_RWLOCK(x) extern ipipe_rwlock_t x @@ -6598,6 +6559,7 @@ index 0000000..032080f +#define __ipipe_spin_unlock_irqrestore(lock, x) do { (void)(x); } while (0) +#define __ipipe_spin_unlock_irqbegin(lock) do { } while (0) +#define __ipipe_spin_unlock_irqcomplete(x) do { (void)(x); } while (0) ++#define __ipipe_spin_unlock_debug(flags) do { } while (0) + +#define ipipe_rwlock_t rwlock_t +#define IPIPE_DEFINE_RWLOCK(x) DEFINE_RWLOCK(x) @@ -7643,10 +7605,10 @@ index 0000000..6257dfa +obj-$(CONFIG_IPIPE_TRACE) += tracer.o diff --git a/kernel/ipipe/core.c b/kernel/ipipe/core.c new file mode 100644 -index 0000000..17479dc +index 0000000..7f1df1f --- /dev/null +++ b/kernel/ipipe/core.c -@@ -0,0 +1,2153 @@ +@@ -0,0 +1,2149 @@ +/* -*- linux-c -*- + * linux/kernel/ipipe/core.c + * @@ -8290,7 +8252,7 @@ index 0000000..17479dc + +static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p) +{ -+ int l0b, l1b, l2b, vl0b, vl1b; ++ int l0b, l1b, l2b; + unsigned long l0m, l1m, 
l2m; + unsigned int irq; + @@ -8478,7 +8440,7 @@ index 0000000..17479dc + + this_domain = next_domain = __ipipe_current_domain; + p = ipipe_cpudom_ptr(this_domain); -+ p->status &= ~(IPIPE_STALL_MASK|IPIPE_SYNC_MASK); ++ p->status &= ~IPIPE_STALL_MASK; + + if (__ipipe_ipending_p(p)) + goto sync_stage; @@ -8539,11 +8501,11 @@ index 0000000..17479dc +} + +/* -+ * ipipe_control_irq() -- Change modes of a pipelined interrupt for -+ * the current domain. ++ * ipipe_virtualize_irq() -- Set a per-domain pipelined interrupt ++ * handler. + */ +int ipipe_virtualize_irq(struct ipipe_domain *ipd, -+ unsigned irq, ++ unsigned int irq, + ipipe_irq_handler_t handler, + void *cookie, + ipipe_irq_ackfn_t acknowledge, @@ -8552,7 +8514,7 @@ index 0000000..17479dc + ipipe_irq_handler_t old_handler; + struct irq_desc *desc; + unsigned long flags; -+ int err; ++ int ret = 0; + + if (irq >= IPIPE_NR_IRQS) + return -EINVAL; @@ -8568,39 +8530,51 @@ index 0000000..17479dc + + old_handler = ipd->irqs[irq].handler; + -+ if (handler != NULL) { -+ if (handler == IPIPE_SAME_HANDLER) { -+ handler = old_handler; -+ cookie = ipd->irqs[irq].cookie; ++ if (handler == NULL) { ++ modemask &= ++ ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK | ++ IPIPE_EXCLUSIVE_MASK | IPIPE_WIRED_MASK); + -+ if (handler == NULL) { -+ err = -EINVAL; -+ goto unlock_and_exit; -+ } -+ } else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 && -+ old_handler != NULL) { -+ err = -EBUSY; -+ goto unlock_and_exit; ++ ipd->irqs[irq].handler = NULL; ++ ipd->irqs[irq].cookie = NULL; ++ ipd->irqs[irq].acknowledge = NULL; ++ ipd->irqs[irq].control = modemask; ++ ++ if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) { ++ desc = irq_to_desc(irq); ++ if (old_handler && desc) ++ __ipipe_disable_irqdesc(ipd, irq); + } + -+ /* Wired interrupts can only be delivered to domains -+ * always heading the pipeline, and using dynamic -+ * propagation. */ ++ goto unlock_and_exit; ++ } + -+ if ((modemask & IPIPE_WIRED_MASK) != 0) { -+ if ((modemask & (IPIPE_PASS_MASK | IPIPE_STICKY_MASK)) != 0) { -+ err = -EINVAL; -+ goto unlock_and_exit; -+ } -+ modemask |= (IPIPE_HANDLE_MASK); ++ if (handler == IPIPE_SAME_HANDLER) { ++ cookie = ipd->irqs[irq].cookie; ++ handler = old_handler; ++ if (handler == NULL) { ++ ret = -EINVAL; ++ goto unlock_and_exit; + } ++ } else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 && old_handler) { ++ ret = -EBUSY; ++ goto unlock_and_exit; ++ } + -+ if ((modemask & IPIPE_STICKY_MASK) != 0) -+ modemask |= IPIPE_HANDLE_MASK; -+ } else -+ modemask &= -+ ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK | -+ IPIPE_EXCLUSIVE_MASK | IPIPE_WIRED_MASK); ++ /* ++ * Wired interrupts can only be delivered to domains always ++ * heading the pipeline, and using dynamic propagation. 
++ */ ++ if ((modemask & IPIPE_WIRED_MASK) != 0) { ++ if ((modemask & (IPIPE_PASS_MASK | IPIPE_STICKY_MASK)) != 0) { ++ ret = -EINVAL; ++ goto unlock_and_exit; ++ } ++ modemask |= IPIPE_HANDLE_MASK; ++ } ++ ++ if ((modemask & IPIPE_STICKY_MASK) != 0) ++ modemask |= IPIPE_HANDLE_MASK; + + if (acknowledge == NULL) + /* @@ -8614,14 +8588,12 @@ index 0000000..17479dc + ipd->irqs[irq].acknowledge = acknowledge; + ipd->irqs[irq].control = modemask; + -+ if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) { -+ desc = irq_to_desc(irq); -+ if (handler != NULL) { -+ if (desc) -+ __ipipe_enable_irqdesc(ipd, irq); ++ desc = irq_to_desc(irq); ++ if (desc == NULL) ++ goto unlock_and_exit; + -+ if ((modemask & IPIPE_ENABLE_MASK) != 0) { -+ if (ipd != __ipipe_current_domain) { ++ if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) { ++ __ipipe_enable_irqdesc(ipd, irq); + /* + * IRQ enable/disable state is domain-sensitive, so we + * may not change it for another domain. What is @@ -8630,43 +8602,37 @@ index 0000000..17479dc + * descriptor which thus may be different from + * __ipipe_current_domain. + */ -+ err = -EPERM; -+ goto unlock_and_exit; -+ } -+ if (desc) -+ __ipipe_enable_irq(irq); -+ } -+ } else if (old_handler != NULL && desc) -+ __ipipe_disable_irqdesc(ipd, irq); ++ if ((modemask & IPIPE_ENABLE_MASK) != 0) { ++ if (ipd != __ipipe_current_domain) ++ ret = -EPERM; ++ else ++ __ipipe_enable_irq(irq); ++ } + } + -+ err = 0; -+ -+ unlock_and_exit: ++unlock_and_exit: + + spin_unlock_irqrestore(&__ipipe_pipelock, flags); + -+ return err; ++ return ret; +} + -+/* ipipe_control_irq() -- Change modes of a pipelined interrupt for -+ * the current domain. */ ++/* ipipe_control_irq() -- Change control mode of a pipelined interrupt. */ + -+int ipipe_control_irq(unsigned irq, unsigned clrmask, unsigned setmask) ++int ipipe_control_irq(struct ipipe_domain *ipd, unsigned int irq, ++ unsigned clrmask, unsigned setmask) +{ -+ struct ipipe_domain *ipd; + unsigned long flags; ++ int ret = 0; + + if (irq >= IPIPE_NR_IRQS) + return -EINVAL; + -+ spin_lock_irqsave(&__ipipe_pipelock, flags); -+ -+ ipd = __ipipe_current_domain; ++ flags = ipipe_critical_enter(NULL); + + if (ipd->irqs[irq].control & IPIPE_SYSTEM_MASK) { -+ spin_unlock_irqrestore(&__ipipe_pipelock, flags); -+ return -EPERM; ++ ret = -EPERM; ++ goto out; + } + + if (ipd->irqs[irq].handler == NULL) @@ -8686,9 +8652,10 @@ index 0000000..17479dc + else if ((clrmask & IPIPE_ENABLE_MASK) != 0) + __ipipe_disable_irq(irq); + -+ spin_unlock_irqrestore(&__ipipe_pipelock, flags); ++out: ++ ipipe_critical_exit(flags); + -+ return 0; ++ return ret; +} + +/* __ipipe_dispatch_event() -- Low-level event dispatcher. */ @@ -8814,8 +8781,10 @@ index 0000000..17479dc + + p->irqall[irq]++; + __set_bit(IPIPE_STALL_FLAG, &p->status); ++ barrier(); + head->irqs[irq].handler(irq, head->irqs[irq].cookie); /* Call the ISR. */ + __ipipe_run_irqtail(); ++ barrier(); + __clear_bit(IPIPE_STALL_FLAG, &p->status); + + if (__ipipe_current_domain == head) { @@ -8845,78 +8814,52 @@ index 0000000..17479dc +{ + struct ipipe_percpu_domain_data *p; + struct ipipe_domain *ipd; -+ int cpu, irq; ++ int irq; + + ipd = __ipipe_current_domain; + p = ipipe_cpudom_ptr(ipd); + -+ if (__test_and_set_bit(IPIPE_SYNC_FLAG, &p->status)) { -+#ifdef __IPIPE_FEATURE_NESTED_ROOTIRQS -+ /* -+ * Caution: some archs do not support this -+ * (mis)feature (e.g. x86_32). 
-+ */ -+ if (ipd != ipipe_root_domain) -+#endif -+ return; -+ } ++ __set_bit(IPIPE_STALL_FLAG, &p->status); ++ smp_wmb(); + -+ cpu = ipipe_processor_id(); ++ if (ipd == ipipe_root_domain) ++ trace_hardirqs_off(); + + for (;;) { + irq = __ipipe_next_irq(p); + if (irq < 0) + break; + /* -+ * Make sure the compiler does not reorder -+ * wrongly, so that all updates to maps are -+ * done before the handler gets called. ++ * Make sure the compiler does not reorder wrongly, so ++ * that all updates to maps are done before the ++ * handler gets called. + */ + barrier(); + + if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) + continue; + -+ __set_bit(IPIPE_STALL_FLAG, &p->status); -+ smp_wmb(); ++ if (!__ipipe_pipeline_head_p(ipd)) ++ local_irq_enable_hw(); + -+ if (ipd == ipipe_root_domain) -+ trace_hardirqs_off(); ++ if (likely(ipd != ipipe_root_domain)) { ++ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); ++ __ipipe_run_irqtail(); ++ } else if (ipipe_virtual_irq_p(irq)) { ++ irq_enter(); ++ __ipipe_do_root_virq(ipd, irq); ++ irq_exit(); ++ } else ++ __ipipe_do_root_xirq(ipd, irq); + -+ __ipipe_run_isr(ipd, irq); -+ barrier(); ++ local_irq_disable_hw(); + p = ipipe_cpudom_ptr(__ipipe_current_domain); -+#ifdef CONFIG_SMP -+ { -+ int newcpu = ipipe_processor_id(); -+ -+ if (newcpu != cpu) { /* Handle CPU migration. */ -+ /* -+ * We expect any domain to clear the SYNC bit each -+ * time it switches in a new task, so that preemptions -+ * and/or CPU migrations (in the SMP case) over the -+ * ISR do not lock out the log syncer for some -+ * indefinite amount of time. In the Linux case, -+ * schedule() handles this (see kernel/sched.c). For -+ * this reason, we don't bother clearing it here for -+ * the source CPU in the migration handling case, -+ * since it must have scheduled another task in by -+ * now. -+ */ -+ __set_bit(IPIPE_SYNC_FLAG, &p->status); -+ cpu = newcpu; -+ } -+ } -+#endif /* CONFIG_SMP */ -+#ifdef CONFIG_TRACE_IRQFLAGS -+ if (__ipipe_root_domain_p && -+ test_bit(IPIPE_STALL_FLAG, &p->status)) -+ trace_hardirqs_on(); -+#endif -+ __clear_bit(IPIPE_STALL_FLAG, &p->status); + } + -+ __clear_bit(IPIPE_SYNC_FLAG, &p->status); ++ if (ipd == ipipe_root_domain) ++ trace_hardirqs_on(); ++ ++ __clear_bit(IPIPE_STALL_FLAG, &p->status); +} + +/* ipipe_register_domain() -- Link a new domain to the pipeline. */ @@ -9066,6 +9009,7 @@ index 0000000..17479dc + + for (irq = 0; irq < IPIPE_NR_IRQS; irq++) { + clear_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control); ++ clear_bit(IPIPE_WIRED_FLAG, &ipd->irqs[irq].control); + clear_bit(IPIPE_STICKY_FLAG, &ipd->irqs[irq].control); + set_bit(IPIPE_PASS_FLAG, &ipd->irqs[irq].control); + } @@ -9739,6 +9683,20 @@ index 0000000..17479dc + return ret; +} + ++void __ipipe_spin_unlock_debug(unsigned long flags) ++{ ++ /* ++ * We catch a nasty issue where spin_unlock_irqrestore() on a ++ * regular kernel spinlock is about to re-enable hw interrupts ++ * in a section entered with hw irqs off. This is clearly the ++ * sign of a massive breakage coming. Usual suspect is a ++ * regular spinlock which was overlooked, used within a ++ * section which must run with hw irqs disabled. ++ */ ++ WARN_ON_ONCE(!raw_irqs_disabled_flags(flags) && irqs_disabled_hw()); ++} ++EXPORT_SYMBOL(__ipipe_spin_unlock_debug); ++ +#endif /* CONFIG_IPIPE_DEBUG_INTERNAL && CONFIG_SMP */ + + _______________________________________________ Xenomai-git mailing list [email protected] https://mail.gna.org/listinfo/xenomai-git
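
One caller-visible change in this upgrade worth noting: ipipe_control_irq() now takes the target domain as an explicit first argument instead of implicitly acting on the current domain, and ipipe_base.h advertises the new interface through __IPIPE_FEATURE_CONTROL_IRQ. Below is a minimal compatibility sketch, not part of the patch itself; the helper name my_enable_irq_line is hypothetical, and it assumes IPIPE_ENABLE_MASK keeps its usual meaning of enabling the interrupt line at controller level.

#include <linux/ipipe.h>

/*
 * Illustrative wrapper only: enable an IRQ line for the given domain,
 * building against either generation of the I-pipe patch.
 */
static int my_enable_irq_line(struct ipipe_domain *ipd, unsigned int irq)
{
#ifdef __IPIPE_FEATURE_CONTROL_IRQ
	/* 2.12-00 and later: the target domain is passed explicitly. */
	return ipipe_control_irq(ipd, irq, 0, IPIPE_ENABLE_MASK);
#else
	/* Older interface: only the current domain could be affected. */
	(void)ipd;
	return ipipe_control_irq(irq, 0, IPIPE_ENABLE_MASK);
#endif
}

Passing the domain explicitly presumably lets a client such as the Xenomai nucleus adjust the control bits of an IRQ it owns without having to issue the call from that domain's context, which the previous interface could not express.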