Module: xenomai-head
Branch: master
Commit: 6aa393b91c63bb67e92d48eb9bc5f95d339bb012
URL:    http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=6aa393b91c63bb67e92d48eb9bc5f95d339bb012
Author: Philippe Gerum <[email protected]> Date: Sun Oct 31 12:07:01 2010 +0100 x86: upgrade I-pipe support to 2.6.35.7-x86-2.8-00 --- ...patch => adeos-ipipe-2.6.35.7-x86-2.8-00.patch} | 461 ++++++++++---------- 1 files changed, 232 insertions(+), 229 deletions(-) diff --git a/ksrc/arch/x86/patches/adeos-ipipe-2.6.35.7-x86-2.7-04.patch b/ksrc/arch/x86/patches/adeos-ipipe-2.6.35.7-x86-2.8-00.patch similarity index 97% rename from ksrc/arch/x86/patches/adeos-ipipe-2.6.35.7-x86-2.7-04.patch rename to ksrc/arch/x86/patches/adeos-ipipe-2.6.35.7-x86-2.8-00.patch index e8280d6..149eff4 100644 --- a/ksrc/arch/x86/patches/adeos-ipipe-2.6.35.7-x86-2.7-04.patch +++ b/ksrc/arch/x86/patches/adeos-ipipe-2.6.35.7-x86-2.8-00.patch @@ -94,7 +94,7 @@ index 8e8ec66..b2f8e98 100644 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR) diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h -index 46c0fe0..ce04cf5 100644 +index 46c0fe0..0cf2ce3 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -35,6 +35,13 @@ extern void spurious_interrupt(void); @@ -111,7 +111,15 @@ index 46c0fe0..ce04cf5 100644 extern void invalidate_interrupt(void); extern void invalidate_interrupt0(void); -@@ -127,6 +134,7 @@ extern void smp_invalidate_interrupt(struct pt_regs *); +@@ -115,6 +122,7 @@ extern void smp_apic_timer_interrupt(struct pt_regs *); + extern void smp_spurious_interrupt(struct pt_regs *); + extern void smp_x86_platform_ipi(struct pt_regs *); + extern void smp_error_interrupt(struct pt_regs *); ++extern void smp_perf_pending_interrupt(struct pt_regs *); + #ifdef CONFIG_X86_IO_APIC + extern asmlinkage void smp_irq_move_cleanup_interrupt(void); + #endif +@@ -127,6 +135,7 @@ extern void smp_invalidate_interrupt(struct pt_regs *); #else extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *); #endif @@ -176,10 +184,10 @@ index 0b72282..6574056 100644 /* diff --git a/arch/x86/include/asm/ipipe.h b/arch/x86/include/asm/ipipe.h new file mode 100644 -index 0000000..c2b4165 +index 0000000..8af0104 --- /dev/null +++ b/arch/x86/include/asm/ipipe.h -@@ -0,0 +1,183 @@ +@@ -0,0 +1,157 @@ +/* -*- linux-c -*- + * arch/x86/include/asm/ipipe.h + * @@ -207,10 +215,10 @@ index 0000000..c2b4165 +#ifdef CONFIG_IPIPE + +#ifndef IPIPE_ARCH_STRING -+#define IPIPE_ARCH_STRING "2.7-04" ++#define IPIPE_ARCH_STRING "2.8-00" +#define IPIPE_MAJOR_NUMBER 2 -+#define IPIPE_MINOR_NUMBER 7 -+#define IPIPE_PATCH_NUMBER 4 ++#define IPIPE_MINOR_NUMBER 8 ++#define IPIPE_PATCH_NUMBER 0 +#endif + +DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs); @@ -251,7 +259,6 @@ index 0000000..c2b4165 + +#define task_hijacked(p) \ + ({ int x = __ipipe_root_domain_p; \ -+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ + if (x) local_irq_enable_hw(); !x; }) + +struct ipipe_domain; @@ -317,31 +324,6 @@ index 0000000..c2b4165 + +#define __ipipe_root_tick_p(regs) ((regs)->flags & X86_EFLAGS_IF) + -+/* -+ * When running handlers, enable hw interrupts for all domains but the -+ * one heading the pipeline, so that IRQs can never be significantly -+ * deferred for the latter. 
-+ */ -+#define __ipipe_run_isr(ipd, irq) \ -+ do { \ -+ if (!__ipipe_pipeline_head_p(ipd)) \ -+ local_irq_enable_hw(); \ -+ if (ipd == ipipe_root_domain) { \ -+ if (likely(!ipipe_virtual_irq_p(irq))) \ -+ __ipipe_call_root_xirq_handler( \ -+ irq, (ipd)->irqs[irq].handler); \ -+ else \ -+ __ipipe_call_root_virq_handler( \ -+ irq, (ipd)->irqs[irq].handler, \ -+ (ipd)->irqs[irq].cookie); \ -+ } else { \ -+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ -+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ -+ __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ -+ } \ -+ local_irq_disable_hw(); \ -+ } while(0) -+ +#else /* !CONFIG_IPIPE */ + +#define ipipe_update_tick_evtdev(evtdev) do { } while (0) @@ -365,10 +347,10 @@ index 0000000..c2b4165 +#endif /* !__X86_IPIPE_H */ diff --git a/arch/x86/include/asm/ipipe_32.h b/arch/x86/include/asm/ipipe_32.h new file mode 100644 -index 0000000..356e6df +index 0000000..4263a7f --- /dev/null +++ b/arch/x86/include/asm/ipipe_32.h -@@ -0,0 +1,131 @@ +@@ -0,0 +1,136 @@ +/* -*- linux-c -*- + * arch/x86/include/asm/ipipe_32.h + * @@ -433,8 +415,8 @@ index 0000000..356e6df + +void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc); + -+static inline void __ipipe_call_root_xirq_handler(unsigned irq, -+ ipipe_irq_handler_t handler) ++static inline void __do_root_xirq(ipipe_irq_handler_t handler, ++ unsigned int irq) +{ + struct pt_regs *regs = &__raw_get_cpu_var(__ipipe_tick_regs); + @@ -463,13 +445,15 @@ index 0000000..356e6df + : "a" (~irq), "r" (handler), "rm" (regs)); +} + -+void irq_enter(void); -+void irq_exit(void); ++#define __ipipe_do_root_xirq(ipd, irq) \ ++ __do_root_xirq((ipd)->irqs[irq].handler, irq) + -+static inline void __ipipe_call_root_virq_handler(unsigned irq, -+ ipipe_irq_handler_t handler, -+ void *cookie) ++static inline void __do_root_virq(ipipe_irq_handler_t handler, ++ void *cookie, unsigned int irq) +{ ++ void irq_enter(void); ++ void irq_exit(void); ++ + irq_enter(); + __asm__ __volatile__("pushfl\n\t" + "pushl %%cs\n\t" @@ -499,13 +483,16 @@ index 0000000..356e6df + : /* no input */); +} + ++#define __ipipe_do_root_virq(ipd, irq) \ ++ __do_root_virq((ipd)->irqs[irq].handler, (ipd)->irqs[irq].cookie, irq) ++ +#endif /* !__X86_IPIPE_32_H */ diff --git a/arch/x86/include/asm/ipipe_64.h b/arch/x86/include/asm/ipipe_64.h new file mode 100644 -index 0000000..d4bc6a8 +index 0000000..b9367f6 --- /dev/null +++ b/arch/x86/include/asm/ipipe_64.h -@@ -0,0 +1,136 @@ +@@ -0,0 +1,141 @@ +/* -*- linux-c -*- + * arch/x86/include/asm/ipipe_64.h + * @@ -568,8 +555,8 @@ index 0000000..d4bc6a8 + +void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc); + -+static inline void __ipipe_call_root_xirq_handler(unsigned irq, -+ void (*handler)(unsigned, void *)) ++static inline void __do_root_xirq(ipipe_irq_handler_t handler, ++ unsigned int irq) +{ + struct pt_regs *regs = &__raw_get_cpu_var(__ipipe_tick_regs); + @@ -603,13 +590,15 @@ index 0000000..d4bc6a8 + : "rax"); +} + -+void irq_enter(void); -+void irq_exit(void); ++#define __ipipe_do_root_xirq(ipd, irq) \ ++ __do_root_xirq((ipd)->irqs[irq].handler, irq) + -+static inline void __ipipe_call_root_virq_handler(unsigned irq, -+ void (*handler)(unsigned, void *), -+ void *cookie) ++static inline void __do_root_virq(ipipe_irq_handler_t handler, ++ void *cookie, unsigned int irq) +{ ++ void irq_enter(void); ++ void irq_exit(void); ++ + irq_enter(); + __asm__ __volatile__("movq %%rsp, %%rax\n\t" + "pushq $0\n\t" @@ -641,6 +630,9 @@ index 0000000..d4bc6a8 + : /* no 
input */); +} + ++#define __ipipe_do_root_virq(ipd, irq) \ ++ __do_root_virq((ipd)->irqs[irq].handler, (ipd)->irqs[irq].cookie, irq) ++ +#endif /* !__X86_IPIPE_64_H */ diff --git a/arch/x86/include/asm/ipipe_base.h b/arch/x86/include/asm/ipipe_base.h new file mode 100644 @@ -3092,7 +3084,7 @@ index cafa7c8..28d72ce 100644 handle_real_irq: diff --git a/arch/x86/kernel/ipipe.c b/arch/x86/kernel/ipipe.c new file mode 100644 -index 0000000..8b00a8d +index 0000000..6720ba8 --- /dev/null +++ b/arch/x86/kernel/ipipe.c @@ -0,0 +1,947 @@ @@ -3333,19 +3325,19 @@ index 0000000..8b00a8d + + ipipe_virtualize_irq(ipipe_root_domain, + ipipe_apic_vector_irq(X86_PLATFORM_IPI_VECTOR), -+ (ipipe_irq_handler_t)&x86_platform_ipi, ++ (ipipe_irq_handler_t)&smp_x86_platform_ipi, + NULL, + &__ipipe_ack_apic, + IPIPE_STDROOT_MASK); + -+#ifdef CONFIG_PERF_COUNTERS ++#ifdef CONFIG_PERF_EVENTS + ipipe_virtualize_irq(ipipe_root_domain, + ipipe_apic_vector_irq(LOCAL_PENDING_VECTOR), -+ (ipipe_irq_handler_t)&perf_pending_interrupt, ++ (ipipe_irq_handler_t)&smp_perf_pending_interrupt, + NULL, + &__ipipe_ack_apic, + IPIPE_STDROOT_MASK); -+#endif /* CONFIG_PERF_COUNTERS */ ++#endif /* CONFIG_PERF_EVENTS */ + +#endif /* CONFIG_X86_LOCAL_APIC */ + @@ -4242,6 +4234,29 @@ index 3c2422a..00aa572 100644 struct tss_struct *tss = &per_cpu(init_tss, cpu); unsigned fsindex, gsindex; bool preload_fpu; +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c +index 70c4872..367f800 100644 +--- a/arch/x86/kernel/ptrace.c ++++ b/arch/x86/kernel/ptrace.c +@@ -19,6 +19,7 @@ + #include <linux/audit.h> + #include <linux/seccomp.h> + #include <linux/signal.h> ++#include <linux/unistd.h> + #include <linux/perf_event.h> + #include <linux/hw_breakpoint.h> + +@@ -1397,6 +1398,10 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) + { + bool step; + ++#ifdef CONFIG_IPIPE ++ if (syscall_get_nr(current, regs) >= NR_syscalls) ++ return; ++#endif + if (unlikely(current->audit_context)) + audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); + diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index d801210..88b0758 100644 --- a/arch/x86/kernel/smp.c @@ -4830,60 +4845,31 @@ index b5043a9..52c56ea 100644 #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h -index d5b3876..010aa8b 100644 +index d5b3876..92b7abe 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h -@@ -207,24 +207,28 @@ extern void irq_enter(void); - */ - extern void irq_exit(void); - --#define nmi_enter() \ -- do { \ -- ftrace_nmi_enter(); \ -- BUG_ON(in_nmi()); \ -- add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -- lockdep_off(); \ -- rcu_nmi_enter(); \ -- trace_hardirq_enter(); \ -+#define nmi_enter() \ -+ do { \ -+ if (likely(!ipipe_test_foreign_stack())) { \ -+ ftrace_nmi_enter(); \ -+ BUG_ON(in_nmi()); \ -+ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -+ lockdep_off(); \ -+ rcu_nmi_enter(); \ -+ trace_hardirq_enter(); \ -+ } \ - } while (0) +@@ -209,6 +209,7 @@ extern void irq_exit(void); --#define nmi_exit() \ -- do { \ -- trace_hardirq_exit(); \ -- rcu_nmi_exit(); \ -- lockdep_on(); \ -- BUG_ON(!in_nmi()); \ -- sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -- ftrace_nmi_exit(); \ -+#define nmi_exit() \ -+ do { \ -+ if (likely(!ipipe_test_foreign_stack())) { \ -+ trace_hardirq_exit(); \ -+ rcu_nmi_exit(); \ -+ lockdep_on(); \ -+ BUG_ON(!in_nmi()); \ -+ sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ -+ ftrace_nmi_exit(); \ -+ } \ + #define nmi_enter() \ + do { \ ++ 
ipipe_nmi_enter(); \ + ftrace_nmi_enter(); \ + BUG_ON(in_nmi()); \ + add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ +@@ -225,6 +226,7 @@ extern void irq_exit(void); + BUG_ON(!in_nmi()); \ + sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ ++ ipipe_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/ipipe.h b/include/linux/ipipe.h new file mode 100644 -index 0000000..8d82852 +index 0000000..4e04013 --- /dev/null +++ b/include/linux/ipipe.h -@@ -0,0 +1,769 @@ +@@ -0,0 +1,780 @@ +/* -*- linux-c -*- + * include/linux/ipipe.h + * @@ -5151,6 +5137,16 @@ index 0000000..8d82852 +#define __ipipe_sync_pipeline() __ipipe_sync_stage() +#endif + ++#ifndef __ipipe_do_root_xirq ++#define __ipipe_do_root_xirq(ipd, irq) \ ++ (ipd)->irqs[irq].handler(irq, (ipd)->irqs[irq].cookie) ++#endif ++ ++#ifndef __ipipe_do_root_virq ++#define __ipipe_do_root_virq(ipd, irq) \ ++ (ipd)->irqs[irq].handler(irq, (ipd)->irqs[irq].cookie) ++#endif ++ +#ifndef __ipipe_run_irqtail +#define __ipipe_run_irqtail() do { } while(0) +#endif @@ -5311,7 +5307,8 @@ index 0000000..8d82852 + ipipe_irq_ackfn_t acknowledge, + unsigned modemask); + -+int ipipe_control_irq(unsigned irq, ++int ipipe_control_irq(struct ipipe_domain *ipd, ++ unsigned int irq, + unsigned clrmask, + unsigned setmask); + @@ -5655,10 +5652,10 @@ index 0000000..8d82852 +#endif /* !__LINUX_IPIPE_H */ diff --git a/include/linux/ipipe_base.h b/include/linux/ipipe_base.h new file mode 100644 -index 0000000..5260e8b +index 0000000..3f43ba5 --- /dev/null +++ b/include/linux/ipipe_base.h -@@ -0,0 +1,135 @@ +@@ -0,0 +1,134 @@ +/* -*- linux-c -*- + * include/linux/ipipe_base.h + * @@ -5714,11 +5711,9 @@ index 0000000..5260e8b + +/* Per-cpu pipeline status */ +#define IPIPE_STALL_FLAG 0 /* Stalls a pipeline stage -- guaranteed at bit #0 */ -+#define IPIPE_SYNC_FLAG 1 /* The interrupt syncer is running for the domain */ -+#define IPIPE_NOSTACK_FLAG 2 /* Domain currently runs on a foreign stack */ ++#define IPIPE_NOSTACK_FLAG 1 /* Domain currently runs on a foreign stack */ + +#define IPIPE_STALL_MASK (1L << IPIPE_STALL_FLAG) -+#define IPIPE_SYNC_MASK (1L << IPIPE_SYNC_FLAG) +#define IPIPE_NOSTACK_MASK (1L << IPIPE_NOSTACK_FLAG) + +typedef void (*ipipe_irq_handler_t)(unsigned int irq, @@ -5772,6 +5767,7 @@ index 0000000..5260e8b +#endif +#define __IPIPE_FEATURE_PREPARE_PANIC 1 +#define __IPIPE_FEATURE_ROOT_PREEMPT_NOTIFIER 1 ++#define __IPIPE_FEATURE_CONTROL_IRQ 1 + +#else /* !CONFIG_IPIPE */ + @@ -5796,10 +5792,10 @@ index 0000000..5260e8b +#endif /* !__LINUX_IPIPE_BASE_H */ diff --git a/include/linux/ipipe_lock.h b/include/linux/ipipe_lock.h new file mode 100644 -index 0000000..032080f +index 0000000..cf33925 --- /dev/null +++ b/include/linux/ipipe_lock.h -@@ -0,0 +1,230 @@ +@@ -0,0 +1,240 @@ +/* -*- linux-c -*- + * include/linux/ipipe_lock.h + * @@ -5882,10 +5878,13 @@ index 0000000..032080f + do { \ + if (ipipe_spinlock_p(lock)) \ + __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \ -+ else if (std_spinlock_raw_p(lock)) \ -+ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \ -+ else if (std_spinlock_p(lock)) \ -+ __real_raw_spin_unlock_irqrestore(&std_spinlock(lock)->rlock, flags); \ ++ else { \ ++ __ipipe_spin_unlock_debug(flags); \ ++ if (std_spinlock_raw_p(lock)) \ ++ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \ ++ else if (std_spinlock_p(lock)) \ ++ __real_raw_spin_unlock_irqrestore(&std_spinlock(lock)->rlock, flags); \ ++ } \ + } while (0) + +#define 
PICK_SPINOP(op, lock) \ @@ -5989,6 +5988,12 @@ index 0000000..032080f + +void __ipipe_spin_unlock_irqcomplete(unsigned long x); + ++#ifdef CONFIG_IPIPE_DEBUG_INTERNAL ++void __ipipe_spin_unlock_debug(unsigned long flags); ++#else ++#define __ipipe_spin_unlock_debug(flags) do { } while (0) ++#endif ++ +#define ipipe_rwlock_t __ipipe_rwlock_t +#define IPIPE_DEFINE_RWLOCK(x) ipipe_rwlock_t x = IPIPE_RW_LOCK_UNLOCKED +#define IPIPE_DECLARE_RWLOCK(x) extern ipipe_rwlock_t x @@ -6021,6 +6026,7 @@ index 0000000..032080f +#define __ipipe_spin_unlock_irqrestore(lock, x) do { (void)(x); } while (0) +#define __ipipe_spin_unlock_irqbegin(lock) do { } while (0) +#define __ipipe_spin_unlock_irqcomplete(x) do { (void)(x); } while (0) ++#define __ipipe_spin_unlock_debug(flags) do { } while (0) + +#define ipipe_rwlock_t rwlock_t +#define IPIPE_DEFINE_RWLOCK(x) DEFINE_RWLOCK(x) @@ -7066,10 +7072,10 @@ index 0000000..6257dfa +obj-$(CONFIG_IPIPE_TRACE) += tracer.o diff --git a/kernel/ipipe/core.c b/kernel/ipipe/core.c new file mode 100644 -index 0000000..17479dc +index 0000000..7f1df1f --- /dev/null +++ b/kernel/ipipe/core.c -@@ -0,0 +1,2153 @@ +@@ -0,0 +1,2149 @@ +/* -*- linux-c -*- + * linux/kernel/ipipe/core.c + * @@ -7713,7 +7719,7 @@ index 0000000..17479dc + +static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p) +{ -+ int l0b, l1b, l2b, vl0b, vl1b; ++ int l0b, l1b, l2b; + unsigned long l0m, l1m, l2m; + unsigned int irq; + @@ -7901,7 +7907,7 @@ index 0000000..17479dc + + this_domain = next_domain = __ipipe_current_domain; + p = ipipe_cpudom_ptr(this_domain); -+ p->status &= ~(IPIPE_STALL_MASK|IPIPE_SYNC_MASK); ++ p->status &= ~IPIPE_STALL_MASK; + + if (__ipipe_ipending_p(p)) + goto sync_stage; @@ -7962,11 +7968,11 @@ index 0000000..17479dc +} + +/* -+ * ipipe_control_irq() -- Change modes of a pipelined interrupt for -+ * the current domain. ++ * ipipe_virtualize_irq() -- Set a per-domain pipelined interrupt ++ * handler. + */ +int ipipe_virtualize_irq(struct ipipe_domain *ipd, -+ unsigned irq, ++ unsigned int irq, + ipipe_irq_handler_t handler, + void *cookie, + ipipe_irq_ackfn_t acknowledge, @@ -7975,7 +7981,7 @@ index 0000000..17479dc + ipipe_irq_handler_t old_handler; + struct irq_desc *desc; + unsigned long flags; -+ int err; ++ int ret = 0; + + if (irq >= IPIPE_NR_IRQS) + return -EINVAL; @@ -7991,39 +7997,51 @@ index 0000000..17479dc + + old_handler = ipd->irqs[irq].handler; + -+ if (handler != NULL) { -+ if (handler == IPIPE_SAME_HANDLER) { -+ handler = old_handler; -+ cookie = ipd->irqs[irq].cookie; ++ if (handler == NULL) { ++ modemask &= ++ ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK | ++ IPIPE_EXCLUSIVE_MASK | IPIPE_WIRED_MASK); + -+ if (handler == NULL) { -+ err = -EINVAL; -+ goto unlock_and_exit; -+ } -+ } else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 && -+ old_handler != NULL) { -+ err = -EBUSY; -+ goto unlock_and_exit; ++ ipd->irqs[irq].handler = NULL; ++ ipd->irqs[irq].cookie = NULL; ++ ipd->irqs[irq].acknowledge = NULL; ++ ipd->irqs[irq].control = modemask; ++ ++ if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) { ++ desc = irq_to_desc(irq); ++ if (old_handler && desc) ++ __ipipe_disable_irqdesc(ipd, irq); + } + -+ /* Wired interrupts can only be delivered to domains -+ * always heading the pipeline, and using dynamic -+ * propagation. 
*/ ++ goto unlock_and_exit; ++ } + -+ if ((modemask & IPIPE_WIRED_MASK) != 0) { -+ if ((modemask & (IPIPE_PASS_MASK | IPIPE_STICKY_MASK)) != 0) { -+ err = -EINVAL; -+ goto unlock_and_exit; -+ } -+ modemask |= (IPIPE_HANDLE_MASK); ++ if (handler == IPIPE_SAME_HANDLER) { ++ cookie = ipd->irqs[irq].cookie; ++ handler = old_handler; ++ if (handler == NULL) { ++ ret = -EINVAL; ++ goto unlock_and_exit; + } ++ } else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 && old_handler) { ++ ret = -EBUSY; ++ goto unlock_and_exit; ++ } + -+ if ((modemask & IPIPE_STICKY_MASK) != 0) -+ modemask |= IPIPE_HANDLE_MASK; -+ } else -+ modemask &= -+ ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK | -+ IPIPE_EXCLUSIVE_MASK | IPIPE_WIRED_MASK); ++ /* ++ * Wired interrupts can only be delivered to domains always ++ * heading the pipeline, and using dynamic propagation. ++ */ ++ if ((modemask & IPIPE_WIRED_MASK) != 0) { ++ if ((modemask & (IPIPE_PASS_MASK | IPIPE_STICKY_MASK)) != 0) { ++ ret = -EINVAL; ++ goto unlock_and_exit; ++ } ++ modemask |= IPIPE_HANDLE_MASK; ++ } ++ ++ if ((modemask & IPIPE_STICKY_MASK) != 0) ++ modemask |= IPIPE_HANDLE_MASK; + + if (acknowledge == NULL) + /* @@ -8037,14 +8055,12 @@ index 0000000..17479dc + ipd->irqs[irq].acknowledge = acknowledge; + ipd->irqs[irq].control = modemask; + -+ if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) { -+ desc = irq_to_desc(irq); -+ if (handler != NULL) { -+ if (desc) -+ __ipipe_enable_irqdesc(ipd, irq); ++ desc = irq_to_desc(irq); ++ if (desc == NULL) ++ goto unlock_and_exit; + -+ if ((modemask & IPIPE_ENABLE_MASK) != 0) { -+ if (ipd != __ipipe_current_domain) { ++ if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) { ++ __ipipe_enable_irqdesc(ipd, irq); + /* + * IRQ enable/disable state is domain-sensitive, so we + * may not change it for another domain. What is @@ -8053,43 +8069,37 @@ index 0000000..17479dc + * descriptor which thus may be different from + * __ipipe_current_domain. + */ -+ err = -EPERM; -+ goto unlock_and_exit; -+ } -+ if (desc) -+ __ipipe_enable_irq(irq); -+ } -+ } else if (old_handler != NULL && desc) -+ __ipipe_disable_irqdesc(ipd, irq); ++ if ((modemask & IPIPE_ENABLE_MASK) != 0) { ++ if (ipd != __ipipe_current_domain) ++ ret = -EPERM; ++ else ++ __ipipe_enable_irq(irq); ++ } + } + -+ err = 0; -+ -+ unlock_and_exit: ++unlock_and_exit: + + spin_unlock_irqrestore(&__ipipe_pipelock, flags); + -+ return err; ++ return ret; +} + -+/* ipipe_control_irq() -- Change modes of a pipelined interrupt for -+ * the current domain. */ ++/* ipipe_control_irq() -- Change control mode of a pipelined interrupt. */ + -+int ipipe_control_irq(unsigned irq, unsigned clrmask, unsigned setmask) ++int ipipe_control_irq(struct ipipe_domain *ipd, unsigned int irq, ++ unsigned clrmask, unsigned setmask) +{ -+ struct ipipe_domain *ipd; + unsigned long flags; ++ int ret = 0; + + if (irq >= IPIPE_NR_IRQS) + return -EINVAL; + -+ spin_lock_irqsave(&__ipipe_pipelock, flags); -+ -+ ipd = __ipipe_current_domain; ++ flags = ipipe_critical_enter(NULL); + + if (ipd->irqs[irq].control & IPIPE_SYSTEM_MASK) { -+ spin_unlock_irqrestore(&__ipipe_pipelock, flags); -+ return -EPERM; ++ ret = -EPERM; ++ goto out; + } + + if (ipd->irqs[irq].handler == NULL) @@ -8109,9 +8119,10 @@ index 0000000..17479dc + else if ((clrmask & IPIPE_ENABLE_MASK) != 0) + __ipipe_disable_irq(irq); + -+ spin_unlock_irqrestore(&__ipipe_pipelock, flags); ++out: ++ ipipe_critical_exit(flags); + -+ return 0; ++ return ret; +} + +/* __ipipe_dispatch_event() -- Low-level event dispatcher. 
*/ @@ -8237,8 +8248,10 @@ index 0000000..17479dc + + p->irqall[irq]++; + __set_bit(IPIPE_STALL_FLAG, &p->status); ++ barrier(); + head->irqs[irq].handler(irq, head->irqs[irq].cookie); /* Call the ISR. */ + __ipipe_run_irqtail(); ++ barrier(); + __clear_bit(IPIPE_STALL_FLAG, &p->status); + + if (__ipipe_current_domain == head) { @@ -8268,78 +8281,52 @@ index 0000000..17479dc +{ + struct ipipe_percpu_domain_data *p; + struct ipipe_domain *ipd; -+ int cpu, irq; ++ int irq; + + ipd = __ipipe_current_domain; + p = ipipe_cpudom_ptr(ipd); + -+ if (__test_and_set_bit(IPIPE_SYNC_FLAG, &p->status)) { -+#ifdef __IPIPE_FEATURE_NESTED_ROOTIRQS -+ /* -+ * Caution: some archs do not support this -+ * (mis)feature (e.g. x86_32). -+ */ -+ if (ipd != ipipe_root_domain) -+#endif -+ return; -+ } ++ __set_bit(IPIPE_STALL_FLAG, &p->status); ++ smp_wmb(); + -+ cpu = ipipe_processor_id(); ++ if (ipd == ipipe_root_domain) ++ trace_hardirqs_off(); + + for (;;) { + irq = __ipipe_next_irq(p); + if (irq < 0) + break; + /* -+ * Make sure the compiler does not reorder -+ * wrongly, so that all updates to maps are -+ * done before the handler gets called. ++ * Make sure the compiler does not reorder wrongly, so ++ * that all updates to maps are done before the ++ * handler gets called. + */ + barrier(); + + if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) + continue; + -+ __set_bit(IPIPE_STALL_FLAG, &p->status); -+ smp_wmb(); ++ if (!__ipipe_pipeline_head_p(ipd)) ++ local_irq_enable_hw(); + -+ if (ipd == ipipe_root_domain) -+ trace_hardirqs_off(); ++ if (likely(ipd != ipipe_root_domain)) { ++ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); ++ __ipipe_run_irqtail(); ++ } else if (ipipe_virtual_irq_p(irq)) { ++ irq_enter(); ++ __ipipe_do_root_virq(ipd, irq); ++ irq_exit(); ++ } else ++ __ipipe_do_root_xirq(ipd, irq); + -+ __ipipe_run_isr(ipd, irq); -+ barrier(); ++ local_irq_disable_hw(); + p = ipipe_cpudom_ptr(__ipipe_current_domain); -+#ifdef CONFIG_SMP -+ { -+ int newcpu = ipipe_processor_id(); -+ -+ if (newcpu != cpu) { /* Handle CPU migration. */ -+ /* -+ * We expect any domain to clear the SYNC bit each -+ * time it switches in a new task, so that preemptions -+ * and/or CPU migrations (in the SMP case) over the -+ * ISR do not lock out the log syncer for some -+ * indefinite amount of time. In the Linux case, -+ * schedule() handles this (see kernel/sched.c). For -+ * this reason, we don't bother clearing it here for -+ * the source CPU in the migration handling case, -+ * since it must have scheduled another task in by -+ * now. -+ */ -+ __set_bit(IPIPE_SYNC_FLAG, &p->status); -+ cpu = newcpu; -+ } -+ } -+#endif /* CONFIG_SMP */ -+#ifdef CONFIG_TRACE_IRQFLAGS -+ if (__ipipe_root_domain_p && -+ test_bit(IPIPE_STALL_FLAG, &p->status)) -+ trace_hardirqs_on(); -+#endif -+ __clear_bit(IPIPE_STALL_FLAG, &p->status); + } + -+ __clear_bit(IPIPE_SYNC_FLAG, &p->status); ++ if (ipd == ipipe_root_domain) ++ trace_hardirqs_on(); ++ ++ __clear_bit(IPIPE_STALL_FLAG, &p->status); +} + +/* ipipe_register_domain() -- Link a new domain to the pipeline. 
*/ @@ -8489,6 +8476,7 @@ index 0000000..17479dc + + for (irq = 0; irq < IPIPE_NR_IRQS; irq++) { + clear_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control); ++ clear_bit(IPIPE_WIRED_FLAG, &ipd->irqs[irq].control); + clear_bit(IPIPE_STICKY_FLAG, &ipd->irqs[irq].control); + set_bit(IPIPE_PASS_FLAG, &ipd->irqs[irq].control); + } @@ -9162,6 +9150,20 @@ index 0000000..17479dc + return ret; +} + ++void __ipipe_spin_unlock_debug(unsigned long flags) ++{ ++ /* ++ * We catch a nasty issue where spin_unlock_irqrestore() on a ++ * regular kernel spinlock is about to re-enable hw interrupts ++ * in a section entered with hw irqs off. This is clearly the ++ * sign of a massive breakage coming. Usual suspect is a ++ * regular spinlock which was overlooked, used within a ++ * section which must run with hw irqs disabled. ++ */ ++ WARN_ON_ONCE(!raw_irqs_disabled_flags(flags) && irqs_disabled_hw()); ++} ++EXPORT_SYMBOL(__ipipe_spin_unlock_debug); ++ +#endif /* CONFIG_IPIPE_DEBUG_INTERNAL && CONFIG_SMP */ + + @@ -9225,10 +9227,10 @@ index 0000000..17479dc +EXPORT_SYMBOL(ipipe_get_sysinfo); diff --git a/kernel/ipipe/tracer.c b/kernel/ipipe/tracer.c new file mode 100644 -index 0000000..001a83e +index 0000000..f013ef4 --- /dev/null +++ b/kernel/ipipe/tracer.c -@@ -0,0 +1,1441 @@ +@@ -0,0 +1,1442 @@ +/* -*- linux-c -*- + * kernel/ipipe/tracer.c + * @@ -10635,6 +10637,7 @@ index 0000000..001a83e +#ifdef CONFIG_IPIPE_TRACE_ENABLE + ipipe_trace_enable = 1; +#ifdef CONFIG_IPIPE_TRACE_MCOUNT ++ ftrace_enabled = 1; + register_ftrace_function(&ipipe_trace_ops); +#endif /* CONFIG_IPIPE_TRACE_MCOUNT */ +#endif /* CONFIG_IPIPE_TRACE_ENABLE */ _______________________________________________ Xenomai-git mailing list [email protected] https://mail.gna.org/listinfo/xenomai-git
