Module: xenomai-3
Branch: wip/dovetail
Commit: f8f9b5200dc8323ac4d6157deb2573368e2aab33
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=f8f9b5200dc8323ac4d6157deb2573368e2aab33

Author: Philippe Gerum <r...@xenomai.org>
Date:   Tue Dec 29 10:15:25 2015 +0100

cobalt/kernel: WIP: convert to dovetail interface

This is work in progress; more changes are expected as the IRQ
pipeline and dovetail interfaces evolve.
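
The heart of this conversion is the IRQ handler convention: I-pipe
handlers returned void and took an opaque cookie, while Dovetail reuses
the regular Linux signature. A minimal before/after sketch, distilled
from the hunks below (the handler name and body are placeholders, not
code from this patch):

    /* I-pipe (before): void handler, opaque cookie, explicit IRQ end. */
    static void my_handler(unsigned int irq, void *cookie)
    {
            /* ... service the interrupt ... */
            ipipe_end_irq(irq);
    }

    /* Dovetail (after): Linux-style handler, dev_id argument. */
    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            /* ... service the interrupt ... */
            return IRQ_HANDLED;
    }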

---

 include/cobalt/kernel/apc.h                        |    4 +-
 include/cobalt/kernel/assert.h                     |    4 +-
 include/cobalt/kernel/clock.h                      |    4 +-
 include/cobalt/kernel/intr.h                       |   56 +--
 include/cobalt/kernel/lock.h                       |    2 +-
 include/cobalt/kernel/rtdm/driver.h                |    4 +-
 include/cobalt/kernel/sched.h                      |    4 +-
 include/cobalt/kernel/timer.h                      |    2 +
 kernel/cobalt/Kconfig                              |    2 +-
 kernel/cobalt/apc.c                                |    8 +-
 .../cobalt/arch/arm/include/asm/xenomai/machine.h  |    2 -
 .../cobalt/arch/arm/include/asm/xenomai/thread.h   |   11 +-
 kernel/cobalt/arch/arm/machine.c                   |   56 ---
 kernel/cobalt/arch/arm/mayday.c                    |    1 -
 kernel/cobalt/arch/arm/syscall.c                   |   14 +-
 kernel/cobalt/arch/arm/thread.c                    |    5 +-
 .../arch/arm64/include/asm/xenomai/machine.h       |    2 -
 .../cobalt/arch/arm64/include/asm/xenomai/thread.h |    9 +-
 kernel/cobalt/arch/arm64/machine.c                 |   56 ---
 kernel/cobalt/arch/arm64/mayday.c                  |    1 -
 kernel/cobalt/arch/arm64/syscall.c                 |   13 +-
 kernel/cobalt/arch/arm64/thread.c                  |    3 +-
 .../arch/blackfin/include/asm/xenomai/machine.h    |    2 -
 .../arch/blackfin/include/asm/xenomai/thread.h     |    3 +-
 kernel/cobalt/arch/blackfin/machine.c              |    6 -
 kernel/cobalt/arch/blackfin/mayday.c               |    1 -
 kernel/cobalt/arch/blackfin/thread.c               |    3 +-
 .../arch/powerpc/include/asm/xenomai/machine.h     |    2 -
 .../arch/powerpc/include/asm/xenomai/thread.h      |    5 +-
 kernel/cobalt/arch/powerpc/machine.c               |    6 -
 kernel/cobalt/arch/powerpc/mayday.c                |    1 -
 kernel/cobalt/arch/powerpc/thread.c                |    1 -
 .../arch/x86/include/asm/xenomai/calibration.h     |    4 +-
 .../cobalt/arch/x86/include/asm/xenomai/machine.h  |    2 -
 .../arch/x86/include/asm/xenomai/syscall32.h       |    4 +-
 .../cobalt/arch/x86/include/asm/xenomai/thread.h   |    5 +-
 kernel/cobalt/arch/x86/machine.c                   |   36 --
 kernel/cobalt/arch/x86/mayday.c                    |    1 -
 kernel/cobalt/arch/x86/thread.c                    |    4 +-
 kernel/cobalt/clock.c                              |   24 +-
 .../cobalt/include/asm-generic/xenomai/machine.h   |   12 +-
 kernel/cobalt/init.c                               |  117 ++---
 kernel/cobalt/intr.c                               |  448 ++++++++------------
 kernel/cobalt/posix/corectl.c                      |    4 +-
 kernel/cobalt/posix/process.c                      |   75 ++--
 kernel/cobalt/posix/syscall.c                      |   28 +-
 kernel/cobalt/rtdm/drvlib.c                        |    6 +-
 kernel/cobalt/rtdm/fd.c                            |   12 +-
 kernel/cobalt/sched.c                              |   56 +--
 kernel/cobalt/thread.c                             |   37 +-
 kernel/cobalt/timer.c                              |  375 +++++++---------
 kernel/cobalt/trace/cobalt-core.h                  |    2 +-
 scripts/Kconfig.frag                               |    3 +-
 scripts/prepare-kernel.sh                          |    4 +-
 54 files changed, 599 insertions(+), 953 deletions(-)
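
Most call sites follow one conversion pattern: ipipe_alloc_virq() plus
ipipe_request_irq() on a domain become irq_create_direct_mapping() plus
request_irq() against an irq_stage, with IRQF_PIPELINED selecting
head-stage delivery. A condensed sketch of that pattern, lifted from
the init.c hunks below (error handling abbreviated):

    int virq, ret;

    /* Map a synthetic IRQ, then wire a Cobalt handler to it. */
    virq = irq_create_direct_mapping(ipipe_virq_domain);
    if (virq == 0)
            return -EAGAIN;

    ret = request_irq(virq, __xnsched_run_handler, IRQF_PIPELINED,
                      "Cobalt escalation", &cobalt_pipeline.stage);
    if (ret) {
            irq_dispose_mapping(virq);
            return ret;
    }

    /* Teardown mirrors the setup. */
    free_irq(virq, &cobalt_pipeline.stage);
    irq_dispose_mapping(virq);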

diff --git a/include/cobalt/kernel/apc.h b/include/cobalt/kernel/apc.h
index 7075ad0..1ed88b4 100644
--- a/include/cobalt/kernel/apc.h
+++ b/include/cobalt/kernel/apc.h
@@ -19,7 +19,7 @@
 #ifndef _COBALT_KERNEL_APC_H
 #define _COBALT_KERNEL_APC_H
 
-#include <linux/ipipe.h>
+#include <linux/irq_pipeline.h>
 #include <asm/xenomai/machine.h>
 
 /**
@@ -72,7 +72,7 @@ static inline void xnapc_schedule(int apc)
        ipipe_restore_head(flags);
 }
 
-void apc_dispatch(unsigned int virq, void *arg);
+irqreturn_t apc_dispatch(int virq, void *dev_id);
 
 /** @} */
 
diff --git a/include/cobalt/kernel/assert.h b/include/cobalt/kernel/assert.h
index 2d2d653..4434571 100644
--- a/include/cobalt/kernel/assert.h
+++ b/include/cobalt/kernel/assert.h
@@ -57,8 +57,8 @@
        do { } while (0)
 #endif
 
-#define primary_mode_only()    XENO_BUG_ON(CONTEXT, ipipe_root_p)
-#define secondary_mode_only()  XENO_BUG_ON(CONTEXT, !ipipe_root_p)
+#define primary_mode_only()    XENO_BUG_ON(CONTEXT, on_root_stage())
+#define secondary_mode_only()  XENO_BUG_ON(CONTEXT, !on_root_stage())
 #define interrupt_only()       XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p())
 #define realtime_cpu_only()    XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(ipipe_processor_id()))
 #define thread_only()          XENO_BUG_ON(CONTEXT, xnsched_interrupt_p())
diff --git a/include/cobalt/kernel/clock.h b/include/cobalt/kernel/clock.h
index c290b1d..302fa44 100644
--- a/include/cobalt/kernel/clock.h
+++ b/include/cobalt/kernel/clock.h
@@ -19,7 +19,7 @@
 #ifndef _COBALT_KERNEL_CLOCK_H
 #define _COBALT_KERNEL_CLOCK_H
 
-#include <linux/ipipe.h>
+#include <linux/irq_pipeline.h>
 #include <cobalt/kernel/list.h>
 #include <cobalt/kernel/vfile.h>
 #include <cobalt/uapi/kernel/types.h>
@@ -92,8 +92,6 @@ struct xnclock {
 
 extern struct xnclock nkclock;
 
-extern unsigned long nktimerlat;
-
 extern unsigned int nkclock_lock;
 
 int xnclock_register(struct xnclock *clock,
diff --git a/include/cobalt/kernel/intr.h b/include/cobalt/kernel/intr.h
index af20ca1..7d6fe24 100644
--- a/include/cobalt/kernel/intr.h
+++ b/include/cobalt/kernel/intr.h
@@ -20,6 +20,8 @@
 #define _COBALT_KERNEL_INTR_H
 
 #include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
 #include <cobalt/kernel/stat.h>
 
 /**
@@ -49,8 +51,6 @@ struct xnsched;
 
 typedef int (*xnisr_t)(struct xnintr *intr);
 
-typedef void (*xniack_t)(unsigned irq, void *arg);
-
 struct xnirqstat {
        /** Number of handled receipts since attachment. */
        xnstat_counter_t hits;
@@ -63,22 +63,20 @@ struct xnirqstat {
 struct xnintr {
 #ifdef CONFIG_XENO_OPT_SHIRQ
        /** Next object in the IRQ-sharing chain. */
-       struct xnintr *next;
+       struct xnintr *next_handler;
 #endif
        /** Number of consequent unhandled interrupts */
        unsigned int unhandled;
        /** Interrupt service routine. */
        xnisr_t isr;
-       /** User-defined cookie value. */
-       void *cookie;
+       /** Opaque device id. */
+       void *dev_id;
        /** runtime status */
        unsigned long status;
        /** Creation flags. */
        int flags;
        /** IRQ number. */
        unsigned int irq;
-       /** Interrupt acknowledge routine. */
-       xniack_t iack;
        /** Symbolic name. */
        const char *name;
        /** Descriptor maintenance lock. */
@@ -87,25 +85,36 @@ struct xnintr {
        /** Statistics. */
        struct xnirqstat *stats;
 #endif
+       struct list_head next;
 };
 
 struct xnintr_iterator {
-    int cpu;           /** Current CPU in iteration. */
-    unsigned long hits;        /** Current hit counter. */
-    xnticks_t exectime_period; /** Used CPU time in current accounting period. */
-    xnticks_t account_period; /** Length of accounting period. */
-    xnticks_t exectime_total;  /** Overall CPU time consumed. */
-    int list_rev;      /** System-wide xnintr list revision (internal use). */
-    struct xnintr *prev;       /** Previously visited xnintr object (internal use). */
+       /** Current CPU. */
+       int cpu;
+       /** Remaining CPUs to iterate over. */
+       cpumask_t cpus;
+       /** Current hit counter. */
+       unsigned long hits;
+       /** Used CPU time in current accounting period. */
+       xnticks_t exectime_period;
+       /** Length of accounting period. */
+       xnticks_t account_period;
+       /** Overall CPU time consumed. */
+       xnticks_t exectime_total;
+       /** System-wide xnintr list revision (internal use). */
+       int list_rev;
+       /** Currently visited xnintr object (internal use). */
+       struct xnintr *curr;
+       /** Previously visited xnintr object (internal use). */
+       struct xnintr *prev;
 };
 
 extern struct xnintr nktimer;
 
 int xnintr_mount(void);
 
-void xnintr_core_clock_handler(void);
-
-void xnintr_host_tick(struct xnsched *sched);
+irqreturn_t xnintr_core_clock_handler(int irq,
+                                     void *dev_id);
 
 void xnintr_init_proc(void);
 
@@ -115,15 +124,14 @@ void xnintr_cleanup_proc(void);
 
 int xnintr_init(struct xnintr *intr,
                const char *name,
-               unsigned irq,
+               int irq,
                xnisr_t isr,
-               xniack_t iack,
                int flags);
 
 void xnintr_destroy(struct xnintr *intr);
 
 int xnintr_attach(struct xnintr *intr,
-                 void *cookie);
+                 void *dev_id);
 
 void xnintr_detach(struct xnintr *intr);
 
@@ -136,12 +144,12 @@ void xnintr_affinity(struct xnintr *intr,
 
 int xnintr_query_init(struct xnintr_iterator *iterator);
 
-int xnintr_get_query_lock(void);
+int xnintr_query_next(struct xnintr_iterator *iterator,
+                     char *name_buf);
 
-void xnintr_put_query_lock(void);
+void xnintr_list_lock(void);
 
-int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
-                     char *name_buf);
+void xnintr_list_unlock(void);
 
 /** @} */
 
diff --git a/include/cobalt/kernel/lock.h b/include/cobalt/kernel/lock.h
index 36f8168..80cce74 100644
--- a/include/cobalt/kernel/lock.h
+++ b/include/cobalt/kernel/lock.h
@@ -20,7 +20,7 @@
 #ifndef _COBALT_KERNEL_LOCK_H
 #define _COBALT_KERNEL_LOCK_H
 
-#include <linux/ipipe.h>
+#include <linux/irq_pipeline.h>
 #include <linux/percpu.h>
 #include <cobalt/kernel/assert.h>
 
diff --git a/include/cobalt/kernel/rtdm/driver.h b/include/cobalt/kernel/rtdm/driver.h
index b0a37c4..a87b2ef 100644
--- a/include/cobalt/kernel/rtdm/driver.h
+++ b/include/cobalt/kernel/rtdm/driver.h
@@ -828,7 +828,7 @@ typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);
  *
  * @coretags{unrestricted}
  */
-#define rtdm_irq_get_arg(irq_handle, type)     ((type *)irq_handle->cookie)
+#define rtdm_irq_get_arg(irq_handle, type)     ((type *)irq_handle->dev_id)
 /** @} rtdm_irq */
 
 int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
@@ -1292,7 +1292,7 @@ static inline int rtdm_rt_capable(struct rtdm_fd *fd)
 
 static inline int rtdm_in_rt_context(void)
 {
-       return (ipipe_current_domain != ipipe_root_domain);
+       return current_irq_stage != &root_irq_stage;
 }
 
 #endif /* !DOXYGEN_CPP */
diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index d5d93c2..bc27cde 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -209,8 +209,6 @@ static inline void xnsched_set_self_resched(struct xnsched *sched)
        sched->status |= XNRESCHED;
 }
 
-#define xnsched_realtime_domain  cobalt_pipeline.domain
-
 /* Set resched flag for the given scheduler. */
 #ifdef CONFIG_SMP
 
@@ -256,7 +254,7 @@ static inline int xnsched_supported_cpu(int cpu)
 
 int ___xnsched_run(struct xnsched *sched);
 
-void __xnsched_run_handler(void);
+irqreturn_t __xnsched_run_handler(int irq, void *dev_id);
 
 static inline int __xnsched_run(struct xnsched *sched)
 {
diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h
index 00aa411..f14f007 100644
--- a/include/cobalt/kernel/timer.h
+++ b/include/cobalt/kernel/timer.h
@@ -529,6 +529,8 @@ static inline void xntimer_set_sched(struct xntimer *timer,
 char *xntimer_format_time(xnticks_t ns,
                          char *buf, size_t bufsz);
 
+void xntimer_host_tick(struct xnsched *sched);
+
 int xntimer_grab_hardware(void);
 
 void xntimer_release_hardware(void);
diff --git a/kernel/cobalt/Kconfig b/kernel/cobalt/Kconfig
index afa62d2..7a9f423 100644
--- a/kernel/cobalt/Kconfig
+++ b/kernel/cobalt/Kconfig
@@ -207,7 +207,7 @@ config XENO_OPT_TIMER_HEAP_CAPACITY
        Set the maximum number of timers the binary heap can index.
 
 config XENO_OPT_HOSTRT
-       depends on IPIPE_HAVE_HOSTRT
+       depends on DOVETAIL_HAVE_HOSTRT
        def_bool y
 
 config XENO_OPT_PIPE
diff --git a/kernel/cobalt/apc.c b/kernel/cobalt/apc.c
index 3383a26..930cbfc 100644
--- a/kernel/cobalt/apc.c
+++ b/kernel/cobalt/apc.c
@@ -17,7 +17,7 @@
  * 02111-1307, USA.
  */
 #include <linux/spinlock.h>
-#include <linux/ipipe.h>
+#include <cobalt/kernel/assert.h>
 #include <cobalt/kernel/apc.h>
 
 /**
@@ -40,7 +40,7 @@
  */
 static IPIPE_DEFINE_SPINLOCK(apc_lock);
 
-void apc_dispatch(unsigned int virq, void *arg)
+irqreturn_t apc_dispatch(int virq, void *dev_id)
 {
        void (*handler)(void *), *cookie;
        unsigned long *p;
@@ -71,6 +71,8 @@ void apc_dispatch(unsigned int virq, void *arg)
        }
 
        spin_unlock(&apc_lock);
+
+       return IRQ_HANDLED;
 }
 
 /**
@@ -151,7 +153,7 @@ EXPORT_SYMBOL_GPL(xnapc_alloc);
  */
 void xnapc_free(int apc)
 {
-       BUG_ON(apc < 0 || apc >= BITS_PER_LONG);
+       XENO_BUG_ON(COBALT, apc < 0 || apc >= BITS_PER_LONG);
        clear_bit(apc, &cobalt_pipeline.apc_map);
        smp_mb__after_atomic();
 }
diff --git a/kernel/cobalt/arch/arm/include/asm/xenomai/machine.h b/kernel/cobalt/arch/arm/include/asm/xenomai/machine.h
index d6e965f..ffc1bb7 100644
--- a/kernel/cobalt/arch/arm/include/asm/xenomai/machine.h
+++ b/kernel/cobalt/arch/arm/include/asm/xenomai/machine.h
@@ -26,8 +26,6 @@
 #include <linux/version.h>
 #include <asm/byteorder.h>
 
-#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq
-
 #include <asm/barrier.h>
 #include <asm/compiler.h>
 #include <asm/cmpxchg.h>
diff --git a/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h b/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h
index 3e332b2..4849020 100644
--- a/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h
@@ -67,7 +67,7 @@ static inline void xnarch_enter_root(struct xnthread *root) { }
 
 static inline int xnarch_escalate(void)
 {
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                ipipe_raise_irq(cobalt_pipeline.escalate_virq);
                return 1;
        }
@@ -85,7 +85,7 @@ static inline void xnarch_init_root_tcb(struct xnthread *thread)
 
 void xnarch_init_shadow_tcb(struct xnthread *thread);
 
-int xnarch_fault_fpu_p(struct ipipe_trap_data *d);
+int xnarch_fault_fpu_p(struct dovetail_trap_data *d);
 
 void xnarch_leave_root(struct xnthread *root);
 
@@ -94,7 +94,7 @@ void xnarch_save_fpu(struct xnthread *thread);
 void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread);
 
 int xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d);
+                       struct xnthread *to, struct dovetail_trap_data *d);
 
 #else /* !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */
 
@@ -106,7 +106,7 @@ static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
  * of whether real-time threads actually use FPU, so we simply ignore
  * these faults.
  */
-static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+static inline int xnarch_fault_fpu_p(struct dovetail_trap_data *d)
 {
        return 0;
 }
@@ -118,7 +118,8 @@ static inline void xnarch_save_fpu(struct xnthread *thread) { }
 static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { }
 
 static inline int xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d)
+                                       struct xnthread *to,
+                                         struct dovetail_trap_data *d)
 {
        return 0;
 }
diff --git a/kernel/cobalt/arch/arm/machine.c b/kernel/cobalt/arch/arm/machine.c
index f48d4a8..90d0818 100644
--- a/kernel/cobalt/arch/arm/machine.c
+++ b/kernel/cobalt/arch/arm/machine.c
@@ -19,7 +19,6 @@
  */
 
 #include <linux/mm.h>
-#include <linux/ipipe_tickdev.h>
 #include <cobalt/kernel/arith.h>
 #include <asm/cacheflush.h>
 #include <asm/xenomai/machine.h>
@@ -39,60 +38,6 @@ static void mach_arm_prefault(struct vm_area_struct *vma)
        }
 }
 
-static unsigned long mach_arm_calibrate(void)
-{
-       unsigned long long start, end, sum = 0, sum_sq = 0;
-       volatile unsigned const_delay = 0xffffffff;
-       unsigned long result, flags, tsc_lat;
-       unsigned int delay = const_delay;
-       long long diff;
-       int i, j;
-
-       flags = ipipe_critical_enter(NULL);
-
-       /*
-        * Hw interrupts off, other CPUs quiesced, no migration
-        * possible. We can now fiddle with the timer chip (per-cpu
-        * local or global, ipipe_timer_set() will handle this
-        * transparently).
-        */
-       ipipe_read_tsc(start);
-       barrier();
-       ipipe_read_tsc(end);
-       tsc_lat = end - start;
-       barrier();
-
-       for (i = 0; i < CALIBRATION_LOOPS; i++) {
-               flush_cache_all();
-               for (j = 0; j < CALIBRATION_LOOPS; j++) {
-                       ipipe_read_tsc(start);
-                       barrier();
-                       ipipe_timer_set(delay);
-                       barrier();
-                       ipipe_read_tsc(end);
-                       diff = end - start - tsc_lat;
-                       if (diff > 0) {
-                               sum += diff;
-                               sum_sq += diff * diff;
-                       }
-               }
-       }
-
-       ipipe_critical_exit(flags);
-
-       /* Use average + standard deviation as timer programming latency. */
-       do_div(sum, CALIBRATION_LOOPS * CALIBRATION_LOOPS);
-       do_div(sum_sq, CALIBRATION_LOOPS * CALIBRATION_LOOPS);
-       result = sum + int_sqrt(sum_sq - sum * sum) + 1;
-       /*
-        * Reset the max trace, since it contains the calibration time
-        * now.
-        */
-       ipipe_trace_max_reset();
-
-       return result;
-}
-
 static const char *const fault_labels[] = {
        [IPIPE_TRAP_ACCESS] = "Data or instruction access",
        [IPIPE_TRAP_SECTION] = "Section fault",
@@ -113,7 +58,6 @@ struct cobalt_machine cobalt_machine = {
        .init = NULL,
        .late_init = NULL,
        .cleanup = NULL,
-       .calibrate = mach_arm_calibrate,
        .prefault = mach_arm_prefault,
        .fault_labels = fault_labels,
 };
diff --git a/kernel/cobalt/arch/arm/mayday.c b/kernel/cobalt/arch/arm/mayday.c
index 20e4559..ad9c0e9 100644
--- a/kernel/cobalt/arch/arm/mayday.c
+++ b/kernel/cobalt/arch/arm/mayday.c
@@ -17,7 +17,6 @@
  * 02111-1307, USA.
  */
 #include <linux/types.h>
-#include <linux/ipipe.h>
 #include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
diff --git a/kernel/cobalt/arch/arm/syscall.c b/kernel/cobalt/arch/arm/syscall.c
index ee78243..5c3e51d 100644
--- a/kernel/cobalt/arch/arm/syscall.c
+++ b/kernel/cobalt/arch/arm/syscall.c
@@ -17,8 +17,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  * 02111-1307, USA.
  */
-
-#include <linux/ipipe.h>
+#include <asm/xenomai/machine.h>
 #include <asm/xenomai/syscall.h>
 #include <asm/xenomai/uapi/tsc.h>
 
@@ -26,18 +25,15 @@ int xnarch_local_syscall(unsigned long a1, unsigned long a2,
                         unsigned long a3, unsigned long a4,
                         unsigned long a5)
 {
-       struct ipipe_sysinfo ipipe_info;
-       struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc;
+       struct __ipipe_tscinfo *p;
        struct __xn_tscinfo info;
        int ret;
 
        if (a1 != XENOMAI_SYSARCH_TSCINFO)
                return -EINVAL;
-
-       ret = ipipe_get_sysinfo(&ipipe_info);
-       if (ret)
-               return ret;
-
+
+       p = &cobalt_pipeline.clock_data.arch.tsc;
+
        switch (p->type) {
        case IPIPE_TSC_TYPE_DECREMENTER:
                info.counter = p->u.dec.counter;
diff --git a/kernel/cobalt/arch/arm/thread.c b/kernel/cobalt/arch/arm/thread.c
index 7e2b7cf..7b9250a 100644
--- a/kernel/cobalt/arch/arm/thread.c
+++ b/kernel/cobalt/arch/arm/thread.c
@@ -21,7 +21,6 @@
  */
 
 #include <linux/sched.h>
-#include <linux/ipipe.h>
 #include <linux/mm.h>
 #include <linux/jump_label.h>
 #include <asm/mmu_context.h>
@@ -104,7 +103,7 @@ static inline union vfp_state *get_fpu_owner(void)
                _fpexc;                                                 \
        })
 
-int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+int xnarch_fault_fpu_p(struct dovetail_trap_data *d)
 {
        /* This function does the same thing to decode the faulting instruct as
           "call_fpe" in arch/arm/entry-armv.S */
@@ -268,7 +267,7 @@ void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
 }
 
 int xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d)
+                       struct xnthread *to, struct dovetail_trap_data *d)
 {
        if (xnthread_test_state(to, XNFPU))
                /* FPU is already enabled, probably an exception */
diff --git a/kernel/cobalt/arch/arm64/include/asm/xenomai/machine.h b/kernel/cobalt/arch/arm64/include/asm/xenomai/machine.h
index 7444cc8..7f8bfe4 100644
--- a/kernel/cobalt/arch/arm64/include/asm/xenomai/machine.h
+++ b/kernel/cobalt/arch/arm64/include/asm/xenomai/machine.h
@@ -26,8 +26,6 @@
 #include <linux/version.h>
 #include <asm/byteorder.h>
 
-#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq
-
 #include <asm/barrier.h>
 #include <asm/compiler.h>
 #include <asm/cmpxchg.h>
diff --git a/kernel/cobalt/arch/arm64/include/asm/xenomai/thread.h b/kernel/cobalt/arch/arm64/include/asm/xenomai/thread.h
index 4b247ac..967da75 100644
--- a/kernel/cobalt/arch/arm64/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/arm64/include/asm/xenomai/thread.h
@@ -68,7 +68,7 @@ static inline void xnarch_init_root_tcb(struct xnthread *thread)
 
 void xnarch_init_shadow_tcb(struct xnthread *thread);
 
-static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+static inline int xnarch_fault_fpu_p(struct dovetail_trap_data *d)
 {
        return xnarch_fault_trap(d) == IPIPE_TRAP_FPU_ACC;
 }
@@ -81,7 +81,7 @@ void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread);
 
 static inline int
 xnarch_handle_fpu_fault(struct xnthread *from,
-                       struct xnthread *to, struct ipipe_trap_data *d)
+                       struct xnthread *to, struct dovetail_trap_data *d)
 {
        return 0;
 }
@@ -96,7 +96,7 @@ static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
  * of whether real-time threads actually use FPU, so we simply ignore
  * these faults.
  */
-static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+static inline int xnarch_fault_fpu_p(struct dovetail_trap_data *d)
 {
        return 0;
 }
@@ -108,7 +108,8 @@ static inline void xnarch_save_fpu(struct xnthread *thread) { }
 static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { }
 
 static inline int xnarch_handle_fpu_fault(struct xnthread *from, 
-                                       struct xnthread *to, struct ipipe_trap_data *d)
+                                       struct xnthread *to,
+                                         struct dovetail_trap_data *d)
 {
        return 0;
 }
diff --git a/kernel/cobalt/arch/arm64/machine.c b/kernel/cobalt/arch/arm64/machine.c
index ddf65a8..a644d7f 100644
--- a/kernel/cobalt/arch/arm64/machine.c
+++ b/kernel/cobalt/arch/arm64/machine.c
@@ -19,7 +19,6 @@
  */
 
 #include <linux/mm.h>
-#include <linux/ipipe_tickdev.h>
 #include <cobalt/kernel/arith.h>
 #include <asm/cacheflush.h>
 #include <asm/xenomai/machine.h>
@@ -39,60 +38,6 @@ static void mach_arm_prefault(struct vm_area_struct *vma)
        }
 }
 
-static unsigned long mach_arm_calibrate(void)
-{
-       unsigned long long start, end, sum = 0, sum_sq = 0;
-       volatile unsigned const_delay = 0xffffffff;
-       unsigned long result, flags, tsc_lat;
-       unsigned int delay = const_delay;
-       long long diff;
-       int i, j;
-
-       flags = ipipe_critical_enter(NULL);
-
-       /*
-        * Hw interrupts off, other CPUs quiesced, no migration
-        * possible. We can now fiddle with the timer chip (per-cpu
-        * local or global, ipipe_timer_set() will handle this
-        * transparently).
-        */
-       ipipe_read_tsc(start);
-       barrier();
-       ipipe_read_tsc(end);
-       tsc_lat = end - start;
-       barrier();
-
-       for (i = 0; i < CALIBRATION_LOOPS; i++) {
-               flush_cache_all();
-               for (j = 0; j < CALIBRATION_LOOPS; j++) {
-                       ipipe_read_tsc(start);
-                       barrier();
-                       ipipe_timer_set(delay);
-                       barrier();
-                       ipipe_read_tsc(end);
-                       diff = end - start - tsc_lat;
-                       if (diff > 0) {
-                               sum += diff;
-                               sum_sq += diff * diff;
-                       }
-               }
-       }
-
-       ipipe_critical_exit(flags);
-
-       /* Use average + standard deviation as timer programming latency. */
-       do_div(sum, CALIBRATION_LOOPS * CALIBRATION_LOOPS);
-       do_div(sum_sq, CALIBRATION_LOOPS * CALIBRATION_LOOPS);
-       result = sum + int_sqrt(sum_sq - sum * sum) + 1;
-       /*
-        * Reset the max trace, since it contains the calibration time
-        * now.
-        */
-       ipipe_trace_max_reset();
-
-       return result;
-}
-
 static const char *const fault_labels[] = {
        [IPIPE_TRAP_ACCESS] = "Data or instruction access",
        [IPIPE_TRAP_SECTION] = "Section fault",
@@ -113,7 +58,6 @@ struct cobalt_machine cobalt_machine = {
        .init = NULL,
        .late_init = NULL,
        .cleanup = NULL,
-       .calibrate = mach_arm_calibrate,
        .prefault = mach_arm_prefault,
        .fault_labels = fault_labels,
 };
diff --git a/kernel/cobalt/arch/arm64/mayday.c b/kernel/cobalt/arch/arm64/mayday.c
index ca1151c..7a67c35 100644
--- a/kernel/cobalt/arch/arm64/mayday.c
+++ b/kernel/cobalt/arch/arm64/mayday.c
@@ -17,7 +17,6 @@
  * 02111-1307, USA.
  */
 #include <linux/types.h>
-#include <linux/ipipe.h>
 #include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
diff --git a/kernel/cobalt/arch/arm64/syscall.c b/kernel/cobalt/arch/arm64/syscall.c
index ee78243..313a28b 100644
--- a/kernel/cobalt/arch/arm64/syscall.c
+++ b/kernel/cobalt/arch/arm64/syscall.c
@@ -18,7 +18,7 @@
  * 02111-1307, USA.
  */
 
-#include <linux/ipipe.h>
+#include <linux/irq_pipeline.h>
 #include <asm/xenomai/syscall.h>
 #include <asm/xenomai/uapi/tsc.h>
 
@@ -26,18 +26,15 @@ int xnarch_local_syscall(unsigned long a1, unsigned long a2,
                         unsigned long a3, unsigned long a4,
                         unsigned long a5)
 {
-       struct ipipe_sysinfo ipipe_info;
-       struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc;
+       struct __ipipe_tscinfo *p;
        struct __xn_tscinfo info;
        int ret;
 
        if (a1 != XENOMAI_SYSARCH_TSCINFO)
                return -EINVAL;
-
-       ret = ipipe_get_sysinfo(&ipipe_info);
-       if (ret)
-               return ret;
-
+
+       p = &cobalt_pipeline.clock_data.arch.tsc;
+
        switch (p->type) {
        case IPIPE_TSC_TYPE_DECREMENTER:
                info.counter = p->u.dec.counter;
diff --git a/kernel/cobalt/arch/arm64/thread.c b/kernel/cobalt/arch/arm64/thread.c
index b987e09..cd093b5 100644
--- a/kernel/cobalt/arch/arm64/thread.c
+++ b/kernel/cobalt/arch/arm64/thread.c
@@ -25,7 +25,6 @@
  */
 
 #include <linux/sched.h>
-#include <linux/ipipe.h>
 #include <linux/mm.h>
 #include <linux/jump_label.h>
 #include <asm/mmu_context.h>
@@ -132,7 +131,7 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
 
 int xnarch_escalate(void)
 {
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                ipipe_raise_irq(cobalt_pipeline.escalate_virq);
                return 1;
        }
diff --git a/kernel/cobalt/arch/blackfin/include/asm/xenomai/machine.h b/kernel/cobalt/arch/blackfin/include/asm/xenomai/machine.h
index 8e3c03a..d918550 100644
--- a/kernel/cobalt/arch/blackfin/include/asm/xenomai/machine.h
+++ b/kernel/cobalt/arch/blackfin/include/asm/xenomai/machine.h
@@ -24,8 +24,6 @@
 #include <linux/linkage.h>
 #include <linux/bitops.h>
 
-#define XNARCH_HOST_TICK_IRQ   __ipipe_hrtimer_irq
-
 static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
 {
        return ffs(ul) - 1;
diff --git a/kernel/cobalt/arch/blackfin/include/asm/xenomai/thread.h b/kernel/cobalt/arch/blackfin/include/asm/xenomai/thread.h
index e270fa8..8f9bb07 100644
--- a/kernel/cobalt/arch/blackfin/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/blackfin/include/asm/xenomai/thread.h
@@ -62,7 +62,8 @@ xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread)
 
 static inline int 
 xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d)
+                       struct xnthread *to,
+                       struct dovetail_trap_data *d)
 {
        return 0;
 }
diff --git a/kernel/cobalt/arch/blackfin/machine.c b/kernel/cobalt/arch/blackfin/machine.c
index b0a70fe..49bb6ca 100644
--- a/kernel/cobalt/arch/blackfin/machine.c
+++ b/kernel/cobalt/arch/blackfin/machine.c
@@ -21,11 +21,6 @@
 #include <cobalt/kernel/sched.h>
 #include <asm/xenomai/machine.h>
 
-static unsigned long mach_blackfin_calibrate(void)
-{
-       return 20;      /* 20 clock cycles */
-}
-
 static void schedule_deferred(void)
 {
        xnsched_run();
@@ -84,7 +79,6 @@ struct cobalt_machine cobalt_machine = {
        .init = NULL,
        .late_init = mach_blackfin_late_init,
        .cleanup = mach_blackfin_cleanup,
-       .calibrate = mach_blackfin_calibrate,
        .prefault = NULL,
        .fault_labels = fault_labels,
 };
diff --git a/kernel/cobalt/arch/blackfin/mayday.c b/kernel/cobalt/arch/blackfin/mayday.c
index bd76fee..5c5fa1c 100644
--- a/kernel/cobalt/arch/blackfin/mayday.c
+++ b/kernel/cobalt/arch/blackfin/mayday.c
@@ -17,7 +17,6 @@
  * 02111-1307, USA.
  */
 #include <linux/types.h>
-#include <linux/ipipe.h>
 #include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
diff --git a/kernel/cobalt/arch/blackfin/thread.c b/kernel/cobalt/arch/blackfin/thread.c
index d4cda91..9e6d194 100644
--- a/kernel/cobalt/arch/blackfin/thread.c
+++ b/kernel/cobalt/arch/blackfin/thread.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/sched.h>
-#include <linux/ipipe.h>
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
 #include <cobalt/kernel/thread.h>
@@ -94,7 +93,7 @@ int xnarch_escalate(void)
                return 1;
        }
 
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                ipipe_raise_irq(cobalt_pipeline.escalate_virq);
                __ipipe_unlock_root();
                return 1;
diff --git a/kernel/cobalt/arch/powerpc/include/asm/xenomai/machine.h b/kernel/cobalt/arch/powerpc/include/asm/xenomai/machine.h
index 6737ddc..72910ae 100644
--- a/kernel/cobalt/arch/powerpc/include/asm/xenomai/machine.h
+++ b/kernel/cobalt/arch/powerpc/include/asm/xenomai/machine.h
@@ -25,8 +25,6 @@
 
 #include <linux/compiler.h>
 
-#define XNARCH_HOST_TICK_IRQ   __ipipe_hrtimer_irq
-
 static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
 {
 #ifdef CONFIG_PPC64
diff --git a/kernel/cobalt/arch/powerpc/include/asm/xenomai/thread.h b/kernel/cobalt/arch/powerpc/include/asm/xenomai/thread.h
index 8d6be84..661889b 100644
--- a/kernel/cobalt/arch/powerpc/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/powerpc/include/asm/xenomai/thread.h
@@ -76,14 +76,15 @@ static inline void xnarch_leave_root(struct xnthread *root) { }
 
 static inline int 
 xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d)
+                       struct xnthread *to,
+                       struct dovetail_trap_data *d)
 {
        return 0;
 }
 
 static inline int xnarch_escalate(void)
 {
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                ipipe_raise_irq(cobalt_pipeline.escalate_virq);
                return 1;
        }
diff --git a/kernel/cobalt/arch/powerpc/machine.c b/kernel/cobalt/arch/powerpc/machine.c
index 2e1643a..14e2c4f 100644
--- a/kernel/cobalt/arch/powerpc/machine.c
+++ b/kernel/cobalt/arch/powerpc/machine.c
@@ -24,11 +24,6 @@
 #include <asm/cputable.h>
 #include <asm/xenomai/machine.h>
 
-static unsigned long mach_powerpc_calibrate(void)
-{
-       return 5;       /* 5 clock cycles. */
-}
-
 static int mach_powerpc_init(void)
 {
 #ifdef CONFIG_ALTIVEC
@@ -67,7 +62,6 @@ struct cobalt_machine cobalt_machine = {
        .init = mach_powerpc_init,
        .late_init = NULL,
        .cleanup = NULL,
-       .calibrate = mach_powerpc_calibrate,
        .prefault = NULL,
        .fault_labels = fault_labels,
 };
diff --git a/kernel/cobalt/arch/powerpc/mayday.c b/kernel/cobalt/arch/powerpc/mayday.c
index 254f9ab..7051ba7 100644
--- a/kernel/cobalt/arch/powerpc/mayday.c
+++ b/kernel/cobalt/arch/powerpc/mayday.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/ipipe.h>
 #include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
diff --git a/kernel/cobalt/arch/powerpc/thread.c b/kernel/cobalt/arch/powerpc/thread.c
index 7f6ff87..44441a4 100644
--- a/kernel/cobalt/arch/powerpc/thread.c
+++ b/kernel/cobalt/arch/powerpc/thread.c
@@ -21,7 +21,6 @@
  */
 
 #include <linux/sched.h>
-#include <linux/ipipe.h>
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
 #include <cobalt/kernel/thread.h>
diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/calibration.h b/kernel/cobalt/arch/x86/include/asm/xenomai/calibration.h
index a9190e1..0971055 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/calibration.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/calibration.h
@@ -34,7 +34,7 @@ static inline void xnarch_get_latencies(struct xnclock_gravity *p)
        sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
 #else /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */
 
-       if (strcmp(ipipe_timer_name(), "lapic") == 0) {
+       if (strcmp(tick_device_name(), "lapic") == 0) {
 #ifdef CONFIG_SMP
                if (num_online_cpus() > 1)
                        sched_latency = 3350;
@@ -43,7 +43,7 @@ static inline void xnarch_get_latencies(struct xnclock_gravity *p)
 #else /* !SMP */
                sched_latency = 1000;
 #endif /* !SMP */
-       } else if (strcmp(ipipe_timer_name(), "pit")) { /* HPET */
+       } else if (strcmp(tick_device_name(), "pit")) { /* HPET */
 #ifdef CONFIG_SMP
                if (num_online_cpus() > 1)
                        sched_latency = 3350;
diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/machine.h b/kernel/cobalt/arch/x86/include/asm/xenomai/machine.h
index bb5ccc1..c34ce7f 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/machine.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/machine.h
@@ -31,8 +31,6 @@ static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
        return ul;
 }
 
-#define XNARCH_HOST_TICK_IRQ   __ipipe_hrtimer_irq
-
 long strncpy_from_user_nocheck(char *dst,
                               const char __user *src,
                               long count);
diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h b/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h
index 4fc1be8..3885cc7 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h
@@ -98,13 +98,13 @@
 #define __COBALT_SYSNR32emu(__reg)                                     \
        ({                                                              \
                long __nr = __reg;                                      \
-               if (current_thread_info()->status & TS_COMPAT)          \
+               if (test_thread_local_flags(TS_COMPAT))                 \
                        __nr += __COBALT_IA32_BASE;                     \
                __nr;                                                   \
        })
 
 #define __COBALT_COMPAT32emu(__reg)                                    \
-       ((current_thread_info()->status & TS_COMPAT) ? __COBALT_COMPAT_BIT : 0)
+       (test_thread_local_flags(TS_COMPAT) ? __COBALT_COMPAT_BIT : 0)
 
 #if __NR_COBALT_SYSCALLS > __COBALT_IA32_BASE
 #error "__NR_COBALT_SYSCALLS > __COBALT_IA32_BASE"
diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h b/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h
index 3862e33..6f4393d 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h
@@ -64,7 +64,8 @@ void xnarch_save_fpu(struct xnthread *thread);
 void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to);
 
 int xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d);
+                       struct xnthread *to,
+                           struct dovetail_trap_data *d);
 
 void xnarch_leave_root(struct xnthread *root);
 
@@ -78,7 +79,7 @@ static inline void xnarch_enter_root(struct xnthread *root) { }
 
 static inline int xnarch_escalate(void)
 {
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                ipipe_raise_irq(cobalt_pipeline.escalate_virq);
                return 1;
        }
diff --git a/kernel/cobalt/arch/x86/machine.c b/kernel/cobalt/arch/x86/machine.c
index 14443e0..611af62 100644
--- a/kernel/cobalt/arch/x86/machine.c
+++ b/kernel/cobalt/arch/x86/machine.c
@@ -18,7 +18,6 @@
  *   02111-1307, USA.
  */
 
-#include <linux/ipipe_tickdev.h>
 #include <cobalt/kernel/arith.h>
 #include <asm/xenomai/machine.h>
 #include <asm/xenomai/smi.h>
@@ -112,40 +111,6 @@ long strncpy_from_user_nocheck(char *dst, const char __user *src, long count)
 }
 EXPORT_SYMBOL_GPL(strncpy_from_user_nocheck);
 
-static unsigned long mach_x86_calibrate(void)
-{
-       unsigned long delay = (cobalt_pipeline.timer_freq + HZ / 2) / HZ;
-       unsigned long long t0, t1, dt;
-       unsigned long flags;
-       int i;
-
-       flags = ipipe_critical_enter(NULL);
-
-       ipipe_timer_set(delay);
-
-       ipipe_read_tsc(t0);
-
-       for (i = 0; i < 100; i++)
-               ipipe_timer_set(delay);
-
-       ipipe_read_tsc(t1);
-       dt = t1 - t0;
-
-       ipipe_critical_exit(flags);
-
-       /*
-        * Reset the max trace, since it contains the calibration time
-        * now.
-        */
-       ipipe_trace_max_reset();
-
-       /*
-        * Compute average with a 5% margin to avoid negative
-        * latencies with PIT.
-        */
-       return xnarch_ulldiv(dt, i + 5, NULL);
-}
-
 static int mach_x86_init(void)
 {
        mach_x86_c1e_disable();
@@ -189,7 +154,6 @@ struct cobalt_machine cobalt_machine = {
        .init = mach_x86_init,
        .late_init = NULL,
        .cleanup = mach_x86_cleanup,
-       .calibrate = mach_x86_calibrate,
        .prefault = NULL,
        .fault_labels = fault_labels,
 };
diff --git a/kernel/cobalt/arch/x86/mayday.c b/kernel/cobalt/arch/x86/mayday.c
index 15f07bd..bcc535f 100644
--- a/kernel/cobalt/arch/x86/mayday.c
+++ b/kernel/cobalt/arch/x86/mayday.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/ipipe.h>
 #include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
diff --git a/kernel/cobalt/arch/x86/thread.c b/kernel/cobalt/arch/x86/thread.c
index d367d13..aff5fe0 100644
--- a/kernel/cobalt/arch/x86/thread.c
+++ b/kernel/cobalt/arch/x86/thread.c
@@ -19,7 +19,7 @@
  */
 
 #include <linux/sched.h>
-#include <linux/ipipe.h>
+#include <linux/dovetail.h>
 #include <linux/mm.h>
 #include <cobalt/kernel/thread.h>
 #include <asm/mmu_context.h>
@@ -277,7 +277,7 @@ static inline void __do_restore_i387(x86_fpustate *fpup)
 }
 
 int xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d)
+                       struct xnthread *to, struct dovetail_trap_data *d)
 {
        struct xnarchtcb *tcb = xnthread_archtcb(to);
        struct task_struct *p = tcb->core.host_task;
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index a696f0e..3f9c50b 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -16,9 +16,12 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  * 02111-1307, USA.
  */
+#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/errno.h>
-#include <linux/ipipe_tickdev.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/tick.h>
 #include <cobalt/kernel/sched.h>
 #include <cobalt/kernel/timer.h>
 #include <cobalt/kernel/clock.h>
@@ -33,8 +36,6 @@
  *
  * @{
  */
-unsigned long nktimerlat;
-
 unsigned int nkclock_lock;
 
 static unsigned long long clockfreq;
@@ -178,12 +179,12 @@ void xnclock_core_local_shot(struct xnsched *sched)
        delay = xntimerh_date(&timer->aplink) - xnclock_core_read_raw();
        if (delay < 0)
                delay = 0;
-       else if (delay > ULONG_MAX)
-               delay = ULONG_MAX;
+       else if (delay > UINT_MAX)
+               delay = UINT_MAX;
 
-       xntrace_tick((unsigned)delay);
+       xntrace_tick((unsigned int)delay);
 
-       ipipe_timer_set(delay);
+       tick_next_head_event(delay);
 }
 
 #ifdef CONFIG_SMP
@@ -499,10 +500,9 @@ void print_core_clock_status(struct xnclock *clock,
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
 
        xnvfile_printf(it, "%7s: timer=%s, clock=%s\n",
-                      "devices", ipipe_timer_name(), ipipe_clock_name());
+                      "devices", tick_device_name(),
+                      cobalt_pipeline.clock_data.hrclock_name);
        xnvfile_printf(it, "%7s: %s%s\n", "status", tm_status, wd_status);
-       xnvfile_printf(it, "%7s: %Lu\n", "setup",
-                      xnclock_ticks_to_ns(&nkclock, nktimerlat));
 }
 
 static int clock_show(struct xnvfile_regular_iterator *it, void *data)
@@ -832,11 +832,8 @@ static void reset_core_clock_gravity(struct xnclock *clock)
        struct xnclock_gravity gravity;
 
        xnarch_get_latencies(&gravity);
-       gravity.user += nktimerlat;
        if (gravity.kernel == 0)
                gravity.kernel = gravity.user;
-       if (gravity.irq == 0)
-               gravity.irq = nktimerlat;
        set_core_clock_gravity(clock, &gravity);
 }
 
@@ -869,7 +866,6 @@ int __init xnclock_init(unsigned long long freq)
        xnarch_init_u32frac(&bln_frac, 1, 1000000000);
 #endif
 #endif
-       nktimerlat = xnarch_timer_calibrate();
        xnclock_reset_gravity(&nkclock);
        xnclock_register(&nkclock, &xnsched_realtime_cpus);
 
diff --git a/kernel/cobalt/include/asm-generic/xenomai/machine.h b/kernel/cobalt/include/asm-generic/xenomai/machine.h
index 25764f9..366dbc1 100644
--- a/kernel/cobalt/include/asm-generic/xenomai/machine.h
+++ b/kernel/cobalt/include/asm-generic/xenomai/machine.h
@@ -19,8 +19,8 @@
 #ifndef _COBALT_ASM_GENERIC_MACHINE_H
 #define _COBALT_ASM_GENERIC_MACHINE_H
 
-#include <linux/ipipe.h>
 #include <linux/percpu.h>
+#include <linux/dovetail.h>
 #include <asm/byteorder.h>
 #include <asm/xenomai/wrappers.h>
 
@@ -32,7 +32,6 @@ struct cobalt_machine {
        int (*late_init)(void);
        void (*cleanup)(void);
        void (*prefault)(struct vm_area_struct *vma);
-       unsigned long (*calibrate)(void);
        const char *const *fault_labels;
 };
 
@@ -47,8 +46,8 @@ struct cobalt_machine_cpudata {
 DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
 
 struct cobalt_pipeline {
-       struct ipipe_domain domain;
-       unsigned long timer_freq;
+       struct irq_stage stage;
+       struct ipipe_sysinfo clock_data;
        unsigned long clock_freq;
        unsigned int apc_virq;
        unsigned long apc_map;
@@ -65,11 +64,6 @@ struct cobalt_pipeline {
 
 extern struct cobalt_pipeline cobalt_pipeline;
 
-static inline unsigned long xnarch_timer_calibrate(void)
-{
-       return cobalt_machine.calibrate();
-}
-
 #ifndef xnarch_cache_aliasing
 #define xnarch_cache_aliasing()  0
 #endif
diff --git a/kernel/cobalt/init.c b/kernel/cobalt/init.c
index 0eab3be..a0ae270 100644
--- a/kernel/cobalt/init.c
+++ b/kernel/cobalt/init.c
@@ -18,7 +18,6 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/ipipe_tickdev.h>
 #include <xenomai/version.h>
 #include <cobalt/kernel/sched.h>
 #include <cobalt/kernel/clock.h>
@@ -47,9 +46,6 @@
  * based on a set of generic RTOS building blocks.
  */
 
-static unsigned long timerfreq_arg;
-module_param_named(timerfreq, timerfreq_arg, ulong, 0444);
-
 static unsigned long clockfreq_arg;
 module_param_named(clockfreq, clockfreq_arg, ulong, 0444);
 
@@ -131,9 +127,11 @@ static void sys_shutdown(void)
        int cpu;
        spl_t s;
 
-       xntimer_release_hardware();
+       if (realtime_core_state() == COBALT_STATE_RUNNING)
+               xntimer_release_hardware();
+
 #ifdef CONFIG_SMP
-       ipipe_free_irq(&xnsched_realtime_domain, IPIPE_RESCHEDULE_IPI);
+       free_irq(IPIPE_RESCHEDULE_IPI, &cobalt_pipeline.stage);
 #endif
 
        xnlock_get_irqsave(&nklock, s);
@@ -162,59 +160,51 @@ static void sys_shutdown(void)
 
 static int __init mach_setup(void)
 {
-       struct ipipe_sysinfo sysinfo;
        int ret, virq;
 
-       ret = ipipe_select_timers(&xnsched_realtime_cpus);
-       if (ret < 0)
-               return ret;
-
-       ipipe_get_sysinfo(&sysinfo);
-
-       if (timerfreq_arg == 0)
-               timerfreq_arg = sysinfo.sys_hrtimer_freq;
+       ipipe_get_sysinfo(&cobalt_pipeline.clock_data);
 
        if (clockfreq_arg == 0)
-               clockfreq_arg = sysinfo.sys_hrclock_freq;
+               clockfreq_arg = cobalt_pipeline.clock_data.sys_hrclock_freq;
 
        if (clockfreq_arg == 0) {
                printk(XENO_ERR "null clock frequency? Aborting.\n");
                return -ENODEV;
        }
 
-       cobalt_pipeline.timer_freq = timerfreq_arg;
        cobalt_pipeline.clock_freq = clockfreq_arg;
 
        if (cobalt_machine.init) {
                ret = cobalt_machine.init();
-               if (ret)
+               if (ret) {
+                       printk(XENO_ERR "machine.init() failed\n");
                        return ret;
+               }
        }
 
-       ipipe_register_head(&xnsched_realtime_domain, "Xenomai");
+       ipipe_register_head(&cobalt_pipeline.stage, "Xenomai");
 
-       ret = -EBUSY;
-       virq = ipipe_alloc_virq();
+       ret = -EAGAIN;
+       virq = irq_create_direct_mapping(ipipe_virq_domain);
        if (virq == 0)
-               goto fail_apc;
+               goto fail_apc_alloc;
 
        cobalt_pipeline.apc_virq = virq;
+       ret = request_irq(virq, apc_dispatch, 0,
+                         "Cobalt APC", &root_irq_stage);
+       if (ret)
+               goto fail_apc_request;
 
-       ipipe_request_irq(ipipe_root_domain,
-                         cobalt_pipeline.apc_virq,
-                         apc_dispatch,
-                         NULL, NULL);
-
-       virq = ipipe_alloc_virq();
+       ret = -EAGAIN;
+       virq = irq_create_direct_mapping(ipipe_virq_domain);
        if (virq == 0)
-               goto fail_escalate;
+               goto fail_escalate_alloc;
 
        cobalt_pipeline.escalate_virq = virq;
-
-       ipipe_request_irq(&xnsched_realtime_domain,
-                         cobalt_pipeline.escalate_virq,
-                         (ipipe_irq_handler_t)__xnsched_run_handler,
-                         NULL, NULL);
+       ret = request_irq(virq, __xnsched_run_handler, IRQF_PIPELINED,
+                         "Cobalt escalation", &cobalt_pipeline.stage);
+       if (ret)
+               goto fail_escalate_request;
 
        ret = xnclock_init(cobalt_pipeline.clock_freq);
        if (ret)
@@ -223,15 +213,15 @@ static int __init mach_setup(void)
        return 0;
 
 fail_clock:
-       ipipe_free_irq(&xnsched_realtime_domain,
-                      cobalt_pipeline.escalate_virq);
-       ipipe_free_virq(cobalt_pipeline.escalate_virq);
-fail_escalate:
-       ipipe_free_irq(ipipe_root_domain,
-                      cobalt_pipeline.apc_virq);
-       ipipe_free_virq(cobalt_pipeline.apc_virq);
-fail_apc:
-       ipipe_unregister_head(&xnsched_realtime_domain);
+       free_irq(cobalt_pipeline.escalate_virq, &cobalt_pipeline.stage);
+fail_escalate_request:
+       irq_dispose_mapping(cobalt_pipeline.escalate_virq);
+fail_escalate_alloc:
+       free_irq(cobalt_pipeline.apc_virq, &root_irq_stage);
+fail_apc_request:
+       irq_dispose_mapping(cobalt_pipeline.apc_virq);
+fail_apc_alloc:
+       ipipe_unregister_head(&cobalt_pipeline.stage);
 
        if (cobalt_machine.cleanup)
                cobalt_machine.cleanup();
@@ -249,12 +239,12 @@ static inline int __init mach_late_setup(void)
 
 static __init void mach_cleanup(void)
 {
-       ipipe_unregister_head(&xnsched_realtime_domain);
-       ipipe_free_irq(&xnsched_realtime_domain,
-                      cobalt_pipeline.escalate_virq);
-       ipipe_free_virq(cobalt_pipeline.escalate_virq);
-       ipipe_timers_release();
        xnclock_cleanup();
+       free_irq(cobalt_pipeline.escalate_virq, &cobalt_pipeline.stage);
+       irq_dispose_mapping(cobalt_pipeline.escalate_virq);
+       free_irq(cobalt_pipeline.apc_virq, &root_irq_stage);
+       irq_dispose_mapping(cobalt_pipeline.apc_virq);
+       ipipe_unregister_head(&cobalt_pipeline.stage);
 }
 
 static struct {
@@ -283,6 +273,7 @@ static void __init setup_init_state(void)
 
 static __init int sys_init(void)
 {
+       unsigned long heapsize;
        struct xnsched *sched;
        void *heapaddr;
        int ret, cpu;
@@ -290,25 +281,35 @@ static __init int sys_init(void)
        if (sysheap_size_arg == 0)
                sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;
 
-       heapaddr = alloc_pages_exact(sysheap_size_arg * 1024, GFP_KERNEL);
-       if (heapaddr == NULL ||
-           xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)) {
+       heapsize = sysheap_size_arg * 1024;
+       heapaddr = alloc_pages_exact(heapsize, GFP_KERNEL);
+       if (heapaddr == NULL)
                return -ENOMEM;
+
+       ret = xnheap_init(&cobalt_heap, heapaddr, heapsize);
+       if (ret) {
+               free_pages_exact(heapaddr, heapsize);
+               return ret;
        }
+
        xnheap_set_name(&cobalt_heap, "system heap");
 
+#ifdef CONFIG_SMP
+       ret = request_irq(IPIPE_RESCHEDULE_IPI, __xnsched_run_handler,
+                         IRQF_PIPELINED, "Cobalt reschedule",
+                         &cobalt_pipeline.stage);
+       if (ret) {
+               xnheap_destroy(&cobalt_heap);
+               free_pages_exact(heapaddr, heapsize);
+               return ret;
+       }
+#endif
+
        for_each_online_cpu(cpu) {
                sched = &per_cpu(nksched, cpu);
                xnsched_init(sched, cpu);
        }
 
-#ifdef CONFIG_SMP
-       ipipe_request_irq(&xnsched_realtime_domain,
-                         IPIPE_RESCHEDULE_IPI,
-                         (ipipe_irq_handler_t)__xnsched_run_handler,
-                         NULL, NULL);
-#endif
-
        xnregistry_init();
 
        /*
diff --git a/kernel/cobalt/intr.c b/kernel/cobalt/intr.c
index 5123363..ae7324d 100644
--- a/kernel/cobalt/intr.c
+++ b/kernel/cobalt/intr.c
@@ -19,8 +19,10 @@
  * 02111-1307, USA.
 */
 #include <linux/mutex.h>
-#include <linux/ipipe.h>
-#include <linux/ipipe_tickdev.h>
+#include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
+#include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
 #include <cobalt/kernel/sched.h>
 #include <cobalt/kernel/intr.h>
 #include <cobalt/kernel/stat.h>
@@ -33,9 +35,24 @@
  * @defgroup cobalt_core_irq Interrupt management
  * @{
  */
+
+static irqreturn_t xnintr_irq_handler(int irq, void *dev_id);
+
 #define XNINTR_MAX_UNHANDLED   1000
 
-static DEFINE_MUTEX(intrlock);
+static DEFINE_MUTEX(intr_lock);
+
+static LIST_HEAD(intr_list);
+
+void xnintr_list_lock(void)
+{
+       mutex_lock(&intr_lock);
+}
+
+void xnintr_list_unlock(void)
+{
+       mutex_unlock(&intr_lock);
+}
 
 #ifdef CONFIG_XENO_OPT_STATS
 struct xnintr nktimer;      /* Only for statistics */
@@ -131,18 +148,6 @@ static inline void switch_irqstats(struct xnintr *intr, struct xnsched *sched)
        xnstat_exectime_switch(sched, &statp->account);
 }
 
-static inline xnstat_exectime_t *switch_core_irqstats(struct xnsched *sched)
-{
-       struct xnirqstat *statp;
-       xnstat_exectime_t *prev;
-
-       statp = xnstat_percpu_data;
-       prev = xnstat_exectime_switch(sched, &statp->account);
-       xnstat_counter_inc(&statp->hits);
-
-       return prev;
-}
-
 #else  /* !CONFIG_XENO_OPT_STATS */
 
 static inline void stat_counter_inc(void) {}
@@ -164,71 +169,8 @@ static inline void inc_irqstats(struct xnintr *intr, struct xnsched *sched, xnti
 
 static inline void switch_irqstats(struct xnintr *intr, struct xnsched *sched) {}
 
-static inline xnstat_exectime_t *switch_core_irqstats(struct xnsched *sched)
-{
-       return NULL;
-}
-
 #endif /* !CONFIG_XENO_OPT_STATS */
 
-static void xnintr_irq_handler(unsigned int irq, void *cookie);
-
-void xnintr_host_tick(struct xnsched *sched) /* Interrupts off. */
-{
-       sched->lflags &= ~XNHTICK;
-#ifdef XNARCH_HOST_TICK_IRQ
-       ipipe_post_irq_root(XNARCH_HOST_TICK_IRQ);
-#endif
-}
-
-/*
- * Low-level core clock irq handler. This one forwards ticks from the
- * Xenomai platform timer to nkclock exclusively.
- */
-void xnintr_core_clock_handler(void)
-{
-       struct xnsched *sched = xnsched_current();
-       int cpu  __maybe_unused = xnsched_cpu(sched);
-       xnstat_exectime_t *prev;
-
-       if (!xnsched_supported_cpu(cpu)) {
-#ifdef XNARCH_HOST_TICK_IRQ
-               ipipe_post_irq_root(XNARCH_HOST_TICK_IRQ);
-#endif
-               return;
-       }
-
-       prev = switch_core_irqstats(sched);
-
-       trace_cobalt_clock_entry(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
-
-       ++sched->inesting;
-       sched->lflags |= XNINIRQ;
-
-       xnlock_get(&nklock);
-       xnclock_tick(&nkclock);
-       xnlock_put(&nklock);
-
-       trace_cobalt_clock_exit(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
-       xnstat_exectime_switch(sched, prev);
-
-       if (--sched->inesting == 0) {
-               sched->lflags &= ~XNINIRQ;
-               xnsched_run();
-               sched = xnsched_current();
-       }
-       /*
-        * If the core clock interrupt preempted a real-time thread,
-        * any transition to the root thread has already triggered a
-        * host tick propagation from xnsched_run(), so at this point,
-        * we only need to propagate the host tick in case the
-        * interrupt preempted the root thread.
-        */
-       if ((sched->lflags & XNHTICK) &&
-           xnthread_test_state(sched->curr, XNROOT))
-               xnintr_host_tick(sched);
-}
-
 struct irqdisable_work {
        struct ipipe_work_header work; /* Must be first. */
        int irq;
@@ -239,7 +181,7 @@ static void lostage_irqdisable_line(struct ipipe_work_header *work)
        struct irqdisable_work *rq;
 
        rq = container_of(work, struct irqdisable_work, work);
-       ipipe_disable_irq(rq->irq);
+       disable_irq(rq->irq);
 }
 
 static void disable_irq_line(int irq)
@@ -274,7 +216,7 @@ static inline struct xnintr *xnintr_vec_first(unsigned int irq)
 
 static inline struct xnintr *xnintr_vec_next(struct xnintr *prev)
 {
-       return prev->next;
+       return prev->next_handler;
 }
 
 static void disable_shared_irq_line(struct xnintr_vector *vec)
@@ -286,7 +228,7 @@ static void disable_shared_irq_line(struct xnintr_vector *vec)
        intr = vec->handlers;
        while (intr) {
                set_bit(XN_IRQSTAT_DISABLED, &intr->status);
-               intr = intr->next;
+               intr = intr->next_handler;
        }
        xnlock_put(&vec->lock);
        disable_irq_line(irq);
@@ -296,7 +238,7 @@ static void disable_shared_irq_line(struct xnintr_vector *vec)
  * Low-level interrupt handler dispatching the user-defined ISRs for
  * shared interrupts -- Called with interrupts off.
  */
-static void xnintr_vec_handler(unsigned int irq, void *cookie)
+static irqreturn_t xnintr_vec_handler(int irq, void *dev_id)
 {
        struct xnsched *sched = xnsched_current();
        struct xnintr_vector *vec = vectors + irq;
@@ -332,7 +274,7 @@ static void xnintr_vec_handler(unsigned int irq, void *cookie)
                        inc_irqstats(intr, sched, start);
                        start = xnstat_exectime_now();
                }
-               intr = intr->next;
+               intr = intr->next_handler;
        }
 
        xnlock_put(&vec->lock);
@@ -351,7 +293,7 @@ static void xnintr_vec_handler(unsigned int irq, void *cookie)
        else if (s & XN_IRQ_DISABLE)
                disable_shared_irq_line(vec);
        else
-               ipipe_end_irq(irq);
+               release_irq(irq);
 out:
        xnstat_exectime_switch(sched, prev);
 
@@ -361,13 +303,15 @@ out:
        }
 
        trace_cobalt_irq_exit(irq);
+
+       return IRQ_HANDLED;
 }
 
 /*
  * Low-level interrupt handler dispatching the user-defined ISRs for
  * shared edge-triggered interrupts -- Called with interrupts off.
  */
-static void xnintr_edge_vec_handler(unsigned int irq, void *cookie)
+static irqreturn_t xnintr_edge_vec_handler(int irq, void *dev_id)
 {
        const int MAX_EDGEIRQ_COUNTER = 128;
        struct xnsched *sched = xnsched_current();
@@ -412,7 +356,7 @@ static void xnintr_edge_vec_handler(unsigned int irq, void *cookie)
                if (counter++ > MAX_EDGEIRQ_COUNTER)
                        break;
 
-               intr = intr->next;
+               intr = intr->next_handler;
                if (intr  == NULL)
                        intr = vec->handlers;
        }
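
The wrap-around to vec->handlers above is what makes shared edge-triggered lines work: a new edge may fire while the chain is being walked, so the dispatcher keeps re-scanning from the head until a full pass makes no progress, with MAX_EDGEIRQ_COUNTER as a safety stop against a runaway line. Schematically (the exit condition is paraphrased, this is not the verbatim loop):

    int counter = 0, progress = 1;
    struct xnintr *intr = vec->handlers;

    while (progress) {
            /* run intr->isr(intr); 'progress' stands for whether the
               last full pass over the chain handled anything */
            if (counter++ > MAX_EDGEIRQ_COUNTER)
                    break;                  /* runaway IRQ line */
            intr = intr->next_handler;
            if (intr == NULL)
                    intr = vec->handlers;   /* rescan from the head */
    }
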
@@ -437,7 +381,7 @@ static void xnintr_edge_vec_handler(unsigned int irq, void *cookie)
        else if (s & XN_IRQ_DISABLE)
                disable_shared_irq_line(vec);
        else
-               ipipe_end_irq(irq);
+               release_irq(irq);
 out:
        xnstat_exectime_switch(sched, prev);
 
@@ -447,6 +391,8 @@ out:
        }
 
        trace_cobalt_irq_exit(irq);
+
+       return IRQ_HANDLED;
 }
 
 static inline int xnintr_irq_attach(struct xnintr *intr)
@@ -459,9 +405,8 @@ static inline int xnintr_irq_attach(struct xnintr *intr)
        if (prev) {
                /* Check on whether the shared mode is allowed. */
                if ((prev->flags & intr->flags & XN_IRQTYPE_SHARED) == 0 ||
-                   (prev->iack != intr->iack)
-                   || ((prev->flags & XN_IRQTYPE_EDGE) !=
-                       (intr->flags & XN_IRQTYPE_EDGE)))
+                   ((prev->flags & XN_IRQTYPE_EDGE) !=
+                    (intr->flags & XN_IRQTYPE_EDGE)))
                        return -EBUSY;
 
                /*
@@ -469,12 +414,12 @@ static inline int xnintr_irq_attach(struct xnintr *intr)
                 * new element.
                 */
                while (prev) {
-                       p = &prev->next;
+                       p = &prev->next_handler;
                        prev = *p;
                }
        } else {
                /* Initialize the corresponding interrupt channel */
-               void (*handler) (unsigned, void *) = xnintr_irq_handler;
+               irq_handler_t handler = xnintr_irq_handler;
 
                if (intr->flags & XN_IRQTYPE_SHARED) {
                        if (intr->flags & XN_IRQTYPE_EDGE)
@@ -485,14 +430,13 @@ static inline int xnintr_irq_attach(struct xnintr *intr)
                }
                vec->unhandled = 0;
 
-               ret = ipipe_request_irq(&xnsched_realtime_domain,
-                                       intr->irq, handler, intr,
-                                       (ipipe_irq_ackfn_t)intr->iack);
+               ret = request_irq(intr->irq, handler, IRQF_PIPELINED,
+                                 intr->name, intr);
                if (ret)
                        return ret;
        }
 
-       intr->next = NULL;
+       intr->next_handler = NULL;
        /*
         * Add the given interrupt object. No need to synchronise with
         * the IRQ handler, we are only extending the chain.
@@ -511,18 +455,18 @@ static inline void xnintr_irq_detach(struct xnintr *intr)
                if (e == intr) {
                        /* Remove the given interrupt object from the list. */
                        xnlock_get(&vec->lock);
-                       *p = e->next;
+                       *p = e->next_handler;
                        xnlock_put(&vec->lock);
 
                        sync_stat_references(intr);
 
                        /* Release the IRQ line if this was the last user */
                        if (vec->handlers == NULL)
-                               ipipe_free_irq(&xnsched_realtime_domain, intr->irq);
+                               free_irq(intr->irq, intr);
 
                        return;
                }
-               p = &e->next;
+               p = &e->next_handler;
        }
 
        printk(XENO_ERR "attempted to detach an unregistered interrupt descriptor\n");
@@ -540,7 +484,9 @@ static struct xnintr_vector vectors[IPIPE_NR_IRQS];
 
 static inline struct xnintr *xnintr_vec_first(unsigned int irq)
 {
-       return __ipipe_irq_cookie(&xnsched_realtime_domain, irq);
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       return desc->action ? desc->action->dev_id : NULL;
 }
 
 static inline struct xnintr *xnintr_vec_next(struct xnintr *prev)
@@ -550,19 +496,13 @@ static inline struct xnintr *xnintr_vec_next(struct xnintr *prev)
 
 static inline int xnintr_irq_attach(struct xnintr *intr)
 {
-       return ipipe_request_irq(&xnsched_realtime_domain,
-                                intr->irq, xnintr_irq_handler, intr,
-                                (ipipe_irq_ackfn_t)intr->iack);
+       return request_irq(intr->irq, xnintr_irq_handler,
+                          IRQF_PIPELINED, intr->name, intr);
 }
 
 static inline void xnintr_irq_detach(struct xnintr *intr)
 {
-       int irq = intr->irq;
-
-       xnlock_get(&vectors[irq].lock);
-       ipipe_free_irq(&xnsched_realtime_domain, irq);
-       xnlock_put(&vectors[irq].lock);
-
+       free_irq(intr->irq, intr);
        sync_stat_references(intr);
 }
 
@@ -572,7 +512,7 @@ static inline void xnintr_irq_detach(struct xnintr *intr)
  * Low-level interrupt handler dispatching non-shared ISRs -- Called
  * with interrupts off.
  */
-static void xnintr_irq_handler(unsigned int irq, void *cookie)
+static irqreturn_t xnintr_irq_handler(int irq, void *dev_id)
 {
        struct xnintr_vector __maybe_unused *vec = vectors + irq;
        struct xnsched *sched = xnsched_current();
@@ -581,6 +521,15 @@ static void xnintr_irq_handler(unsigned int irq, void *cookie)
        xnticks_t start;
        int s = 0;
 
+       /*
+        * CAUTION: we assume that no race with xnintr_detach() is
+        * possible in the current implementation, e.g. receiving an
+        * interrupt on the local CPU while a remote CPU is releasing
+        * the xnintr descriptor. The pipeline is expected to provide
+        * such a guarantee under the hood.
+        */
+       intr = dev_id;
+
        prev  = xnstat_exectime_get_current(sched);
        start = xnstat_exectime_now();
        trace_cobalt_irq_entry(irq);
@@ -590,20 +539,6 @@ static void xnintr_irq_handler(unsigned int irq, void *cookie)
 
        xnlock_get(&vec->lock);
 
-#ifdef CONFIG_SMP
-       /*
-        * In SMP case, we have to reload the cookie under the per-IRQ
-        * lock to avoid racing with xnintr_detach.  However, we
-        * assume that no CPU migration will occur while running the
-        * interrupt service routine, so the scheduler pointer will
-        * remain valid throughout this function.
-        */
-       intr = __ipipe_irq_cookie(&xnsched_realtime_domain, irq);
-       if (unlikely(intr == NULL))
-               goto done;
-#else
-       intr = cookie;
-#endif
        if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) {
                /* irqdisable_work is on its way, ignore. */
                xnlock_put(&vec->lock);
@@ -633,7 +568,7 @@ static void xnintr_irq_handler(unsigned int irq, void *cookie)
        else if (s & XN_IRQ_PROPAGATE)
                ipipe_post_irq_root(irq);
        else
-               ipipe_end_irq(irq);
+               release_irq(irq);
 out:
        xnstat_exectime_switch(sched, prev);
 
@@ -643,18 +578,31 @@ out:
        }
 
        trace_cobalt_irq_exit(irq);
+
+       return IRQ_HANDLED;
 }
 
 int __init xnintr_mount(void)
 {
+#if defined(CONFIG_SMP) || XENO_DEBUG(LOCKING) || defined(CONFIG_XENO_OPT_SHIRQ)
        int i;
-       for (i = 0; i < IPIPE_NR_IRQS; ++i)
+       for (i = 0; i < ARRAY_SIZE(vectors); ++i)
                xnlock_init(&vectors[i].lock);
+#endif
        return 0;
 }
 
+static void register_intr(struct xnintr *intr, void *dev_id)
+{
+       intr->dev_id = dev_id;
+       clear_irqstats(intr);
+       xnintr_list_lock();
+       stat_counter_inc();
+       list_add_tail(&intr->next, &intr_list);
+       xnintr_list_unlock();
+}
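
Neither intr_list nor the xnintr_list_lock()/xnintr_list_unlock() pair is defined in this hunk. A plausible definition, mirroring the mutex-based intrlock they replace (an assumption, not taken from the patch):

    static LIST_HEAD(intr_list);
    static DEFINE_MUTEX(intr_list_mutex);

    static inline void xnintr_list_lock(void)
    {
            mutex_lock(&intr_list_mutex);
    }

    static inline void xnintr_list_unlock(void)
    {
            mutex_unlock(&intr_list_mutex);
    }
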
+
 /**
- * @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned int irq,xnisr_t isr,xniack_t iack,int flags)
  * @brief Initialize an interrupt descriptor.
  *
  * When an interrupt occurs on the given @a irq line, the interrupt
@@ -708,14 +656,6 @@ int __init xnintr_mount(void)
  * @param isr The address of an interrupt handler, which is passed the
  * address of the interrupt descriptor receiving the IRQ.
  *
- * @param iack The address of an optional interrupt acknowledge
- * routine, aimed at replacing the default one. Only very specific
- * situations actually require to override the default setting for
- * this parameter, like having to acknowledge non-standard PIC
- * hardware. @a iack should return a non-zero value to indicate that
- * the interrupt has been properly acknowledged. If @a iack is NULL,
- * the default routine will be used instead.
- *
  * @param flags A set of creation flags affecting the operation. The
  * valid flags are:
  *
@@ -732,27 +672,34 @@ int __init xnintr_mount(void)
  * @coretags{secondary-only}
  */
 int xnintr_init(struct xnintr *intr, const char *name,
-               unsigned int irq, xnisr_t isr, xniack_t iack,
-               int flags)
+               int irq, xnisr_t isr, int flags)
 {
        secondary_mode_only();
 
-       if (irq >= IPIPE_NR_IRQS)
+       /*
+        * A descriptor with a negative IRQ number is a placeholder
+        * which won't be attached, so register it immediately.
+        * Otherwise, the number must refer to a valid IRQ line.
+        */
+       if (irq >= 0 && irq_to_desc(irq) == NULL)
                return -EINVAL;
 
        intr->irq = irq;
        intr->isr = isr;
-       intr->iack = iack;
-       intr->cookie = NULL;
+       intr->dev_id = NULL;
        intr->name = name ? : "<unknown>";
        intr->flags = flags;
        intr->status = _XN_IRQSTAT_DISABLED;
        intr->unhandled = 0;
        raw_spin_lock_init(&intr->lock);
 #ifdef CONFIG_XENO_OPT_SHIRQ
-       intr->next = NULL;
+       intr->next_handler = NULL;
 #endif
        alloc_irqstats(intr);
+       INIT_LIST_HEAD(&intr->next);
+
+       if (irq < 0)
+               register_intr(intr, NULL);
 
        return 0;
 }
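
With the iack parameter gone, driver-side setup boils down to the sketch below; the IRQ number, ISR and device pointer are made up for illustration. Passing a negative IRQ number would instead declare a stats-only placeholder, registered immediately and never attached.

    static int mydev_isr(struct xnintr *intr)
    {
            /* ... service the hardware ... */
            return XN_IRQ_HANDLED;
    }

    static struct xnintr mydev_intr;

    static int mydev_setup(void *mydev)
    {
            int ret;

            ret = xnintr_init(&mydev_intr, "mydev", 42, mydev_isr, 0);
            if (ret)
                    return ret;

            /* mydev is stored as dev_id for retrieval by the ISR. */
            return xnintr_attach(&mydev_intr, mydev);
    }
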
@@ -780,7 +727,7 @@ void xnintr_destroy(struct xnintr *intr)
 EXPORT_SYMBOL_GPL(xnintr_destroy);
 
 /**
- * @fn int xnintr_attach(struct xnintr *intr, void *cookie)
+ * @fn int xnintr_attach(struct xnintr *intr, void *dev_id)
  * @brief Attach an interrupt descriptor.
  *
  * Attach an interrupt descriptor previously initialized by
@@ -791,7 +738,7 @@ EXPORT_SYMBOL_GPL(xnintr_destroy);
  *
  * @param intr The address of the interrupt descriptor to attach.
  *
- * @param cookie A user-defined opaque value which is stored into the
+ * @param dev_id A user-defined opaque value which is stored into the
  * descriptor for further retrieval by the interrupt handler.
  *
  * @return 0 is returned on success. Otherwise:
@@ -809,18 +756,16 @@ EXPORT_SYMBOL_GPL(xnintr_destroy);
  * @note Attaching an interrupt descriptor resets the tracked number
  * of IRQ receipts to zero.
  */
-int xnintr_attach(struct xnintr *intr, void *cookie)
+int xnintr_attach(struct xnintr *intr, void *dev_id)
 {
        int ret;
 
        secondary_mode_only();
        trace_cobalt_irq_attach(intr->irq);
-
-       intr->cookie = cookie;
-       clear_irqstats(intr);
+       register_intr(intr, dev_id);
 
 #ifdef CONFIG_SMP
-       ipipe_set_irq_affinity(intr->irq, cobalt_cpu_affinity);
+       irq_set_affinity(intr->irq, &cobalt_cpu_affinity);
 #endif /* CONFIG_SMP */
 
        raw_spin_lock(&intr->lock);
@@ -831,12 +776,8 @@ int xnintr_attach(struct xnintr *intr, void *cookie)
        }
 
        ret = xnintr_irq_attach(intr);
-       if (ret) {
+       if (ret)
                clear_bit(XN_IRQSTAT_ATTACHED, &intr->status);
-               goto out;
-       }
-
-       stat_counter_inc();
 out:
        raw_spin_unlock(&intr->lock);
 
@@ -866,6 +807,8 @@ void xnintr_detach(struct xnintr *intr)
        secondary_mode_only();
        trace_cobalt_irq_detach(intr->irq);
 
+       xnintr_list_lock();
+
        raw_spin_lock(&intr->lock);
 
        if (test_and_clear_bit(XN_IRQSTAT_ATTACHED, &intr->status)) {
@@ -874,6 +817,11 @@ void xnintr_detach(struct xnintr *intr)
        }
 
        raw_spin_unlock(&intr->lock);
+
+       if (!list_empty(&intr->next))
+               list_del(&intr->next);
+
+       xnintr_list_unlock();
 }
 EXPORT_SYMBOL_GPL(xnintr_detach);
 
@@ -901,7 +849,7 @@ void xnintr_enable(struct xnintr *intr)
         * disable_irq_line().
         */
        if (test_and_clear_bit(XN_IRQSTAT_DISABLED, &intr->status))
-               ipipe_enable_irq(intr->irq);
+               enable_irq(intr->irq);
 
        raw_spin_unlock_irqrestore(&intr->lock, flags);
 }
@@ -928,14 +876,9 @@ void xnintr_disable(struct xnintr *intr)
        /* We only need a virtual masking. */
        raw_spin_lock_irqsave(&intr->lock, flags);
 
-       /*
-        * Racing with disable_irq_line() is innocuous, the pipeline
-        * would serialize calls to ipipe_disable_irq() across CPUs,
-        * and the descriptor status would still properly match the
-        * line status in the end.
-        */
+       /* Racing with disable_irq_line() is innocuous. */
        if (!test_and_set_bit(XN_IRQSTAT_DISABLED, &intr->status))
-               ipipe_disable_irq(intr->irq);
+               disable_irq(intr->irq);
 
        raw_spin_unlock_irqrestore(&intr->lock, flags);
 }
@@ -962,39 +905,26 @@ void xnintr_affinity(struct xnintr *intr, cpumask_t cpumask)
 {
        secondary_mode_only();
 #ifdef CONFIG_SMP
-       ipipe_set_irq_affinity(intr->irq, cpumask);
+       irq_set_affinity(intr->irq, &cpumask);
 #endif
 }
 EXPORT_SYMBOL_GPL(xnintr_affinity);
 
-static inline int xnintr_is_timer_irq(int irq)
-{
-       int cpu;
-
-       for_each_realtime_cpu(cpu)
-               if (irq == per_cpu(ipipe_percpu.hrtimer_irq, cpu))
-                       return 1;
-
-       return 0;
-}
-
 #ifdef CONFIG_XENO_OPT_STATS
 
-int xnintr_get_query_lock(void)
-{
-       return mutex_lock_interruptible(&intrlock) ? -ERESTARTSYS : 0;
-}
-
-void xnintr_put_query_lock(void)
-{
-       mutex_unlock(&intrlock);
-}
-
 int xnintr_query_init(struct xnintr_iterator *iterator)
 {
-       iterator->cpu = -1;
+       iterator->cpus = *cpu_online_mask;
        iterator->prev = NULL;
 
+       if (list_empty(&intr_list)) {
+               iterator->curr = NULL;
+               return 0;
+       }
+
+       iterator->curr = list_first_entry(&intr_list,
+                                         struct xnintr, next);
+
        /* The order is important here: first xnintr_list_rev then
         * xnintr_count.  On the other hand, xnintr_attach/detach()
         * update xnintr_count first and then xnintr_list_rev.  This
@@ -1010,47 +940,52 @@ int xnintr_query_init(struct xnintr_iterator *iterator)
        return xnintr_count;
 }
 
-int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
+int xnintr_query_next(struct xnintr_iterator *iterator,
                      char *name_buf)
 {
-       int cpu, nr_cpus = num_present_cpus();
        struct xnintr *intr;
 
-       for (cpu = iterator->cpu + 1; cpu < nr_cpus; ++cpu) {
-               if (cpu_online(cpu))
-                       break;
-       }
-       if (cpu == nr_cpus)
-               cpu = 0;
-       iterator->cpu = cpu;
-
        if (iterator->list_rev != xnintr_list_rev)
                return -EAGAIN;
-
-       if (!iterator->prev) {
-               if (xnintr_is_timer_irq(irq))
-                       intr = &nktimer;
-               else
-                       intr = xnintr_vec_first(irq);
-       } else
-               intr = xnintr_vec_next(iterator->prev);
-
-       if (intr == NULL) {
-               cpu = -1;
-               iterator->prev = NULL;
+redo:
+       if (iterator->curr == NULL)
                return -ENODEV;
+
+       if (cpumask_empty(&iterator->cpus)) {
+               iterator->cpus = *cpu_online_mask;
+               intr = NULL;
+               if (iterator->prev != &nktimer)
+                       intr = xnintr_vec_next(iterator->prev);
+               if (intr == NULL) {
+                       intr = iterator->curr;
+                       iterator->curr = NULL;
+                       iterator->prev = NULL;
+                       if (!list_is_last(&intr->next, &intr_list))
+                               iterator->curr = list_next_entry(intr, next);
+                       goto redo;
+               }
+       } else {
+               intr = iterator->prev;
+               if (intr == NULL) {
+                       if (iterator->curr == &nktimer)
+                               intr = &nktimer;
+                       else
+                               intr = xnintr_vec_first(iterator->curr->irq);
+               }
        }
 
-       ksformat(name_buf, XNOBJECT_NAME_LEN, "IRQ%d: %s", irq, intr->name);
+       iterator->cpu = cpumask_first(&iterator->cpus);
+       cpumask_clear_cpu(iterator->cpu, &iterator->cpus);
+       iterator->prev = intr;
 
-       query_irqstats(intr, cpu, iterator);
+       if (intr->irq < 0)
+               ksformat(name_buf, XNOBJECT_NAME_LEN, "%s",
+                        intr->name);
+       else
+               ksformat(name_buf, XNOBJECT_NAME_LEN, "IRQ%d: %s",
+                        intr->irq, intr->name);
 
-       /*
-        * Proceed to next entry in shared IRQ chain when all CPUs
-        * have been visited for this one.
-        */
-       if (cpu + 1 == nr_cpus)
-               iterator->prev = intr;
+       query_irqstats(intr, iterator->cpu, iterator);
 
        return 0;
 }
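
The rewritten iterator effectively walks a (descriptor, handler, CPU) matrix; the traversal order implemented above is:

    /*
     * for each descriptor D in intr_list:            (outer list)
     *     for each handler H chained to D:           (shared IRQs)
     *         for each online CPU C:
     *             emit one record for (H, C)
     *
     * iterator->cpus holds the CPUs still to visit for the current
     * handler; once it empties, the iterator advances along the
     * shared chain, then to the next intr_list entry.
     */
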
@@ -1061,63 +996,28 @@ int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
 
 #include <cobalt/kernel/vfile.h>
 
-static inline int format_irq_proc(unsigned int irq,
-                                 struct xnvfile_regular_iterator *it)
+static inline void format_irq_proc(struct xnintr *intr,
+                                  struct xnvfile_regular_iterator *it)
 {
-       struct xnintr *intr;
-       int cpu;
-
-       for_each_realtime_cpu(cpu)
-               if (xnintr_is_timer_irq(irq)) {
-                       xnvfile_printf(it, "         [timer/%d]", cpu);
-                       return 0;
-               }
+       xnvfile_puts(it, "        ");
 
-#ifdef CONFIG_SMP
-       /*
-        * IPI numbers on ARM are not compile time constants, so do
-        * not use switch/case here.
-        */
-       if (irq == IPIPE_HRTIMER_IPI) {
-               xnvfile_puts(it, "         [timer-ipi]");
-               return 0;
-       }
-       if (irq == IPIPE_RESCHEDULE_IPI) {
-               xnvfile_puts(it, "         [reschedule]");
-               return 0;
-       }
-       if (irq == IPIPE_CRITICAL_IPI) {
-               xnvfile_puts(it, "         [sync]");
-               return 0;
-       }
-#endif /* CONFIG_SMP */
-       if (ipipe_virtual_irq_p(irq)) {
-               xnvfile_puts(it, "         [virtual]");
-               return 0;
-       }
-
-       mutex_lock(&intrlock);
-
-       intr = xnintr_vec_first(irq);
-       if (intr) {
-               xnvfile_puts(it, "        ");
-
-               do {
-                       xnvfile_putc(it, ' ');
-                       xnvfile_puts(it, intr->name);
-                       intr = xnintr_vec_next(intr);
-               } while (intr);
-       }
-
-       mutex_unlock(&intrlock);
-
-       return 0;
+       do {
+               xnvfile_putc(it, ' ');
+               xnvfile_puts(it, intr->name);
+               intr = xnintr_vec_next(intr);
+       } while (intr);
 }
 
 static int irq_vfile_show(struct xnvfile_regular_iterator *it,
                          void *data)
 {
-       int cpu, irq;
+       struct xnintr *intr;
+       int irq, cpu;
+
+       xnintr_list_lock();
+
+       if (list_empty(&intr_list))
+               goto done;
 
        /* FIXME: We assume the entire output fits in a single page. */
 
@@ -1126,22 +1026,20 @@ static int irq_vfile_show(struct xnvfile_regular_iterator *it,
        for_each_realtime_cpu(cpu)
                xnvfile_printf(it, "        CPU%d", cpu);
 
-       for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
-               if (__ipipe_irq_handler(&xnsched_realtime_domain, irq) == NULL)
+       list_for_each_entry(intr, &intr_list, next) {
+               irq = intr->irq;
+               if (irq < 0)
                        continue;
-
                xnvfile_printf(it, "\n%5d:", irq);
-
-               for_each_realtime_cpu(cpu) {
-                       xnvfile_printf(it, "%12lu",
-                                      
__ipipe_cpudata_irq_hits(&xnsched_realtime_domain, cpu,
-                                                               irq));
-               }
-
-               format_irq_proc(irq, it);
+               for_each_realtime_cpu(cpu)
+                       xnvfile_printf(it, "%12u",
+                                      kstat_irqs_cpu(irq, cpu));
+               format_irq_proc(intr, it);
        }
 
        xnvfile_putc(it, '\n');
+done:
+       xnintr_list_unlock();
 
        return 0;
 }
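
For illustration, the rebuilt vfile would render along these lines; the header label, IRQ numbers, counters and handler names are made up, only the column layout follows the format strings above:

      IRQ        CPU0        CPU1
       23:       10245        9987          mydev
       27:         512           0          rtcan0
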
diff --git a/kernel/cobalt/posix/corectl.c b/kernel/cobalt/posix/corectl.c
index f01a562..aba36dd 100644
--- a/kernel/cobalt/posix/corectl.c
+++ b/kernel/cobalt/posix/corectl.c
@@ -17,7 +17,7 @@
  */
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/ipipe.h>
+#include <linux/irq_pipeline.h>
 #include <linux/kconfig.h>
 #include <linux/atomic.h>
 #include <linux/printk.h>
@@ -91,7 +91,7 @@ static int do_conf_option(int option, void __user *u_buf, size_t u_bufsz)
                val = realtime_core_state();
                break;
        default:
-               if (!ipipe_root_p)
+               if (!on_root_stage())
                        /* Switch to secondary mode first. */
                        return -ENOSYS;
                vec.u_buf = u_buf;
diff --git a/kernel/cobalt/posix/process.c b/kernel/cobalt/posix/process.c
index 788a24a..30d62d2 100644
--- a/kernel/cobalt/posix/process.c
+++ b/kernel/cobalt/posix/process.c
@@ -36,8 +36,7 @@
 #include <linux/vmalloc.h>
 #include <linux/signal.h>
 #include <linux/kallsyms.h>
-#include <linux/ipipe.h>
-#include <linux/ipipe_tickdev.h>
+#include <linux/dovetail.h>
 #include <cobalt/kernel/sched.h>
 #include <cobalt/kernel/heap.h>
 #include <cobalt/kernel/synch.h>
@@ -587,7 +586,7 @@ static inline int disable_ondemand_memory(void)
                return 0;
        }
 
-       return __ipipe_disable_ondemand_mappings(p);
+       return disable_ondemand_mappings(p);
 }
 
 static inline int get_mayday_prot(void)
@@ -619,13 +618,12 @@ static inline int get_mayday_prot(void)
 /**
  * @fn int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
  * @internal
- * @brief Create a shadow thread context over a user task.
+ * @brief Create a shadow thread context over the current user task.
  *
- * This call maps a Xenomai thread to the current regular Linux task
- * running in userland.  The priority and scheduling class of the
- * underlying Linux task are not affected; it is assumed that the
- * interface library did set them appropriately before issuing the
- * shadow mapping request.
+ * This call maps a Cobalt thread to the current user task.  The
+ * priority and scheduling class of the underlying task are not
+ * affected; it is assumed that the interface library did set them
+ * appropriately before issuing the shadow mapping request.
  *
  * @param thread The descriptor address of the new shadow thread to be
  * mapped to current. This descriptor must have been previously
@@ -678,12 +676,6 @@ int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
 
        trace_cobalt_shadow_map(thread);
 
-       /*
-        * CAUTION: we enable the pipeline notifier only when our
-        * shadow TCB is consistent, so that we won't trigger false
-        * positive in debug code from handle_schedule_event() and
-        * friends.
-        */
        xnthread_init_shadow_tcb(thread);
        xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
        init_uthread_info(thread);
@@ -699,7 +691,12 @@ int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
         * it.
         */
        xnthread_run_handler(thread, map_thread);
-       ipipe_enable_notifier(current);
+       /*
+        * CAUTION: we enable dovetailing only when our shadow TCB is
+        * consistent, so that we won't trigger false positives in
+        * debug code from handle_schedule_event() and friends.
+        */
+       dovetail_enable(0);
 
        attr.mode = 0;
        attr.entry = NULL;
@@ -716,7 +713,7 @@ int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
        return 0;
 }
 
-static inline int handle_exception(struct ipipe_trap_data *d)
+static inline int handle_exception(struct dovetail_trap_data *d)
 {
        struct xnthread *thread;
        struct xnsched *sched;
@@ -799,7 +796,7 @@ static int handle_mayday_event(struct pt_regs *regs)
        return KEVENT_PROPAGATE;
 }
 
-int ipipe_trap_hook(struct ipipe_trap_data *data)
+int dovetail_trap_hook(struct dovetail_trap_data *data)
 {
        if (data->exception == IPIPE_TRAP_MAYDAY)
                return handle_mayday_event(data->regs);
@@ -824,7 +821,7 @@ int ipipe_trap_hook(struct ipipe_trap_data *data)
 
 #ifdef CONFIG_SMP
 
-static int handle_setaffinity_event(struct ipipe_cpu_migration_data *d)
+static int handle_setaffinity_event(struct dovetail_migration_data *d)
 {
        struct task_struct *p = d->task;
        struct xnthread *thread;
@@ -925,9 +922,9 @@ static inline void check_affinity(struct task_struct *p) /* nklocked, IRQs off *
 
 #else /* !CONFIG_SMP */
 
-struct ipipe_cpu_migration_data;
+struct dovetail_migration_data;
 
-static int handle_setaffinity_event(struct ipipe_cpu_migration_data *d)
+static int handle_setaffinity_event(struct dovetail_migration_data *d)
 {
        return KEVENT_PROPAGATE;
 }
@@ -936,7 +933,7 @@ static inline void check_affinity(struct task_struct *p) { }
 
 #endif /* CONFIG_SMP */
 
-void ipipe_migration_hook(struct task_struct *p) /* hw IRQs off */
+void dovetail_migration_hook(struct task_struct *p) /* hw IRQs off */
 {
        struct xnthread *thread = xnthread_from_task(p);
 
@@ -1029,7 +1026,7 @@ static void unregister_debugged_thread(struct xnthread *thread)
        xnlock_put_irqrestore(&nklock, s);
 }
 
-static void __handle_taskexit_event(struct task_struct *p)
+static void __handle_taskexit_event(void)
 {
        struct cobalt_ppd *sys_ppd;
        struct xnthread *thread;
@@ -1061,9 +1058,9 @@ static void __handle_taskexit_event(struct task_struct *p)
        }
 }
 
-static int handle_taskexit_event(struct task_struct *p) /* p == current */
+static int handle_taskexit_event(void) /* exiting current. */
 {
-       __handle_taskexit_event(p);
+       __handle_taskexit_event();
 
        /*
         * __xnthread_cleanup() -> ... -> finalize_thread
@@ -1199,7 +1196,7 @@ static int handle_sigwake_event(struct task_struct *p)
         * we don't break any undergoing ptrace.
         */
        if (p->state & (TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE))
-               set_task_state(p, p->state | TASK_NOWAKEUP);
+               set_task_state(p, p->state | TASK_STALL);
 
        __xnthread_kick(thread);
 
@@ -1240,8 +1237,8 @@ static int handle_cleanup_event(struct mm_struct *mm)
                thread = xnthread_current();
                running_exec = thread && (current->flags & PF_EXITING) == 0;
                if (running_exec) {
-                       __handle_taskexit_event(current);
-                       ipipe_disable_notifier(current);
+                       __handle_taskexit_event();
+                       dovetail_disable();
                }
                if (atomic_dec_and_test(&sys_ppd->refcnt))
                        remove_process(process);
@@ -1261,27 +1258,27 @@ static int handle_cleanup_event(struct mm_struct *mm)
        return KEVENT_PROPAGATE;
 }
 
-int ipipe_kevent_hook(int kevent, void *data)
+int dovetail_kevent_hook(int kevent, void *data)
 {
        int ret;
 
        switch (kevent) {
-       case IPIPE_KEVT_SCHEDULE:
+       case KEVENT_SCHEDULE:
                ret = handle_schedule_event(data);
                break;
-       case IPIPE_KEVT_SIGWAKE:
+       case KEVENT_SIGWAKE:
                ret = handle_sigwake_event(data);
                break;
-       case IPIPE_KEVT_EXIT:
-               ret = handle_taskexit_event(data);
+       case KEVENT_EXIT:
+               ret = handle_taskexit_event();
                break;
-       case IPIPE_KEVT_CLEANUP:
+       case KEVENT_CLEANUP:
                ret = handle_cleanup_event(data);
                break;
-       case IPIPE_KEVT_HOSTRT:
+       case KEVENT_HOSTRT:
                ret = handle_hostrt_event(data);
                break;
-       case IPIPE_KEVT_SETAFFINITY:
+       case KEVENT_SETAFFINITY:
                ret = handle_setaffinity_event(data);
                break;
        default:
@@ -1520,8 +1517,10 @@ __init int cobalt_init(void)
                goto fail_siginit;
 
        init_hostrt();
-       ipipe_set_hooks(ipipe_root_domain, IPIPE_SYSCALL|IPIPE_KEVENT);
-       ipipe_set_hooks(&xnsched_realtime_domain, IPIPE_SYSCALL|IPIPE_TRAP);
+       dovetail_host_events(&root_irq_stage,
+                            DOVETAIL_SYSCALLS|DOVETAIL_KEVENTS);
+       dovetail_host_events(&cobalt_pipeline.stage,
+                            DOVETAIL_SYSCALLS|DOVETAIL_TRAPS);
 
        if (gid_arg != -1)
                printk(XENO_INFO "allowing access to group %d\n", gid_arg);
diff --git a/kernel/cobalt/posix/syscall.c b/kernel/cobalt/posix/syscall.c
index dd4dfba..cd2cb32 100644
--- a/kernel/cobalt/posix/syscall.c
+++ b/kernel/cobalt/posix/syscall.c
@@ -18,7 +18,7 @@
  */
 #include <linux/types.h>
 #include <linux/err.h>
-#include <linux/ipipe.h>
+#include <linux/dovetail.h>
 #include <linux/kconfig.h>
 #include <cobalt/uapi/corectl.h>
 #include <cobalt/kernel/tree.h>
@@ -108,7 +108,7 @@ static COBALT_SYSCALL(migrate, current, (int domain))
 {
        struct xnthread *thread = xnthread_current();
 
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                if (domain == COBALT_PRIMARY) {
                        if (thread == NULL)
                                return -EPERM;
@@ -125,7 +125,7 @@ static COBALT_SYSCALL(migrate, current, (int domain))
                return 0;
        }
 
-       /* ipipe_current_domain != ipipe_root_domain */
+       /* current_irq_stage != &root_irq_stage */
        if (domain == COBALT_SECONDARY) {
                xnthread_relax(0, 0);
                return 1;
@@ -241,7 +241,7 @@ static COBALT_SYSCALL(serialdbg, current,
                        n = sizeof(buf);
                if (cobalt_copy_from_user(buf, u_msg, n))
                        return -EFAULT;
-               __ipipe_serial_debug("%.*s", n, buf);
+               raw_printk("%.*s", n, buf);
                u_msg += n;
                len -= n;
        }
@@ -478,7 +478,7 @@ static inline int allowed_syscall(struct cobalt_process *process,
        return cap_raised(current_cap(), CAP_SYS_NICE);
 }
 
-static int handle_head_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
+static int handle_head_syscall(struct irq_stage *stage, struct pt_regs *regs)
 {
        struct cobalt_process *process;
        int switched, sigs, sysflags;
@@ -555,7 +555,7 @@ restart:
                /*
                 * The syscall must run from the Linux domain.
                 */
-               if (ipd == &xnsched_realtime_domain) {
+               if (stage == &cobalt_pipeline.stage) {
                        /*
                         * Request originates from the Xenomai domain:
                         * relax the caller then invoke the syscall
@@ -580,7 +580,7 @@ restart:
                 * hand it over to our secondary-mode dispatcher.
                 * Otherwise, invoke the syscall handler immediately.
                 */
-               if (ipd != &xnsched_realtime_domain)
+               if (stage != &cobalt_pipeline.stage)
                        return KEVENT_PROPAGATE;
        }
 
@@ -660,7 +660,7 @@ bad_syscall:
        return KEVENT_STOP;
 }
 
-static int handle_root_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
+static int handle_root_syscall(struct irq_stage *stage, struct pt_regs *regs)
 {
        int sysflags, switched, sigs;
        struct xnthread *thread;
@@ -764,19 +764,19 @@ ret_handled:
        return KEVENT_STOP;
 }
 
-int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
+int dovetail_syscall_hook(struct irq_stage *stage, struct pt_regs *regs)
 {
-       if (unlikely(ipipe_root_p))
-               return handle_root_syscall(ipd, regs);
+       if (unlikely(on_root_stage()))
+               return handle_root_syscall(stage, regs);
 
-       return handle_head_syscall(ipd, regs);
+       return handle_head_syscall(stage, regs);
 }
 
-int ipipe_fastcall_hook(struct pt_regs *regs)
+int dovetail_fastcall_hook(struct pt_regs *regs)
 {
        int ret;
 
-       ret = handle_head_syscall(&xnsched_realtime_domain, regs);
+       ret = handle_head_syscall(&cobalt_pipeline.stage, regs);
        XENO_BUG_ON(COBALT, ret == KEVENT_PROPAGATE);
 
        return ret;
diff --git a/kernel/cobalt/rtdm/drvlib.c b/kernel/cobalt/rtdm/drvlib.c
index 481ad83..42307bb 100644
--- a/kernel/cobalt/rtdm/drvlib.c
+++ b/kernel/cobalt/rtdm/drvlib.c
@@ -1346,7 +1346,7 @@ int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
        if (!XENO_ASSERT(COBALT, xnsched_root_p()))
                return -EPERM;
 
-       err = xnintr_init(irq_handle, device_name, irq_no, handler, NULL, flags);
+       err = xnintr_init(irq_handle, device_name, irq_no, handler, flags);
        if (err)
                return err;
 
@@ -1356,8 +1356,6 @@ int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
                return err;
        }
 
-       xnintr_enable(irq_handle);
-
        return 0;
 }
 
@@ -1526,7 +1524,7 @@ void rtdm_schedule_nrt_work(struct work_struct *lostage_work)
                .lostage_work = lostage_work,
        };
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                schedule_work(lostage_work);
        else
                ipipe_post_work_root(&ipipe_work, work);
diff --git a/kernel/cobalt/rtdm/fd.c b/kernel/cobalt/rtdm/fd.c
index eacc1cc..9113f35 100644
--- a/kernel/cobalt/rtdm/fd.c
+++ b/kernel/cobalt/rtdm/fd.c
@@ -269,7 +269,7 @@ static void __put_fd(struct rtdm_fd *fd, spl_t s)
        if (!destroy)
                return;
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                fd->ops->close(fd);
        else {
                struct lostage_trigger_close closework = {
@@ -412,7 +412,7 @@ int rtdm_fd_ioctl(int ufd, unsigned int request, ...)
 
        trace_cobalt_fd_ioctl(current, fd, ufd, request);
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                err = fd->ops->ioctl_nrt(fd, request, arg);
        else
                err = fd->ops->ioctl_rt(fd, request, arg);
@@ -451,7 +451,7 @@ rtdm_fd_read(int ufd, void __user *buf, size_t size)
 
        trace_cobalt_fd_read(current, fd, ufd, size);
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                err = fd->ops->read_nrt(fd, buf, size);
        else
                err = fd->ops->read_rt(fd, buf, size);
@@ -484,7 +484,7 @@ ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size)
 
        trace_cobalt_fd_write(current, fd, ufd, size);
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                err = fd->ops->write_nrt(fd, buf, size);
        else
                err = fd->ops->write_rt(fd, buf, size);
@@ -517,7 +517,7 @@ ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags)
 
        trace_cobalt_fd_recvmsg(current, fd, ufd, flags);
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                err = fd->ops->recvmsg_nrt(fd, msg, flags);
        else
                err = fd->ops->recvmsg_rt(fd, msg, flags);
@@ -549,7 +549,7 @@ ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, int flags)
 
        trace_cobalt_fd_sendmsg(current, fd, ufd, flags);
 
-       if (ipipe_root_p)
+       if (on_root_stage())
                err = fd->ops->sendmsg_nrt(fd, msg, flags);
        else
                err = fd->ops->sendmsg_rt(fd, msg, flags);
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 3863dab..37a19fe 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -791,7 +791,7 @@ static inline void enter_root(struct xnthread *root)
        if (rootcb->core.mm == NULL)
                set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
-       ipipe_unmute_pic();
+       irq_unmute_all();
 }
 
 static inline void leave_root(struct xnthread *root)
@@ -799,20 +799,22 @@ static inline void leave_root(struct xnthread *root)
        struct xnarchtcb *rootcb = xnthread_archtcb(root);
        struct task_struct *p = current;
 
-       ipipe_notify_root_preemption();
-       ipipe_mute_pic();
+       dovetail_leave_root();
        /* Remember the preempted Linux task pointer. */
        rootcb->core.host_task = p;
        rootcb->core.tsp = &p->thread;
-       rootcb->core.mm = rootcb->core.active_mm = ipipe_get_active_mm();
+       rootcb->core.mm = rootcb->core.active_mm = dovetail_get_active_mm();
        rootcb->core.tip = task_thread_info(p);
        xnarch_leave_root(root);
 }
 
-void __xnsched_run_handler(void) /* hw interrupts off. */
+irqreturn_t __xnsched_run_handler(int irq, void *dev_id)
 {
+       /* hw interrupts are off. */
        trace_cobalt_schedule_remote(xnsched_current());
        xnsched_run();
+
+       return IRQ_HANDLED;
 }
 
 int ___xnsched_run(struct xnsched *sched)
@@ -845,7 +847,7 @@ reschedule:
        if (next == curr) {
                if (unlikely(xnthread_test_state(next, XNROOT))) {
                        if (sched->lflags & XNHTICK)
-                               xnintr_host_tick(sched);
+                               xntimer_host_tick(sched);
                        if (sched->lflags & XNHDEFER)
                                xnclock_program_shot(&nkclock, sched);
                }
@@ -867,7 +869,7 @@ reschedule:
                shadow = 0;
        } else if (xnthread_test_state(next, XNROOT)) {
                if (sched->lflags & XNHTICK)
-                       xnintr_host_tick(sched);
+                       xntimer_host_tick(sched);
                if (sched->lflags & XNHDEFER)
                        xnclock_program_shot(&nkclock, sched);
                enter_root(next);
@@ -883,8 +885,13 @@ reschedule:
         * over a shadow thread, caused by a call to xnthread_relax().
         * In such a case, we are running over the regular schedule()
         * tail code, so we have to skip our tail code.
+        *
+        * CAUTION: TLF_HEAD remains set in the local flags until
+        * dovetail_leave_head() clears it, even though we have
+        * already switched to the root domain. Always use
+        * on_root_stage() to determine the current domain in context
+        * switch code.
         */
-       if (shadow && ipipe_root_p)
+       if (shadow && on_root_stage())
                goto shadow_epilogue;
 
        switched = 1;
@@ -906,7 +913,8 @@ out:
        return switched;
 
 shadow_epilogue:
-       __ipipe_complete_domain_migration();
+
+       dovetail_complete_domain_migration();
 
        XENO_BUG_ON(COBALT, xnthread_current() == NULL);
 
@@ -1060,19 +1068,16 @@ static spl_t vfile_schedstat_lock_s;
 
 static int vfile_schedstat_get_lock(struct xnvfile *vfile)
 {
-       int ret;
-
-       ret = xnintr_get_query_lock();
-       if (ret < 0)
-               return ret;
+       xnintr_list_lock();
        xnlock_get_irqsave(&nklock, vfile_schedstat_lock_s);
+
        return 0;
 }
 
 static void vfile_schedstat_put_lock(struct xnvfile *vfile)
 {
        xnlock_put_irqrestore(&nklock, vfile_schedstat_lock_s);
-       xnintr_put_query_lock();
+       xnintr_list_unlock();
 }
 
 static struct xnvfile_lock_ops vfile_schedstat_lockops = {
@@ -1081,7 +1086,6 @@ static struct xnvfile_lock_ops vfile_schedstat_lockops = {
 };
 
 struct vfile_schedstat_priv {
-       int irq;
        struct xnthread *curr;
        struct xnintr_iterator intr_it;
 };
@@ -1123,7 +1127,6 @@ static int vfile_schedstat_rewind(struct xnvfile_snapshot_iterator *it)
         * grouped under a pseudo-thread.
         */
        priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
-       priv->irq = 0;
        irqnr = xnintr_query_init(&priv->intr_it) * NR_CPUS;
 
        return irqnr + cobalt_nrthreads;
@@ -1139,11 +1142,10 @@ static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
        xnticks_t period;
        int ret;
 
+       /*
+        * When done with actual threads, scan interrupt descriptors.
+        */
        if (priv->curr == NULL)
-               /*
-                * We are done with actual threads, scan interrupt
-                * descriptors.
-                */
                goto scan_irqs;
 
        thread = priv->curr;
@@ -1183,15 +1185,13 @@ static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
        return 1;
 
 scan_irqs:
-       if (priv->irq >= IPIPE_NR_IRQS)
-               return 0;       /* All done. */
-
-       ret = xnintr_query_next(priv->irq, &priv->intr_it, p->name);
+       ret = xnintr_query_next(&priv->intr_it, p->name);
        if (ret) {
-               if (ret == -EAGAIN)
+               if (ret == -EAGAIN) {
                        xnvfile_touch(it->vfile); /* force rewind. */
-               priv->irq++;
-               return VFILE_SEQ_SKIP;
+                       return VFILE_SEQ_SKIP;
+               }
+               return 0;  /* Done. */
        }
 
        if (!xnsched_supported_cpu(priv->intr_it.cpu))
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 26ce90f..d64c4e6 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -248,14 +248,6 @@ void xnthread_init_shadow_tcb(struct xnthread *thread)
        struct xnarchtcb *tcb = xnthread_archtcb(thread);
        struct task_struct *p = current;
 
-       /*
-        * If the current task is a kthread, the pipeline will take
-        * the necessary steps to make the FPU usable in such
-        * context. The kernel already took care of this issue for
-        * userland tasks (e.g. setting up a clean backup area).
-        */
-       __ipipe_share_current(0);
-
        memset(tcb, 0, sizeof(*tcb));
        tcb->core.host_task = p;
        tcb->core.tsp = &p->thread;
@@ -1628,7 +1620,7 @@ int xnthread_join(struct xnthread *thread, bool uninterruptible)
 
        trace_cobalt_thread_join(thread);
 
-       if (ipipe_root_p) {
+       if (on_root_stage()) {
                if (xnthread_test_state(thread, XNJOINED)) {
                        ret = -EBUSY;
                        goto out;
@@ -1942,7 +1934,7 @@ int xnthread_harden(void)
 
        xnthread_clear_sync_window(thread, XNRELAX);
 
-       ret = __ipipe_migrate_head();
+       ret = dovetail_enter_head();
        if (ret) {
                xnthread_set_sync_window(thread, XNRELAX);
                return ret;
@@ -2065,7 +2057,7 @@ void xnthread_relax(int notify, int reason)
         * dropped by xnthread_suspend().
         */
        xnlock_get(&nklock);
-       set_task_state(p, p->state & ~TASK_NOWAKEUP);
+       set_task_state(p, p->state & ~TASK_STALL);
        xnthread_run_handler_stack(thread, relax_thread);
        xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
        splnone();
@@ -2074,11 +2066,11 @@ void xnthread_relax(int notify, int reason)
         * Basic sanity check after an expected transition to secondary
         * mode.
         */
-       XENO_WARN(COBALT, !ipipe_root_p,
+       XENO_WARN(COBALT, !on_root_stage(),
                  "xnthread_relax() failed for thread %s[%d]",
                  thread->name, xnthread_host_pid(thread));
 
-       __ipipe_reenter_root();
+       dovetail_leave_head();
 
        /* Account for secondary mode switch. */
        xnstat_counter_inc(&thread->stat.ssw);
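
The mode switch primitives now pair up as follows:

    /*
     *  xnthread_harden():  dovetail_enter_head()  root -> head stage
     *  xnthread_relax():   dovetail_leave_head()  head -> root stage
     *
     * with TASK_STALL replacing TASK_NOWAKEUP to keep the task off
     * the regular wakeup path while relaxing.
     */
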
@@ -2266,7 +2258,7 @@ void __xnthread_kick(struct xnthread *thread) /* nklock locked, irqs off */
         */
        if (thread != xnsched_current_thread() &&
            xnthread_test_state(thread, XNUSER))
-               ipipe_raise_mayday(p);
+               dovetail_send_mayday(p);
 }
 
 void xnthread_kick(struct xnthread *thread)
@@ -2407,13 +2399,12 @@ static inline void init_kthread_info(struct xnthread *thread)
 /**
  * @fn int xnthread_map(struct xnthread *thread, struct completion *done)
  * @internal
- * @brief Create a shadow thread context over a kernel task.
+ * @brief Create a shadow thread context over the current kernel task.
  *
- * This call maps a nucleus thread to the "current" Linux task running
- * in kernel space.  The priority and scheduling class of the
- * underlying Linux task are not affected; it is assumed that the
- * caller did set them appropriately before issuing the shadow mapping
- * request.
+ * This call maps a Cobalt thread to the current kthread.  The
+ * priority and scheduling class of the underlying task are not
+ * affected; it is assumed that the caller did set them appropriately
+ * before issuing the shadow mapping request.
  *
  * This call immediately moves the calling kernel thread to the
  * Xenomai domain.
@@ -2448,7 +2439,6 @@ static inline void init_kthread_info(struct xnthread *thread)
  */
 int xnthread_map(struct xnthread *thread, struct completion *done)
 {
-       struct task_struct *p = current;
        int ret;
        spl_t s;
 
@@ -2469,7 +2459,8 @@ int xnthread_map(struct xnthread *thread, struct completion *done)
        xnthread_set_state(thread, XNMAPPED);
        xndebug_shadow_init(thread);
        xnthread_run_handler(thread, map_thread);
-       ipipe_enable_notifier(p);
+       /* Enable dovetailing in the host kernel. */
+       dovetail_enable(0);
 
        /*
         * CAUTION: Soon after xnthread_init() has returned,
@@ -2517,7 +2508,7 @@ void xnthread_call_mayday(struct xnthread *thread, int reason)
        XENO_BUG_ON(COBALT, !xnthread_test_state(thread, XNUSER));
        xnthread_set_info(thread, XNKICKED);
        xnthread_signal(thread, SIGDEBUG, reason);
-       ipipe_raise_mayday(p);
+       dovetail_send_mayday(p);
 }
 EXPORT_SYMBOL_GPL(xnthread_call_mayday);
 
diff --git a/kernel/cobalt/timer.c b/kernel/cobalt/timer.c
index db6263d..8b7583b 100644
--- a/kernel/cobalt/timer.c
+++ b/kernel/cobalt/timer.c
@@ -17,9 +17,8 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  * 02111-1307, USA.
  */
-#include <linux/ipipe.h>
-#include <linux/ipipe_tickdev.h>
 #include <linux/sched.h>
+#include <linux/tick.h>
 #include <cobalt/kernel/sched.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/kernel/timer.h>
@@ -29,6 +28,105 @@
 #include <cobalt/kernel/arith.h>
 #include <trace/events/cobalt-core.h>
 
+static unsigned int synthetic_tick_irq;
+
+#ifdef CONFIG_XENO_OPT_STATS
+static inline xnstat_exectime_t *switch_timer_stats(struct xnsched *sched)
+{
+       struct xnirqstat *statp;
+       xnstat_exectime_t *prev;
+
+       statp = xnstat_percpu_data;
+       prev = xnstat_exectime_switch(sched, &statp->account);
+       xnstat_counter_inc(&statp->hits);
+
+       return prev;
+}
+
+#else  /* !CONFIG_XENO_OPT_STATS */
+static inline xnstat_exectime_t *switch_timer_stats(struct xnsched *sched)
+{
+       return NULL;
+}
+#endif /* CONFIG_XENO_OPT_STATS */
+
+void xntimer_host_tick(struct xnsched *sched) /* hw IRQs off. */
+{
+       /*
+        * A synthetic clock device is active on this CPU: make it
+        * tick asap when the host kernel resumes; this honours a
+        * previous set_next_event() request received from the kernel,
+        * which we carried out using our core timing services.
+        *
+        * Note that being asked to relay a host tick event implies
+        * that our synthetic device received a set_next_event()
+        * request in the first place, so we know for sure that we
+        * have a valid synthetic tick IRQ number.
+        *
+        * Also, installing or dismantling the synthetic tick device
+        * in tick_switch_head/root() is done in a way that guarantees
+        * race-free operation with respect to this issue.
+        */
+       sched->lflags &= ~XNHTICK;
+       ipipe_post_irq_root(synthetic_tick_irq);
+}
+
+/*
+ * This is our core timer handler. It has only two possible callers,
+ * each of which may only run on a CPU from the real-time set:
+ *
+ * - the HRTIMER_IPI handler; this IPI is directed to members of our
+ * real-time CPU set exclusively.
+ *
+ * - the real_clock_event_handler() routine. The IRQ pipeline
+ * guarantees that this handler always runs on a CPU which is a
+ * member of the CPU set passed to tick_switch_head() (i.e. our
+ * real-time CPU set).
+ */
+static void timer_handler(int irq)
+{
+       struct xnsched *sched = xnsched_current();
+       xnstat_exectime_t *prev;
+
+       XENO_BUG_ON(COBALT, !xnsched_supported_cpu(xnsched_cpu(sched)));
+
+       prev = switch_timer_stats(sched);
+
+       trace_cobalt_clock_entry(irq);
+
+       ++sched->inesting;
+       sched->lflags |= XNINIRQ;
+
+       xnlock_get(&nklock);
+       xnclock_tick(&nkclock);
+       xnlock_put(&nklock);
+
+       trace_cobalt_clock_exit(irq);
+       xnstat_exectime_switch(sched, prev);
+
+       if (--sched->inesting == 0) {
+               sched->lflags &= ~XNINIRQ;
+               xnsched_run();
+               sched = xnsched_current();
+       }
+       /*
+        * If the core clock interrupt preempted a real-time thread,
+        * any transition to the root thread has already triggered a
+        * host tick propagation from xnsched_run(), so at this point,
+        * we only need to propagate the host tick in case the
+        * interrupt preempted the root thread.
+        */
+       if ((sched->lflags & XNHTICK) &&
+           xnthread_test_state(sched->curr, XNROOT))
+               xntimer_host_tick(sched);
+}
+
+static void real_clock_event_handler(struct clock_event_device *ced)
+{
+       timer_handler(ced->irq);
+}
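
Neither tick_switch_head() nor the registration of the synthetic device appears in this patch. Purely as an illustration of the expected wiring, with an assumed signature for tick_switch_head():

    static int install_synthetic_tick(void)
    {
            struct clock_event_device *ced;
            int ret;

            /* Hand the tick devices of the real-time CPU set over to
               real_clock_event_handler(); hypothetical call. */
            ret = tick_switch_head(&xnsched_realtime_cpus,
                                   real_clock_event_handler, &ced);
            if (ret)
                    return ret;

            /* Host ticks will be relayed through this device. */
            ced->set_next_event = synthetic_clock_set_next_event;
            synthetic_tick_irq = ced->irq;

            return 0;
    }
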
+
 /**
  * @ingroup cobalt_core
  * @defgroup cobalt_core_timer Timer services
@@ -404,8 +502,8 @@ EXPORT_SYMBOL_GPL(xntimer_set_gravity);
 
 #ifdef CONFIG_XENO_OPT_STATS
 
-static void __xntimer_switch_tracking(struct xntimer *timer,
-                                     struct xnclock *newclock)
+static void switch_clock_tracking(struct xntimer *timer,
+                                 struct xnclock *newclock)
 {
        struct xnclock *oldclock = timer->tracker;
 
@@ -424,7 +522,7 @@ void xntimer_switch_tracking(struct xntimer *timer,
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-       __xntimer_switch_tracking(timer, newclock);
+       switch_clock_tracking(timer, newclock);
        xnlock_put_irqrestore(&nklock, s);
 }
 EXPORT_SYMBOL_GPL(xntimer_switch_tracking);
@@ -432,8 +530,8 @@ EXPORT_SYMBOL_GPL(xntimer_switch_tracking);
 #else
 
 static inline
-void __xntimer_switch_tracking(struct xntimer *timer,
-                              struct xnclock *newclock)
+void switch_clock_tracking(struct xntimer *timer,
+                          struct xnclock *newclock)
 { }
 
 #endif /* CONFIG_XENO_OPT_STATS */
@@ -450,7 +548,7 @@ static inline void __xntimer_set_clock(struct xntimer *timer,
        cpu = xnclock_get_default_cpu(newclock, xnsched_cpu(timer->sched));
        xntimer_migrate(timer, xnsched_struct(cpu));
 #endif
-       __xntimer_switch_tracking(timer, newclock);
+       switch_clock_tracking(timer, newclock);
 }
 
 /**
@@ -555,17 +653,23 @@ void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
 }
 EXPORT_SYMBOL_GPL(__xntimer_migrate);
 
+static irqreturn_t timer_ipi_handler(int irq, void *dev_id)
+{
+       timer_handler(irq);
+
+       return IRQ_HANDLED;
+}
+
 int xntimer_setup_ipi(void)
 {
-       return ipipe_request_irq(&xnsched_realtime_domain,
-                                IPIPE_HRTIMER_IPI,
-                                (ipipe_irq_handler_t)xnintr_core_clock_handler,
-                                NULL, NULL);
+       return request_irq(IPIPE_HRTIMER_IPI, timer_ipi_handler,
+                          IRQF_PIPELINED, "Cobalt timer IPI",
+                          &cobalt_pipeline.stage);
 }
 
 void xntimer_release_ipi(void)
 {
-       ipipe_free_irq(&xnsched_realtime_domain, IPIPE_HRTIMER_IPI);
+       free_irq(IPIPE_HRTIMER_IPI, &cobalt_pipeline.stage);
 }
 
 #endif /* CONFIG_SMP */
@@ -590,8 +694,8 @@ void xntimer_release_ipi(void)
 unsigned long long xntimer_get_overruns(struct xntimer *timer, xnticks_t now)
 {
        xnticks_t period = timer->interval;
-       xnsticks_t delta;
        unsigned long long overruns = 0;
+       xnsticks_t delta;
 
        delta = now - xntimer_pexpect(timer);
        if (unlikely(delta >= (xnsticks_t) period)) {
@@ -650,25 +754,8 @@ char *xntimer_format_time(xnticks_t ns, char *buf, size_t bufsz)
 }
 EXPORT_SYMBOL_GPL(xntimer_format_time);
 
-/**
- * @internal
- * @fn static int program_htick_shot(unsigned long delay, struct clock_event_device *cdev)
- *
- * @brief Program next host tick as a Xenomai timer event.
- *
- * Program the next shot for the host tick on the current CPU.
- * Emulation is done using a nucleus timer attached to the master
- * timebase.
- *
- * @param delay The time delta from the current date to the next tick,
- * expressed as a count of nanoseconds.
- *
- * @param cdev An pointer to the clock device which notifies us.
- *
- * @coretags{unrestricted}
- */
-static int program_htick_shot(unsigned long delay,
-                             struct clock_event_device *cdev)
+static int synthetic_clock_set_next_event(unsigned long delay_ns,
+                                         struct clock_event_device *ced)
 {
        struct xnsched *sched;
        int ret;
@@ -676,85 +763,18 @@ static int program_htick_shot(unsigned long delay,
 
        xnlock_get_irqsave(&nklock, s);
        sched = xnsched_current();
-       ret = xntimer_start(&sched->htimer, delay, XN_INFINITE, XN_RELATIVE);
+       ret = xntimer_start(&sched->htimer, delay_ns,
+                           XN_INFINITE, XN_RELATIVE);
        xnlock_put_irqrestore(&nklock, s);
 
        return ret ? -ETIME : 0;
 }
 
 /**
- * @internal
- * @fn void switch_htick_mode(enum clock_event_mode mode, struct clock_event_device *cdev)
- *
- * @brief Tick mode switch emulation callback.
- *
- * Changes the host tick mode for the tick device of the current CPU.
- *
- * @param mode The new mode to switch to. The possible values are:
- *
- * - CLOCK_EVT_MODE_ONESHOT, for a switch to oneshot mode.
- *
- * - CLOCK_EVT_MODE_PERIODIC, for a switch to periodic mode. The current
- * implementation for the generic clockevent layer Linux exhibits
- * should never downgrade from a oneshot to a periodic tick mode, so
- * this mode should not be encountered. This said, the associated code
- * is provided, basically for illustration purposes.
- *
- * - CLOCK_EVT_MODE_SHUTDOWN, indicates the removal of the current
- * tick device. Normally, the nucleus only interposes on tick devices
- * which should never be shut down, so this mode should not be
- * encountered.
- *
- * @param cdev An opaque pointer to the clock device which notifies us.
- *
- * @coretags{unrestricted}
- *
- * @note GENERIC_CLOCKEVENTS is required from the host kernel.
- */
-static void switch_htick_mode(enum clock_event_mode mode,
-                             struct clock_event_device *cdev)
-{
-       struct xnsched *sched;
-       xnticks_t tickval;
-       spl_t s;
-
-       if (mode == CLOCK_EVT_MODE_ONESHOT)
-               return;
-
-       xnlock_get_irqsave(&nklock, s);
-
-       sched = xnsched_current();
-
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-               tickval = 1000000000UL / HZ;
-               xntimer_start(&sched->htimer, tickval, tickval, XN_RELATIVE);
-               break;
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               xntimer_stop(&sched->htimer);
-               break;
-       default:
-               XENO_BUG(COBALT);
-       }
-
-       xnlock_put_irqrestore(&nklock, s);
-}
-
-/**
  * @fn int xntimer_grab_hardware(void)
  * @brief Grab the hardware timer on all real-time CPUs.
  *
- * xntimer_grab_hardware() grabs and tunes the hardware timer for all
- * real-time CPUs.
- *
- * Host tick emulation is performed for sharing the clock chip between
- * Linux and Xenomai.
- *
- * @return a positive value is returned on success, representing the
- * duration of a Linux periodic tick expressed as a count of
- * nanoseconds; zero should be returned when the Linux kernel does not
- * undergo periodic timing on the given CPU (e.g. oneshot
- * mode). Otherwise:
+ * @return Zero is returned on success, otherwise:
  *
  * - -EBUSY is returned if the hardware timer has already been
  * grabbed.  xntimer_release_hardware() must be issued before
@@ -767,56 +787,14 @@ static void switch_htick_mode(enum clock_event_mode mode,
  *
  * @coretags{secondary-only}
  */
-static int grab_hardware_timer(int cpu)
-{
-       int tickval, ret;
-
-       ret = ipipe_timer_start(xnintr_core_clock_handler,
-                               switch_htick_mode, program_htick_shot, cpu);
-       switch (ret) {
-       case CLOCK_EVT_MODE_PERIODIC:
-               /*
-                * Oneshot tick emulation callback won't be used, ask
-                * the caller to start an internal timer for emulating
-                * a periodic tick.
-                */
-               tickval = 1000000000UL / HZ;
-               break;
-
-       case CLOCK_EVT_MODE_ONESHOT:
-               /* oneshot tick emulation */
-               tickval = 1;
-               break;
-
-       case CLOCK_EVT_MODE_UNUSED:
-               /* we don't need to emulate the tick at all. */
-               tickval = 0;
-               break;
-
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               return -ENODEV;
-
-       default:
-               return ret;
-       }
-
-       return tickval;
-}
-
 int xntimer_grab_hardware(void)
 {
+#ifdef CONFIG_XENO_OPT_WATCHDOG
        struct xnsched *sched;
-       int ret, cpu, _cpu;
+       int cpu;
        spl_t s;
-
-#ifdef CONFIG_XENO_OPT_STATS
-       /*
-        * Only for statistical purpose, the timer interrupt is
-        * attached by xntimer_grab_hardware().
-        */
-       xnintr_init(&nktimer, "[timer]",
-                   per_cpu(ipipe_percpu.hrtimer_irq, 0), NULL, NULL, 0);
-#endif /* CONFIG_XENO_OPT_STATS */
+#endif
+       int ret;
 
        nkclock.wallclock_offset =
                xnclock_get_host_time() - xnclock_read_monotonic(&nkclock);
@@ -825,69 +803,44 @@ int xntimer_grab_hardware(void)
        if (ret)
                return ret;
 
-       for_each_realtime_cpu(cpu) {
-               ret = grab_hardware_timer(cpu);
-               if (ret < 0)
-                       goto fail;
-
-               xnlock_get_irqsave(&nklock, s);
-
-               /*
-                * If the current tick device for the target CPU is
-                * periodic, we won't be called back for host tick
-                * emulation. Therefore, we need to start a periodic
-                * nucleus timer which will emulate the ticking for
-                * that CPU, since we are going to hijack the hw clock
-                * chip for managing our own system timer.
-                *
-                * CAUTION:
-                *
-                * - nucleus timers may be started only _after_ the hw
-                * timer has been set up for the target CPU through a
-                * call to xntimer_grab_hardware().
-                *
-                * - we don't compensate for the elapsed portion of
-                * the current host tick, since we cannot get this
-                * information easily for all CPUs except the current
-                * one, and also because of the declining relevance of
-                * the jiffies clocksource anyway.
-                *
-                * - we must not hold the nklock across calls to
-                * xntimer_grab_hardware().
-                */
+#ifdef CONFIG_XENO_OPT_STATS
+       xnintr_init(&nktimer, "Core timer", -1, NULL, 0);
+#endif /* CONFIG_XENO_OPT_STATS */
 
-               sched = xnsched_struct(cpu);
-               /* Set up timer with host tick period if valid. */
-               if (ret > 1)
-                       xntimer_start(&sched->htimer, ret, ret, XN_RELATIVE);
-               else if (ret == 1)
-                       xntimer_start(&sched->htimer, 0, 0, XN_RELATIVE);
+       /*
+        * CAUTION:
+        *
+        * - core timers may be started only _after_ the hw
+        * timer has been set up for the target CPU through a
+        * call to xntimer_grab_hardware().
+        *
+        * - do not hold any lock across calls to
+        * xntimer_grab_hardware().
+        *
+        * - upon success, all clock event devices handed over by
+        * tick_switch_head() to us are running in oneshot mode.
+        */
+       ret = tick_switch_head(&xnsched_realtime_cpus,
+                              real_clock_event_handler,
+                              synthetic_clock_set_next_event,
+                              &synthetic_tick_irq);
+       if (ret) {
+               xntimer_release_ipi();
+               return ret;
+       }
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
-               xntimer_start(&sched->wdtimer, 1000000000UL, 1000000000UL, XN_RELATIVE);
+       xnlock_get_irqsave(&nklock, s);
+       for_each_realtime_cpu(cpu) {
+               sched = xnsched_struct(cpu);
+               xntimer_start(&sched->wdtimer, 1000000000UL,
+                             1000000000UL, XN_RELATIVE);
                xnsched_reset_watchdog(sched);
-#endif
-               xnlock_put_irqrestore(&nklock, s);
        }
-
-       return 0;
-fail:
-       for_each_realtime_cpu(_cpu) {
-               if (_cpu == cpu)
-                       break;
-               xnlock_get_irqsave(&nklock, s);
-               sched = xnsched_struct(cpu);
-               xntimer_stop(&sched->htimer);
-#ifdef CONFIG_XENO_OPT_WATCHDOG
-               xntimer_stop(&sched->wdtimer);
+       xnlock_put_irqrestore(&nklock, s);
 #endif
-               xnlock_put_irqrestore(&nklock, s);
-               ipipe_timer_stop(_cpu);
-       }
-
-       xntimer_release_ipi();
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(xntimer_grab_hardware);
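
A caller-side sketch of the expected pairing of the two entry points, following the -EBUSY contract documented above (function names are hypothetical):

/* Sketch: how core init/cleanup code is expected to pair the
 * grab/release entry points. Callers run in secondary mode. */
static int cobalt_timers_init(void)
{
	int ret = xntimer_grab_hardware();
	if (ret)	/* e.g. -EBUSY if already grabbed */
		return ret;

	/* ... real-time services may now use the core clock ... */
	return 0;
}

static void cobalt_timers_cleanup(void)
{
	xntimer_release_hardware();	/* hands devices back to root */
}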
 
@@ -902,18 +855,8 @@ EXPORT_SYMBOL_GPL(xntimer_grab_hardware);
  */
 void xntimer_release_hardware(void)
 {
-       int cpu;
-
-       /*
-        * We must not hold the nklock while stopping the hardware
-        * timer, since this could cause deadlock situations to arise
-        * on SMP systems.
-        */
-       for_each_realtime_cpu(cpu)
-               ipipe_timer_stop(cpu);
-
+       tick_switch_root();     /* Keep lock free while switching. */
        xntimer_release_ipi();
-
 #ifdef CONFIG_XENO_OPT_STATS
        xnintr_destroy(&nktimer);
 #endif /* CONFIG_XENO_OPT_STATS */
diff --git a/kernel/cobalt/trace/cobalt-core.h b/kernel/cobalt/trace/cobalt-core.h
index d9875b9..4b39158 100644
--- a/kernel/cobalt/trace/cobalt-core.h
+++ b/kernel/cobalt/trace/cobalt-core.h
@@ -277,7 +277,7 @@ TRACE_EVENT(cobalt_thread_resume,
 );
 
 TRACE_EVENT(cobalt_thread_fault,
-       TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
+       TP_PROTO(struct xnthread *thread, struct dovetail_trap_data *td),
        TP_ARGS(thread, td),
 
        TP_STRUCT__entry(
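
With the trap descriptor renamed, callers fire the tracepoint the usual way generated by TRACE_EVENT(); a sketch follows, where the include path and the dovetail_trap_data layout are assumptions of this WIP:

/* Sketch: firing the renamed tracepoint from a fault path.
 * struct dovetail_trap_data replaces ipipe_trap_data here and is
 * treated as opaque; the include path is the in-tree trace header. */
#include "trace/cobalt-core.h"

static void report_thread_fault(struct xnthread *thread,
				struct dovetail_trap_data *td)
{
	trace_cobalt_thread_fault(thread, td);
}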
diff --git a/scripts/Kconfig.frag b/scripts/Kconfig.frag
index a952d88..f4ba1f0 100644
--- a/scripts/Kconfig.frag
+++ b/scripts/Kconfig.frag
@@ -1,8 +1,7 @@
 menuconfig XENOMAI
        depends on X86_TSC || !X86
        bool "Xenomai/cobalt"
-       select IPIPE
-       select IPIPE_WANT_APIREV_2
+       select DOVETAIL
        default y
        help
          Xenomai's Cobalt core is a real-time extension to the Linux
diff --git a/scripts/prepare-kernel.sh b/scripts/prepare-kernel.sh
index 6bb9f06..893585c 100755
--- a/scripts/prepare-kernel.sh
+++ b/scripts/prepare-kernel.sh
@@ -318,7 +318,7 @@ if test x$verbose = x1; then
 echo "Preparing kernel $linux_version$linux_EXTRAVERSION in $linux_tree..."
 fi
 
-if test -r $linux_tree/include/linux/ipipe.h; then
+if test -r $linux_tree/include/linux/irq_pipeline.h; then
     if test x$verbose = x1; then
     echo "I-pipe found - bypassing patch."
     fi
@@ -361,7 +361,7 @@ else
    cd $curdir
 fi
 
-if test \! -r $linux_tree/arch/$linux_arch/include/asm/ipipe.h; then
+if test \! -r $linux_tree/arch/$linux_arch/include/asm/irq_pipeline.h; then
    echo "$me: $linux_tree has no I-pipe support for $linux_arch" >&2
    exit 2
 fi

