Module: xenomai-3
Branch: next
Commit: daa824592f21fc102fd7edb5aa036412d49f6b2e
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=daa824592f21fc102fd7edb5aa036412d49f6b2e

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Oct 19 19:15:38 2014 +0200

cobalt: fix 32/64bit code neutrality issues

---
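
The recurring pattern in the UAPI changes below is the removal of unions that
overlaid a kernel pointer with a 32-bit offset in the shadow structures shared
with userland; their size and layout followed the pointer width, so a 32-bit
libcobalt could not interoperate with a 64-bit kernel. The shadows now carry a
plain __u32 offset into the Cobalt umm heap, computed with cobalt_umm_offset()
on the kernel side. As a rough sketch only (the helper and variable names are
illustrative, not part of the patch), userland would turn such an offset back
into a pointer against its own mapping of that heap:

	/* Sketch of the offset-based scheme; names are illustrative only. */
	#include <stdint.h>

	struct cobalt_mutex_state;  /* layout in include/cobalt/uapi/mutex.h */

	/*
	 * The old shadow carried
	 *     union { __u32 dat_offset; struct mutex_dat *dat; };
	 * which is 4 bytes on ILP32 but 8 bytes on LP64. Storing the offset
	 * alone keeps the shadow layout identical on both ABIs.
	 */
	static inline struct cobalt_mutex_state *
	mutex_state_from_offset(void *umm_base, uint32_t state_offset)
	{
		/* umm_base: user mapping of the Cobalt umm heap (hypothetical). */
		return (struct cobalt_mutex_state *)
			((char *)umm_base + state_offset);
	}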

 include/boilerplate/compiler.h                     |    4 +
 include/cobalt/kernel/ppd.h                        |    2 +-
 include/cobalt/kernel/rtdm/Makefile.am             |    1 +
 include/cobalt/kernel/rtdm/compat.h                |   73 ++
 include/cobalt/kernel/rtdm/fd.h                    |    4 +-
 include/cobalt/uapi/cond.h                         |   10 +-
 include/cobalt/uapi/event.h                        |    9 +-
 include/cobalt/uapi/kernel/vdso.h                  |   11 +-
 include/cobalt/uapi/monitor.h                      |    9 +-
 include/cobalt/uapi/mutex.h                        |    7 +-
 include/cobalt/uapi/sem.h                          |    4 +-
 kernel/cobalt/arch/arm/mayday.c                    |   26 +-
 kernel/cobalt/arch/blackfin/mayday.c               |   26 +-
 kernel/cobalt/arch/nios2/mayday.c                  |   26 +-
 kernel/cobalt/arch/powerpc/mayday.c                |   26 +-
 kernel/cobalt/arch/sh/mayday.c                     |   26 +-
 kernel/cobalt/arch/x86/mayday.c                    |   51 +-
 kernel/cobalt/include/asm-generic/xenomai/mayday.h |    6 +-
 kernel/cobalt/posix/clock.c                        |    4 +-
 kernel/cobalt/posix/cond.c                         |   12 +-
 kernel/cobalt/posix/event.c                        |   52 +-
 kernel/cobalt/posix/event.h                        |    2 +-
 kernel/cobalt/posix/io.h                           |    2 +-
 kernel/cobalt/posix/monitor.c                      |   56 +-
 kernel/cobalt/posix/monitor.h                      |    2 +-
 kernel/cobalt/posix/mutex.c                        |   32 +-
 kernel/cobalt/posix/nsem.c                         |   54 +-
 kernel/cobalt/posix/process.c                      |   70 +-
 kernel/cobalt/posix/sched.c                        |   29 +-
 kernel/cobalt/posix/sched.h                        |    2 +-
 kernel/cobalt/posix/sem.c                          |   42 +-
 kernel/cobalt/posix/sem.h                          |   11 +-
 kernel/cobalt/posix/syscall.c                      |  776 ++++++++++----------
 kernel/cobalt/posix/thread.c                       |   38 +-
 kernel/cobalt/posix/thread.h                       |   24 +-
 lib/cobalt/clock.c                                 |    4 +-
 lib/cobalt/cond.c                                  |   32 +-
 lib/cobalt/current.h                               |    4 +-
 lib/cobalt/internal.c                              |  104 ++-
 lib/cobalt/internal.h                              |   10 +-
 lib/cobalt/mutex.c                                 |   75 +-
 lib/cobalt/semaphore.c                             |   45 +-
 testsuite/clocktest/clocktest.c                    |   14 +-
 43 files changed, 1008 insertions(+), 809 deletions(-)

diff --git a/include/boilerplate/compiler.h b/include/boilerplate/compiler.h
index b52a585..e27d165 100644
--- a/include/boilerplate/compiler.h
+++ b/include/boilerplate/compiler.h
@@ -50,4 +50,8 @@
 #define __maybe_unused __attribute__((__unused__))
 #endif
 
+#ifndef __aligned
+#define __aligned(__n) __attribute__((aligned (__n)))
+#endif
+
 #endif /* _BOILERPLATE_COMPILER_H */
diff --git a/include/cobalt/kernel/ppd.h b/include/cobalt/kernel/ppd.h
index 4d42427..d5cfc69 100644
--- a/include/cobalt/kernel/ppd.h
+++ b/include/cobalt/kernel/ppd.h
@@ -32,7 +32,7 @@ struct cobalt_umm {
 
 struct cobalt_ppd {
        struct cobalt_umm umm;
-       unsigned long mayday_addr;
+       unsigned long mayday_tramp;
        atomic_t refcnt;
        char *exe_path;
        struct rb_root fds;
diff --git a/include/cobalt/kernel/rtdm/Makefile.am b/include/cobalt/kernel/rtdm/Makefile.am
index 6279168..2c04cac 100644
--- a/include/cobalt/kernel/rtdm/Makefile.am
+++ b/include/cobalt/kernel/rtdm/Makefile.am
@@ -3,6 +3,7 @@ noinst_HEADERS =        \
        autotune.h      \
        can.h           \
        cobalt.h        \
+       compat.h        \
        driver.h        \
        fd.h            \
        ipc.h           \
diff --git a/include/cobalt/kernel/rtdm/compat.h b/include/cobalt/kernel/rtdm/compat.h
new file mode 100644
index 0000000..fdf493f
--- /dev/null
+++ b/include/cobalt/kernel/rtdm/compat.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <r...@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COMPAT_H
+#define _COBALT_RTDM_COMPAT_H
+
+#ifdef CONFIG_COMPAT
+
+#include <linux/compat.h>
+#include <net/compat.h>
+
+struct compat_rtdm_getsockopt_args {
+       int level;
+       int optname;
+       compat_uptr_t optval;
+       compat_uptr_t optlen;
+};
+
+struct compat_rtdm_setsockopt_args {
+       int level;
+       int optname;
+       const compat_uptr_t optval;
+       socklen_t optlen;
+};
+
+struct compat_rtdm_getsockaddr_args {
+       compat_uptr_t addr;
+       compat_uptr_t addrlen;
+};
+
+struct compat_rtdm_setsockaddr_args {
+       const compat_uptr_t addr;
+       socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT_COMPAT       _IOW(RTIOC_TYPE_COMMON, 0x20,   \
+                                            struct compat_rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT_COMPAT       _IOW(RTIOC_TYPE_COMMON, 0x21,   \
+                                            struct compat_rtdm_setsockopt_args)
+#define _RTIOC_BIND_COMPAT             _IOW(RTIOC_TYPE_COMMON, 0x22,   \
+                                            struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT_COMPAT          _IOW(RTIOC_TYPE_COMMON, 0x23,   \
+                                            struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_ACCEPT_COMPAT           _IOW(RTIOC_TYPE_COMMON, 0x25,   \
+                                            struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME_COMPAT      _IOW(RTIOC_TYPE_COMMON, 0x26,   \
+                                            struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME_COMPAT      _IOW(RTIOC_TYPE_COMMON, 0x27,   \
+                                            struct compat_rtdm_getsockaddr_args)
+
+#define __COMPAT_CASE(__op)            : case __op
+
+#else  /* !CONFIG_COMPAT */
+
+#define __COMPAT_CASE(__op)
+
+#endif /* !CONFIG_COMPAT */
+
+#endif /* !_COBALT_RTDM_COMPAT_H */
diff --git a/include/cobalt/kernel/rtdm/fd.h b/include/cobalt/kernel/rtdm/fd.h
index 51609b3..ccf0332 100644
--- a/include/cobalt/kernel/rtdm/fd.h
+++ b/include/cobalt/kernel/rtdm/fd.h
@@ -331,12 +331,12 @@ static inline int rtdm_fd_flags(const struct rtdm_fd *fd)
 }
 
 #ifdef CONFIG_COMPAT
-static inline int rtdm_fd_compat(const struct rtdm_fd *fd)
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
 {
        return fd->compat;
 }
 #else
-static inline int rtdm_fd_compat(const struct rtdm_fd *fd)
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
 {
        return 0;
 }
diff --git a/include/cobalt/uapi/cond.h b/include/cobalt/uapi/cond.h
index a6ff148..b1106c7 100644
--- a/include/cobalt/uapi/cond.h
+++ b/include/cobalt/uapi/cond.h
@@ -24,21 +24,15 @@
 
 struct cobalt_cond_state {
        __u32 pending_signals;
-       union {
-               __u32 mutex_datp_offset;
-               struct mutex_dat *mutex_datp;
-       };
+       __u32 mutex_state_offset;
 };
 
 union cobalt_cond_union {
        pthread_cond_t native_cond;
        struct cobalt_cond_shadow {
                __u32 magic;
+               __u32 state_offset;
                xnhandle_t handle;
-               union {
-                       __u32 state_offset;
-                       struct cobalt_cond_state *state;
-               };
        } shadow_cond;
 };
 
diff --git a/include/cobalt/uapi/event.h b/include/cobalt/uapi/event.h
index fa7d5fd..8710e8e 100644
--- a/include/cobalt/uapi/event.h
+++ b/include/cobalt/uapi/event.h
@@ -20,7 +20,7 @@
 
 #include <cobalt/uapi/kernel/types.h>
 
-struct cobalt_event_data {
+struct cobalt_event_state {
        __u32 value;
        __u32 flags;
 #define COBALT_EVENT_PENDED  0x1
@@ -39,12 +39,9 @@ struct cobalt_event;
 #define COBALT_EVENT_ANY  0x1
 
 struct cobalt_event_shadow {
-       xnhandle_t handle;
-       union {
-               struct cobalt_event_data *data;
-               __u32 data_offset;
-       } u;
+       __u32 state_offset;
        __u32 flags;
+       xnhandle_t handle;
 };
 
 struct cobalt_event_info {
diff --git a/include/cobalt/uapi/kernel/vdso.h b/include/cobalt/uapi/kernel/vdso.h
index d2ef159..396594b 100644
--- a/include/cobalt/uapi/kernel/vdso.h
+++ b/include/cobalt/uapi/kernel/vdso.h
@@ -21,15 +21,16 @@
 #include <cobalt/uapi/kernel/urw.h>
 
 struct xnvdso_hostrt_data {
-       __u16 live;
-       urw_t lock;
-       __u64 wall_time_sec;
-       __u32 wall_time_nsec;
-       struct timespec wall_to_monotonic;
+       __u64 wall_sec;
+       __u64 wtom_sec;
        __u64 cycle_last;
        __u64 mask;
+       __u32 wall_nsec;
+       __u32 wtom_nsec;
        __u32 mult;
        __u32 shift;
+       __u32 live;
+       urw_t lock;
 };
 
 /*
diff --git a/include/cobalt/uapi/monitor.h b/include/cobalt/uapi/monitor.h
index 07d2ebe..6e54daf 100644
--- a/include/cobalt/uapi/monitor.h
+++ b/include/cobalt/uapi/monitor.h
@@ -20,7 +20,7 @@
 
 #include <cobalt/uapi/kernel/types.h>
 
-struct cobalt_monitor_data {
+struct cobalt_monitor_state {
        atomic_t owner;
        __u32 flags;
 #define COBALT_MONITOR_GRANTED    0x01
@@ -33,12 +33,9 @@ struct cobalt_monitor_data {
 struct cobalt_monitor;
 
 struct cobalt_monitor_shadow {
-       xnhandle_t handle;
-       union {
-               struct cobalt_monitor_data *data;
-               __u32 data_offset;
-       } u;
+       __u32 state_offset;
        __u32 flags;
+       xnhandle_t handle;
 #define COBALT_MONITOR_SHARED     0x1
 #define COBALT_MONITOR_WAITGRANT  0x0
 #define COBALT_MONITOR_WAITDRAIN  0x1
diff --git a/include/cobalt/uapi/mutex.h b/include/cobalt/uapi/mutex.h
index 383ad91..5b38783 100644
--- a/include/cobalt/uapi/mutex.h
+++ b/include/cobalt/uapi/mutex.h
@@ -22,7 +22,7 @@
 
 #define COBALT_MUTEX_MAGIC  0x86860303
 
-struct mutex_dat {
+struct cobalt_mutex_state {
        atomic_t owner;
        __u32 flags;
 #define COBALT_MUTEX_COND_SIGNAL 0x00000001
@@ -34,11 +34,8 @@ union cobalt_mutex_union {
        struct cobalt_mutex_shadow {
                __u32 magic;
                __u32 lockcnt;
+               __u32 state_offset;
                xnhandle_t handle;
-               union {
-                       __u32 dat_offset;
-                       struct mutex_dat *dat;
-               };
                struct cobalt_mutexattr attr;
        } shadow_mutex;
 };
diff --git a/include/cobalt/uapi/sem.h b/include/cobalt/uapi/sem.h
index c1700b1..01a9b55 100644
--- a/include/cobalt/uapi/sem.h
+++ b/include/cobalt/uapi/sem.h
@@ -25,7 +25,7 @@
 
 struct cobalt_sem;
 
-struct sem_dat {
+struct cobalt_sem_state {
        atomic_t value;
        __u32 flags;
 };
@@ -34,7 +34,7 @@ union cobalt_sem_union {
        sem_t native_sem;
        struct cobalt_sem_shadow {
                __u32 magic;
-               __s32 datp_offset;
+               __s32 state_offset;
                xnhandle_t handle;
        } shadow_sem;
 };
diff --git a/kernel/cobalt/arch/arm/mayday.c b/kernel/cobalt/arch/arm/mayday.c
index 947e16b..20e4559 100644
--- a/kernel/cobalt/arch/arm/mayday.c
+++ b/kernel/cobalt/arch/arm/mayday.c
@@ -18,12 +18,15 @@
  */
 #include <linux/types.h>
 #include <linux/ipipe.h>
+#include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
 
-void xnarch_setup_mayday_page(void *page)
+static void *mayday;
+
+static inline void setup_mayday(void *page)
 {
        /*
         * We want this code to appear at the top of the MAYDAY page:
@@ -93,6 +96,27 @@ void xnarch_setup_mayday_page(void *page)
        flush_dcache_page(vmalloc_to_page(page));
 }
 
+int xnarch_init_mayday(void)
+{
+       mayday = vmalloc(PAGE_SIZE);
+       if (mayday == NULL)
+               return -ENOMEM;
+
+       setup_mayday(mayday);
+
+       return 0;
+}
+
+void xnarch_cleanup_mayday(void)
+{
+       vfree(mayday);
+}
+
+void *xnarch_get_mayday_page(void)
+{
+       return mayday;
+}
+
 void xnarch_handle_mayday(struct xnarchtcb *tcb, struct pt_regs *regs,
                          unsigned long tramp)
 {
diff --git a/kernel/cobalt/arch/blackfin/mayday.c b/kernel/cobalt/arch/blackfin/mayday.c
index df2fda3..bd76fee 100644
--- a/kernel/cobalt/arch/blackfin/mayday.c
+++ b/kernel/cobalt/arch/blackfin/mayday.c
@@ -18,13 +18,16 @@
  */
 #include <linux/types.h>
 #include <linux/ipipe.h>
+#include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
 #include <asm/bug.h>
 
-void xnarch_setup_mayday_page(void *page)
+static void *mayday;
+
+static inline void setup_mayday(void *page)
 {
        /*
         * We want this code to appear at the top of the MAYDAY page:
@@ -70,6 +73,27 @@ void xnarch_setup_mayday_page(void *page)
                           (unsigned long)page + sizeof(code));
 }
 
+int xnarch_init_mayday(void)
+{
+       mayday = vmalloc(PAGE_SIZE);
+       if (mayday == NULL)
+               return -ENOMEM;
+
+       setup_mayday(mayday);
+
+       return 0;
+}
+
+void xnarch_cleanup_mayday(void)
+{
+       vfree(mayday);
+}
+
+void *xnarch_get_mayday_page(void)
+{
+       return mayday;
+}
+
 void xnarch_handle_mayday(struct xnarchtcb *tcb,
                          struct pt_regs *regs,
                          unsigned long tramp)
diff --git a/kernel/cobalt/arch/nios2/mayday.c b/kernel/cobalt/arch/nios2/mayday.c
index 9b5cddf..15775f3 100644
--- a/kernel/cobalt/arch/nios2/mayday.c
+++ b/kernel/cobalt/arch/nios2/mayday.c
@@ -18,12 +18,15 @@
  */
 #include <linux/types.h>
 #include <linux/ipipe.h>
+#include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
 
-void xnarch_setup_mayday_page(void *page)
+static void *mayday;
+
+static inline void setup_mayday(void *page)
 {
        /*
         * We want this code to appear at the top of the MAYDAY page:
@@ -54,6 +57,27 @@ void xnarch_setup_mayday_page(void *page)
                           (unsigned long)page + sizeof(code));
 }
 
+int xnarch_init_mayday(void)
+{
+       mayday = vmalloc(PAGE_SIZE);
+       if (mayday == NULL)
+               return -ENOMEM;
+
+       setup_mayday(mayday);
+
+       return 0;
+}
+
+void xnarch_cleanup_mayday(void)
+{
+       vfree(mayday);
+}
+
+void *xnarch_get_mayday_page(void)
+{
+       return mayday;
+}
+
 void xnarch_handle_mayday(struct xnarchtcb *tcb,
                          struct pt_regs *regs, unsigned long tramp)
 {
diff --git a/kernel/cobalt/arch/powerpc/mayday.c b/kernel/cobalt/arch/powerpc/mayday.c
index 0f4785c..254f9ab 100644
--- a/kernel/cobalt/arch/powerpc/mayday.c
+++ b/kernel/cobalt/arch/powerpc/mayday.c
@@ -19,12 +19,15 @@
 
 #include <linux/types.h>
 #include <linux/ipipe.h>
+#include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
 
-void xnarch_setup_mayday_page(void *page)
+static void *mayday;
+
+static inline void setup_mayday(void *page)
 {
        /*
         * We want this code to appear at the top of the MAYDAY page:
@@ -50,6 +53,27 @@ void xnarch_setup_mayday_page(void *page)
                           (unsigned long)page + sizeof(insn));
 }
 
+int xnarch_init_mayday(void)
+{
+       mayday = vmalloc(PAGE_SIZE);
+       if (mayday == NULL)
+               return -ENOMEM;
+
+       setup_mayday(mayday);
+
+       return 0;
+}
+
+void xnarch_cleanup_mayday(void)
+{
+       vfree(mayday);
+}
+
+void *xnarch_get_mayday_page(void)
+{
+       return mayday;
+}
+
 void xnarch_handle_mayday(struct xnarchtcb *tcb,
                          struct pt_regs *regs,
                          unsigned long tramp)
diff --git a/kernel/cobalt/arch/sh/mayday.c b/kernel/cobalt/arch/sh/mayday.c
index a753f33..a138223 100644
--- a/kernel/cobalt/arch/sh/mayday.c
+++ b/kernel/cobalt/arch/sh/mayday.c
@@ -19,13 +19,16 @@
 
 #include <linux/types.h>
 #include <linux/ipipe.h>
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
 
-void xnarch_setup_mayday_page(void *page)
+static void *mayday;
+
+static inline void setup_mayday(void *page)
 {
        u16 insn[11];
 
@@ -60,6 +63,27 @@ void xnarch_setup_mayday_page(void *page)
        flush_dcache_page(vmalloc_to_page(page));
 }
 
+int xnarch_init_mayday(void)
+{
+       mayday = vmalloc(PAGE_SIZE);
+       if (mayday == NULL)
+               return -ENOMEM;
+
+       setup_mayday(mayday);
+
+       return 0;
+}
+
+void xnarch_cleanup_mayday(void)
+{
+       vfree(mayday);
+}
+
+void *xnarch_get_mayday_page(void)
+{
+       return mayday;
+}
+
 void xnarch_handle_mayday(struct xnarchtcb *tcb,
                          struct pt_regs *regs, unsigned long tramp)
 {
diff --git a/kernel/cobalt/arch/x86/mayday.c b/kernel/cobalt/arch/x86/mayday.c
index cfdf5ca..de52763 100644
--- a/kernel/cobalt/arch/x86/mayday.c
+++ b/kernel/cobalt/arch/x86/mayday.c
@@ -19,13 +19,17 @@
 
 #include <linux/types.h>
 #include <linux/ipipe.h>
+#include <linux/vmalloc.h>
 #include <cobalt/kernel/thread.h>
 #include <cobalt/uapi/syscall.h>
 #include <asm/ptrace.h>
 
-#ifdef CONFIG_X86_32
+static void *mayday;
+#ifdef CONFIG_COMPAT
+static void *mayday_compat;
+#endif
 
-void xnarch_setup_mayday_page(void *page)
+static inline void setup_mayday32(void *page)
 {
        /*
         * We want this code to appear at the top of the MAYDAY page:
@@ -58,9 +62,7 @@ void xnarch_setup_mayday_page(void *page)
        /* no cache flush required. */
 }
 
-#else /* CONFIG_X86_64 */
-
-void xnarch_setup_mayday_page(void *page)
+static inline void setup_mayday64(void *page)
 {
        /*
         * We want this code to appear at the top of the MAYDAY page:
@@ -93,7 +95,44 @@ void xnarch_setup_mayday_page(void *page)
        /* no cache flush required. */
 }
 
-#endif /* CONFIG_X86_64 */
+int xnarch_init_mayday(void)
+{
+       mayday = vmalloc(PAGE_SIZE);
+       if (mayday == NULL)
+               return -ENOMEM;
+
+#ifdef CONFIG_X86_32
+       setup_mayday32(mayday);
+#else
+       setup_mayday64(mayday);
+#ifdef CONFIG_COMPAT
+       mayday_compat = vmalloc(PAGE_SIZE);
+       if (mayday_compat == NULL) {
+               vfree(mayday);
+               return -ENOMEM;
+       }
+       setup_mayday32(mayday_compat);
+#endif
+#endif
+       return 0;
+}
+
+void xnarch_cleanup_mayday(void)
+{
+       vfree(mayday);
+#ifdef CONFIG_COMPAT
+       vfree(mayday_compat);
+#endif
+}
+
+void *xnarch_get_mayday_page(void)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_COMPAT)
+       return mayday;
+#else
+       return test_thread_flag(TIF_IA32) ? mayday_compat : mayday;
+#endif
+}
 
 void xnarch_handle_mayday(struct xnarchtcb *tcb, struct pt_regs *regs,
                          unsigned long tramp)
diff --git a/kernel/cobalt/include/asm-generic/xenomai/mayday.h b/kernel/cobalt/include/asm-generic/xenomai/mayday.h
index da1a48b..67f6983 100644
--- a/kernel/cobalt/include/asm-generic/xenomai/mayday.h
+++ b/kernel/cobalt/include/asm-generic/xenomai/mayday.h
@@ -23,7 +23,11 @@ struct xnarchtcb;
 struct task_struct;
 struct pt_regs;
 
-void xnarch_setup_mayday_page(void *page);
+int xnarch_init_mayday(void);
+
+void xnarch_cleanup_mayday(void);
+
+void *xnarch_get_mayday_page(void);
 
 void xnarch_handle_mayday(struct xnarchtcb *tcb,
                          struct pt_regs *regs,
diff --git a/kernel/cobalt/posix/clock.c b/kernel/cobalt/posix/clock.c
index dbd7132..48de8e8 100644
--- a/kernel/cobalt/posix/clock.c
+++ b/kernel/cobalt/posix/clock.c
@@ -60,8 +60,8 @@ static int do_clock_host_realtime(struct timespec *tp)
                mask = hostrt_data->mask;
                mult = hostrt_data->mult;
                shift = hostrt_data->shift;
-               tp->tv_sec = hostrt_data->wall_time_sec;
-               nsec = hostrt_data->wall_time_nsec;
+               tp->tv_sec = hostrt_data->wall_sec;
+               nsec = hostrt_data->wall_nsec;
        }
 
        /*
diff --git a/kernel/cobalt/posix/cond.c b/kernel/cobalt/posix/cond.c
index f2fd7bd..a02800b 100644
--- a/kernel/cobalt/posix/cond.c
+++ b/kernel/cobalt/posix/cond.c
@@ -72,7 +72,7 @@ pthread_cond_init(struct cobalt_cond_shadow *cnd, const struct cobalt_condattr *
        }
        cond->state = state;
        state->pending_signals = 0;
-       state->mutex_datp = (struct mutex_dat *)~0UL;
+       state->mutex_state_offset = ~0U;
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -332,11 +332,11 @@ int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
        struct xnthread *cur = xnthread_current();
        struct cobalt_cond *cond;
        struct cobalt_mutex *mx;
-       struct mutex_dat *datp;
        struct us_cond_data d;
        struct timespec ts;
        xnhandle_t handle;
        int err, perr = 0;
+       __u32 offset;
 
        handle = cobalt_get_handle_from_user(&u_cnd->handle);
        cond = xnregistry_lookup(handle, NULL);
@@ -345,8 +345,8 @@ int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
        mx = xnregistry_lookup(handle, NULL);
 
        if (cond->mutex == NULL) {
-               __xn_get_user(datp, &u_mx->dat);
-               cond->state->mutex_datp = datp;
+               __xn_get_user(offset, &u_mx->state_offset);
+               cond->state->mutex_state_offset = offset;
        }
 
        if (fetch_timeout) {
@@ -380,7 +380,7 @@ int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
        }
 
        if (cond->mutex == NULL)
-               cond->state->mutex_datp = (struct mutex_dat *)~0UL;
+               cond->state->mutex_state_offset = ~0U;
 
        if (err == -EINTR)
                __xn_put_user(d.err, u_err);
@@ -418,7 +418,7 @@ COBALT_SYSCALL(cond_wait_epilogue, primary,
        err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
 
        if (cond->mutex == NULL)
-               cond->state->mutex_datp = (struct mutex_dat *)~0UL;
+               cond->state->mutex_state_offset = ~0U;
 
        return err;
 }
diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
index 0d0594e..b0ec72f 100644
--- a/kernel/cobalt/posix/event.c
+++ b/kernel/cobalt/posix/event.c
@@ -51,12 +51,12 @@ COBALT_SYSCALL(event_init, current,
                     unsigned int value, int flags))
 {
        struct cobalt_event_shadow shadow;
-       struct cobalt_event_data *datp;
+       struct cobalt_event_state *state;
        int pshared, synflags, ret;
        struct cobalt_event *event;
        struct cobalt_kqueues *kq;
        struct cobalt_umm *umm;
-       unsigned long datoff;
+       unsigned long stateoff;
        spl_t s;
 
        trace_cobalt_event_init(u_event, value, flags);
@@ -67,20 +67,20 @@ COBALT_SYSCALL(event_init, current,
 
        pshared = (flags & COBALT_EVENT_SHARED) != 0;
        umm = &cobalt_ppd_get(pshared)->umm;
-       datp = cobalt_umm_alloc(umm, sizeof(*datp));
-       if (datp == NULL) {
+       state = cobalt_umm_alloc(umm, sizeof(*state));
+       if (state == NULL) {
                xnfree(event);
                return -EAGAIN;
        }
 
        ret = xnregistry_enter_anon(event, &event->handle);
        if (ret) {
-               cobalt_umm_free(umm, datp);
+               cobalt_umm_free(umm, state);
                xnfree(event);
                return ret;
        }
 
-       event->data = datp;
+       event->state = state;
        event->flags = flags;
        synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO;
        xnsynch_init(&event->synch, synflags, NULL);
@@ -93,14 +93,14 @@ COBALT_SYSCALL(event_init, current,
 
        event->magic = COBALT_EVENT_MAGIC;
 
-       datp->value = value;
-       datp->flags = 0;
-       datp->nwaiters = 0;
-       datoff = cobalt_umm_offset(umm, datp);
-       XENO_BUGON(COBALT, datoff != (__u32)datoff);
+       state->value = value;
+       state->flags = 0;
+       state->nwaiters = 0;
+       stateoff = cobalt_umm_offset(umm, state);
+       XENO_BUGON(COBALT, stateoff != (__u32)stateoff);
        shadow.flags = flags;
        shadow.handle = event->handle;
-       shadow.u.data_offset = (__u32)datoff;
+       shadow.state_offset = (__u32)stateoff;
 
        return __xn_safe_copy_to_user(u_event, &shadow, sizeof(*u_event));
 }
@@ -112,7 +112,7 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
 {
        unsigned int rbits = 0, testval;
        xnticks_t timeout = XN_INFINITE;
-       struct cobalt_event_data *datp;
+       struct cobalt_event_state *state;
        xntmode_t tmode = XN_RELATIVE;
        struct event_wait_context ewc;
        struct cobalt_event *event;
@@ -141,20 +141,20 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
                goto out;
        }
 
-       datp = event->data;
+       state = event->state;
 
        if (bits == 0) {
                /*
                 * Special case: we don't wait for any event, we only
                 * return the current flag group value.
                 */
-               rbits = datp->value;
+               rbits = state->value;
                goto out;
        }
 
-       datp->flags |= COBALT_EVENT_PENDED;
-       rbits = datp->value & bits;
-       testval = mode & COBALT_EVENT_ANY ? rbits : datp->value;
+       state->flags |= COBALT_EVENT_PENDED;
+       rbits = state->value & bits;
+       testval = mode & COBALT_EVENT_ANY ? rbits : state->value;
        if (rbits && rbits == testval)
                goto done;
 
@@ -166,20 +166,20 @@ int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
        ewc.value = bits;
        ewc.mode = mode;
        xnthread_prepare_wait(&ewc.wc);
-       datp->nwaiters++;
+       state->nwaiters++;
        info = xnsynch_sleep_on(&event->synch, timeout, tmode);
        if (info & XNRMID) {
                ret = -EIDRM;
                goto out;
        }
        if (info & (XNBREAK|XNTIMEO)) {
-               datp->nwaiters--;
+               state->nwaiters--;
                ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
        } else
                rbits = ewc.value;
 done:
        if (!xnsynch_pended_p(&event->synch))
-               datp->flags &= ~COBALT_EVENT_PENDED;
+               state->flags &= ~COBALT_EVENT_PENDED;
 out:
        xnlock_put_irqrestore(&nklock, s);
 
@@ -214,7 +214,7 @@ COBALT_SYSCALL(event_sync, current,
 {
        unsigned int bits, waitval, testval;
        struct xnthread_wait_context *wc;
-       struct cobalt_event_data *datp;
+       struct cobalt_event_state *state;
        struct event_wait_context *ewc;
        struct cobalt_event *event;
        struct xnthread *p, *tmp;
@@ -237,8 +237,8 @@ COBALT_SYSCALL(event_sync, current,
         * wake up any thread which could be satisfied by its current
         * value.
         */
-       datp = event->data;
-       bits = datp->value;
+       state = event->state;
+       bits = state->value;
 
        xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
                wc = xnthread_get_wait_context(p);
@@ -246,7 +246,7 @@ COBALT_SYSCALL(event_sync, current,
                waitval = ewc->value & bits;
                testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
                if (waitval && waitval == testval) {
-                       datp->nwaiters--;
+                       state->nwaiters--;
                        ewc->value = waitval;
                        xnsynch_wakeup_this_sleeper(&event->synch, p);
                }
@@ -274,7 +274,7 @@ static void event_destroy(struct cobalt_event *event,
        xnlock_put_irqrestore(&nklock, s);
 
        umm = &cobalt_ppd_get(pshared)->umm;
-       cobalt_umm_free(umm, event->data);
+       cobalt_umm_free(umm, event->state);
        xnfree(event);
        xnlock_get_irqsave(&nklock, s);
 }
diff --git a/kernel/cobalt/posix/event.h b/kernel/cobalt/posix/event.h
index 6dd9f79..5dc7d1e 100644
--- a/kernel/cobalt/posix/event.h
+++ b/kernel/cobalt/posix/event.h
@@ -29,7 +29,7 @@ struct cobalt_event {
        unsigned int magic;
        unsigned int value;
        struct xnsynch synch;
-       struct cobalt_event_data *data;
+       struct cobalt_event_state *state;
        struct cobalt_kqueues *owningq;
        struct list_head link;
        int flags;
diff --git a/kernel/cobalt/posix/io.h b/kernel/cobalt/posix/io.h
index d1fc5fd..b8f4ed4 100644
--- a/kernel/cobalt/posix/io.h
+++ b/kernel/cobalt/posix/io.h
@@ -54,7 +54,7 @@ COBALT_SYSCALL_DECL(sendmsg, ssize_t,
 
 COBALT_SYSCALL_DECL(mmap, int,
                    (int fd, struct _rtdm_mmap_request __user *u_rma,
-                    void __user **u_addrp));
+                    void __user * __user *u_addrp));
 
 COBALT_SYSCALL_DECL(select,
                    int, (int nfds,
diff --git a/kernel/cobalt/posix/monitor.c b/kernel/cobalt/posix/monitor.c
index fa05477..a413a19 100644
--- a/kernel/cobalt/posix/monitor.c
+++ b/kernel/cobalt/posix/monitor.c
@@ -53,12 +53,12 @@ COBALT_SYSCALL(monitor_init, current,
                     clockid_t clk_id, int flags))
 {
        struct cobalt_monitor_shadow shadow;
-       struct cobalt_monitor_data *datp;
+       struct cobalt_monitor_state *state;
        struct cobalt_monitor *mon;
        struct cobalt_kqueues *kq;
        int pshared, tmode, ret;
        struct cobalt_umm *umm;
-       unsigned long datoff;
+       unsigned long stateoff;
        spl_t s;
 
        tmode = clock_flag(TIMER_ABSTIME, clk_id);
@@ -71,21 +71,21 @@ COBALT_SYSCALL(monitor_init, current,
 
        pshared = (flags & COBALT_MONITOR_SHARED) != 0;
        umm = &cobalt_ppd_get(pshared)->umm;
-       datp = cobalt_umm_alloc(umm, sizeof(*datp));
-       if (datp == NULL) {
+       state = cobalt_umm_alloc(umm, sizeof(*state));
+       if (state == NULL) {
                xnfree(mon);
                return -EAGAIN;
        }
 
        ret = xnregistry_enter_anon(mon, &mon->handle);
        if (ret) {
-               cobalt_umm_free(umm, datp);
+               cobalt_umm_free(umm, state);
                xnfree(mon);
                return ret;
        }
 
-       mon->data = datp;
-       xnsynch_init(&mon->gate, XNSYNCH_PIP, &datp->owner);
+       mon->state = state;
+       xnsynch_init(&mon->gate, XNSYNCH_PIP, &state->owner);
        xnsynch_init(&mon->drain, XNSYNCH_PRIO, NULL);
        mon->flags = flags;
        mon->tmode = tmode;
@@ -99,12 +99,12 @@ COBALT_SYSCALL(monitor_init, current,
 
        mon->magic = COBALT_MONITOR_MAGIC;
 
-       datp->flags = 0;
-       datoff = cobalt_umm_offset(umm, datp);
-       XENO_BUGON(COBALT, datoff != (__u32)datoff);
+       state->flags = 0;
+       stateoff = cobalt_umm_offset(umm, state);
+       XENO_BUGON(COBALT, stateoff != (__u32)stateoff);
        shadow.flags = flags;
        shadow.handle = mon->handle;
-       shadow.u.data_offset = (__u32)datoff;
+       shadow.state_offset = (__u32)stateoff;
 
        return __xn_safe_copy_to_user(u_mon, &shadow, sizeof(*u_mon));
 }
@@ -142,7 +142,7 @@ static int monitor_enter(xnhandle_t handle, struct xnthread *curr)
                break;
        }
 
-       mon->data->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
+       mon->state->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
 
        return 0;
 }
@@ -167,7 +167,7 @@ COBALT_SYSCALL(monitor_enter, primary,
 /* nklock held, irqs off */
 static void monitor_wakeup(struct cobalt_monitor *mon)
 {
-       struct cobalt_monitor_data *datp = mon->data;
+       struct cobalt_monitor_state *state = mon->state;
        struct cobalt_thread *thread, *tmp;
        struct xnthread *p;
        int bcast;
@@ -177,8 +177,8 @@ static void monitor_wakeup(struct cobalt_monitor *mon)
         * that somebody is actually waiting for it, so we have to
         * check both conditions below.
         */
-       bcast = (datp->flags & COBALT_MONITOR_BROADCAST) != 0;
-       if ((datp->flags & COBALT_MONITOR_GRANTED) == 0 ||
+       bcast = (state->flags & COBALT_MONITOR_BROADCAST) != 0;
+       if ((state->flags & COBALT_MONITOR_GRANTED) == 0 ||
            list_empty(&mon->waiters))
                goto drain;
 
@@ -212,7 +212,7 @@ drain:
         * pending, either one or all, depending on the broadcast
         * flag.
         */
-       if ((datp->flags & COBALT_MONITOR_DRAINED) != 0 &&
+       if ((state->flags & COBALT_MONITOR_DRAINED) != 0 &&
            xnsynch_pended_p(&mon->drain)) {
                if (bcast)
                        xnsynch_flush(&mon->drain, 0);
@@ -221,7 +221,7 @@ drain:
        }
 
        if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
-               datp->flags &= ~COBALT_MONITOR_PENDED;
+               state->flags &= ~COBALT_MONITOR_PENDED;
 }
 
 int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
@@ -229,7 +229,7 @@ int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
                          int __user *u_ret)
 {
        struct cobalt_thread *curr = cobalt_current_thread();
-       struct cobalt_monitor_data *datp;
+       struct cobalt_monitor_state *state;
        xnticks_t timeout = XN_INFINITE;
        int ret = 0, opret = 0, info;
        struct cobalt_monitor *mon;
@@ -256,8 +256,8 @@ int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
         * it wants to sleep on: wake up satisfied waiters before
         * going to sleep.
         */
-       datp = mon->data;
-       if (datp->flags & COBALT_MONITOR_SIGNALED)
+       state = mon->state;
+       if (state->flags & COBALT_MONITOR_SIGNALED)
                monitor_wakeup(mon);
 
        /* Release the gate prior to waiting, all atomically. */
@@ -270,7 +270,7 @@ int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
                curr->threadbase.u_window->grant_value = 0;
                list_add_tail(&curr->monitor_link, &mon->waiters);
        }
-       datp->flags |= COBALT_MONITOR_PENDED;
+       state->flags |= COBALT_MONITOR_PENDED;
 
        tmode = ts ? mon->tmode : XN_RELATIVE;
        info = xnsynch_sleep_on(synch, timeout, tmode);
@@ -280,7 +280,7 @@ int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
                        list_del_init(&curr->monitor_link);
 
                if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
-                       datp->flags &= ~COBALT_MONITOR_PENDED;
+                       state->flags &= ~COBALT_MONITOR_PENDED;
 
                if (info & XNBREAK) {
                        opret = -EINTR;
@@ -334,7 +334,7 @@ COBALT_SYSCALL(monitor_sync, nonrestartable,
        mon = xnregistry_lookup(handle, NULL);
        if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
                ret = -EINVAL;
-       else if (mon->data->flags & COBALT_MONITOR_SIGNALED) {
+       else if (mon->state->flags & COBALT_MONITOR_SIGNALED) {
                monitor_wakeup(mon);
                xnsynch_release(&mon->gate, curr);
                xnsched_run();
@@ -364,7 +364,7 @@ COBALT_SYSCALL(monitor_exit, primary,
        if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
                ret = -EINVAL;
        else {
-               if (mon->data->flags & COBALT_MONITOR_SIGNALED)
+               if (mon->state->flags & COBALT_MONITOR_SIGNALED)
                        monitor_wakeup(mon);
 
                xnsynch_release(&mon->gate, curr);
@@ -392,14 +392,14 @@ static void monitor_destroy(struct cobalt_monitor *mon,
 
        pshared = (mon->flags & COBALT_MONITOR_SHARED) != 0;
        umm = &cobalt_ppd_get(pshared)->umm;
-       cobalt_umm_free(umm, mon->data);
+       cobalt_umm_free(umm, mon->state);
        xnfree(mon);
 }
 
 COBALT_SYSCALL(monitor_destroy, primary,
               int, (struct cobalt_monitor_shadow __user *u_mon))
 {
-       struct cobalt_monitor_data *datp;
+       struct cobalt_monitor_state *state;
        struct cobalt_monitor *mon;
        struct xnthread *curr;
        xnhandle_t handle;
@@ -417,8 +417,8 @@ COBALT_SYSCALL(monitor_destroy, primary,
                goto fail;
        }
 
-       datp = mon->data;
-       if ((datp->flags & COBALT_MONITOR_PENDED) != 0 ||
+       state = mon->state;
+       if ((state->flags & COBALT_MONITOR_PENDED) != 0 ||
            xnsynch_pended_p(&mon->drain) || !list_empty(&mon->waiters)) {
                ret = -EBUSY;
                goto fail;
diff --git a/kernel/cobalt/posix/monitor.h b/kernel/cobalt/posix/monitor.h
index ff84acf..c7c3ab9 100644
--- a/kernel/cobalt/posix/monitor.h
+++ b/kernel/cobalt/posix/monitor.h
@@ -29,7 +29,7 @@ struct cobalt_monitor {
        unsigned int magic;
        struct xnsynch gate;
        struct xnsynch drain;
-       struct cobalt_monitor_data *data;
+       struct cobalt_monitor_state *state;
        struct cobalt_kqueues *owningq;
        struct list_head link;
        struct list_head waiters;
diff --git a/kernel/cobalt/posix/mutex.c b/kernel/cobalt/posix/mutex.c
index e78ebde..62b8285 100644
--- a/kernel/cobalt/posix/mutex.c
+++ b/kernel/cobalt/posix/mutex.c
@@ -24,7 +24,7 @@
 
 static int cobalt_mutex_init_inner(struct cobalt_mutex_shadow *shadow,
                                   struct cobalt_mutex *mutex,
-                                  struct mutex_dat *datp,
+                                  struct cobalt_mutex_state *state,
                                   const struct cobalt_mutexattr *attr)
 {
        int synch_flags = XNSYNCH_PRIO | XNSYNCH_OWNER;
@@ -44,15 +44,15 @@ static int cobalt_mutex_init_inner(struct cobalt_mutex_shadow *shadow,
        shadow->lockcnt = 0;
 
        shadow->attr = *attr;
-       shadow->dat_offset = cobalt_umm_offset(&sys_ppd->umm, datp);
+       shadow->state_offset = cobalt_umm_offset(&sys_ppd->umm, state);
 
        if (attr->protocol == PTHREAD_PRIO_INHERIT)
                synch_flags |= XNSYNCH_PIP;
 
        mutex->magic = COBALT_MUTEX_MAGIC;
-       xnsynch_init(&mutex->synchbase, synch_flags, &datp->owner);
-       datp->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK
-                      ? COBALT_MUTEX_ERRORCHECK : 0);
+       xnsynch_init(&mutex->synchbase, synch_flags, &state->owner);
+       state->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK
+                       ? COBALT_MUTEX_ERRORCHECK : 0);
        mutex->attr = *attr;
        mutex->owningq = kq;
        INIT_LIST_HEAD(&mutex->conds);
@@ -121,8 +121,8 @@ int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
 int cobalt_mutex_release(struct xnthread *cur,
                         struct cobalt_mutex *mutex)
 {
+       struct cobalt_mutex_state *state;
        struct cobalt_cond *cond;
-       struct mutex_dat *datp;
        unsigned long flags;
        int need_resched;
 
@@ -133,11 +133,11 @@ int cobalt_mutex_release(struct xnthread *cur,
        if (mutex->owningq != cobalt_kqueues(mutex->attr.pshared))
                return -EPERM;
 #endif
-       datp = container_of(mutex->synchbase.fastlock, struct mutex_dat, owner);
-       flags = datp->flags;
+       state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+       flags = state->flags;
        need_resched = 0;
        if ((flags & COBALT_MUTEX_COND_SIGNAL)) {
-               datp->flags = flags & ~COBALT_MUTEX_COND_SIGNAL;
+               state->flags = flags & ~COBALT_MUTEX_COND_SIGNAL;
                if (!list_empty(&mutex->conds)) {
                        list_for_each_entry(cond, &mutex->conds, mutex_link)
                                need_resched |=
@@ -253,10 +253,10 @@ COBALT_SYSCALL(mutex_init, current,
               int, (struct cobalt_mutex_shadow __user *u_mx,
                     const struct cobalt_mutexattr __user *u_attr))
 {
+       struct cobalt_mutex_state *state;
        struct cobalt_mutexattr attr;
-       struct cobalt_mutex *mutex;
        struct cobalt_mutex_shadow mx;
-       struct mutex_dat *datp;
+       struct cobalt_mutex *mutex;
        int err;
 
        if (__xn_safe_copy_from_user(&mx, u_mx, sizeof(mx)))
@@ -269,17 +269,17 @@ COBALT_SYSCALL(mutex_init, current,
        if (mutex == NULL)
                return -ENOMEM;
 
-       datp = cobalt_umm_alloc(&cobalt_ppd_get(attr.pshared)->umm,
-                               sizeof(*datp));
-       if (datp == NULL) {
+       state = cobalt_umm_alloc(&cobalt_ppd_get(attr.pshared)->umm,
+                                sizeof(*state));
+       if (state == NULL) {
                xnfree(mutex);
                return -EAGAIN;
        }
 
-       err = cobalt_mutex_init_inner(&mx, mutex, datp, &attr);
+       err = cobalt_mutex_init_inner(&mx, mutex, state, &attr);
        if (err) {
                xnfree(mutex);
-               cobalt_umm_free(&cobalt_ppd_get(attr.pshared)->umm, datp);
+               cobalt_umm_free(&cobalt_ppd_get(attr.pshared)->umm, state);
                return err;
        }
 
diff --git a/kernel/cobalt/posix/nsem.c b/kernel/cobalt/posix/nsem.c
index aa6af60..af0cd49 100644
--- a/kernel/cobalt/posix/nsem.c
+++ b/kernel/cobalt/posix/nsem.c
@@ -185,40 +185,46 @@ void __cobalt_sem_unlink(xnhandle_t handle)
                xnregistry_unlink(xnregistry_key(handle));
 }
 
-COBALT_SYSCALL(sem_open, current,
-              int, (struct cobalt_sem_shadow __user *__user *u_addr,
-                    const char __user *u_name,
-                    int oflags, mode_t mode, unsigned value))
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+                 const char __user *u_name,
+                 int oflags, mode_t mode, unsigned int value)
 {
-       struct cobalt_sem_shadow __user *usm;
-       char name[COBALT_MAXNAME + 1];
+       struct filename *filename;
        struct cobalt_process *cc;
-       long len;
 
        cc = cobalt_current_process();
        if (cc == NULL)
-               return -EPERM;
-
-       __xn_get_user(usm, u_addr);
+               return ERR_PTR(-EPERM);
 
-       len = __xn_safe_strncpy_from_user(name, u_name, sizeof(name));
-       if (len < 0)
-               return len;
-       if (len >= sizeof(name))
-               return -ENAMETOOLONG;
-       if (len == 0)
-               return -EINVAL;
+       filename = getname(u_name);
+       if (IS_ERR(filename))
+               return ERR_CAST(filename);
 
-       usm = sem_open(cc, usm, name, oflags, mode, value);
-       if (IS_ERR(usm)) {
-               trace_cobalt_psem_open_failed(name, oflags, mode,
+       usm = sem_open(cc, usm, filename->name, oflags, mode, value);
+       if (IS_ERR(usm))
+               trace_cobalt_psem_open_failed(filename->name, oflags, mode,
                                              value, PTR_ERR(usm));
-               return PTR_ERR(usm);
-       }
+       putname(filename);
 
-       __xn_put_user(usm, u_addr);
+       return usm;
+}
 
-       return 0;
+COBALT_SYSCALL(sem_open, current,
+              int, (struct cobalt_sem_shadow __user *__user *u_addrp,
+                    const char __user *u_name,
+                    int oflags, mode_t mode, unsigned int value))
+{
+       struct cobalt_sem_shadow __user *usm;
+
+       if (__xn_get_user(usm, u_addrp))
+               return -EFAULT;
+
+       usm = __cobalt_sem_open(usm, u_name, oflags, mode, value);
+       if (IS_ERR(usm))
+               return PTR_ERR(usm);
+
+       return __xn_put_user(usm, u_addrp) ? -EFAULT : 0;
 }
 
 COBALT_SYSCALL(sem_close, current,
diff --git a/kernel/cobalt/posix/process.c b/kernel/cobalt/posix/process.c
index 42a33eb..46e2f66 100644
--- a/kernel/cobalt/posix/process.c
+++ b/kernel/cobalt/posix/process.c
@@ -83,8 +83,6 @@ DEFINE_PRIVATE_XNLOCK(process_hash_lock);
 
 struct xnthread_personality *cobalt_personalities[NR_PERSONALITIES];
 
-static void *mayday_page;
-
 static struct xnsynch yield_sync;
 
 static unsigned __attribute__((pure)) process_hash_crunch(struct mm_struct *mm)
@@ -763,7 +761,7 @@ static int handle_mayday_event(struct pt_regs *regs)
        /* We enter the mayday handler with hw IRQs off. */
        sys_ppd = cobalt_ppd_get(0);
 
-       xnarch_handle_mayday(tcb, regs, sys_ppd->mayday_addr);
+       xnarch_handle_mayday(tcb, regs, sys_ppd->mayday_tramp);
 
        return KEVENT_PROPAGATE;
 }
@@ -791,38 +789,6 @@ int ipipe_trap_hook(struct ipipe_trap_data *data)
        return KEVENT_PROPAGATE;
 }
 
-static inline int init_mayday_page(void)
-{
-       mayday_page = vmalloc(PAGE_SIZE);
-       if (mayday_page == NULL) {
-               printk(XENO_ERR "can't alloc MAYDAY page\n");
-               return -ENOMEM;
-       }
-
-       xnarch_setup_mayday_page(mayday_page);
-
-       return 0;
-}
-
-static inline void free_mayday_page(void)
-{
-       if (mayday_page)
-               vfree(mayday_page);
-}
-
-static inline unsigned long map_mayday_page(void)
-{
-       void __user *u_addr = NULL;
-       int ret;
-
-       ret = rtdm_mmap_to_user(NULL, mayday_page, PAGE_SIZE,
-                               PROT_READ|PROT_EXEC, &u_addr, NULL, NULL);
-       if (ret)
-               return 0UL;
-
-       return (unsigned long)u_addr;
-}
-
 #ifdef CONFIG_SMP
 
 static int handle_setaffinity_event(struct ipipe_cpu_migration_data *d)
@@ -978,9 +944,10 @@ static int handle_hostrt_event(struct ipipe_hostrt_data *hostrt)
                nkvdso->hostrt_data.mask = hostrt->mask;
                nkvdso->hostrt_data.mult = hostrt->mult;
                nkvdso->hostrt_data.shift = hostrt->shift;
-               nkvdso->hostrt_data.wall_time_sec = hostrt->wall_time_sec;
-               nkvdso->hostrt_data.wall_time_nsec = hostrt->wall_time_nsec;
-               nkvdso->hostrt_data.wall_to_monotonic = hostrt->wall_to_monotonic;
+               nkvdso->hostrt_data.wall_sec = hostrt->wall_time_sec;
+               nkvdso->hostrt_data.wall_nsec = hostrt->wall_time_nsec;
+               nkvdso->hostrt_data.wtom_sec = hostrt->wall_to_monotonic.tv_sec;
+               nkvdso->hostrt_data.wtom_nsec = hostrt->wall_to_monotonic.tv_nsec;
        }
 
        spin_unlock_irqrestore(&__hostrtlock, flags);
@@ -1282,6 +1249,21 @@ int ipipe_kevent_hook(int kevent, void *data)
        return ret;
 }
 
+static inline unsigned long map_mayday_page(void)
+{
+       void __user *u_addr = NULL;
+       void *mayday_page;
+       int ret;
+
+       mayday_page = xnarch_get_mayday_page();
+       ret = rtdm_mmap_to_user(NULL, mayday_page, PAGE_SIZE,
+                               PROT_READ|PROT_EXEC, &u_addr, NULL, NULL);
+       if (ret)
+               return 0UL;
+
+       return (unsigned long)u_addr;
+}
+
 static int attach_process(struct cobalt_process *process)
 {
        struct cobalt_ppd *p = &process->sys_ppd;
@@ -1295,8 +1277,8 @@ static int attach_process(struct cobalt_process *process)
 
        cobalt_umm_set_name(&p->umm, "private heap[%d]", current->pid);
 
-       p->mayday_addr = map_mayday_page();
-       if (p->mayday_addr == 0) {
+       p->mayday_tramp = map_mayday_page();
+       if (p->mayday_tramp == 0) {
                printk(XENO_WARN
                       "%s[%d] cannot map MAYDAY page\n",
                       current->comm, current->pid);
@@ -1413,10 +1395,10 @@ int cobalt_process_init(void)
                goto fail_debug;
 
        /*
-        * Setup the mayday page early, before userland can mess with
+        * Setup the mayday stuff early, before userland can mess with
         * real-time ops.
         */
-       ret = init_mayday_page();
+       ret = xnarch_init_mayday();
        if (ret)
                goto fail_mayday;
 
@@ -1442,7 +1424,7 @@ fail_register:
        cobalt_memdev_cleanup();
 fail_memdev:
        xnsynch_destroy(&yield_sync);
-       free_mayday_page();
+       xnarch_cleanup_mayday();
 fail_mayday:
        xndebug_cleanup();
 fail_debug:
@@ -1459,7 +1441,7 @@ void cobalt_process_cleanup(void)
        ipipe_set_hooks(&xnsched_realtime_domain, 0);
        ipipe_set_hooks(ipipe_root_domain, 0);
 
-       free_mayday_page();
+       xnarch_cleanup_mayday();
        xndebug_cleanup();
 
        if (process_hash) {
diff --git a/kernel/cobalt/posix/sched.c b/kernel/cobalt/posix/sched.c
index f2fe09a..2dcdfcd 100644
--- a/kernel/cobalt/posix/sched.c
+++ b/kernel/cobalt/posix/sched.c
@@ -302,6 +302,7 @@ ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
                      (int policy, const void __user *u_config,
                       size_t *len),
                      ssize_t (*put_config)(int policy, void __user *u_config,
+                                           size_t u_len,
                                            const union sched_config *config,
                                            size_t len))
 {
@@ -326,18 +327,12 @@ ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
        xnlock_put_irqrestore(&nklock, s);
 
        elen = sched_tp_confsz(gps->pwin_nr);
-       if (elen > len) {
-               ret = -ENOSPC;
-               goto out;
-       }
-
        config = xnmalloc(elen);
        if (config == NULL) {
                ret = -ENOMEM;
                goto out;
        }
 
-       ret = elen;
        config->tp.nr_windows = gps->pwin_nr;
        for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins;
             n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) {
@@ -346,7 +341,7 @@ ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
                p->ptid = w->w_part;
        }
        ns2ts(&pp->duration, gps->tf_duration - pw->w_offset);
-       ret = put_config(SCHED_TP, u_config, config, elen);
+       ret = put_config(SCHED_TP, u_config, len, config, elen);
        xnfree(config);
 out:
        xnsched_tp_put_schedule(gps);
@@ -368,6 +363,7 @@ get_tp_config(int cpu, union sched_config __user *u_config, size_t len,
              (int policy, const void __user *u_config,
               size_t *len),
              ssize_t (*put_config)(int policy, void __user *u_config,
+                                   size_t u_len,
                                    const union sched_config *config,
                                    size_t len))
 {
@@ -467,6 +463,7 @@ ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
                         (int policy, const void __user *u_config,
                          size_t *len),
                         ssize_t (*put_config)(int policy, void __user *u_config,
+                                              size_t u_len,
                                               const union sched_config *config,
                                               size_t len))
 {
@@ -477,9 +474,6 @@ ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
        ssize_t ret;
        spl_t s;
 
-       if (len < sizeof(config->quota))
-               return -EINVAL;
-
        config = fetch_config(SCHED_QUOTA, u_config, &len);
        if (IS_ERR(config))
                return PTR_ERR(config);
@@ -500,7 +494,7 @@ ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
        config->quota.info.quota_sum = xnsched_quota_sum_all(sched);
        xnlock_put_irqrestore(&nklock, s);
 
-       ret = put_config(SCHED_QUOTA, u_config, config, sizeof(*config));
+       ret = put_config(SCHED_QUOTA, u_config, len, config, sizeof(*config));
        xnfree(config);
 
        return ret;
@@ -526,6 +520,7 @@ ssize_t get_quota_config(int cpu, void __user *u_config,
                         (int policy, const void __user *u_config,
                          size_t *len),
                         ssize_t (*put_config)(int policy, void __user *u_config,
+                                              size_t u_len,
                                               const union sched_config *config,
                                               size_t len))
 {
@@ -543,6 +538,9 @@ sched_fetch_config(int policy, const void __user *u_config, size_t *len)
        if (u_config == NULL)
                return ERR_PTR(-EFAULT);
 
+       if (policy == SCHED_QUOTA && *len < sizeof(buf->quota))
+               return ERR_PTR(-EINVAL);
+
        buf = xnmalloc(*len);
        if (buf == NULL)
                return ERR_PTR(-ENOMEM);
@@ -569,7 +567,8 @@ static int sched_ack_config(int policy, const union sched_config *config,
                                       sizeof(u_p->quota.info));
 }
 
-static ssize_t sched_put_config(int policy, void __user *u_config,
+static ssize_t sched_put_config(int policy,
+                               void __user *u_config, size_t u_len,
                                const union sched_config *config, size_t len)
 {
        union sched_config *u_p = u_config;
@@ -577,10 +576,13 @@ static ssize_t sched_put_config(int policy, void __user *u_config,
        if (u_config == NULL)
                return -EFAULT;
 
-       if (policy == SCHED_QUOTA)
+       if (policy == SCHED_QUOTA) {
+               if (u_len < sizeof(config->quota))
+                       return -EINVAL;
                return __xn_safe_copy_to_user(&u_p->quota.info, 
&config->quota.info,
                                              sizeof(u_p->quota.info)) ?:
                        sizeof(u_p->quota.info);
+       }
 
        return __xn_safe_copy_to_user(u_config, config, len) ?: len;
 }
@@ -646,6 +648,7 @@ ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
                                     size_t *len),
                                    ssize_t (*put_config)(int policy,
                                                          void __user *u_config,
+                                                         size_t u_len,
                                                          const union sched_config *config,
                                                          size_t len))
 {
diff --git a/kernel/cobalt/posix/sched.h b/kernel/cobalt/posix/sched.h
index aa93fe1..fda94fc 100644
--- a/kernel/cobalt/posix/sched.h
+++ b/kernel/cobalt/posix/sched.h
@@ -53,7 +53,7 @@ ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
                                    (int policy, const void __user *u_config,
                                     size_t *len),
                                    ssize_t (*put_config)(int policy,
-                                                         void __user *u_config,
+                                                         void __user *u_config, size_t u_len,
                                                          const union sched_config *config,
                                                          size_t len));
 struct xnsched_class *
diff --git a/kernel/cobalt/posix/sem.c b/kernel/cobalt/posix/sem.c
index 433f5ca..3555ab8 100644
--- a/kernel/cobalt/posix/sem.c
+++ b/kernel/cobalt/posix/sem.c
@@ -59,7 +59,7 @@ int __cobalt_sem_destroy(xnhandle_t handle)
        xnlock_put_irqrestore(&nklock, s);
 
        cobalt_umm_free(&cobalt_ppd_get(!!(sem->flags & SEM_PSHARED))->umm,
-                       sem->datp);
+                       sem->state);
        xnregistry_remove(sem->handle);
 
        xnfree(sem);
@@ -71,10 +71,10 @@ struct cobalt_sem *
 __cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
                  int flags, unsigned int value)
 {
+       struct cobalt_sem_state *state;
        struct cobalt_sem *sem, *osem;
        struct cobalt_kqueues *kq;
        struct cobalt_ppd *sys_ppd;
-       struct sem_dat *datp;
        int ret, sflags;
        spl_t s;
 
@@ -92,8 +92,8 @@ __cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
        ksformat(sem->name, sizeof(sem->name), "%s", name);
 
        sys_ppd = cobalt_ppd_get(!!(flags & SEM_PSHARED));
-       datp = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*datp));
-       if (datp == NULL) {
+       state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+       if (state == NULL) {
                ret = -EAGAIN;
                goto err_free_sem;
        }
@@ -142,18 +142,18 @@ __cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
        sflags = flags & SEM_FIFO ? 0 : XNSYNCH_PRIO;
        xnsynch_init(&sem->synchbase, sflags, NULL);
 
-       sem->datp = datp;
-       atomic_set(&datp->value, value);
-       datp->flags = flags;
+       sem->state = state;
+       atomic_set(&state->value, value);
+       state->flags = flags;
        sem->flags = flags;
        sem->owningq = kq;
        sem->refs = name[0] ? 2 : 1;
 
        sm->magic = name[0] ? COBALT_NAMED_SEM_MAGIC : COBALT_SEM_MAGIC;
        sm->handle = sem->handle;
-       sm->datp_offset = cobalt_umm_offset(&sys_ppd->umm, datp);
+       sm->state_offset = cobalt_umm_offset(&sys_ppd->umm, state);
        if (flags & SEM_PSHARED)
-               sm->datp_offset = -sm->datp_offset;
+               sm->state_offset = -sm->state_offset;
        xnlock_put_irqrestore(&nklock, s);
 
        trace_cobalt_psem_init(sem->name, sem->handle, flags, value);
@@ -162,7 +162,7 @@ __cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
 
 err_lock_put:
        xnlock_put_irqrestore(&nklock, s);
-       cobalt_umm_free(&sys_ppd->umm, datp);
+       cobalt_umm_free(&sys_ppd->umm, state);
 err_free_sem:
        xnfree(sem);
 out:
@@ -225,7 +225,7 @@ static inline int sem_trywait_inner(struct cobalt_sem *sem)
                return -EPERM;
 #endif
 
-       if (atomic_sub_return(1, &sem->datp->value) < 0)
+       if (atomic_sub_return(1, &sem->state->value) < 0)
                return -EAGAIN;
 
        return 0;
@@ -306,7 +306,7 @@ int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
                 * applications ported to Linux happy.
                 */
                if (pull_ts) {
-                       atomic_inc(&sem->datp->value);
+                       atomic_inc(&sem->state->value);
                        xnlock_put_irqrestore(&nklock, s);
                        ret = sem_fetch_timeout(&ts, u_ts);
                        xnlock_get_irqsave(&nklock, s);
@@ -327,7 +327,7 @@ int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
                        ret = -EINVAL;
                else if (info & (XNBREAK|XNTIMEO)) {
                        ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
-                       atomic_inc(&sem->datp->value);
+                       atomic_inc(&sem->state->value);
                }
                break;
        }
@@ -347,18 +347,18 @@ int sem_post_inner(struct cobalt_sem *sem, struct cobalt_kqueues *ownq, int bcas
                return -EPERM;
 #endif
 
-       if (atomic_read(&sem->datp->value) == SEM_VALUE_MAX)
+       if (atomic_read(&sem->state->value) == SEM_VALUE_MAX)
                return -EINVAL;
 
        if (!bcast) {
-               if (atomic_inc_return(&sem->datp->value) <= 0) {
+               if (atomic_inc_return(&sem->state->value) <= 0) {
                        if (xnsynch_wakeup_one_sleeper(&sem->synchbase))
                                xnsched_run();
                } else if (sem->flags & SEM_PULSE)
-                       atomic_set(&sem->datp->value, 0);
+                       atomic_set(&sem->state->value, 0);
        } else {
-               if (atomic_read(&sem->datp->value) < 0) {
-                       atomic_set(&sem->datp->value, 0);
+               if (atomic_read(&sem->state->value) < 0) {
+                       atomic_set(&sem->state->value, 0);
                        if (xnsynch_flush(&sem->synchbase, 0) ==
                                XNSYNCH_RESCHED)
                                xnsched_run();
@@ -401,7 +401,7 @@ static int sem_getvalue(xnhandle_t handle, int *value)
                return -EPERM;
        }
 
-       *value = atomic_read(&sem->datp->value);
+       *value = atomic_read(&sem->state->value);
        if ((sem->flags & SEM_REPORT) == 0 && *value < 0)
                *value = 0;
 
@@ -559,7 +559,7 @@ COBALT_SYSCALL(sem_inquire, current,
                 * holding any lock, then revalidate the handle.
                 */
                if (t == NULL) {
-                       val = atomic_read(&sem->datp->value);
+                       val = atomic_read(&sem->state->value);
                        if (val >= 0 || u_waitlist == NULL)
                                break;
                        xnlock_put_irqrestore(&nklock, s);
@@ -575,7 +575,7 @@ COBALT_SYSCALL(sem_inquire, current,
                        xnlock_get_irqsave(&nklock, s);
                } else if (pstamp == nstamp)
                        break;
-               else if (val != atomic_read(&sem->datp->value)) {
+               else if (val != atomic_read(&sem->state->value)) {
                        xnlock_put_irqrestore(&nklock, s);
                        if (t != fbuf)
                                xnfree(t);
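
Note: __cobalt_sem_init() stores the location of the semaphore state as a
heap offset rather than a pointer, and encodes the owning heap in its sign:
the offset is kept as-is for process-private semaphores and negated for
SEM_PSHARED ones. A sketch of how a consumer could decode that convention
(illustration only; the offset is assumed to be signed, and umm_private/
umm_shared stand for the locally mapped private and shared heaps):

/*
 * Sketch only: decode the sign convention set up above.  A negative
 * offset means the state lives in the shared heap.
 */
static inline void *sem_state_from_offset(ssize_t state_offset,
					  void *umm_private, void *umm_shared)
{
	if (state_offset < 0)			/* negated => SEM_PSHARED */
		return umm_shared + (-state_offset);

	return umm_private + state_offset;	/* process-private semaphore */
}
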
diff --git a/kernel/cobalt/posix/sem.h b/kernel/cobalt/posix/sem.h
index 5e03b91..1211a4d 100644
--- a/kernel/cobalt/posix/sem.h
+++ b/kernel/cobalt/posix/sem.h
@@ -31,7 +31,7 @@ struct cobalt_sem {
        struct xnsynch synchbase;
        /** semq */
        struct list_head link;
-       struct sem_dat *datp;
+       struct cobalt_sem_state *state;
        int flags;
        struct cobalt_kqueues *owningq;
        xnhandle_t handle;
@@ -59,6 +59,11 @@ typedef struct
 #define SEM_FAILED     NULL
 #define SEM_NAMED      0x80000000
 
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+                 const char __user *u_name,
+                 int oflags, mode_t mode, unsigned int value);
+
 int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
                           const void __user *u_ts,
                           int (*fetch_timeout)(struct timespec *ts,
@@ -99,9 +104,9 @@ COBALT_SYSCALL_DECL(sem_destroy,
                    int, (struct cobalt_sem_shadow __user *u_sem));
 
 COBALT_SYSCALL_DECL(sem_open,
-                   int, (struct cobalt_sem_shadow __user *__user *u_addr,
+                   int, (struct cobalt_sem_shadow __user *__user *u_addrp,
                          const char __user *u_name,
-                         int oflags, mode_t mode, unsigned value));
+                         int oflags, mode_t mode, unsigned int value));
 
 COBALT_SYSCALL_DECL(sem_close,
                    int, (struct cobalt_sem_shadow __user *usm));
diff --git a/kernel/cobalt/posix/syscall.c b/kernel/cobalt/posix/syscall.c
index ae879fc..71ed345 100644
--- a/kernel/cobalt/posix/syscall.c
+++ b/kernel/cobalt/posix/syscall.c
@@ -77,10 +77,6 @@ typedef int (*cobalt_syshand)(unsigned long arg1, unsigned long arg2,
                              unsigned long arg3, unsigned long arg4,
                              unsigned long arg5);
 
-static const cobalt_syshand cobalt_syscalls[];
-
-static const int cobalt_sysmodes[];
-
 static void prepare_for_signal(struct task_struct *p,
                               struct xnthread *thread,
                               struct pt_regs *regs,
@@ -104,416 +100,116 @@ static void prepare_for_signal(struct task_struct *p,
        xnthread_relax(notify, SIGDEBUG_MIGRATE_SIGNAL);
 }
 
-static int handle_head_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
+static COBALT_SYSCALL(migrate, current, int, (int domain))
 {
-       struct cobalt_process *process;
-       int switched, ret, sigs;
-       struct xnthread *thread;
-       cobalt_syshand handler;
-       struct task_struct *p;
-       unsigned int nr;
-       int sysflags;
-
-       if (!__xn_syscall_p(regs))
-               goto linux_syscall;
-
-       thread = xnthread_current();
-       nr = __xn_syscall(regs);
+       struct xnthread *thread = xnthread_current();
 
-       trace_cobalt_head_sysentry(thread, nr);
+       if (ipipe_root_p) {
+               if (domain == COBALT_PRIMARY) {
+                       if (thread == NULL)
+                               return -EPERM;
+                       /*
+                        * Paranoid: a corner case where userland
+                        * fiddles with SIGSHADOW while the target
+                        * thread is still waiting to be started.
+                        */
+                       if (xnthread_test_state(thread, XNDORMANT))
+                               return 0;
 
-       if (nr >= __NR_COBALT_SYSCALLS)
-               goto bad_syscall;
+                       return xnthread_harden() ? : 1;
+               }
+               return 0;
+       }
 
-       process = cobalt_current_process();
-       if (process == NULL) {
-               process = cobalt_search_process(current->mm);
-               cobalt_set_process(process);
+       /* ipipe_current_domain != ipipe_root_domain */
+       if (domain == COBALT_SECONDARY) {
+               xnthread_relax(0, 0);
+               return 1;
        }
 
-       handler = cobalt_syscalls[nr];
-       sysflags = cobalt_sysmodes[nr & (__NR_COBALT_SYSCALLS - 1)];
+       return 0;
+}
 
-       /*
-        * Executing Cobalt services requires CAP_SYS_NICE, except for
-        * sc_cobalt_bind which does its own checks.
-        */
-       if (unlikely((process == NULL && nr != sc_cobalt_bind) ||
-                    (thread == NULL && (sysflags & __xn_exec_shadow) != 0) ||
-                    (!cap_raised(current_cap(), CAP_SYS_NICE) &&
-                     nr != sc_cobalt_bind))) {
-               if (XENO_DEBUG(COBALT))
-                       printk(XENO_WARN
-                              "syscall <%d> denied to %s[%d]\n",
-                              nr, current->comm, current->pid);
-               __xn_error_return(regs, -EPERM);
-               goto ret_handled;
-       }
+static COBALT_SYSCALL(info, lostage,
+                     int, (struct cobalt_sysinfo __user *u_info))
+{
+       struct cobalt_sysinfo info;
 
-       if (sysflags & __xn_exec_conforming)
-               /*
-                * If the conforming exec bit is set, turn the exec
-                * bitmask for the syscall into the most appropriate
-                * setup for the caller, i.e. Xenomai domain for
-                * shadow threads, Linux otherwise.
-                */
-               sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
+       info.clockfreq = xnarch_machdata.clock_freq;
+       info.vdso = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso);
 
-       /*
-        * Here we have to dispatch the syscall execution properly,
-        * depending on:
-        *
-        * o Whether the syscall must be run into the Linux or Xenomai
-        * domain, or indifferently in the current Xenomai domain.
-        *
-        * o Whether the caller currently runs in the Linux or Xenomai
-        * domain.
-        */
-       switched = 0;
-restart:
-       /*
-        * Process adaptive syscalls by restarting them in the
-        * opposite domain.
-        */
-       if (sysflags & __xn_exec_lostage) {
-               /*
-                * The syscall must run from the Linux domain.
-                */
-               if (ipd == &xnsched_realtime_domain) {
-                       /*
-                        * Request originates from the Xenomai domain:
-                        * relax the caller then invoke the syscall
-                        * handler right after.
-                        */
-                       xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
-                       switched = 1;
-               } else
-                       /*
-                        * Request originates from the Linux domain:
-                        * propagate the event to our Linux-based
-                        * handler, so that the syscall is executed
-                        * from there.
-                        */
-                       return KEVENT_PROPAGATE;
-       } else if (sysflags & (__xn_exec_histage | __xn_exec_current)) {
-               /*
-                * Syscall must run either from the Xenomai domain, or
-                * from the calling domain.
-                *
-                * If the request originates from the Linux domain,
-                * hand it over to our secondary-mode dispatcher.
-                * Otherwise, invoke the syscall handler immediately.
-                */
-               if (ipd != &xnsched_realtime_domain)
-                       return KEVENT_PROPAGATE;
-       }
+       return __xn_safe_copy_to_user(u_info, &info, sizeof(info));
+}
 
-       ret = handler(__xn_reg_arglist(regs));
-       if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive) != 0) {
-               if (switched) {
-                       switched = 0;
-                       ret = xnthread_harden();
-                       if (ret)
-                               goto done;
-               }
+static COBALT_SYSCALL(trace, current,
+                     int, (int op, unsigned long a1,
+                           unsigned long a2, unsigned long a3))
+{
+       int ret = -EINVAL;
 
-               sysflags ^=
-                   (__xn_exec_lostage | __xn_exec_histage |
-                    __xn_exec_adaptive);
-               goto restart;
-       }
-done:
-       __xn_status_return(regs, ret);
-       sigs = 0;
-       if (!xnsched_root_p()) {
-               p = current;
-               if (signal_pending(p) ||
-                   xnthread_test_info(thread, XNKICKED)) {
-                       sigs = 1;
-                       prepare_for_signal(p, thread, regs, sysflags);
-               } else if (xnthread_test_state(thread, XNWEAK) &&
-                          thread->res_count == 0) {
-                       if (switched)
-                               switched = 0;
-                       else
-                               xnthread_relax(0, 0);
-               }
-       }
-       if (!sigs && (sysflags & __xn_exec_switchback) != 0 && switched)
-               xnthread_harden(); /* -EPERM will be trapped later if needed. */
+       switch (op) {
+       case __xntrace_op_max_begin:
+               ret = xntrace_max_begin(a1);
+               break;
 
-ret_handled:
-       /* Update the stats and userland-visible state. */
-       if (thread) {
-               xnstat_counter_inc(&thread->stat.xsc);
-               xnthread_sync_window(thread);
-       }
+       case __xntrace_op_max_end:
+               ret = xntrace_max_end(a1);
+               break;
 
-       trace_cobalt_head_sysexit(thread, __xn_reg_rval(regs));
+       case __xntrace_op_max_reset:
+               ret = xntrace_max_reset();
+               break;
 
-       return KEVENT_STOP;
+       case __xntrace_op_user_start:
+               ret = xntrace_user_start();
+               break;
 
-linux_syscall:
-       if (xnsched_root_p())
-               /*
-                * The call originates from the Linux domain, either
-                * from a relaxed shadow or from a regular Linux task;
-                * just propagate the event so that we will fall back
-                * to handle_root_syscall().
-                */
-               return KEVENT_PROPAGATE;
+       case __xntrace_op_user_stop:
+               ret = xntrace_user_stop(a1);
+               break;
 
-       /*
-        * From now on, we know that we have a valid shadow thread
-        * pointer.
-        *
-        * The current syscall will eventually fall back to the Linux
-        * syscall handler if our Linux domain handler does not
-        * intercept it. Before we let it go, ensure that the current
-        * thread has properly entered the Linux domain.
-        */
-       xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+       case __xntrace_op_user_freeze:
+               ret = xntrace_user_freeze(a1, a2);
+               break;
 
-       return KEVENT_PROPAGATE;
+       case __xntrace_op_special:
+               ret = xntrace_special(a1 & 0xFF, a2);
+               break;
 
-bad_syscall:
-       printk(XENO_WARN "bad syscall <%#lx>\n", __xn_syscall(regs));
-       
-       __xn_error_return(regs, -ENOSYS);
+       case __xntrace_op_special_u64:
+               ret = xntrace_special_u64(a1 & 0xFF,
+                                         (((u64) a2) << 32) | a3);
+               break;
+       }
+       return ret;
+}
 
-       return KEVENT_STOP;
+static COBALT_SYSCALL(archcall, current,
+                     int, (unsigned long a1, unsigned long a2,
+                           unsigned long a3, unsigned long a4,
+                           unsigned long a5))
+{
+       return xnarch_local_syscall(a1, a2, a3, a4, a5);
 }
 
-static int handle_root_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
+static COBALT_SYSCALL(get_current, current,
+                     int, (xnhandle_t __user *u_handle))
 {
-       int sysflags, switched, ret, sigs;
-       struct xnthread *thread;
-       cobalt_syshand handler;
-       struct task_struct *p;
-       unsigned int nr;
+       struct xnthread *cur = xnthread_current();
 
-       /*
-        * Catch cancellation requests pending for user shadows
-        * running mostly in secondary mode, i.e. XNWEAK. In that
-        * case, we won't run prepare_for_signal() that frequently, so
-        * check for cancellation here.
-        */
-       xnthread_test_cancel();
+       if (cur == NULL)
+               return -EPERM;
 
-       if (!__xn_syscall_p(regs))
-               /* Fall back to Linux syscall handling. */
-               return KEVENT_PROPAGATE;
+       return __xn_safe_copy_to_user(u_handle, &cur->handle,
+                                     sizeof(*u_handle));
+}
 
-       thread = xnthread_current();
-       /* nr has already been checked in the head domain handler. */
-       nr = __xn_syscall(regs);
-
-       trace_cobalt_root_sysentry(thread, nr);
-
-       /* Processing a Xenomai syscall. */
-
-       handler = cobalt_syscalls[nr];
-       sysflags = cobalt_sysmodes[nr & (__NR_COBALT_SYSCALLS - 1)];
-
-       if ((sysflags & __xn_exec_conforming) != 0)
-               sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
-restart:
-       /*
-        * Process adaptive syscalls by restarting them in the
-        * opposite domain.
-        */
-       if (sysflags & __xn_exec_histage) {
-               /*
-                * This request originates from the Linux domain and
-                * must be run into the Xenomai domain: harden the
-                * caller and execute the syscall.
-                */
-               ret = xnthread_harden();
-               if (ret) {
-                       __xn_error_return(regs, ret);
-                       goto ret_handled;
-               }
-               switched = 1;
-       } else
-               /*
-                * We want to run the syscall in the Linux domain.
-                */
-               switched = 0;
-
-       ret = handler(__xn_reg_arglist(regs));
-       if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive) != 0) {
-               if (switched) {
-                       switched = 0;
-                       xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
-               }
-
-               sysflags ^=
-                   (__xn_exec_lostage | __xn_exec_histage |
-                    __xn_exec_adaptive);
-               goto restart;
-       }
-
-       __xn_status_return(regs, ret);
-
-       sigs = 0;
-       if (!xnsched_root_p()) {
-               /*
-                * We may have gained a shadow TCB from the syscall we
-                * just invoked, so make sure to fetch it.
-                */
-               thread = xnthread_current();
-               p = current;
-               if (signal_pending(p)) {
-                       sigs = 1;
-                       prepare_for_signal(p, thread, regs, sysflags);
-               } else if (xnthread_test_state(thread, XNWEAK) &&
-                          thread->res_count == 0)
-                       sysflags |= __xn_exec_switchback;
-       }
-       if (!sigs && (sysflags & __xn_exec_switchback) != 0
-           && (switched || xnsched_primary_p()))
-               xnthread_relax(0, 0);
-
-ret_handled:
-       /* Update the stats and userland-visible state. */
-       if (thread) {
-               xnstat_counter_inc(&thread->stat.xsc);
-               xnthread_sync_window(thread);
-       }
-
-       trace_cobalt_root_sysexit(thread, __xn_reg_rval(regs));
-
-       return KEVENT_STOP;
-}
-
-int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
-{
-       if (unlikely(ipipe_root_p))
-               return handle_root_syscall(ipd, regs);
-
-       return handle_head_syscall(ipd, regs);
-}
-
-int ipipe_fastcall_hook(struct pt_regs *regs)
-{
-       int ret;
-
-       ret = handle_head_syscall(&xnsched_realtime_domain, regs);
-       XENO_BUGON(COBALT, ret == KEVENT_PROPAGATE);
-
-       return ret;
-}
-
-static COBALT_SYSCALL(migrate, current, int, (int domain))
-{
-       struct xnthread *thread = xnthread_current();
-
-       if (ipipe_root_p) {
-               if (domain == COBALT_PRIMARY) {
-                       if (thread == NULL)
-                               return -EPERM;
-                       /*
-                        * Paranoid: a corner case where userland
-                        * fiddles with SIGSHADOW while the target
-                        * thread is still waiting to be started.
-                        */
-                       if (xnthread_test_state(thread, XNDORMANT))
-                               return 0;
-
-                       return xnthread_harden() ? : 1;
-               }
-               return 0;
-       }
-
-       /* ipipe_current_domain != ipipe_root_domain */
-       if (domain == COBALT_SECONDARY) {
-               xnthread_relax(0, 0);
-               return 1;
-       }
-
-       return 0;
-}
-
-static COBALT_SYSCALL(info, lostage,
-                     int, (struct cobalt_sysinfo __user *u_info))
-{
-       struct cobalt_sysinfo info;
-
-       info.clockfreq = xnarch_machdata.clock_freq;
-       info.vdso = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso);
-
-       return __xn_safe_copy_to_user(u_info, &info, sizeof(info));
-}
-
-static COBALT_SYSCALL(trace, current,
-                     int, (int op, unsigned long a1,
-                           unsigned long a2, unsigned long a3))
-{
-       int ret = -EINVAL;
-
-       switch (op) {
-       case __xntrace_op_max_begin:
-               ret = xntrace_max_begin(a1);
-               break;
-
-       case __xntrace_op_max_end:
-               ret = xntrace_max_end(a1);
-               break;
-
-       case __xntrace_op_max_reset:
-               ret = xntrace_max_reset();
-               break;
-
-       case __xntrace_op_user_start:
-               ret = xntrace_user_start();
-               break;
-
-       case __xntrace_op_user_stop:
-               ret = xntrace_user_stop(a1);
-               break;
-
-       case __xntrace_op_user_freeze:
-               ret = xntrace_user_freeze(a1, a2);
-               break;
-
-       case __xntrace_op_special:
-               ret = xntrace_special(a1 & 0xFF, a2);
-               break;
-
-       case __xntrace_op_special_u64:
-               ret = xntrace_special_u64(a1 & 0xFF,
-                                         (((u64) a2) << 32) | a3);
-               break;
-       }
-       return ret;
-}
-
-static COBALT_SYSCALL(archcall, current,
-                     int, (unsigned long a1, unsigned long a2,
-                           unsigned long a3, unsigned long a4,
-                           unsigned long a5))
-{
-       return xnarch_local_syscall(a1, a2, a3, a4, a5);
-}
-
-static COBALT_SYSCALL(get_current, current,
-                     int, (xnhandle_t __user *u_handle))
-{
-       struct xnthread *cur = xnthread_current();
-
-       if (cur == NULL)
-               return -EPERM;
-
-       return __xn_safe_copy_to_user(u_handle, &cur->handle,
-                                     sizeof(*u_handle));
-}
-
-static COBALT_SYSCALL(backtrace, current,
-                     int, (int nr, unsigned long __user *u_backtrace,
-                           int reason))
-{
-       unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
-       int ret;
+static COBALT_SYSCALL(backtrace, current,
+                     int, (int nr, unsigned long __user *u_backtrace,
+                           int reason))
+{
+       unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+       int ret;
 
        /*
         * In case backtrace() in userland is broken or fails. We may
@@ -1021,3 +717,305 @@ static const int cobalt_sysmodes[] = {
        __COBALT_MODE(sysconf, current),
        __COBALT_MODE(sysctl, probing),
 };
+
+static int handle_head_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+       struct cobalt_process *process;
+       int switched, ret, sigs;
+       struct xnthread *thread;
+       cobalt_syshand handler;
+       struct task_struct *p;
+       unsigned int nr, code;
+       int sysflags;
+
+       if (!__xn_syscall_p(regs))
+               goto linux_syscall;
+
+       thread = xnthread_current();
+       code = __xn_syscall(regs);
+       if (code >= ARRAY_SIZE(cobalt_syscalls))
+               goto bad_syscall;
+
+       nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+       trace_cobalt_head_sysentry(thread, code);
+
+       process = cobalt_current_process();
+       if (process == NULL) {
+               process = cobalt_search_process(current->mm);
+               cobalt_set_process(process);
+       }
+
+       handler = cobalt_syscalls[code];
+       sysflags = cobalt_sysmodes[nr];
+
+       /*
+        * Executing Cobalt services requires CAP_SYS_NICE, except for
+        * sc_cobalt_bind which does its own checks.
+        */
+       if (unlikely((process == NULL && nr != sc_cobalt_bind) ||
+                    (thread == NULL && (sysflags & __xn_exec_shadow) != 0) ||
+                    (!cap_raised(current_cap(), CAP_SYS_NICE) &&
+                     nr != sc_cobalt_bind))) {
+               if (XENO_DEBUG(COBALT))
+                       printk(XENO_WARN
+                              "syscall <%d> denied to %s[%d]\n",
+                              nr, current->comm, current->pid);
+               __xn_error_return(regs, -EPERM);
+               goto ret_handled;
+       }
+
+       if (sysflags & __xn_exec_conforming)
+               /*
+                * If the conforming exec bit is set, turn the exec
+                * bitmask for the syscall into the most appropriate
+                * setup for the caller, i.e. Xenomai domain for
+                * shadow threads, Linux otherwise.
+                */
+               sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
+
+       /*
+        * Here we have to dispatch the syscall execution properly,
+        * depending on:
+        *
+        * o Whether the syscall must be run into the Linux or Xenomai
+        * domain, or indifferently in the current Xenomai domain.
+        *
+        * o Whether the caller currently runs in the Linux or Xenomai
+        * domain.
+        */
+       switched = 0;
+restart:
+       /*
+        * Process adaptive syscalls by restarting them in the
+        * opposite domain.
+        */
+       if (sysflags & __xn_exec_lostage) {
+               /*
+                * The syscall must run from the Linux domain.
+                */
+               if (ipd == &xnsched_realtime_domain) {
+                       /*
+                        * Request originates from the Xenomai domain:
+                        * relax the caller then invoke the syscall
+                        * handler right after.
+                        */
+                       xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+                       switched = 1;
+               } else
+                       /*
+                        * Request originates from the Linux domain:
+                        * propagate the event to our Linux-based
+                        * handler, so that the syscall is executed
+                        * from there.
+                        */
+                       return KEVENT_PROPAGATE;
+       } else if (sysflags & (__xn_exec_histage | __xn_exec_current)) {
+               /*
+                * Syscall must run either from the Xenomai domain, or
+                * from the calling domain.
+                *
+                * If the request originates from the Linux domain,
+                * hand it over to our secondary-mode dispatcher.
+                * Otherwise, invoke the syscall handler immediately.
+                */
+               if (ipd != &xnsched_realtime_domain)
+                       return KEVENT_PROPAGATE;
+       }
+
+       ret = handler(__xn_reg_arglist(regs));
+       if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive) != 0) {
+               if (switched) {
+                       switched = 0;
+                       ret = xnthread_harden();
+                       if (ret)
+                               goto done;
+               }
+
+               sysflags ^=
+                   (__xn_exec_lostage | __xn_exec_histage |
+                    __xn_exec_adaptive);
+               goto restart;
+       }
+done:
+       __xn_status_return(regs, ret);
+       sigs = 0;
+       if (!xnsched_root_p()) {
+               p = current;
+               if (signal_pending(p) ||
+                   xnthread_test_info(thread, XNKICKED)) {
+                       sigs = 1;
+                       prepare_for_signal(p, thread, regs, sysflags);
+               } else if (xnthread_test_state(thread, XNWEAK) &&
+                          thread->res_count == 0) {
+                       if (switched)
+                               switched = 0;
+                       else
+                               xnthread_relax(0, 0);
+               }
+       }
+       if (!sigs && (sysflags & __xn_exec_switchback) != 0 && switched)
+               xnthread_harden(); /* -EPERM will be trapped later if needed. */
+
+ret_handled:
+       /* Update the stats and userland-visible state. */
+       if (thread) {
+               xnstat_counter_inc(&thread->stat.xsc);
+               xnthread_sync_window(thread);
+       }
+
+       trace_cobalt_head_sysexit(thread, __xn_reg_rval(regs));
+
+       return KEVENT_STOP;
+
+linux_syscall:
+       if (xnsched_root_p())
+               /*
+                * The call originates from the Linux domain, either
+                * from a relaxed shadow or from a regular Linux task;
+                * just propagate the event so that we will fall back
+                * to handle_root_syscall().
+                */
+               return KEVENT_PROPAGATE;
+
+       /*
+        * From now on, we know that we have a valid shadow thread
+        * pointer.
+        *
+        * The current syscall will eventually fall back to the Linux
+        * syscall handler if our Linux domain handler does not
+        * intercept it. Before we let it go, ensure that the current
+        * thread has properly entered the Linux domain.
+        */
+       xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+
+       return KEVENT_PROPAGATE;
+
+bad_syscall:
+       printk(XENO_WARN "bad syscall <%#lx>\n", __xn_syscall(regs));
+       
+       __xn_error_return(regs, -ENOSYS);
+
+       return KEVENT_STOP;
+}
+
+static int handle_root_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+       int sysflags, switched, ret, sigs;
+       struct xnthread *thread;
+       cobalt_syshand handler;
+       struct task_struct *p;
+       unsigned int nr, code;
+
+       /*
+        * Catch cancellation requests pending for user shadows
+        * running mostly in secondary mode, i.e. XNWEAK. In that
+        * case, we won't run prepare_for_signal() that frequently, so
+        * check for cancellation here.
+        */
+       xnthread_test_cancel();
+
+       if (!__xn_syscall_p(regs))
+               /* Fall back to Linux syscall handling. */
+               return KEVENT_PROPAGATE;
+
+       thread = xnthread_current();
+       /* code has already been checked in the head domain handler. */
+       code = __xn_syscall(regs);
+       nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+       trace_cobalt_root_sysentry(thread, code);
+
+       /* Processing a Xenomai syscall. */
+
+       handler = cobalt_syscalls[code];
+       sysflags = cobalt_sysmodes[nr];
+
+       if ((sysflags & __xn_exec_conforming) != 0)
+               sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
+restart:
+       /*
+        * Process adaptive syscalls by restarting them in the
+        * opposite domain.
+        */
+       if (sysflags & __xn_exec_histage) {
+               /*
+                * This request originates from the Linux domain and
+                * must be run into the Xenomai domain: harden the
+                * caller and execute the syscall.
+                */
+               ret = xnthread_harden();
+               if (ret) {
+                       __xn_error_return(regs, ret);
+                       goto ret_handled;
+               }
+               switched = 1;
+       } else
+               /*
+                * We want to run the syscall in the Linux domain.
+                */
+               switched = 0;
+
+       ret = handler(__xn_reg_arglist(regs));
+       if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive) != 0) {
+               if (switched) {
+                       switched = 0;
+                       xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+               }
+
+               sysflags ^=
+                   (__xn_exec_lostage | __xn_exec_histage |
+                    __xn_exec_adaptive);
+               goto restart;
+       }
+
+       __xn_status_return(regs, ret);
+
+       sigs = 0;
+       if (!xnsched_root_p()) {
+               /*
+                * We may have gained a shadow TCB from the syscall we
+                * just invoked, so make sure to fetch it.
+                */
+               thread = xnthread_current();
+               p = current;
+               if (signal_pending(p)) {
+                       sigs = 1;
+                       prepare_for_signal(p, thread, regs, sysflags);
+               } else if (xnthread_test_state(thread, XNWEAK) &&
+                          thread->res_count == 0)
+                       sysflags |= __xn_exec_switchback;
+       }
+       if (!sigs && (sysflags & __xn_exec_switchback) != 0
+           && (switched || xnsched_primary_p()))
+               xnthread_relax(0, 0);
+
+ret_handled:
+       /* Update the stats and userland-visible state. */
+       if (thread) {
+               xnstat_counter_inc(&thread->stat.xsc);
+               xnthread_sync_window(thread);
+       }
+
+       trace_cobalt_root_sysexit(thread, __xn_reg_rval(regs));
+
+       return KEVENT_STOP;
+}
+
+int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+       if (unlikely(ipipe_root_p))
+               return handle_root_syscall(ipd, regs);
+
+       return handle_head_syscall(ipd, regs);
+}
+
+int ipipe_fastcall_hook(struct pt_regs *regs)
+{
+       int ret;
+
+       ret = handle_head_syscall(&xnsched_realtime_domain, regs);
+       XENO_BUGON(COBALT, ret == KEVENT_PROPAGATE);
+
+       return ret;
+}
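
Note: both handlers now read the raw syscall number ("code") from the
register frame, bound-check it against the full handler table, and derive
the execution-mode slot with nr = code & (__NR_COBALT_SYSCALLS - 1). The
mask idiom assumes __NR_COBALT_SYSCALLS is a power of two, so that any
entry of a possibly larger table (presumably extra entry points such as
compat variants) folds back onto the mode flags of its canonical syscall.
A condensed restatement, for illustration only:

/*
 * Condensed sketch of the lookup pattern used by handle_head_syscall()
 * and handle_root_syscall() above.  Error handling is reduced to a
 * plain -ENOSYS here; the real handlers branch to bad_syscall instead.
 */
static int lookup_cobalt_syscall(unsigned int code,
				 cobalt_syshand *handler, int *sysflags)
{
	unsigned int nr;

	if (code >= ARRAY_SIZE(cobalt_syscalls))
		return -ENOSYS;			/* outside the handler table */

	nr = code & (__NR_COBALT_SYSCALLS - 1);	/* canonical mode slot */
	*handler = cobalt_syscalls[code];	/* per-entry handler */
	*sysflags = cobalt_sysmodes[nr];	/* shared execution-mode flags */

	return 0;
}
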
diff --git a/kernel/cobalt/posix/thread.c b/kernel/cobalt/posix/thread.c
index a74d024..defea50 100644
--- a/kernel/cobalt/posix/thread.c
+++ b/kernel/cobalt/posix/thread.c
@@ -372,10 +372,10 @@ static inline int pthread_create(struct cobalt_thread **thread_p,
        iattr.flags = XNUSER|XNFPU;
        iattr.personality = &cobalt_personality;
        iattr.affinity = CPU_MASK_ALL;
-       if (xnthread_init(&thread->threadbase,
-                         &iattr, sched_class, &param) != 0) {
+       ret = xnthread_init(&thread->threadbase, &iattr, sched_class, &param);
+       if (ret) {
                xnfree(thread);
-               return -EAGAIN;
+               return ret;
        }
 
        thread->sched_u_policy = policy;
@@ -541,22 +541,16 @@ COBALT_SYSCALL(thread_getschedparam_ex, current,
        return __xn_safe_copy_to_user(u_param, &param_ex, sizeof(param_ex));
 }
 
-COBALT_SYSCALL(thread_create, init,
-              int, (unsigned long pth, int policy,
-                    struct sched_param_ex __user *u_param,
-                    int xid,
-                    __u32 __user *u_winoff))
+int __cobalt_thread_create(unsigned long pth, int policy,
+                          struct sched_param_ex *param_ex,
+                          int xid, __u32 __user *u_winoff)
 {
        struct cobalt_thread *thread = NULL;
        struct task_struct *p = current;
-       struct sched_param_ex param_ex;
        struct cobalt_local_hkey hkey;
        int ret;
 
-       if (__xn_safe_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
-               return -EFAULT;
-
-       trace_cobalt_pthread_create(pth, policy, &param_ex);
+       trace_cobalt_pthread_create(pth, policy, param_ex);
 
        /*
         * We have been passed the pthread_t identifier the user-space
@@ -566,7 +560,7 @@ COBALT_SYSCALL(thread_create, init,
        hkey.u_pth = pth;
        hkey.mm = p->mm;
 
-       ret = pthread_create(&thread, policy, &param_ex, p);
+       ret = pthread_create(&thread, policy, param_ex, p);
        if (ret)
                return ret;
 
@@ -593,6 +587,22 @@ fail:
        return ret;
 }
 
+COBALT_SYSCALL(thread_create, init,
+              int, (unsigned long pth, int policy,
+                    struct sched_param_ex __user *u_param,
+                    int xid,
+                    __u32 __user *u_winoff))
+{
+       struct sched_param_ex param_ex;
+       int ret;
+
+       ret = __xn_safe_copy_from_user(&param_ex, u_param, sizeof(param_ex));
+       if (ret)
+               return ret;
+
+       return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
 struct cobalt_thread *
 cobalt_thread_shadow(struct task_struct *p,
                     struct cobalt_local_hkey *hkey,
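
Note: thread_create is now split into a thin COBALT_SYSCALL wrapper that
only copies sched_param_ex from user space, and an exported
__cobalt_thread_create() that takes the already-copied structure. This
lets alternate entry points reuse the core path after doing their own
parameter conversion. A hypothetical compat-side wrapper, for illustration
only (the compat parameter type and conversion helper are assumptions, not
part of this commit):

/*
 * Hypothetical sketch: a 32-bit compat entry point would only differ in
 * how it fetches and converts the parameter block, then reuse
 * __cobalt_thread_create() for the actual work.
 */
static int cobalt_compat_thread_create(unsigned long pth, int policy,
				       void __user *u_param_compat,
				       int xid, __u32 __user *u_winoff)
{
	struct sched_param_ex param_ex;
	int ret;

	ret = fetch_compat_sched_param_ex(&param_ex, u_param_compat); /* assumed helper */
	if (ret)
		return ret;

	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
}
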
diff --git a/kernel/cobalt/posix/thread.h b/kernel/cobalt/posix/thread.h
index 1322993..392cb51 100644
--- a/kernel/cobalt/posix/thread.h
+++ b/kernel/cobalt/posix/thread.h
@@ -122,6 +122,20 @@ static inline struct cobalt_thread *cobalt_current_thread(void)
        return curr ? container_of(curr, struct cobalt_thread, threadbase) : NULL;
 }
 
+int __cobalt_thread_create(unsigned long pth, int policy,
+                          struct sched_param_ex __user *u_param,
+                          int xid, __u32 __user *u_winoff);
+
+int __cobalt_thread_setschedparam_ex(unsigned long pth,
+                                    int policy,
+                                    const struct sched_param_ex *param_ex,
+                                    __u32 __user *u_winoff,
+                                    int __user *u_promoted);
+
+int __cobalt_thread_getschedparam_ex(unsigned long pth,
+                                    int __user *u_policy,
+                                    struct sched_param_ex *param_ex);
+
 struct cobalt_thread *cobalt_thread_find(pid_t pid);
 
 struct cobalt_thread *cobalt_thread_find_local(pid_t pid);
@@ -156,12 +170,6 @@ COBALT_SYSCALL_DECL(thread_getstat,
                    int, (pid_t pid,
                          struct cobalt_threadstat __user *u_stat));
 
-int __cobalt_thread_setschedparam_ex(unsigned long pth,
-                                    int policy,
-                                    const struct sched_param_ex *param_ex,
-                                    __u32 __user *u_winoff,
-                                    int __user *u_promoted);
-
 COBALT_SYSCALL_DECL(thread_setschedparam_ex,
                    int, (unsigned long pth,
                          int policy,
@@ -169,10 +177,6 @@ COBALT_SYSCALL_DECL(thread_setschedparam_ex,
                          __u32 __user *u_winoff,
                          int __user *u_promoted));
 
-int __cobalt_thread_getschedparam_ex(unsigned long pth,
-                                    int __user *u_policy,
-                                    struct sched_param_ex *param_ex);
-
 COBALT_SYSCALL_DECL(thread_getschedparam_ex,
                    int, (unsigned long pth,
                          int __user *u_policy,
diff --git a/lib/cobalt/clock.c b/lib/cobalt/clock.c
index 501cdec..b31517f 100644
--- a/lib/cobalt/clock.c
+++ b/lib/cobalt/clock.c
@@ -134,8 +134,8 @@ static int __do_clock_host_realtime(struct timespec *ts)
                mask = hostrt_data->mask;
                mult = hostrt_data->mult;
                shift = hostrt_data->shift;
-               ts->tv_sec = hostrt_data->wall_time_sec;
-               nsec = hostrt_data->wall_time_nsec;
+               ts->tv_sec = hostrt_data->wall_sec;
+               nsec = hostrt_data->wall_nsec;
        }
 
        cycle_delta = (now - base) & mask;
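
Note: __do_clock_host_realtime() reads a consistent snapshot of the host
wall clock published in the hostrt area (now through the renamed
wall_sec/wall_nsec fields), then extrapolates by the number of clocksource
cycles elapsed since that snapshot. A condensed sketch of the extrapolation
step that follows the quoted hunk (illustration only; the exact helpers and
normalization used by the source may differ):

/*
 * Sketch: convert the elapsed clocksource ticks to nanoseconds and fold
 * them into the snapshot read above.
 */
static void hostrt_extrapolate(struct timespec *ts, unsigned long long nsec,
			       unsigned long long now, unsigned long long base,
			       unsigned long long mask,
			       unsigned int mult, unsigned int shift)
{
	unsigned long long cycle_delta = (now - base) & mask;

	nsec += (cycle_delta * mult) >> shift;	/* ticks -> nanoseconds */
	ts->tv_sec += nsec / 1000000000ULL;	/* fold whole seconds */
	ts->tv_nsec = nsec % 1000000000ULL;
}
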
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index c177e4a..70a97fd 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -59,24 +59,23 @@ static inline struct cobalt_cond_state *
 get_cond_state(struct cobalt_cond_shadow *shadow)
 {
        if (xnsynch_is_shared(shadow->handle))
-               return (struct cobalt_cond_state *)(cobalt_umm_shared
-                                + shadow->state_offset);
-       return shadow->state;
+               return cobalt_umm_shared + shadow->state_offset;
+
+       return cobalt_umm_private + shadow->state_offset;
 }
 
-static inline struct mutex_dat *
+static inline struct cobalt_mutex_state *
 get_mutex_state(struct cobalt_cond_shadow *shadow)
 {
        struct cobalt_cond_state *cond_state = get_cond_state(shadow);
 
-       if (cond_state->mutex_datp == (struct mutex_dat *)~0UL)
+       if (cond_state->mutex_state_offset == ~0U)
                return NULL;
 
        if (xnsynch_is_shared(shadow->handle))
-               return (struct mutex_dat *)(cobalt_umm_shared
-                                           + cond_state->mutex_datp_offset);
+               return cobalt_umm_shared + cond_state->mutex_state_offset;
 
-       return cond_state->mutex_datp;
+       return cobalt_umm_private + cond_state->mutex_state_offset;
 }
 
 void cobalt_default_condattr_init(void)
@@ -134,18 +133,7 @@ COBALT_IMPL(int, pthread_cond_init, (pthread_cond_t *cond,
        if (err)
                return err;
 
-       if (kcattr.pshared)
-               cond_state = get_cond_state(_cnd);
-       else {
-               /*
-                * This is condvar is local to the current process,
-                * build a direct pointer for fast access.
-                */
-               cond_state = (struct cobalt_cond_state *)
-                       (cobalt_umm_private + _cnd->state_offset);
-               _cnd->state = cond_state;
-       }
-
+       cond_state = get_cond_state(_cnd);
        cobalt_commit_memory(cond_state);
 
        return 0;
@@ -405,8 +393,8 @@ COBALT_IMPL(int, pthread_cond_timedwait, (pthread_cond_t *cond,
 COBALT_IMPL(int, pthread_cond_signal, (pthread_cond_t *cond))
 {
        struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+       struct cobalt_mutex_state *mutex_state;
        struct cobalt_cond_state *cond_state;
-       struct mutex_dat *mutex_state;
        __u32 pending_signals;
        xnhandle_t cur;
        __u32 flags;
@@ -457,8 +445,8 @@ COBALT_IMPL(int, pthread_cond_signal, (pthread_cond_t *cond))
 COBALT_IMPL(int, pthread_cond_broadcast, (pthread_cond_t *cond))
 {
        struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+       struct cobalt_mutex_state *mutex_state;
        struct cobalt_cond_state *cond_state;
-       struct mutex_dat *mutex_state;
        xnhandle_t cur;
        __u32 flags;
 
diff --git a/lib/cobalt/current.h b/lib/cobalt/current.h
index d678d69..3e28b83 100644
--- a/lib/cobalt/current.h
+++ b/lib/cobalt/current.h
@@ -43,7 +43,7 @@ static inline xnhandle_t cobalt_get_current_fast(void)
        return cobalt_get_current();
 }
 
-static inline unsigned long cobalt_get_current_mode(void)
+static inline int cobalt_get_current_mode(void)
 {
        return cobalt_current_window ? cobalt_current_window->state : XNRELAX;
 }
@@ -73,7 +73,7 @@ static inline xnhandle_t cobalt_get_current_fast(void)
        return (xnhandle_t)(uintptr_t)val ?: XN_NO_HANDLE;
 }
 
-static inline unsigned long cobalt_get_current_mode(void)
+static inline int cobalt_get_current_mode(void)
 {
        struct xnthread_user_window *window;
 
diff --git a/lib/cobalt/internal.c b/lib/cobalt/internal.c
index aa6292d..5255559 100644
--- a/lib/cobalt/internal.c
+++ b/lib/cobalt/internal.c
@@ -45,7 +45,7 @@ int cobalt_sysconf(int option, void *buf, size_t bufsz)
 
 void cobalt_thread_harden(void)
 {
-       unsigned long status = cobalt_get_current_mode();
+       int status = cobalt_get_current_mode();
 
        /* non-RT shadows are NOT allowed to force primary mode. */
        if ((status & (XNRELAX|XNWEAK)) == XNRELAX)
@@ -54,7 +54,7 @@ void cobalt_thread_harden(void)
 
 void cobalt_thread_relax(void)
 {
-       unsigned long status = cobalt_get_current_mode();
+       int status = cobalt_get_current_mode();
 
        if ((status & XNRELAX) == 0)
                XENOMAI_SYSCALL1(sc_cobalt_migrate, COBALT_SECONDARY);
@@ -161,16 +161,16 @@ size_t cobalt_get_stacksize(size_t size)
 }
 
 static inline
-struct cobalt_monitor_data *get_monitor_data(cobalt_monitor_t *mon)
+struct cobalt_monitor_state *get_monitor_state(cobalt_monitor_t *mon)
 {
        return mon->flags & COBALT_MONITOR_SHARED ?
-               cobalt_umm_shared + mon->u.data_offset :
-               mon->u.data;
+               cobalt_umm_shared + mon->state_offset :
+               cobalt_umm_private + mon->state_offset;
 }
 
 int cobalt_monitor_init(cobalt_monitor_t *mon, clockid_t clk_id, int flags)
 {
-       struct cobalt_monitor_data *datp;
+       struct cobalt_monitor_state *state;
        int ret;
 
        ret = XENOMAI_SYSCALL3(sc_cobalt_monitor_init,
@@ -178,13 +178,8 @@ int cobalt_monitor_init(cobalt_monitor_t *mon, clockid_t clk_id, int flags)
        if (ret)
                return ret;
 
-       if ((flags & COBALT_MONITOR_SHARED) == 0) {
-               datp = cobalt_umm_private + mon->u.data_offset;
-               mon->u.data = datp;
-       } else
-               datp = get_monitor_data(mon);
-
-       cobalt_commit_memory(datp);
+       state = get_monitor_state(mon);
+       cobalt_commit_memory(state);
 
        return 0;
 }
@@ -196,9 +191,8 @@ int cobalt_monitor_destroy(cobalt_monitor_t *mon)
 
 int cobalt_monitor_enter(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp;
-       unsigned long status;
-       int ret, oldtype;
+       struct cobalt_monitor_state *state;
+       int status, ret, oldtype;
        xnhandle_t cur;
 
        /*
@@ -212,11 +206,11 @@ int cobalt_monitor_enter(cobalt_monitor_t *mon)
        if (status & (XNRELAX|XNWEAK))
                goto syscall;
 
-       datp = get_monitor_data(mon);
+       state = get_monitor_state(mon);
        cur = cobalt_get_current();
-       ret = xnsynch_fast_acquire(&datp->owner, cur);
+       ret = xnsynch_fast_acquire(&state->owner, cur);
        if (ret == 0) {
-               datp->flags &= 
~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
+               state->flags &= 
~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
                return 0;
        }
 syscall:
@@ -237,16 +231,15 @@ syscall:
 
 int cobalt_monitor_exit(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp;
-       unsigned long status;
+       struct cobalt_monitor_state *state;
+       int status, ret;
        xnhandle_t cur;
-       int ret;
 
        __sync_synchronize();
 
-       datp = get_monitor_data(mon);
-       if ((datp->flags & COBALT_MONITOR_PENDED) &&
-           (datp->flags & COBALT_MONITOR_SIGNALED))
+       state = get_monitor_state(mon);
+       if ((state->flags & COBALT_MONITOR_PENDED) &&
+           (state->flags & COBALT_MONITOR_SIGNALED))
                goto syscall;
 
        status = cobalt_get_current_mode();
@@ -254,7 +247,7 @@ int cobalt_monitor_exit(cobalt_monitor_t *mon)
                goto syscall;
 
        cur = cobalt_get_current();
-       if (xnsynch_fast_release(&datp->owner, cur))
+       if (xnsynch_fast_release(&state->owner, cur))
                return 0;
 syscall:
        do
@@ -290,21 +283,21 @@ int cobalt_monitor_wait(cobalt_monitor_t *mon, int event,
 void cobalt_monitor_grant(cobalt_monitor_t *mon,
                          struct xnthread_user_window *u_window)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
 
-       datp->flags |= COBALT_MONITOR_GRANTED;
+       state->flags |= COBALT_MONITOR_GRANTED;
        u_window->grant_value = 1;
 }
 
 int cobalt_monitor_grant_sync(cobalt_monitor_t *mon,
                          struct xnthread_user_window *u_window)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
        int ret, oldtype;
 
        cobalt_monitor_grant(mon, u_window);
 
-       if ((datp->flags & COBALT_MONITOR_PENDED) == 0)
+       if ((state->flags & COBALT_MONITOR_PENDED) == 0)
                return 0;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -321,19 +314,19 @@ int cobalt_monitor_grant_sync(cobalt_monitor_t *mon,
 
 void cobalt_monitor_grant_all(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
 
-       datp->flags |= COBALT_MONITOR_GRANTED|COBALT_MONITOR_BROADCAST;
+       state->flags |= COBALT_MONITOR_GRANTED|COBALT_MONITOR_BROADCAST;
 }
 
 int cobalt_monitor_grant_all_sync(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
        int ret, oldtype;
 
        cobalt_monitor_grant_all(mon);
 
-       if ((datp->flags & COBALT_MONITOR_PENDED) == 0)
+       if ((state->flags & COBALT_MONITOR_PENDED) == 0)
                return 0;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -350,19 +343,19 @@ int cobalt_monitor_grant_all_sync(cobalt_monitor_t *mon)
 
 void cobalt_monitor_drain(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
 
-       datp->flags |= COBALT_MONITOR_DRAINED;
+       state->flags |= COBALT_MONITOR_DRAINED;
 }
 
 int cobalt_monitor_drain_sync(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
        int ret, oldtype;
 
        cobalt_monitor_drain(mon);
 
-       if ((datp->flags & COBALT_MONITOR_PENDED) == 0)
+       if ((state->flags & COBALT_MONITOR_PENDED) == 0)
                return 0;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -379,19 +372,19 @@ int cobalt_monitor_drain_sync(cobalt_monitor_t *mon)
 
 void cobalt_monitor_drain_all(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
 
-       datp->flags |= COBALT_MONITOR_DRAINED|COBALT_MONITOR_BROADCAST;
+       state->flags |= COBALT_MONITOR_DRAINED|COBALT_MONITOR_BROADCAST;
 }
 
 int cobalt_monitor_drain_all_sync(cobalt_monitor_t *mon)
 {
-       struct cobalt_monitor_data *datp = get_monitor_data(mon);
+       struct cobalt_monitor_state *state = get_monitor_state(mon);
        int ret, oldtype;
 
        cobalt_monitor_drain_all(mon);
 
-       if ((datp->flags & COBALT_MONITOR_PENDED) == 0)
+       if ((state->flags & COBALT_MONITOR_PENDED) == 0)
                return 0;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -438,30 +431,25 @@ forward:
 }
 
 static inline
-struct cobalt_event_data *get_event_data(cobalt_event_t *event)
+struct cobalt_event_state *get_event_state(cobalt_event_t *event)
 {
        return event->flags & COBALT_EVENT_SHARED ?
-               cobalt_umm_shared + event->u.data_offset :
-               event->u.data;
+               cobalt_umm_shared + event->state_offset :
+               cobalt_umm_private + event->state_offset;
 }
 
 int cobalt_event_init(cobalt_event_t *event, unsigned int value,
                      int flags)
 {
-       struct cobalt_event_data *datp;
+       struct cobalt_event_state *state;
        int ret;
 
        ret = XENOMAI_SYSCALL3(sc_cobalt_event_init, event, value, flags);
        if (ret)
                return ret;
 
-       if ((flags & COBALT_EVENT_SHARED) == 0) {
-               datp = cobalt_umm_private + event->u.data_offset;
-               event->u.data = datp;
-       } else
-               datp = get_event_data(event);
-
-       cobalt_commit_memory(datp);
+       state = get_event_state(event);
+       cobalt_commit_memory(state);
 
        return 0;
 }
@@ -473,14 +461,14 @@ int cobalt_event_destroy(cobalt_event_t *event)
 
 int cobalt_event_post(cobalt_event_t *event, unsigned int bits)
 {
-       struct cobalt_event_data *datp = get_event_data(event);
+       struct cobalt_event_state *state = get_event_state(event);
 
        if (bits == 0)
                return 0;
 
-       __sync_or_and_fetch(&datp->value, bits); /* full barrier. */
+       __sync_or_and_fetch(&state->value, bits); /* full barrier. */
 
-       if ((datp->flags & COBALT_EVENT_PENDED) == 0)
+       if ((state->flags & COBALT_EVENT_PENDED) == 0)
                return 0;
 
        return XENOMAI_SYSCALL1(sc_cobalt_event_sync, event);
@@ -505,9 +493,9 @@ int cobalt_event_wait(cobalt_event_t *event,
 unsigned long cobalt_event_clear(cobalt_event_t *event,
                                 unsigned int bits)
 {
-       struct cobalt_event_data *datp = get_event_data(event);
+       struct cobalt_event_state *state = get_event_state(event);
 
-       return __sync_fetch_and_and(&datp->value, ~bits);
+       return __sync_fetch_and_and(&state->value, ~bits);
 }
 
 int cobalt_event_inquire(cobalt_event_t *event,
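
Note: cobalt_event_post() is wait-free on the fast path: the bits are
OR-ed into the shared state with a full barrier, and the
sc_cobalt_event_sync syscall is only issued when the kernel has flagged
COBALT_EVENT_PENDED, i.e. when someone may actually be sleeping on the
event. Restated as a helper, for illustration only:

/*
 * Sketch of the posting fast path above; a non-zero return means the
 * caller still has to issue the sc_cobalt_event_sync syscall.
 */
static int event_post_needs_sync(struct cobalt_event_state *state,
				 unsigned int bits)
{
	if (bits == 0)
		return 0;

	__sync_or_and_fetch(&state->value, bits);	  /* full barrier */

	return (state->flags & COBALT_EVENT_PENDED) != 0; /* waiters pending? */
}
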
diff --git a/lib/cobalt/internal.h b/lib/cobalt/internal.h
index 8b58666..c7350be 100644
--- a/lib/cobalt/internal.h
+++ b/lib/cobalt/internal.h
@@ -27,18 +27,18 @@ extern void *cobalt_umm_shared;
 
 void cobalt_sigshadow_install_once(void);
 
-static inline struct mutex_dat *mutex_get_datp(struct cobalt_mutex_shadow *shadow)
+static inline
+struct cobalt_mutex_state *mutex_get_state(struct cobalt_mutex_shadow *shadow)
 {
        if (shadow->attr.pshared)
-               return (struct mutex_dat *)
-                       (cobalt_umm_shared + shadow->dat_offset);
+               return cobalt_umm_shared + shadow->state_offset;
 
-       return shadow->dat;
+       return cobalt_umm_private + shadow->state_offset;
 }
 
 static inline atomic_t *mutex_get_ownerp(struct cobalt_mutex_shadow *shadow)
 {
-       return &mutex_get_datp(shadow)->owner;
+       return &mutex_get_state(shadow)->owner;
 }
 
 void cobalt_thread_init(void);
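
Note: across the library, shadow objects no longer cache a user-space
pointer to their kernel-shared state; they keep only an offset into the
owning UMM heap and recompute the address on each access from
cobalt_umm_private or cobalt_umm_shared. Since an offset has the same
width for 32-bit and 64-bit callers, the shadow layout becomes word-size
neutral, which is the point of this commit. The generic form of the idiom,
with placeholder names (illustration only):

/*
 * Sketch of the accessor idiom used above for mutexes, condvars, events
 * and monitors.  Only an offset is stored in the shadow, never a
 * pointer, so the layout does not depend on the caller's word size.
 */
struct shadow_obj {
	unsigned int shared;		/* does the state live in the shared heap? */
	unsigned int state_offset;	/* offset into the owning heap */
};

static inline void *shadow_get_state(const struct shadow_obj *obj,
				     void *umm_private, void *umm_shared)
{
	return (obj->shared ? umm_shared : umm_private) + obj->state_offset;
}
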
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index 90c53f0..93a9bbc 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -92,8 +92,8 @@ COBALT_IMPL(int, pthread_mutex_init, (pthread_mutex_t *mutex,
 {
        struct cobalt_mutex_shadow *_mutex =
                &((union cobalt_mutex_union *)mutex)->shadow_mutex;
+       struct cobalt_mutex_state *state;
        struct cobalt_mutexattr kmattr;
-       struct mutex_dat *datp;
        int err, tmp;
 
        if (_mutex->magic == COBALT_MUTEX_MAGIC) {
@@ -129,14 +129,8 @@ COBALT_IMPL(int, pthread_mutex_init, (pthread_mutex_t *mutex,
        if (err)
                return err;
 
-       if (!_mutex->attr.pshared) {
-               datp = (struct mutex_dat *)
-                       (cobalt_umm_private + _mutex->dat_offset);
-               _mutex->dat = datp;
-       } else
-               datp = mutex_get_datp(_mutex);
-
-       cobalt_commit_memory(datp);
+       state = mutex_get_state(_mutex);
+       cobalt_commit_memory(state);
 
        return err;
 }
@@ -212,9 +206,8 @@ COBALT_IMPL(int, pthread_mutex_lock, (pthread_mutex_t *mutex))
 {
        struct cobalt_mutex_shadow *_mutex =
                &((union cobalt_mutex_union *)mutex)->shadow_mutex;
-       unsigned long status;
+       int status, ret;
        xnhandle_t cur;
-       int err;
 
        cur = cobalt_get_current();
        if (cur == XN_NO_HANDLE)
@@ -230,18 +223,18 @@ COBALT_IMPL(int, pthread_mutex_lock, (pthread_mutex_t *mutex))
         */
        status = cobalt_get_current_mode();
        if ((status & (XNRELAX|XNWEAK)) == 0) {
-               err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
-               if (err == 0) {
+               ret = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
+               if (ret == 0) {
                        _mutex->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
-               if (!err)
-                       err = -EBUSY;
+               ret = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
+               if (ret == 0)
+                       ret = -EBUSY;
        }
 
-       if (err == -EBUSY)
+       if (ret == -EBUSY)
                switch(_mutex->attr.type) {
                case PTHREAD_MUTEX_NORMAL:
                        break;
@@ -256,14 +249,14 @@ COBALT_IMPL(int, pthread_mutex_lock, (pthread_mutex_t *mutex))
                        return 0;
                }
 
-       do {
-               err = XENOMAI_SYSCALL1(sc_cobalt_mutex_lock, _mutex);
-       } while (err == -EINTR);
+       do
+               ret = XENOMAI_SYSCALL1(sc_cobalt_mutex_lock, _mutex);
+       while (ret == -EINTR);
 
-       if (!err)
+       if (ret == 0)
                _mutex->lockcnt = 1;
 
-       return -err;
+       return -ret;
 }
 
 /**
@@ -301,9 +294,8 @@ COBALT_IMPL(int, pthread_mutex_timedlock, (pthread_mutex_t *mutex,
 {
        struct cobalt_mutex_shadow *_mutex =
                &((union cobalt_mutex_union *)mutex)->shadow_mutex;
-       unsigned long status;
+       int status, ret;
        xnhandle_t cur;
-       int err;
 
        cur = cobalt_get_current();
        if (cur == XN_NO_HANDLE)
@@ -315,18 +307,18 @@ COBALT_IMPL(int, pthread_mutex_timedlock, (pthread_mutex_t *mutex,
        /* See __cobalt_pthread_mutex_lock() */
        status = cobalt_get_current_mode();
        if ((status & (XNRELAX|XNWEAK)) == 0) {
-               err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
-               if (err == 0) {
+               ret = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
+               if (ret == 0) {
                        _mutex->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
-               if (!err)
-                       err = -EBUSY;
+               ret = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
+               if (ret == 0)
+                       ret = -EBUSY;
        }
 
-       if (err == -EBUSY)
+       if (ret == -EBUSY)
                switch(_mutex->attr.type) {
                case PTHREAD_MUTEX_NORMAL:
                        break;
@@ -343,12 +335,12 @@ COBALT_IMPL(int, pthread_mutex_timedlock, (pthread_mutex_t *mutex,
                }
 
        do {
-               err = XENOMAI_SYSCALL2(sc_cobalt_mutex_timedlock, _mutex, to);
-       } while (err == -EINTR);
+               ret = XENOMAI_SYSCALL2(sc_cobalt_mutex_timedlock, _mutex, to);
+       } while (ret == -EINTR);
 
-       if (!err)
+       if (ret == 0)
                _mutex->lockcnt = 1;
-       return -err;
+       return -ret;
 }
 
 /**
@@ -379,9 +371,8 @@ COBALT_IMPL(int, pthread_mutex_trylock, (pthread_mutex_t *mutex))
 {
        struct cobalt_mutex_shadow *_mutex =
                &((union cobalt_mutex_union *)mutex)->shadow_mutex;
-       unsigned long status;
+       int status, err;
        xnhandle_t cur;
-       int err;
 
        cur = cobalt_get_current();
        if (cur == XN_NO_HANDLE)
@@ -456,8 +447,8 @@ COBALT_IMPL(int, pthread_mutex_unlock, (pthread_mutex_t *mutex))
 {
        struct cobalt_mutex_shadow *_mutex =
                &((union cobalt_mutex_union *)mutex)->shadow_mutex;
-       struct mutex_dat *datp = NULL;
-       xnhandle_t cur = XN_NO_HANDLE;
+       struct cobalt_mutex_state *state;
+       xnhandle_t cur;
        int err;
 
        if (_mutex->magic != COBALT_MUTEX_MAGIC)
@@ -467,8 +458,8 @@ COBALT_IMPL(int, pthread_mutex_unlock, (pthread_mutex_t *mutex))
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       datp = mutex_get_datp(_mutex);
-       if (xnsynch_fast_owner_check(&datp->owner, cur) != 0)
+       state = mutex_get_state(_mutex);
+       if (xnsynch_fast_owner_check(&state->owner, cur) != 0)
                return EPERM;
 
        if (_mutex->lockcnt > 1) {
@@ -476,13 +467,13 @@ COBALT_IMPL(int, pthread_mutex_unlock, (pthread_mutex_t *mutex))
                return 0;
        }
 
-       if ((datp->flags & COBALT_MUTEX_COND_SIGNAL))
+       if ((state->flags & COBALT_MUTEX_COND_SIGNAL))
                goto do_syscall;
 
        if (cobalt_get_current_mode() & XNWEAK)
                goto do_syscall;
 
-       if (xnsynch_fast_release(&datp->owner, cur))
+       if (xnsynch_fast_release(&state->owner, cur))
                return 0;
 do_syscall:
 
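
All the lock paths above share one shape: try to take the owner word with an
atomic compare-and-swap in user space, fall back to the sc_cobalt_mutex_lock
syscall only on contention (or when the caller runs in XNRELAX/XNWEAK mode),
and restart that syscall for as long as it returns -EINTR. A compact sketch of
that shape; fast_acquire() and lock_syscall() are hypothetical stand-ins, not
the real xnsynch/Cobalt primitives.

    /* Illustrative sketch; requires GCC __sync builtins. */
    #include <errno.h>

    typedef unsigned long handle_t;

    static inline int fast_acquire(unsigned long *owner, handle_t cur)
    {
            /* claim the mutex iff it is currently unowned (owner == 0) */
            return __sync_bool_compare_and_swap(owner, 0, cur) ? 0 : -EBUSY;
    }

    extern int lock_syscall(void *shadow);  /* stand-in for the kernel path */

    static int mutex_lock_sketch(unsigned long *owner, handle_t cur, void *shadow)
    {
            int ret = fast_acquire(owner, cur);
            if (ret == 0)
                    return 0;       /* uncontended: no kernel entry at all */

            do                      /* contended: block in the kernel,   */
                    ret = lock_syscall(shadow);    /* restart on signals */
            while (ret == -EINTR);

            return -ret;    /* kernel returns -errno, POSIX wants a positive code */
    }
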
diff --git a/lib/cobalt/semaphore.c b/lib/cobalt/semaphore.c
index 87cea44..f95c26c 100644
--- a/lib/cobalt/semaphore.c
+++ b/lib/cobalt/semaphore.c
@@ -42,16 +42,15 @@
  *@{
  */
 
-static inline struct sem_dat *sem_get_datp(struct cobalt_sem_shadow *shadow)
+static inline
+struct cobalt_sem_state *sem_get_state(struct cobalt_sem_shadow *shadow)
 {
-       unsigned int pshared = shadow->datp_offset < 0;
+       unsigned int pshared = shadow->state_offset < 0;
 
        if (pshared)
-               return (struct sem_dat *)
-                       (cobalt_umm_shared - shadow->datp_offset);
+               return cobalt_umm_shared - shadow->state_offset;
 
-       return (struct sem_dat *)
-               (cobalt_umm_private + shadow->datp_offset);
+       return cobalt_umm_private + shadow->state_offset;
 }
 
 /**
@@ -94,7 +93,7 @@ COBALT_IMPL(int, sem_init, (sem_t *sem, int pshared, unsigned int value))
                return -1;
        }
 
-       cobalt_commit_memory(sem_get_datp(_sem));
+       cobalt_commit_memory(sem_get_state(_sem));
 
        return 0;
 }
@@ -179,8 +178,8 @@ COBALT_IMPL(int, sem_destroy, (sem_t *sem))
 COBALT_IMPL(int, sem_post, (sem_t *sem))
 {
        struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+       struct cobalt_sem_state *state;
        int value, ret, old, new;
-       struct sem_dat *datp;
 
        if (_sem->magic != COBALT_SEM_MAGIC
            && _sem->magic != COBALT_NAMED_SEM_MAGIC) {
@@ -188,16 +187,16 @@ COBALT_IMPL(int, sem_post, (sem_t *sem))
                return -1;
        }
 
-       datp = sem_get_datp(_sem);
+       state = sem_get_state(_sem);
        smp_mb();
-       value = atomic_read(&datp->value);
+       value = atomic_read(&state->value);
        if (value >= 0) {
-               if (datp->flags & SEM_PULSE)
+               if (state->flags & SEM_PULSE)
                        return 0;
                do {
                        old = value;
                        new = value + 1;
-                       value = atomic_cmpxchg(&datp->value, old, new);
+                       value = atomic_cmpxchg(&state->value, old, new);
                        if (value < 0)
                                goto do_syscall;
                } while (value != old);
@@ -239,7 +238,7 @@ COBALT_IMPL(int, sem_post, (sem_t *sem))
 COBALT_IMPL(int, sem_trywait, (sem_t *sem))
 {
        struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
-       struct sem_dat *datp;
+       struct cobalt_sem_state *state;
        int value, old, new;
 
        if (_sem->magic != COBALT_SEM_MAGIC
@@ -248,14 +247,14 @@ COBALT_IMPL(int, sem_trywait, (sem_t *sem))
                return -1;
        }
 
-       datp = sem_get_datp(_sem);
+       state = sem_get_state(_sem);
        smp_mb();
-       value = atomic_read(&datp->value);
+       value = atomic_read(&state->value);
        if (value > 0) {
                do {
                        old = value;
                        new = value - 1;
-                       value = atomic_cmpxchg(&datp->value, old, new);
+                       value = atomic_cmpxchg(&state->value, old, new);
                        if (value <= 0)
                                goto eagain;
                } while (value != old);
@@ -404,7 +403,7 @@ COBALT_IMPL(int, sem_timedwait, (sem_t *sem, const struct timespec *abs_timeout)
 COBALT_IMPL(int, sem_getvalue, (sem_t *sem, int *sval))
 {
        struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
-       struct sem_dat *datp;
+       struct cobalt_sem_state *state;
        int value;
 
        if (_sem->magic != COBALT_SEM_MAGIC
@@ -413,10 +412,10 @@ COBALT_IMPL(int, sem_getvalue, (sem_t *sem, int *sval))
                return -1;
        }
 
-       datp = sem_get_datp(_sem);
+       state = sem_get_state(_sem);
        smp_mb();
-       value = atomic_read(&datp->value);
-       if (value < 0 && (datp->flags & SEM_REPORT) == 0)
+       value = atomic_read(&state->value);
+       if (value < 0 && (state->flags & SEM_REPORT) == 0)
                value = 0;
 
        *sval = value;
@@ -604,7 +603,7 @@ int sem_init_np(sem_t *sem, int flags, unsigned int value)
 int sem_broadcast_np(sem_t *sem)
 {
        struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
-       struct sem_dat *datp;
+       struct cobalt_sem_state *state;
        int value, ret;
 
        if (_sem->magic != COBALT_SEM_MAGIC
@@ -613,9 +612,9 @@ int sem_broadcast_np(sem_t *sem)
                return -1;
        }
 
-       datp = sem_get_datp(_sem);
+       state = sem_get_state(_sem);
        smp_mb();
-       value = atomic_read(&datp->value);
+       value = atomic_read(&state->value);
        if (value >= 0)
                return 0;
 
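
sem_post() and sem_trywait() above stay in user space as long as the counter
says no thread is blocked: the value is read, then updated in a compare-and-swap
retry loop, and only a negative value (waiters present) forces the syscall.
Note also that sem_get_state() encodes shared vs. private in the sign of
state_offset, so a single fixed-width field selects both the heap and the
position in it. A small sketch of the post side of the counter protocol;
post_syscall() is a hypothetical stand-in for the Cobalt semaphore service.

    /* Illustrative sketch of the lock-free post path; requires GCC __sync
     * builtins. */
    extern int post_syscall(void *sem);

    static int sem_post_sketch(int *value_p, void *sem)
    {
            int value, old;

            __sync_synchronize();           /* pairs with the waiter side */
            value = *(volatile int *)value_p;
            while (value >= 0) {            /* >= 0 means nobody is blocked */
                    old = value;
                    value = __sync_val_compare_and_swap(value_p, old, old + 1);
                    if (value == old)
                            return 0;       /* counter bumped, kernel skipped */
                    if (value < 0)
                            break;          /* a waiter slipped in meanwhile */
            }
            return post_syscall(sem);       /* wake waiters through the kernel */
    }
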
diff --git a/testsuite/clocktest/clocktest.c b/testsuite/clocktest/clocktest.c
index ef19c95..ae73ec0 100644
--- a/testsuite/clocktest/clocktest.c
+++ b/testsuite/clocktest/clocktest.c
@@ -94,16 +94,14 @@ static void show_hostrt_diagnostics(void)
                return;
        }
 
-       printf("Sequence counter : %u\n",
+       printf("sequence counter : %u\n",
               cobalt_vdso->hostrt_data.lock.sequence);
        printf("wall_time_sec    : %lld\n",
-              (unsigned long long)cobalt_vdso->hostrt_data.wall_time_sec);
-       printf("wall_time_nsec   : %u\n", 
cobalt_vdso->hostrt_data.wall_time_nsec);
-       printf("wall_to_monotonic\n");
-       printf("          tv_sec : %jd\n",
-              (intmax_t)cobalt_vdso->hostrt_data.wall_to_monotonic.tv_sec);
-       printf("         tv_nsec : %ld\n",
-              cobalt_vdso->hostrt_data.wall_to_monotonic.tv_nsec);
+              (unsigned long long)cobalt_vdso->hostrt_data.wall_sec);
+       printf("wall_time_nsec   : %u\n", cobalt_vdso->hostrt_data.wall_nsec);
+       printf("wall_to_monotonic_sec    : %lld\n",
+              (unsigned long long)cobalt_vdso->hostrt_data.wtom_sec);
+       printf("wall_to_monotonic_nsec   : %u\n", 
cobalt_vdso->hostrt_data.wtom_nsec);
        printf("cycle_last       : %Lu\n", cobalt_vdso->hostrt_data.cycle_last);
        printf("mask             : 0x%Lx\n", cobalt_vdso->hostrt_data.mask);
        printf("mult             : %u\n", cobalt_vdso->hostrt_data.mult);

