Module: xenomai-forge
Branch: next
Commit: d8047b7abccca48d721f7b70b321cf811a6d60e6
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=d8047b7abccca48d721f7b70b321cf811a6d60e6

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Apr 17 16:02:23 2014 +0200

cobalt/kernel: convert legacy trace markers to kernel tracepoints

Rebase trace statements in the Cobalt core over the generic tracepoint
API. A few former trace markers that did not help in analysing the
dynamic behavior of the system have been dropped in the process.

This work was originally based on Jan Kiszka's trace events conversion
patch series for Xenomai 2.6.
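
For readers skimming the patch, the conversion pattern is sketched
below. This is an illustrative reconstruction from the hunks that
follow, not part of the patch itself; it reuses the thread_event class
and the cobalt_thread_start event actually introduced in
kernel/cobalt/trace/cobalt-core.h.

    /* Before: a legacy LTT marker, passing its payload as a format string. */
    trace_mark(xn_nucleus, thread_start, "thread %p thread_name %s",
               thread, xnthread_name(thread));

    /* After: a typed kernel tracepoint. The event is declared once in
     * kernel/cobalt/trace/cobalt-core.h, on top of a shared event class
     * that records the thread pointer, name, state and info bits. */
    DEFINE_EVENT(thread_event, cobalt_thread_start,
            TP_PROTO(struct xnthread *thread),
            TP_ARGS(thread)
    );

    /* ...and each call site shrinks to a typed call, reduced to a
     * near-nop while the event is disabled. */
    trace_cobalt_thread_start(thread);

With the trace headers linked into include/trace/events by
scripts/prepare-kernel.sh (last hunk), the events should then show up
under the usual ftrace tree, e.g.
/sys/kernel/debug/tracing/events/cobalt-core/.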

---

 include/cobalt/kernel/synch.h     |    3 +
 include/cobalt/kernel/thread.h    |   25 --
 include/cobalt/kernel/trace.h     |    5 +-
 kernel/cobalt/clock.c             |   10 +-
 kernel/cobalt/init.c              |    4 -
 kernel/cobalt/intr.c              |   47 +--
 kernel/cobalt/sched.c             |   16 +-
 kernel/cobalt/shadow.c            |   70 ++--
 kernel/cobalt/synch.c             |   48 +--
 kernel/cobalt/thread.c            |   87 ++---
 kernel/cobalt/timer.c             |   12 +-
 kernel/cobalt/trace/cobalt-core.h |  647 +++++++++++++++++++++++++++++++++++++
 scripts/prepare-kernel.sh         |    3 +
 13 files changed, 760 insertions(+), 217 deletions(-)

diff --git a/include/cobalt/kernel/synch.h b/include/cobalt/kernel/synch.h
index 78f3402..a316e3f 100644
--- a/include/cobalt/kernel/synch.h
+++ b/include/cobalt/kernel/synch.h
@@ -152,6 +152,9 @@ int xnsynch_acquire(struct xnsynch *synch,
                    xnticks_t timeout,
                    xntmode_t timeout_mode);
 
+struct xnthread *xnsynch_release(struct xnsynch *synch,
+                                struct xnthread *thread);
+
 struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
 
 int xnsynch_flush(struct xnsynch *synch, int reason);
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index b28400b..e053845 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -317,31 +317,6 @@ static inline int xnthread_try_grab(struct xnthread *thread,
        return 1;
 }
 
-/*
- * XXX: Mutual dependency issue with synch.h, we have to define
- * xnsynch_release() here.
- */
-static inline struct xnthread *
-xnsynch_release(struct xnsynch *synch, struct xnthread *thread)
-{
-       atomic_long_t *lockp;
-       xnhandle_t threadh;
-
-       XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
-
-       trace_mark(xn_nucleus, synch_release, "synch %p", synch);
-
-       if (unlikely(xnthread_test_state(thread, XNWEAK)))
-               __xnsynch_fixup_rescnt(thread);
-
-       lockp = xnsynch_fastlock(synch);
-       threadh = xnthread_handle(thread);
-       if (likely(xnsynch_fast_release(lockp, threadh)))
-               return NULL;
-
-       return __xnsynch_transfer_ownership(synch, thread);
-}
-
 static inline int normalize_priority(int prio)
 {
        return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
diff --git a/include/cobalt/kernel/trace.h b/include/cobalt/kernel/trace.h
index 87c01d3..d143e3a 100644
--- a/include/cobalt/kernel/trace.h
+++ b/include/cobalt/kernel/trace.h
@@ -23,12 +23,9 @@
 #include <linux/ipipe_trace.h>
 #include <cobalt/uapi/kernel/trace.h>
 
-#ifdef CONFIG_LTT
-#include <linux/marker.h>
-#else
+/* TEMP */
 #undef trace_mark
 #define trace_mark(channel, ev, fmt, args...)  do { } while (0)
-#endif
 
 static inline int xntrace_max_begin(unsigned long v)
 {
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index 479b7b2..71fae16 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -30,6 +30,7 @@
 #include <cobalt/kernel/arith.h>
 #include <cobalt/kernel/vdso.h>
 #include <asm/xenomai/calibration.h>
+#include <trace/events/cobalt-core.h>
 
 unsigned long nktimerlat;
 
@@ -307,9 +308,6 @@ void xnclock_adjust(struct xnclock *clock, xnsticks_t delta)
        nkvdso->wallclock_offset = nkclock.wallclock_offset;
        now = xnclock_read_monotonic(clock) + nkclock.wallclock_offset;
        adjust_clock_timers(clock, delta);
-
-       trace_mark(xn_nucleus, clock_adjust, "clock %s, delta %Lu",
-                  clock->name, delta);
 }
 EXPORT_SYMBOL_GPL(xnclock_adjust);
 
@@ -482,8 +480,6 @@ int xnclock_register(struct xnclock *clock)
 
        secondary_mode_only();
 
-       trace_mark(xn_nucleus, clock_register, "clock %s", clock->name);
-
        /* Allocate the percpu timer queue slot. */
        clock->timerdata = alloc_percpu(struct xntimerdata);
        if (clock->timerdata == NULL)
@@ -525,8 +521,6 @@ void xnclock_deregister(struct xnclock *clock)
 
        secondary_mode_only();
 
-       trace_mark(xn_nucleus, clock_deregister, "clock %s", clock->name);
-
        cleanup_clock_proc(clock);
 
        for_each_online_cpu(cpu) {
@@ -588,7 +582,7 @@ void xnclock_tick(struct xnclock *clock)
                if (delta > (xnsticks_t)clock->gravity)
                        break;
 
-               trace_mark(xn_nucleus, timer_expire, "timer %p", timer);
+               trace_cobalt_timer_expire(timer);
 
                xntimer_dequeue(timer, timerq);
                xntimer_account_fired(timer);
diff --git a/kernel/cobalt/init.c b/kernel/cobalt/init.c
index 5ea48d9..e753820 100644
--- a/kernel/cobalt/init.c
+++ b/kernel/cobalt/init.c
@@ -86,8 +86,6 @@ static void disable_timesource(void)
 {
        int cpu;
 
-       trace_mark(xn_nucleus, disable_timesource, MARK_NOARGS);
-
        /*
         * We must not hold the nklock while stopping the hardware
         * timer, since this could cause deadlock situations to arise
@@ -238,8 +236,6 @@ static __init int enable_timesource(void)
        int ret, cpu, _cpu;
        spl_t s;
 
-       trace_mark(xn_nucleus, enable_timesource, MARK_NOARGS);
-
 #ifdef CONFIG_XENO_OPT_STATS
        /*
         * Only for statistical purpose, the timer interrupt is
diff --git a/kernel/cobalt/intr.c b/kernel/cobalt/intr.c
index 1ba8db4..343bf8b 100644
--- a/kernel/cobalt/intr.c
+++ b/kernel/cobalt/intr.c
@@ -24,12 +24,12 @@
 */
 
 #include <linux/mutex.h>
-
 #include <cobalt/kernel/sched.h>
 #include <cobalt/kernel/intr.h>
 #include <cobalt/kernel/stat.h>
 #include <cobalt/kernel/clock.h>
 #include <cobalt/kernel/assert.h>
+#include <trace/events/cobalt-core.h>
 
 #define XNINTR_MAX_UNHANDLED   1000
 
@@ -78,7 +78,7 @@ static inline void stat_counter_dec(void) {}
 static inline void sync_stat_references(struct xnintr *intr) {}
 #endif /* CONFIG_XENO_OPT_STATS */
 
-static void xnintr_irq_handler(unsigned irq, void *cookie);
+static void xnintr_irq_handler(unsigned int irq, void *cookie);
 
 void xnintr_host_tick(struct xnsched *sched) /* Interrupts off. */
 {
@@ -110,9 +110,7 @@ void xnintr_core_clock_handler(void)
        prev = xnstat_exectime_switch(sched, &statp->account);
        xnstat_counter_inc(&statp->hits);
 
-       trace_mark(xn_nucleus, irq_enter, "irq %u",
-                  per_cpu(ipipe_percpu.hrtimer_irq, cpu));
-       trace_mark(xn_nucleus, clock_tick, MARK_NOARGS);
+       trace_cobalt_clock_entry(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
 
        ++sched->inesting;
        sched->lflags |= XNINIRQ;
@@ -121,6 +119,7 @@ void xnintr_core_clock_handler(void)
        xnclock_tick(&nkclock);
        xnlock_put(&nklock);
 
+       trace_cobalt_clock_exit(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
        xnstat_exectime_switch(sched, prev);
 
        if (--sched->inesting == 0) {
@@ -138,10 +137,6 @@ void xnintr_core_clock_handler(void)
        if ((sched->lflags & XNHTICK) &&
            xnthread_test_state(sched->curr, XNROOT))
                xnintr_host_tick(sched);
-
-       /* We keep tracing the entry CPU, regardless of migration. */
-       trace_mark(xn_nucleus, irq_exit, "irq %u",
-                  per_cpu(ipipe_percpu.hrtimer_irq, cpu));
 }
 
 /* Optional support for shared interrupts. */
@@ -156,7 +151,7 @@ struct xnintr_irq {
 
 static struct xnintr_irq xnirqs[IPIPE_NR_IRQS];
 
-static inline struct xnintr *xnintr_shirq_first(unsigned irq)
+static inline struct xnintr *xnintr_shirq_first(unsigned int irq)
 {
        return xnirqs[irq].handlers;
 }
@@ -170,7 +165,7 @@ static inline struct xnintr *xnintr_shirq_next(struct xnintr *prev)
  * Low-level interrupt handler dispatching the user-defined ISRs for
  * shared interrupts -- Called with interrupts off.
  */
-static void xnintr_shirq_handler(unsigned irq, void *cookie)
+static void xnintr_shirq_handler(unsigned int irq, void *cookie)
 {
        struct xnsched *sched = xnsched_current();
        struct xnintr_irq *shirq = &xnirqs[irq];
@@ -182,7 +177,7 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
 
        prev  = xnstat_exectime_get_current(sched);
        start = xnstat_exectime_now();
-       trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
+       trace_cobalt_irq_entry(irq);
 
        ++sched->inesting;
        sched->lflags |= XNINIRQ;
@@ -231,14 +226,14 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
                xnsched_run();
        }
 
-       trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
+       trace_cobalt_irq_exit(irq);
 }
 
 /*
  * Low-level interrupt handler dispatching the user-defined ISRs for
  * shared edge-triggered interrupts -- Called with interrupts off.
  */
-static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
+static void xnintr_edge_shirq_handler(unsigned int irq, void *cookie)
 {
        const int MAX_EDGEIRQ_COUNTER = 128;
        struct xnsched *sched = xnsched_current();
@@ -251,7 +246,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
 
        prev  = xnstat_exectime_get_current(sched);
        start = xnstat_exectime_now();
-       trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
+       trace_cobalt_irq_entry(irq);
 
        ++sched->inesting;
        sched->lflags |= XNINIRQ;
@@ -312,7 +307,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
                xnsched_run();
        }
 
-       trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
+       trace_cobalt_irq_exit(irq);
 }
 
 static inline int xnintr_irq_attach(struct xnintr *intr)
@@ -433,7 +428,7 @@ static inline void xnintr_irq_detach(struct xnintr *intr)
  * Low-level interrupt handler dispatching non-shared ISRs -- Called
  * with interrupts off.
  */
-static void xnintr_irq_handler(unsigned irq, void *cookie)
+static void xnintr_irq_handler(unsigned int irq, void *cookie)
 {
        struct xnsched *sched = xnsched_current();
        struct xnirqstat *statp;
@@ -444,7 +439,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
 
        prev  = xnstat_exectime_get_current(sched);
        start = xnstat_exectime_now();
-       trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
+       trace_cobalt_irq_entry(irq);
 
        ++sched->inesting;
        sched->lflags |= XNINIRQ;
@@ -499,7 +494,7 @@ unlock_and_exit:
                xnsched_run();
        }
 
-       trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
+       trace_cobalt_irq_exit(irq);
 }
 
 int __init xnintr_mount(void)
@@ -522,7 +517,7 @@ static void clear_irqstats(struct xnintr *intr)
 }
 
 /**
- * @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned irq,xnisr_t isr,xniack_t iack,int flags)
+ * @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned int irq,xnisr_t isr,xniack_t iack,int flags)
  * @brief Initialize an interrupt object.
  *
  * Associates an interrupt object with an IRQ line.
@@ -693,9 +688,6 @@ int xnintr_attach(struct xnintr *intr, void *cookie)
 
        secondary_mode_only();
 
-       trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
-                  intr->irq, intr->name);
-
        intr->cookie = cookie;
        clear_irqstats(intr);
 
@@ -745,8 +737,6 @@ void xnintr_detach(struct xnintr *intr)
 {
        secondary_mode_only();
 
-       trace_mark(xn_nucleus, irq_detach, "irq %u", intr->irq);
-
        mutex_lock(&intrlock);
 
        if (intr->flags & XN_ISR_ATTACHED) {
@@ -775,7 +765,7 @@ EXPORT_SYMBOL_GPL(xnintr_detach);
 void xnintr_enable(struct xnintr *intr)
 {
        secondary_mode_only();
-       trace_mark(xn_nucleus, irq_enable, "irq %u", intr->irq);
+       trace_cobalt_irq_enable(intr->irq);
        ipipe_enable_irq(intr->irq);
 }
 EXPORT_SYMBOL_GPL(xnintr_enable);
@@ -797,7 +787,7 @@ EXPORT_SYMBOL_GPL(xnintr_enable);
 void xnintr_disable(struct xnintr *intr)
 {
        secondary_mode_only();
-       trace_mark(xn_nucleus, irq_disable, "irq %u", intr->irq);
+       trace_cobalt_irq_disable(intr->irq);
        ipipe_disable_irq(intr->irq);
 }
 EXPORT_SYMBOL_GPL(xnintr_disable);
@@ -823,9 +813,6 @@ EXPORT_SYMBOL_GPL(xnintr_disable);
 void xnintr_affinity(struct xnintr *intr, cpumask_t cpumask)
 {
        secondary_mode_only();
-       trace_mark(xn_nucleus, irq_affinity, "irq %u %lu",
-                  intr->irq, *(unsigned long *)&cpumask);
-
 #ifdef CONFIG_SMP
        ipipe_set_irq_affinity(intr->irq, cpumask);
 #endif
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 93e44ac..61d0580 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -25,6 +25,8 @@
 #include <cobalt/kernel/heap.h>
 #include <cobalt/kernel/shadow.h>
 #include <cobalt/kernel/arith.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-core.h>
 
 DEFINE_PER_CPU(struct xnsched, nksched);
 EXPORT_PER_CPU_SYMBOL_GPL(nksched);
@@ -108,9 +110,7 @@ static void watchdog_handler(struct xntimer *timer)
        if (likely(++sched->wdcount < wd_timeout_arg))
                return;
 
-       trace_mark(xn_nucleus, watchdog_signal,
-                  "thread %p thread_name %s",
-                  curr, xnthread_name(curr));
+       trace_cobalt_watchdog_signal(curr);
 
        if (xnthread_test_state(curr, XNUSER)) {
                printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
@@ -721,7 +721,7 @@ static inline void leave_root(struct xnthread *root)
 
 void __xnsched_run_handler(void) /* hw interrupts off. */
 {
-       trace_mark(xn_nucleus, sched_remote, MARK_NOARGS);
+       trace_cobalt_schedule_remote(xnsched_current());
        xnsched_run();
 }
 
@@ -734,7 +734,7 @@ int __xnsched_run(struct xnsched *sched)
        if (xnarch_escalate())
                return 0;
 
-       trace_mark(xn_nucleus, sched, MARK_NOARGS);
+       trace_cobalt_schedule(sched);
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -758,11 +758,7 @@ reschedule:
 
        prev = curr;
 
-       trace_mark(xn_nucleus, sched_switch,
-                  "prev %p prev_name %s "
-                  "next %p next_name %s",
-                  prev, xnthread_name(prev),
-                  next, xnthread_name(next));
+       trace_cobalt_switch_context(prev, next);
 
        if (xnthread_test_state(next, XNROOT))
                xnsched_reset_watchdog(sched);
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 1d188f6..9da7c22 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -58,6 +58,7 @@
 #include <cobalt/kernel/ppd.h>
 #include <cobalt/kernel/vdso.h>
 #include <cobalt/kernel/thread.h>
+#include <trace/events/cobalt-core.h>
 #include <asm/xenomai/features.h>
 #include <asm/xenomai/syscall.h>
 #include <asm-generic/xenomai/mayday.h>
@@ -259,8 +260,7 @@ static void lostage_task_wakeup(struct ipipe_work_header *work)
        rq = container_of(work, struct lostage_wakeup, work);
        p = rq->task;
 
-       trace_mark(xn_nucleus, lostage_wakeup, "comm %s pid %d",
-                  p->comm, p->pid);
+       trace_cobalt_lostage_wakeup(p);
 
        wake_up_process(p);
 }
@@ -275,6 +275,8 @@ static void post_wakeup(struct task_struct *p)
                .task = p,
        };
 
+       trace_cobalt_lostage_request("wakeup", wakework.task);
+
        ipipe_post_work_root(&wakework, work);
 }
 
@@ -312,8 +314,7 @@ static void lostage_task_signal(struct ipipe_work_header *work)
 
        signo = rq->signo;
 
-       trace_mark(xn_nucleus, lostage_signal, "comm %s pid %d sig %d",
-                  p->comm, p->pid, signo);
+       trace_cobalt_lostage_signal(p, signo);
 
        if (signo == SIGSHADOW || signo == SIGDEBUG) {
                memset(&si, '\0', sizeof(si));
@@ -484,9 +485,7 @@ int xnshadow_harden(void)
        if (signal_pending(p))
                return -ERESTARTSYS;
 
-       trace_mark(xn_nucleus, shadow_gohard,
-                  "thread %p name %s comm %s",
-                  thread, xnthread_name(thread), p->comm);
+       trace_cobalt_shadow_gohard(thread);
 
        xnthread_clear_sync_window(thread, XNRELAX);
 
@@ -504,8 +503,7 @@ int xnshadow_harden(void)
        xnsched_resched_after_unlocked_switch();
        xnthread_test_cancel();
 
-       trace_mark(xn_nucleus, shadow_hardened, "thread %p name %s",
-                  thread, xnthread_name(thread));
+       trace_cobalt_shadow_hardened(thread);
 
        /*
         * Recheck pending signals once again. As we block task
@@ -561,8 +559,7 @@ void xnshadow_relax(int notify, int reason)
         * domain to the Linux domain.  This will cause the Linux task
         * to resume using the register state of the shadow thread.
         */
-       trace_mark(xn_nucleus, shadow_gorelax, "thread %p thread_name %s",
-                 thread, xnthread_name(thread));
+       trace_cobalt_shadow_gorelax(thread);
 
        /*
         * If you intend to change the following interrupt-free
@@ -623,9 +620,7 @@ void xnshadow_relax(int notify, int reason)
        }
 #endif
 
-       trace_mark(xn_nucleus, shadow_relaxed,
-                 "thread %p thread_name %s comm %s",
-                 thread, xnthread_name(thread), p->comm);
+       trace_cobalt_shadow_relaxed(thread);
 }
 EXPORT_SYMBOL_GPL(xnshadow_relax);
 
@@ -935,10 +930,7 @@ int xnshadow_map_user(struct xnthread *thread,
        __xn_put_user(xnheap_mapped_offset(sem_heap, u_window), u_window_offset);
        pin_to_initial_cpu(thread);
 
-       trace_mark(xn_nucleus, shadow_map_user,
-                  "thread %p thread_name %s pid %d priority %d",
-                  thread, xnthread_name(thread), current->pid,
-                  xnthread_base_priority(thread));
+       trace_cobalt_shadow_map(thread);
 
        /*
         * CAUTION: we enable the pipeline notifier only when our
@@ -1001,6 +993,8 @@ static inline void wakeup_parent(struct completion *done)
                .done = done,
        };
 
+       trace_cobalt_lostage_request("wakeup", current);
+
        ipipe_post_work_root(&wakework, work);
 }
 
@@ -1061,10 +1055,7 @@ int xnshadow_map_kernel(struct xnthread *thread, struct completion *done)
        thread->u_window = NULL;
        pin_to_initial_cpu(thread);
 
-       trace_mark(xn_nucleus, shadow_map_kernel,
-                  "thread %p thread_name %s pid %d priority %d",
-                  thread, xnthread_name(thread), p->pid,
-                  xnthread_base_priority(thread));
+       trace_cobalt_shadow_map(thread);
 
        xnthread_init_shadow_tcb(thread, p);
        xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
@@ -1113,9 +1104,7 @@ EXPORT_SYMBOL_GPL(xnshadow_map_kernel);
 
 void xnshadow_finalize(struct xnthread *thread)
 {
-       trace_mark(xn_nucleus, shadow_finalize,
-                  "thread %p thread_name %s pid %d",
-                  thread, xnthread_name(thread), xnthread_host_pid(thread));
+       trace_cobalt_shadow_finalize(thread);
 
        xnthread_run_handler_stack(thread, finalize_thread);
 }
@@ -1684,6 +1673,8 @@ void xnshadow_send_sig(struct xnthread *thread, int sig, int arg)
                .sigval = arg,
        };
 
+       trace_cobalt_lostage_request("signal", sigwork.task);
+
        ipipe_post_work_root(&sigwork, work);
 }
 EXPORT_SYMBOL_GPL(xnshadow_send_sig);
@@ -1878,10 +1869,7 @@ static int handle_head_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
        muxid = __xn_mux_id(regs);
        muxop = __xn_mux_op(regs);
 
-       trace_mark(xn_nucleus, syscall_histage_entry,
-                  "thread %p thread_name %s muxid %d muxop %d",
-                  thread, thread ? xnthread_name(thread) : NULL,
-                  muxid, muxop);
+       trace_cobalt_head_sysentry(thread, muxid, muxop);
 
        if (muxid < 0 || muxid >= NR_PERSONALITIES || muxop < 0)
                goto bad_syscall;
@@ -2024,8 +2012,7 @@ ret_handled:
                xnthread_sync_window(thread);
        }
 
-       trace_mark(xn_nucleus, syscall_histage_exit,
-                  "ret %ld", __xn_reg_rval(regs));
+       trace_cobalt_head_sysexit(thread, __xn_reg_rval(regs));
 
        return EVENT_STOP;
 
@@ -2090,11 +2077,7 @@ static int handle_root_syscall(struct ipipe_domain *ipd, struct pt_regs *regs)
        muxid = __xn_mux_id(regs);
        muxop = __xn_mux_op(regs);
 
-       trace_mark(xn_nucleus, syscall_lostage_entry,
-                  "thread %p thread_name %s muxid %d muxop %d",
-                  xnsched_current_thread(),
-                  xnthread_name(xnsched_current_thread()),
-                  muxid, muxop);
+       trace_cobalt_root_sysentry(thread, muxid, muxop);
 
        /* Processing a Xenomai syscall. */
 
@@ -2166,8 +2149,7 @@ ret_handled:
                xnthread_sync_window(thread);
        }
 
-       trace_mark(xn_nucleus, syscall_lostage_exit,
-                  "ret %ld", __xn_reg_rval(regs));
+       trace_cobalt_root_sysexit(thread, __xn_reg_rval(regs));
 
        return EVENT_STOP;
 }
@@ -2193,11 +2175,9 @@ static int handle_taskexit_event(struct task_struct *p) /* p == current */
        secondary_mode_only();
        thread = xnshadow_current();
        XENO_BUGON(NUCLEUS, thread == NULL);
+       trace_cobalt_shadow_unmap(thread);
        personality = thread->personality;
 
-       trace_mark(xn_nucleus, shadow_exit, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
-
        if (xnthread_test_state(thread, XNDEBUG))
                unlock_timers();
 
@@ -2501,8 +2481,8 @@ int ipipe_kevent_hook(int kevent, void *data)
 
 static inline int handle_exception(struct ipipe_trap_data *d)
 {
-       struct xnsched *sched;
        struct xnthread *thread;
+       struct xnsched *sched;
 
        sched = xnsched_current();
        thread = sched->curr;
@@ -2510,11 +2490,7 @@ static inline int handle_exception(struct ipipe_trap_data *d)
        if (xnthread_test_state(thread, XNROOT))
                return 0;
 
-       trace_mark(xn_nucleus, thread_fault,
-                  "thread %p thread_name %s ip %p type 0x%x",
-                  thread, xnthread_name(thread),
-                  (void *)xnarch_fault_pc(d),
-                  xnarch_fault_trap(d));
+       trace_cobalt_thread_fault(thread, d);
 
        if (xnarch_fault_fpu_p(d)) {
 #ifdef CONFIG_XENO_HW_FPU
diff --git a/kernel/cobalt/synch.c b/kernel/cobalt/synch.c
index cdf485f..c35beb9 100644
--- a/kernel/cobalt/synch.c
+++ b/kernel/cobalt/synch.c
@@ -26,6 +26,7 @@
 #include <cobalt/kernel/thread.h>
 #include <cobalt/kernel/clock.h>
 #include <cobalt/kernel/shadow.h>
+#include <trace/events/cobalt-core.h>
 
 /**
  * @fn void xnsynch_init(struct xnsynch *synch, int flags,
@@ -141,9 +142,7 @@ int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, synch_sleepon,
-                  "thread %p thread_name %s synch %p",
-                  thread, xnthread_name(thread), synch);
+       trace_cobalt_synch_sleepon(synch, thread);
 
        if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
                list_add_tail(&thread->plink, &synch->pendq);
@@ -193,12 +192,10 @@ struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
                goto out;
        }
 
+       trace_cobalt_synch_wakeup(synch);
        thread = list_first_entry(&synch->pendq, struct xnthread, plink);
        list_del(&thread->plink);
        thread->wchan = NULL;
-       trace_mark(xn_nucleus, synch_wakeup_one,
-                  "thread %p thread_name %s synch %p",
-                  thread, xnthread_name(thread), synch);
        xnthread_resume(thread, XNPEND);
 out:
        xnlock_put_irqrestore(&nklock, s);
@@ -220,14 +217,13 @@ int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
        if (list_empty(&synch->pendq))
                goto out;
 
+       trace_cobalt_synch_wakeup_many(synch);
+
        list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
                if (nwakeups++ >= nr)
                        break;
                list_del(&thread->plink);
                thread->wchan = NULL;
-               trace_mark(xn_nucleus, synch_wakeup_many,
-                          "thread %p thread_name %s synch %p",
-                          thread, xnthread_name(thread), synch);
                xnthread_resume(thread, XNPEND);
        }
 out:
@@ -267,11 +263,9 @@ void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper
 
        xnlock_get_irqsave(&nklock, s);
 
+       trace_cobalt_synch_wakeup(synch);
        list_del(&sleeper->plink);
        sleeper->wchan = NULL;
-       trace_mark(xn_nucleus, synch_wakeup_this,
-                  "thread %p thread_name %s synch %p",
-                  sleeper, xnthread_name(sleeper), synch);
        xnthread_resume(sleeper, XNPEND);
 
        xnlock_put_irqrestore(&nklock, s);
@@ -349,7 +343,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
 
        XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
 
-       trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);
+       trace_cobalt_synch_acquire(synch, thread);
 redo:
        fastlock = atomic_long_cmpxchg(lockp, XN_NO_HANDLE, threadh);
 
@@ -505,6 +499,27 @@ EXPORT_SYMBOL_GPL(xnsynch_acquire);
  *
  * @remark Tags: none.
  */
+struct xnthread *xnsynch_release(struct xnsynch *synch,
+                                struct xnthread *thread)
+{
+       atomic_long_t *lockp;
+       xnhandle_t threadh;
+
+       XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
+
+       trace_cobalt_synch_release(synch);
+
+       if (unlikely(xnthread_test_state(thread, XNWEAK)))
+               __xnsynch_fixup_rescnt(thread);
+
+       lockp = xnsynch_fastlock(synch);
+       threadh = xnthread_handle(thread);
+       if (likely(xnsynch_fast_release(lockp, threadh)))
+               return NULL;
+
+       return __xnsynch_transfer_ownership(synch, thread);
+}
+EXPORT_SYMBOL_GPL(xnsynch_release);
 
 /**
  * @internal
@@ -745,8 +760,7 @@ int xnsynch_flush(struct xnsynch *synch, int reason)
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
-                  synch, reason);
+       trace_cobalt_synch_flush(synch);
 
        if (list_empty(&synch->pendq)) {
                XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_CLAIMED);
@@ -787,9 +801,7 @@ void xnsynch_forget_sleeper(struct xnthread *thread)
        struct xnsynch *synch = thread->wchan, *nsynch;
        struct xnthread *owner, *target;
 
-       trace_mark(xn_nucleus, synch_forget,
-                  "thread %p thread_name %s synch %p",
-                  thread, xnthread_name(thread), synch);
+       trace_cobalt_synch_forget(synch);
 
        xnthread_clear_state(thread, XNPEND);
        thread->wchan = NULL;
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index e97ea9c..fe3ddef 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -36,6 +36,7 @@
 #include <cobalt/kernel/shadow.h>
 #include <cobalt/kernel/lock.h>
 #include <cobalt/kernel/thread.h>
+#include <trace/events/cobalt-core.h>
 
 /**
  * @ingroup nucleus
@@ -107,8 +108,7 @@ static int kthread_trampoline(void *arg)
                return ret;
        }
 
-       trace_mark(xn_nucleus, thread_boot, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_cobalt_shadow_entry(thread);
 
        thread->entry(thread->cookie);
 
@@ -462,9 +462,6 @@ void __xnthread_cleanup(struct xnthread *curr)
 
        secondary_mode_only();
 
-       trace_mark(xn_nucleus, thread_cleanup, "thread %p thread_name %s",
-                  curr, xnthread_name(curr));
-
        xntimer_destroy(&curr->rtimer);
        xntimer_destroy(&curr->ptimer);
 
@@ -574,10 +571,7 @@ int xnthread_init(struct xnthread *thread,
        if (ret)
                return ret;
 
-       trace_mark(xn_nucleus, thread_init,
-                  "thread %p thread_name %s flags %lu class %s prio %d",
-                  thread, xnthread_name(thread), attr->flags,
-                  sched_class->name, thread->cprio);
+       trace_cobalt_thread_init(thread, attr, sched_class);
 
        xnlock_get_irqsave(&nklock, s);
        list_add_tail(&thread->glink, &nkthreadq);
@@ -647,8 +641,7 @@ int xnthread_start(struct xnthread *thread,
        thread->entry = attr->entry;
        thread->cookie = attr->cookie;
 
-       trace_mark(xn_nucleus, thread_start, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_cobalt_thread_start(thread);
 
        xnthread_resume(thread, XNDORMANT);
        xnsched_run();
@@ -705,13 +698,10 @@ int xnthread_set_mode(struct xnthread *thread, int clrmask, int setmask)
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_setmode,
-                  "thread %p thread_name %s clrmask 0x%x setmask 0x%x",
-                  thread, xnthread_name(thread), clrmask, setmask);
-
        oldmode = xnthread_state_flags(thread) & XNTHREAD_MODE_BITS;
        xnthread_clear_state(thread, clrmask & XNTHREAD_MODE_BITS);
        xnthread_set_state(thread, setmask & XNTHREAD_MODE_BITS);
+       trace_cobalt_thread_set_mode(thread);
 
        /*
         * Marking the thread as (non-)preemptible requires special
@@ -810,11 +800,7 @@ void xnthread_suspend(struct xnthread *thread, int mask,
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_suspend,
-                  "thread %p thread_name %s mask %lu timeout %Lu "
-                  "timeout_mode %d wchan %p",
-                  thread, xnthread_name(thread), mask, timeout,
-                  timeout_mode, wchan);
+       trace_cobalt_thread_suspend(thread, mask, timeout, timeout_mode, wchan);
 
        sched = thread->sched;
        oldstate = thread->state;
@@ -1019,9 +1005,7 @@ void xnthread_resume(struct xnthread *thread, int mask)
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_resume,
-                  "thread %p thread_name %s mask %lu",
-                  thread, xnthread_name(thread), mask);
+       trace_cobalt_thread_resume(thread, mask);
 
        xntrace_pid(xnthread_host_pid(thread), xnthread_current_priority(thread));
 
@@ -1147,10 +1131,7 @@ int xnthread_unblock(struct xnthread *thread)
         */
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_unblock,
-                  "thread %p thread_name %s state %lu",
-                  thread, xnthread_name(thread),
-                  xnthread_state_flags(thread));
+       trace_cobalt_thread_unblock(thread);
 
        if (xnthread_test_state(thread, XNDELAY))
                xnthread_resume(thread, XNDELAY);
@@ -1227,11 +1208,6 @@ int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate,
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_setperiodic,
-                  "thread %p thread_name %s idate %Lu mode %d period %Lu timer %p",
-                  thread, xnthread_name(thread), idate, timeout_mode, period,
-                  &thread->ptimer);
-
        if (period == XN_INFINITE) {
                if (xntimer_running_p(&thread->ptimer))
                        xntimer_stop(&thread->ptimer);
@@ -1307,7 +1283,7 @@ int xnthread_wait_period(unsigned long *overruns_r)
        unsigned long overruns = 0;
        struct xnthread *thread;
        xnticks_t now;
-       int err = 0;
+       int ret = 0;
        spl_t s;
 
        thread = xnsched_current_thread();
@@ -1315,19 +1291,18 @@ int xnthread_wait_period(unsigned long *overruns_r)
        xnlock_get_irqsave(&nklock, s);
 
        if (unlikely(!xntimer_running_p(&thread->ptimer))) {
-               err = -EWOULDBLOCK;
-               goto unlock_and_exit;
+               ret = -EWOULDBLOCK;
+               goto out;
        }
 
-       trace_mark(xn_nucleus, thread_waitperiod, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_cobalt_thread_wait_period(thread);
 
        now = xnclock_read_raw(&nkclock);
        if (likely((xnsticks_t)(now - xntimer_pexpect(&thread->ptimer)) < 0)) {
                xnthread_suspend(thread, XNDELAY, XN_INFINITE, XN_RELATIVE, NULL);
                if (unlikely(xnthread_test_info(thread, XNBREAK))) {
-                       err = -EINTR;
-                       goto unlock_and_exit;
+                       ret = -EINTR;
+                       goto out;
                }
 
                now = xnclock_read_raw(&nkclock);
@@ -1335,21 +1310,16 @@ int xnthread_wait_period(unsigned long *overruns_r)
 
        overruns = xntimer_get_overruns(&thread->ptimer, now);
        if (overruns) {
-               err = -ETIMEDOUT;
-
-               trace_mark(xn_nucleus, thread_missedperiod,
-                          "thread %p thread_name %s overruns %lu",
-                          thread, xnthread_name(thread), overruns);
+               ret = -ETIMEDOUT;
+               trace_cobalt_thread_missed_period(thread);
        }
 
        if (likely(overruns_r != NULL))
                *overruns_r = overruns;
-
-      unlock_and_exit:
-
+ out:
        xnlock_put_irqrestore(&nklock, s);
 
-       return err;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(xnthread_wait_period);
 
@@ -1446,8 +1416,7 @@ void xnthread_cancel(struct xnthread *thread)
        if (xnthread_test_info(thread, XNCANCELD))
                goto check_self_cancel;
 
-       trace_mark(xn_nucleus, thread_cancel, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_cobalt_thread_cancel(thread);
 
        xnthread_set_info(thread, XNCANCELD);
 
@@ -1526,8 +1495,7 @@ int xnthread_join(struct xnthread *thread, bool uninterruptible)
                return 0;
        }
 
-       trace_mark(xn_nucleus, thread_join, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_cobalt_thread_join(thread);
 
        if (ipipe_root_p) {
                if (xnthread_test_state(thread, XNJOINED)) {
@@ -1621,9 +1589,7 @@ int xnthread_migrate(int cpu)
        if (sched == xnthread_sched(thread))
                goto unlock_and_exit;
 
-       trace_mark(xn_nucleus, thread_migrate,
-                  "thread %p thread_name %s cpu %d",
-                  thread, xnthread_name(thread), cpu);
+       trace_cobalt_thread_migrate(thread, cpu);
 
        /* Move to remote scheduler. */
        xnsched_migrate(thread, sched);
@@ -1659,12 +1625,10 @@ EXPORT_SYMBOL_GPL(xnthread_migrate);
 
 void xnthread_migrate_passive(struct xnthread *thread, struct xnsched *sched)
 {                              /* nklocked, IRQs off */
-       trace_mark(xn_nucleus, thread_migrate_passive,
-                  "thread %p thread_name %s cpu %d",
-                  thread, xnthread_name(thread), xnsched_cpu(sched));
-
        if (thread->sched == sched)
                return;
+
+       trace_cobalt_thread_migrate_passive(thread, xnsched_cpu(sched));
        /*
         * Timer migration is postponed until the next timeout happens
         * for the periodic and rrb timers. The resource timer will be
@@ -1770,11 +1734,6 @@ int __xnthread_set_schedparam(struct xnthread *thread,
 
        new_wprio = thread->wprio;
 
-       trace_mark(xn_nucleus, set_thread_schedparam,
-                  "thread %p thread_name %s class %s prio %d",
-                  thread, xnthread_name(thread),
-                  thread->sched_class->name, thread->cprio);
-
        /*
         * Update the pending order of the thread inside its wait
         * queue, unless this behaviour has been explicitly disabled
diff --git a/kernel/cobalt/timer.c b/kernel/cobalt/timer.c
index 5a46c36..e493ac6 100644
--- a/kernel/cobalt/timer.c
+++ b/kernel/cobalt/timer.c
@@ -44,6 +44,7 @@
 #include <cobalt/kernel/clock.h>
 #include <cobalt/kernel/trace.h>
 #include <cobalt/kernel/arith.h>
+#include <trace/events/cobalt-core.h>
 
 int xntimer_heading_p(struct xntimer *timer)
 {
@@ -109,9 +110,7 @@ int xntimer_start(struct xntimer *timer,
        struct xnsched *sched;
        xnticks_t date, now;
 
-       trace_mark(xn_nucleus, timer_start,
-                  "timer %p value %Lu interval %Lu mode %u",
-                  timer, value, interval, mode);
+       trace_cobalt_timer_start(timer, value, interval, mode);
 
        if ((timer->status & XNTIMER_DEQUEUED) == 0)
                xntimer_dequeue(timer, q);
@@ -178,7 +177,7 @@ void __xntimer_stop(struct xntimer *timer)
        struct xnsched *sched;
        int heading;
 
-       trace_mark(xn_nucleus, timer_stop, "timer %p", timer);
+       trace_cobalt_timer_stop(timer);
 
        heading = xntimer_heading_p(timer);
        xntimer_dequeue(timer, q);
@@ -439,12 +438,11 @@ void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
        struct xnclock *clock;
        xntimerq_t *q;
 
-       trace_mark(xn_nucleus, timer_migrate, "timer %p cpu %d",
-                  timer, (int)xnsched_cpu(sched));
-
        if (sched == timer->sched)
                return;
 
+       trace_cobalt_timer_migrate(timer, xnsched_cpu(sched));
+
        if (timer->status & XNTIMER_DEQUEUED)
                timer->sched = sched;
        else {
diff --git a/kernel/cobalt/trace/cobalt-core.h b/kernel/cobalt/trace/cobalt-core.h
new file mode 100644
index 0000000..c33c452
--- /dev/null
+++ b/kernel/cobalt/trace/cobalt-core.h
@@ -0,0 +1,647 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt-core
+
+#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_CORE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(thread_event,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(name, xnthread_name(thread))
+               __field(pid_t, pid)
+               __field(unsigned long, state)
+               __field(unsigned long, info)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(name, xnthread_name(thread));
+               __entry->state = thread->state;
+               __entry->info = thread->info;
+               __entry->pid = xnthread_host_pid(thread);
+       ),
+
+       TP_printk("thread=%p(%s) pid=%d state=0x%lx info=0x%lx",
+                 __entry->thread, __get_str(name), __entry->pid,
+                 __entry->state, __entry->info)
+);
+
+DECLARE_EVENT_CLASS(synch_wait_event,
+       TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
+       TP_ARGS(synch, thread),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(name, xnthread_name(thread))
+               __field(struct xnsynch *, synch)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(name, xnthread_name(thread));
+               __entry->synch = synch;
+       ),
+
+       TP_printk("synch=%p thread=%p(%s)",
+                 __entry->synch, __entry->thread, __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(synch_post_event,
+       TP_PROTO(struct xnsynch *synch),
+       TP_ARGS(synch),
+
+       TP_STRUCT__entry(
+               __field(struct xnsynch *, synch)
+       ),
+
+       TP_fast_assign(
+               __entry->synch = synch;
+       ),
+
+       TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(irq_event,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, irq)
+       ),
+
+       TP_fast_assign(
+               __entry->irq = irq;
+       ),
+
+       TP_printk("irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(clock_event,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, irq)
+       ),
+
+       TP_fast_assign(
+               __entry->irq = irq;
+       ),
+
+       TP_printk("clock_irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(syscall_entry,
+       TP_PROTO(struct xnthread *thread, int muxid, int muxop),
+       TP_ARGS(thread, muxid, muxop),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(name, thread ? xnthread_name(thread) : "(anon)")
+               __field(int, muxid)
+               __field(int, muxop)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(name, thread ? xnthread_name(thread) : "(anon)");
+               __entry->muxid = muxid;
+               __entry->muxop = muxop;
+       ),
+
+       TP_printk("thread=%p(%s) muxid=%d muxop=%d",
+                 __entry->thread, __get_str(name), __entry->muxid,
+                 __entry->muxop)
+);
+
+DECLARE_EVENT_CLASS(syscall_exit,
+       TP_PROTO(struct xnthread *thread, int result),
+       TP_ARGS(thread, result),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __field(int, result)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __entry->result = result;
+       ),
+
+       TP_printk("thread=%p result=%d",
+                 __entry->thread, __entry->result)
+);
+
+DECLARE_EVENT_CLASS(thread_migrate,
+       TP_PROTO(struct xnthread *thread, unsigned int cpu),
+       TP_ARGS(thread, cpu),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(name, xnthread_name(thread))
+               __field(unsigned int, cpu)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(name, xnthread_name(thread));
+               __entry->cpu = cpu;
+       ),
+
+       TP_printk("thread=%p(%s) cpu=%u",
+                 __entry->thread, __get_str(name), __entry->cpu)
+);
+
+DECLARE_EVENT_CLASS(timer_event,
+       TP_PROTO(struct xntimer *timer),
+       TP_ARGS(timer),
+
+       TP_STRUCT__entry(
+               __field(struct xntimer *, timer)
+       ),
+
+       TP_fast_assign(
+               __entry->timer = timer;
+       ),
+
+       TP_printk("timer=%p", __entry->timer)
+);
+
+TRACE_EVENT(cobalt_schedule,
+       TP_PROTO(struct xnsched *sched),
+       TP_ARGS(sched),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, status)
+       ),
+
+       TP_fast_assign(
+               __entry->status = sched->status;
+       ),
+
+       TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_schedule_remote,
+       TP_PROTO(struct xnsched *sched),
+       TP_ARGS(sched),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, status)
+       ),
+
+       TP_fast_assign(
+               __entry->status = sched->status;
+       ),
+
+       TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_switch_context,
+       TP_PROTO(struct xnthread *prev, struct xnthread *next),
+       TP_ARGS(prev, next),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, prev)
+               __field(struct xnthread *, next)
+               __string(prev_name, xnthread_name(prev))
+               __string(next_name, xnthread_name(next))
+       ),
+
+       TP_fast_assign(
+               __entry->prev = prev;
+               __entry->next = next;
+               __assign_str(prev_name, xnthread_name(prev));
+               __assign_str(next_name, xnthread_name(next));
+       ),
+
+       TP_printk("prev=%p(%s) next=%p(%s)",
+                 __entry->prev, __get_str(prev_name),
+                 __entry->next, __get_str(next_name))
+);
+
+TRACE_EVENT(cobalt_thread_init,
+       TP_PROTO(struct xnthread *thread,
+                const struct xnthread_init_attr *attr,
+                struct xnsched_class *sched_class),
+       TP_ARGS(thread, attr, sched_class),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(thread_name, xnthread_name(thread))
+               __string(class_name, sched_class->name)
+               __field(unsigned long, flags)
+               __field(int, cprio)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(thread_name, xnthread_name(thread));
+               __entry->flags = attr->flags;
+               __assign_str(class_name, sched_class->name);
+               __entry->cprio = thread->cprio;
+       ),
+
+       TP_printk("thread=%p(%s) flags=0x%lx class=%s prio=%d",
+                  __entry->thread, __get_str(thread_name), __entry->flags,
+                  __get_str(class_name), __entry->cprio)
+);
+
+TRACE_EVENT(cobalt_thread_suspend,
+       TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
+                xntmode_t timeout_mode, struct xnsynch *wchan),
+       TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __field(unsigned long, mask)
+               __field(xnticks_t, timeout)
+               __field(xntmode_t, timeout_mode)
+               __field(struct xnsynch *, wchan)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __entry->mask = mask;
+               __entry->timeout = timeout;
+               __entry->timeout_mode = timeout_mode;
+               __entry->wchan = wchan;
+       ),
+
+       TP_printk("thread=%p mask=%lu timeout=%Lu timeout_mode=%d wchan=%p",
+                 __entry->thread, __entry->mask,
+                 __entry->timeout, __entry->timeout_mode, __entry->wchan)
+);
+
+TRACE_EVENT(cobalt_thread_resume,
+       TP_PROTO(struct xnthread *thread, unsigned long mask),
+       TP_ARGS(thread, mask),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __field(unsigned long, mask)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __entry->mask = mask;
+       ),
+
+       TP_printk("thread=%p mask=0x%lx",
+                 __entry->thread, __entry->mask)
+);
+
+TRACE_EVENT(cobalt_thread_fault,
+       TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
+       TP_ARGS(thread, td),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(name, xnthread_name(thread))
+               __field(void *, ip)
+               __field(unsigned int, type)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(name, xnthread_name(thread));
+               __entry->ip = (void *)xnarch_fault_pc(td);
+               __entry->type = xnarch_fault_trap(td);
+       ),
+
+       TP_printk("thread=%p(%s) ip=%p type=%x",
+                 __entry->thread, __get_str(name), __entry->ip,
+                 __entry->type)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_start,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_cancel,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_join,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_unblock,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_wait_period,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_missed_period,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_set_mode,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_migrate, cobalt_thread_migrate,
+       TP_PROTO(struct xnthread *thread, unsigned int cpu),
+       TP_ARGS(thread, cpu)
+);
+
+DEFINE_EVENT(thread_migrate, cobalt_thread_migrate_passive,
+       TP_PROTO(struct xnthread *thread, unsigned int cpu),
+       TP_ARGS(thread, cpu)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_gohard,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_watchdog_signal,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_hardened,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_gorelax,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_relaxed,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_entry,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_shadow_map,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *, thread)
+               __string(name, xnthread_name(thread))
+               __field(int, prio)
+       ),
+
+       TP_fast_assign(
+               __entry->thread = thread;
+               __assign_str(name, xnthread_name(thread));
+               __entry->prio = xnthread_base_priority(thread);
+       ),
+
+       TP_printk("thread=%p(%s) prio=%d",
+                 __entry->thread, __get_str(name), __entry->prio)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_unmap,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_shadow_finalize,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_lostage_request,
+       TP_PROTO(const char *type, struct task_struct *task),
+       TP_ARGS(type, task),
+
+       TP_STRUCT__entry(
+               __field(pid_t, pid)
+               __array(char, comm, TASK_COMM_LEN)
+               __field(const char *, type)
+       ),
+
+       TP_fast_assign(
+               __entry->type = type;
+               __entry->pid = task->pid;
+               memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+       ),
+
+       TP_printk("request=%s pid=%d comm=%s",
+                 __entry->type, __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_wakeup,
+       TP_PROTO(struct task_struct *task),
+       TP_ARGS(task),
+
+       TP_STRUCT__entry(
+               __field(pid_t, pid)
+               __array(char, comm, TASK_COMM_LEN)
+       ),
+
+       TP_fast_assign(
+               __entry->pid = task->pid;
+               memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+       ),
+
+       TP_printk("pid=%d comm=%s",
+                 __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_signal,
+       TP_PROTO(struct task_struct *task, int sig),
+       TP_ARGS(task, sig),
+
+       TP_STRUCT__entry(
+               __field(pid_t, pid)
+               __array(char, comm, TASK_COMM_LEN)
+               __field(int, sig)
+       ),
+
+       TP_fast_assign(
+               __entry->pid = task->pid;
+               __entry->sig = sig;
+               memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+       ),
+
+       TP_printk("pid=%d comm=%s sig=%d",
+                 __entry->pid, __entry->comm, __entry->sig)
+);
+
+DEFINE_EVENT(syscall_entry, cobalt_head_sysentry,
+       TP_PROTO(struct xnthread *thread, int muxid, int muxop),
+       TP_ARGS(thread, muxid, muxop)
+);
+
+DEFINE_EVENT(syscall_exit, cobalt_head_sysexit,
+       TP_PROTO(struct xnthread *thread, int result),
+       TP_ARGS(thread, result)
+);
+
+DEFINE_EVENT(syscall_entry, cobalt_root_sysentry,
+       TP_PROTO(struct xnthread *thread, int muxid, int muxop),
+       TP_ARGS(thread, muxid, muxop)
+);
+
+DEFINE_EVENT(syscall_exit, cobalt_root_sysexit,
+       TP_PROTO(struct xnthread *thread, int result),
+       TP_ARGS(thread, result)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_entry,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_exit,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_enable,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_disable,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_entry,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_exit,
+       TP_PROTO(unsigned int irq),
+       TP_ARGS(irq)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_stop,
+       TP_PROTO(struct xntimer *timer),
+       TP_ARGS(timer)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_expire,
+       TP_PROTO(struct xntimer *timer),
+       TP_ARGS(timer)
+);
+
+#define cobalt_print_timer_mode(mode)                  \
+       __print_symbolic(mode,                          \
+                        { XN_RELATIVE, "rel" },        \
+                        { XN_ABSOLUTE, "abs" },        \
+                        { XN_REALTIME, "rt" })
+
+TRACE_EVENT(cobalt_timer_start,
+       TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
+                xntmode_t mode),
+       TP_ARGS(timer, value, interval, mode),
+
+       TP_STRUCT__entry(
+               __field(struct xntimer *, timer)
+#ifdef CONFIG_XENO_OPT_STATS
+               __string(handler, timer->handler_name)
+#endif
+               __field(xnticks_t, value)
+               __field(xnticks_t, interval)
+               __field(xntmode_t, mode)
+       ),
+
+       TP_fast_assign(
+               __entry->timer = timer;
+#ifdef CONFIG_XENO_OPT_STATS
+               __assign_str(handler, timer->handler_name);
+#endif
+               __entry->value = value;
+               __entry->interval = interval;
+               __entry->mode = mode;
+       ),
+
+       TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
+                 __entry->timer,
+#ifdef CONFIG_XENO_OPT_STATS
+                 __get_str(handler),
+#else
+                 "(anon)",
+#endif
+                 __entry->value, __entry->interval,
+                 cobalt_print_timer_mode(__entry->mode))
+);
+
+#ifdef CONFIG_SMP
+
+TRACE_EVENT(cobalt_timer_migrate,
+       TP_PROTO(struct xntimer *timer, unsigned int cpu),
+       TP_ARGS(timer, cpu),
+
+       TP_STRUCT__entry(
+               __field(struct xntimer *, timer)
+               __field(unsigned int, cpu)
+       ),
+
+       TP_fast_assign(
+               __entry->timer = timer;
+               __entry->cpu = cpu;
+       ),
+
+       TP_printk("timer=%p cpu=%u",
+                 __entry->timer, __entry->cpu)
+);
+
+#endif /* CONFIG_SMP */
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
+       TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
+       TP_ARGS(synch, thread)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
+       TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
+       TP_ARGS(synch, thread)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_release,
+       TP_PROTO(struct xnsynch *synch),
+       TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
+       TP_PROTO(struct xnsynch *synch),
+       TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
+       TP_PROTO(struct xnsynch *synch),
+       TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
+       TP_PROTO(struct xnsynch *synch),
+       TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
+       TP_PROTO(struct xnsynch *synch),
+       TP_ARGS(synch)
+);
+
+#endif /* _TRACE_COBALT_CORE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/scripts/prepare-kernel.sh b/scripts/prepare-kernel.sh
index cff8764..0fc11ad 100755
--- a/scripts/prepare-kernel.sh
+++ b/scripts/prepare-kernel.sh
@@ -439,6 +439,9 @@ patch_link r m kernel/cobalt/arch/$linux_arch arch/$linux_arch/xenomai
 patch_link n n kernel/cobalt/include/ipipe arch/$linux_arch/include/ipipe
 patch_architecture_specific="n"
 patch_link n m kernel/cobalt kernel/xenomai
+patch_link n cobalt-core.h kernel/cobalt/trace include/trace/events
+patch_link n cobalt-rtdm.h kernel/cobalt/trace include/trace/events
+patch_link n cobalt-posix.h kernel/cobalt/trace include/trace/events
 patch_link r n kernel/cobalt/include/asm-generic/xenomai include/asm-generic/xenomai
 patch_link n m kernel/cobalt/posix kernel/xenomai/posix
 patch_link n m kernel/cobalt/rtdm kernel/xenomai/rtdm

