Module: xenomai-jki
Branch: queues/ftrace
Commit: 2f6b8a0ce533b97eb6f8ec90e8de2a62e0ea8eb6
URL:    
http://git.xenomai.org/?p=xenomai-jki.git;a=commit;h=2f6b8a0ce533b97eb6f8ec90e8de2a62e0ea8eb6

Author: Jan Kiszka <jan.kis...@siemens.com>
Date:   Wed Jan  9 15:03:28 2013 +0100

nucleus: Basic trace events conversion

Convert important nucleus tracepoints into TRACE_EVENT format.

This also introduces a new header group: include/trace contains
tracepoint definitions that need to be put into include/trace/events by
prepare-kernel.sh.

Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>

---

 include/trace/xn_nucleus.h |  374 ++++++++++++++++++++++++++++++++++++++++++++
 ksrc/nucleus/pod.c         |   52 ++-----
 ksrc/nucleus/shadow.c      |   43 ++----
 scripts/prepare-kernel.sh  |    3 +-
 4 files changed, 405 insertions(+), 67 deletions(-)

diff --git a/include/trace/xn_nucleus.h b/include/trace/xn_nucleus.h
new file mode 100644
index 0000000..a9c23af
--- /dev/null
+++ b/include/trace/xn_nucleus.h
@@ -0,0 +1,374 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xn_nucleus
+
+#if !defined(_TRACE_XN_NUCLEUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XN_NUCLEUS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(thread_event_verbose,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               memcpy(__entry->thread_name, xnthread_name(thread),
+                      XNOBJECT_NAME_LEN);
+       ),
+
+       TP_printk("thread=%p thread_name=%s",
+                 __entry->thread, __entry->thread_name)
+);
+
+DECLARE_EVENT_CLASS(thread_event,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+       ),
+
+       TP_printk("thread=%p", __entry->thread)
+);
+
+TRACE_EVENT(xn_nucleus_sched,
+       TP_PROTO(struct xnsched *sched),
+       TP_ARGS(sched),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,          status)
+       ),
+
+       TP_fast_assign(
+               __entry->status         = sched->status;
+       ),
+
+       TP_printk("status=%lx", __entry->status)
+);
+
+TRACE_EVENT(xn_nucleus_sched_remote,
+       TP_PROTO(struct xnsched *sched),
+       TP_ARGS(sched),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,          status)
+       ),
+
+       TP_fast_assign(
+               __entry->status         = sched->status;
+       ),
+
+       TP_printk("status=%lx", __entry->status)
+);
+
+TRACE_EVENT(xn_nucleus_sched_switch,
+       TP_PROTO(struct xnthread *prev, struct xnthread *next),
+       TP_ARGS(prev, next),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      prev)
+               __field(struct xnthread *,      next)
+               __array(char,                   prev_name, XNOBJECT_NAME_LEN)
+               __array(char,                   next_name, XNOBJECT_NAME_LEN)
+       ),
+
+       TP_fast_assign(
+               __entry->prev           = prev;
+               __entry->next           = next;
+               memcpy(__entry->prev_name, xnthread_name(prev),
+                      XNOBJECT_NAME_LEN);
+               memcpy(__entry->next_name, xnthread_name(next),
+                      XNOBJECT_NAME_LEN);
+       ),
+
+       TP_printk("prev=%p prev_name=%s next=%p next_name=%s",
+                 __entry->prev, __entry->prev_name,
+                 __entry->next, __entry->next_name)
+);
+
+TRACE_EVENT(xn_nucleus_sched_sigdispatch,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,          signals)
+       ),
+
+       TP_fast_assign(
+               __entry->signals        = thread->signals;
+       ),
+
+       TP_printk("signals=%lx", __entry->signals)
+);
+
+TRACE_EVENT(xn_nucleus_thread_init,
+       TP_PROTO(struct xnthread *thread,
+                const struct xnthread_init_attr *attr,
+                struct xnsched_class *sched_class),
+       TP_ARGS(thread, attr, sched_class),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+               __field(xnflags_t,              flags)
+               __array(char,                   class_name, XNOBJECT_NAME_LEN)
+               __field(int,                    cprio)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               memcpy(__entry->thread_name, xnthread_name(thread),
+                      XNOBJECT_NAME_LEN);
+               __entry->flags          = attr->flags;
+               memcpy(__entry->class_name, sched_class->name,
+                      XNOBJECT_NAME_LEN-1);
+               __entry->class_name[XNOBJECT_NAME_LEN-1] = 0;
+               __entry->cprio          = thread->cprio;
+       ),
+
+       TP_printk("thread=%p thread_name=%s flags=%lx class=%s prio=%d",
+                  __entry->thread, __entry->thread_name, __entry->flags,
+                  __entry->class_name, __entry->cprio)
+);
+
+TRACE_EVENT(xn_nucleus_thread_suspend,
+       TP_PROTO(struct xnthread *thread, xnflags_t mask, xnticks_t timeout,
+                xntmode_t timeout_mode, xnsynch_t *wchan),
+       TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __field(xnflags_t,              mask)
+               __field(xnticks_t,              timeout)
+               __field(xntmode_t,              timeout_mode)
+               __field(xnsynch_t *,            wchan)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               __entry->mask           = mask;
+               __entry->timeout        = timeout;
+               __entry->timeout_mode   = timeout_mode;
+               __entry->wchan          = wchan;
+       ),
+
+       TP_printk("thread=%p mask=%lu timeout=%Lu timeout_mode=%d wchan=%p",
+                 __entry->thread, __entry->mask,
+                 __entry->timeout, __entry->timeout_mode, __entry->wchan)
+);
+
+TRACE_EVENT(xn_nucleus_thread_resume,
+       TP_PROTO(struct xnthread *thread, xnflags_t mask),
+       TP_ARGS(thread, mask),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __field(xnflags_t,              mask)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               __entry->mask           = mask;
+       ),
+
+       TP_printk("thread=%p mask=%lx",
+                 __entry->thread, __entry->mask)
+);
+
+TRACE_EVENT(xn_nucleus_thread_migrate,
+       TP_PROTO(struct xnthread *thread, unsigned int cpu),
+       TP_ARGS(thread, cpu),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+               __field(unsigned int,           cpu)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               memcpy(__entry->thread_name, xnthread_name(thread),
+                      XNOBJECT_NAME_LEN);
+               __entry->cpu            = cpu;
+       ),
+
+       TP_printk("thread=%p thread_name=%s cpu=%u",
+                 __entry->thread, __entry->thread_name, __entry->cpu)
+);
+
+TRACE_EVENT(xn_nucleus_thread_fault,
+       TP_PROTO(struct xnthread *thread, xnarch_fltinfo_t *fltinfo),
+       TP_ARGS(thread, fltinfo),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+               __field(void *,                 ip)
+               __field(unsigned int,           type)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               memcpy(__entry->thread_name, xnthread_name(thread),
+                      XNOBJECT_NAME_LEN);
+               __entry->ip             = (void *)xnarch_fault_pc(fltinfo);
+               __entry->type           = xnarch_fault_trap(fltinfo);
+       ),
+
+       TP_printk("thread=%p thread_name=%s ip=%p type=%x",
+                 __entry->thread, __entry->thread_name, __entry->ip,
+                 __entry->type)
+);
+
+DEFINE_EVENT(thread_event, xn_nucleus_thread_delete,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, xn_nucleus_shadow_gohard,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event_verbose, xn_nucleus_shadow_hardened,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, xn_nucleus_shadow_gorelax,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event_verbose, xn_nucleus_shadow_relaxed,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread)
+);
+
+TRACE_EVENT(xn_nucleus_shadow_map,
+       TP_PROTO(struct xnthread *thread),
+       TP_ARGS(thread),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+               __field(int,                    prio)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               memcpy(__entry->thread_name, xnthread_name(thread),
+                      XNOBJECT_NAME_LEN);
+               __entry->prio           = xnthread_base_priority(thread);
+       ),
+
+       TP_printk("thread=%p thread_name=%s prio=%d",
+                 __entry->thread, __entry->thread_name, __entry->prio)
+);
+
+TRACE_EVENT(xn_nucleus_shadow_unmap,
+       TP_PROTO(struct xnthread *thread, struct task_struct *task),
+       TP_ARGS(thread, task),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+               __field(pid_t,                  pid)
+               __array(char,                   comm, TASK_COMM_LEN)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               memcpy(__entry->thread_name, xnthread_name(thread),
+                      XNOBJECT_NAME_LEN);
+               if (task) {
+                       __entry->pid    = task->pid;
+                       memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+               } else {
+                       __entry->pid    = -1;
+                       __entry->comm[0] = 0;
+               }
+       ),
+
+       TP_printk("thread=%p thread_name=%s pid=%d comm=%s",
+                 __entry->thread, __entry->thread_name, __entry->pid,
+                 __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(syscall_entry,
+       TP_PROTO(struct xnthread *thread, int muxid, int muxop),
+       TP_ARGS(thread, muxid, muxop),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __array(char,                   thread_name, XNOBJECT_NAME_LEN)
+               __field(int,                    muxid)
+               __field(int,                    muxop)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               __entry->thread_name[0] = 0;
+               if (thread)
+                       memcpy(__entry->thread_name, xnthread_name(thread),
+                              XNOBJECT_NAME_LEN);
+               __entry->muxid          = muxid;
+               __entry->muxop          = muxop;
+       ),
+
+       TP_printk("thread=%p thread_name=%s muxid=%d muxop=%d",
+                 __entry->thread, __entry->thread_name, __entry->muxid,
+                 __entry->muxop)
+);
+
+DECLARE_EVENT_CLASS(syscall_exit,
+       TP_PROTO(struct xnthread *thread, int result),
+       TP_ARGS(thread, result),
+
+       TP_STRUCT__entry(
+               __field(struct xnthread *,      thread)
+               __field(int,                    result)
+       ),
+
+       TP_fast_assign(
+               __entry->thread         = thread;
+               __entry->result         = result;
+       ),
+
+       TP_printk("thread=%p result=%d",
+                 __entry->thread, __entry->result)
+);
+
+DEFINE_EVENT(syscall_entry, xn_nucleus_sys_histage_entry,
+       TP_PROTO(struct xnthread *thread, int muxid, int muxop),
+       TP_ARGS(thread, muxid, muxop)
+);
+
+DEFINE_EVENT(syscall_exit, xn_nucleus_sys_histage_exit,
+       TP_PROTO(struct xnthread *thread, int result),
+       TP_ARGS(thread, result)
+);
+
+DEFINE_EVENT(syscall_entry, xn_nucleus_sys_lostage_entry,
+       TP_PROTO(struct xnthread *thread, int muxid, int muxop),
+       TP_ARGS(thread, muxid, muxop)
+);
+
+DEFINE_EVENT(syscall_exit, xn_nucleus_sys_lostage_exit,
+       TP_PROTO(struct xnthread *thread, int result),
+       TP_ARGS(thread, result)
+);
+
+#endif /* _TRACE_XN_NUCLEUS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/ksrc/nucleus/pod.c b/ksrc/nucleus/pod.c
index 0a2ee19..9254d14 100644
--- a/ksrc/nucleus/pod.c
+++ b/ksrc/nucleus/pod.c
@@ -46,6 +46,9 @@
 #include <nucleus/select.h>
 #include <asm/xenomai/bits/pod.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/xn_nucleus.h>
+
 /*
  * NOTE: We need to initialize the globals; remember that this code
  * also runs over the simulator in user-space.
@@ -267,16 +270,13 @@ EXPORT_SYMBOL_GPL(xnpod_fatal_helper);
 
 void xnpod_schedule_handler(void) /* Called with hw interrupts off. */
 {
-       xnsched_t *sched;
+       xnsched_t *sched = xnpod_current_sched();
 
-       trace_mark(xn_nucleus, sched_remote, MARK_NOARGS);
        xnarch_memory_barrier();
+       trace_xn_nucleus_sched_remote(sched);
 #if defined(CONFIG_SMP) && defined(CONFIG_XENO_OPT_PRIOCPL)
-       sched = xnpod_current_sched();
        if (testbits(sched->rpistatus, XNRPICK))
                xnshadow_rpi_check();
-#else
-       (void)sched;
 #endif /* CONFIG_SMP && CONFIG_XENO_OPT_PRIOCPL */
        xnpod_schedule();
 }
@@ -637,10 +637,7 @@ int xnpod_init_thread(struct xnthread *thread,
        if (ret)
                return ret;
 
-       trace_mark(xn_nucleus, thread_init,
-                  "thread %p thread_name %s flags %lu class %s prio %d",
-                  thread, xnthread_name(thread), attr->flags,
-                  sched_class->name, thread->cprio);
+       trace_xn_nucleus_thread_init(thread, attr, sched_class);
 
        xnlock_get_irqsave(&nklock, s);
        appendq(&nkpod->threadq, &thread->glink);
@@ -1143,8 +1140,7 @@ void xnpod_delete_thread(xnthread_t *thread)
                nkpod->schedhook(thread, XNDELETED);
 #endif /* __XENO_SIM__ */
 
-       trace_mark(xn_nucleus, thread_delete, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_xn_nucleus_thread_delete(thread);
 
        removeq(&nkpod->threadq, &thread->glink);
        xnvfile_touch_tag(&nkpod->threadlist_tag);
@@ -1352,11 +1348,8 @@ void xnpod_suspend_thread(xnthread_t *thread, xnflags_t mask,
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_suspend,
-                  "thread %p thread_name %s mask %lu timeout %Lu "
-                  "timeout_mode %d wchan %p",
-                  thread, xnthread_name(thread), mask, timeout,
-                  timeout_mode, wchan);
+       trace_xn_nucleus_thread_suspend(thread, mask, timeout, timeout_mode,
+                                       wchan);
 
        sched = thread->sched;
        oldstate = thread->state;
@@ -1582,9 +1575,7 @@ void xnpod_resume_thread(struct xnthread *thread, xnflags_t mask)
 
        xnlock_get_irqsave(&nklock, s);
 
-       trace_mark(xn_nucleus, thread_resume,
-                  "thread %p thread_name %s mask %lu",
-                  thread, xnthread_name(thread), mask);
+       trace_xn_nucleus_thread_resume(thread, mask);
        xnarch_trace_pid(xnthread_user_task(thread) ?
                         xnarch_user_pid(xnthread_archtcb(thread)) : -1,
                         xnthread_current_priority(thread));
@@ -1941,9 +1932,7 @@ int xnpod_migrate_thread(int cpu)
 
        sched = xnpod_sched_slot(cpu);
 
-       trace_mark(xn_nucleus, thread_migrate,
-                  "thread %p thread_name %s cpu %d",
-                  thread, xnthread_name(thread), cpu);
+       trace_xn_nucleus_thread_migrate(thread, cpu);
 
        __xnpod_release_fpu(thread);
 
@@ -1995,8 +1984,7 @@ void xnpod_dispatch_signals(void)
            || thread->asr == XNTHREAD_INVALID_ASR)
                return;
 
-       trace_mark(xn_nucleus, sched_sigdispatch, "signals %lu",
-                  thread->signals);
+       trace_xn_nucleus_sched_sigdispatch(thread);
 
        /* Start the asynchronous service routine */
        oldmode = xnthread_test_state(thread, XNTHREAD_MODE_BITS);
@@ -2170,10 +2158,10 @@ void __xnpod_schedule(struct xnsched *sched)
        if (xnarch_escalate())
                return;
 
-       trace_mark(xn_nucleus, sched, MARK_NOARGS);
-
        xnlock_get_irqsave(&nklock, s);
 
+       trace_xn_nucleus_sched(sched);
+
        curr = sched->curr;
 
        xnarch_trace_pid(xnthread_user_task(curr) ?
@@ -2204,11 +2192,7 @@ reschedule:
 
        prev = curr;
 
-       trace_mark(xn_nucleus, sched_switch,
-                  "prev %p prev_name %s "
-                  "next %p next_name %s",
-                  prev, xnthread_name(prev),
-                  next, xnthread_name(next));
+       trace_xn_nucleus_sched_switch(prev, next);
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
        shadow = xnthread_test_state(prev, XNSHADOW);
@@ -2542,11 +2526,7 @@ int xnpod_trap_fault(xnarch_fltinfo_t *fltinfo)
 
        thread = xnpod_current_thread();
 
-       trace_mark(xn_nucleus, thread_fault,
-                  "thread %p thread_name %s ip %p type 0x%x",
-                  thread, xnthread_name(thread),
-                  (void *)xnarch_fault_pc(fltinfo),
-                  xnarch_fault_trap(fltinfo));
+       trace_xn_nucleus_thread_fault(thread, fltinfo);
 
 #ifdef __KERNEL__
        if (xnarch_fault_fpu_p(fltinfo)) {
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 50ee035..8927e34 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -56,6 +56,7 @@
 #include <asm/xenomai/features.h>
 #include <asm/xenomai/syscall.h>
 #include <asm/xenomai/bits/shadow.h>
+#include <trace/events/xn_nucleus.h>
 
 static int xn_gid_arg = -1;
 module_param_named(xenomai_gid, xn_gid_arg, int, 0644);
@@ -984,9 +985,7 @@ redo:
         * out.
         */
 
-       trace_mark(xn_nucleus, shadow_gohard,
-                  "thread %p thread_name %s comm %s",
-                  thread, xnthread_name(thread), this_task->comm);
+       trace_xn_nucleus_shadow_gohard(thread);
 
        sched->gktarget = thread;
        xnthread_set_info(thread, XNATOMIC);
@@ -1049,8 +1048,7 @@ redo:
        if (rpi_p(thread))
                rpi_clear_remote(thread);
 
-       trace_mark(xn_nucleus, shadow_hardened, "thread %p thread_name %s",
-                  thread, xnthread_name(thread));
+       trace_xn_nucleus_shadow_hardened(thread);
 
        /*
         * Recheck pending signals once again. As we block task wakeups during
@@ -1119,8 +1117,7 @@ void xnshadow_relax(int notify, int reason)
         * domain to the Linux domain.  This will cause the Linux task
         * to resume using the register state of the shadow thread.
         */
-       trace_mark(xn_nucleus, shadow_gorelax, "thread %p thread_name %s",
-                 thread, xnthread_name(thread));
+       trace_xn_nucleus_shadow_gorelax(thread);
 
        /*
         * If you intend to change the following interrupt-free
@@ -1195,9 +1192,7 @@ void xnshadow_relax(int notify, int reason)
        if (thread->u_mode)
                *(thread->u_mode) = thread->state;
 
-       trace_mark(xn_nucleus, shadow_relaxed,
-                 "thread %p thread_name %s comm %s",
-                 thread, xnthread_name(thread), current->comm);
+       trace_xn_nucleus_shadow_relaxed(thread);
 }
 EXPORT_SYMBOL_GPL(xnshadow_relax);
 
@@ -1333,10 +1328,7 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
        affinity = xnarch_cpumask_of_cpu(xnarch_first_cpu(affinity));
        set_cpus_allowed(current, affinity);
 
-       trace_mark(xn_nucleus, shadow_map,
-                  "thread %p thread_name %s pid %d priority %d",
-                  thread, xnthread_name(thread), current->pid,
-                  xnthread_base_priority(thread));
+       trace_xn_nucleus_shadow_map(thread);
 
        xnarch_init_shadow_tcb(xnthread_archtcb(thread), thread,
                               xnthread_name(thread));
@@ -1437,9 +1429,7 @@ void xnshadow_unmap(xnthread_t *thread)
 
        xnarch_atomic_dec(&sys_ppd->refcnt);
 
-       trace_mark(xn_nucleus, shadow_unmap,
-                  "thread %p thread_name %s pid %d",
-                  thread, xnthread_name(thread), p ? p->pid : -1);
+       trace_xn_nucleus_shadow_unmap(thread, p);
 
        if (!p)
                return;
@@ -2266,10 +2256,7 @@ int do_hisyscall_event(unsigned event, rthal_pipeline_stage_t *stage,
        muxid = __xn_mux_id(regs);
        muxop = __xn_mux_op(regs);
 
-       trace_mark(xn_nucleus, syscall_histage_entry,
-                  "thread %p thread_name %s muxid %d muxop %d",
-                  thread, thread ? xnthread_name(thread) : NULL,
-                  muxid, muxop);
+       trace_xn_nucleus_sys_histage_entry(thread, muxid, muxop);
 
        if (muxid < 0 || muxid > XENOMAI_MUX_NR ||
            muxop < 0 || muxop >= muxtable[muxid].props->nrcalls) {
@@ -2383,8 +2370,7 @@ int do_hisyscall_event(unsigned event, rthal_pipeline_stage_t *stage,
        if (thread && thread->u_mode)
                *thread->u_mode = thread->state;
 
-       trace_mark(xn_nucleus, syscall_histage_exit,
-                  "ret %ld", __xn_reg_rval(regs));
+       trace_xn_nucleus_sys_histage_exit(thread, __xn_reg_rval(regs));
        return RTHAL_EVENT_STOP;
 
       linux_syscall:
@@ -2484,11 +2470,9 @@ int do_losyscall_event(unsigned event, rthal_pipeline_stage_t *stage,
        muxid = __xn_mux_id(regs);
        muxop = __xn_mux_op(regs);
 
-       trace_mark(xn_nucleus, syscall_lostage_entry,
-                  "thread %p thread_name %s muxid %d muxop %d",
-                  xnpod_active_p() ? xnpod_current_thread() : NULL,
-                  xnpod_active_p() ? xnthread_name(xnpod_current_thread()) : NULL,
-                  muxid, muxop);
+       trace_xn_nucleus_sys_lostage_entry(xnpod_active_p() ?
+                                          xnpod_current_thread() : NULL,
+                                          muxid, muxop);
 
        /* Processing a real-time skin syscall. */
 
@@ -2553,8 +2537,7 @@ int do_losyscall_event(unsigned event, rthal_pipeline_stage_t *stage,
        if (thread && thread->u_mode)
                *thread->u_mode = thread->state;
 
-       trace_mark(xn_nucleus, syscall_lostage_exit,
-                  "ret %ld", __xn_reg_rval(regs));
+       trace_xn_nucleus_sys_lostage_exit(thread, __xn_reg_rval(regs));
        return RTHAL_EVENT_STOP;
 }
 
diff --git a/scripts/prepare-kernel.sh b/scripts/prepare-kernel.sh
index 021c650..2da4436 100755
--- a/scripts/prepare-kernel.sh
+++ b/scripts/prepare-kernel.sh
@@ -597,9 +597,10 @@ patch_link r n include/asm-$xenomai_arch $linux_include_asm/xenomai
 patch_architecture_specific="n"
 patch_link r n include/asm-generic include/asm-generic/xenomai
 patch_link n n include include/xenomai
+patch_link n n include/trace include/trace/events
 cd $xenomai_root
 for d in include/* ; do
-    if test -d $d -a -z "`echo $d | grep '^include/asm-'`"; then
+    if test -d $d -a -z "`echo $d | grep '^include/\(asm-\|trace\)'`"; then
         destdir=`echo $d | sed -e 's,^\(include\)\(/.*\)$,\1/xenomai\2,'`
         patch_link r n $d $destdir
     fi


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to