[Xenomai-git] Philippe Gerum : cobalt/intr: turn irq statistics into per-cpu data

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 7986453e91dbb50cce5f1fba45cc5a7dbf0623a0
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=7986453e91dbb50cce5f1fba45cc5a7dbf0623a0

Author: Philippe Gerum r...@xenomai.org
Date:   Tue Jul 30 21:55:43 2013 +0200

cobalt/intr: turn irq statistics into per-cpu data

---

 include/cobalt/kernel/intr.h |   50 +
 kernel/cobalt/intr.c |  122 +-
 kernel/cobalt/pod.c  |4 +-
 3 files changed, 102 insertions(+), 74 deletions(-)

diff --git a/include/cobalt/kernel/intr.h b/include/cobalt/kernel/intr.h
index cd14a1e..835f7fd 100644
--- a/include/cobalt/kernel/intr.h
+++ b/include/cobalt/kernel/intr.h
@@ -46,32 +46,36 @@ typedef int (*xnisr_t)(struct xnintr *intr);
 
 typedef void (*xniack_t)(unsigned irq, void *arg);
 
-typedef struct xnintr {
+struct xnirqstat {
+   /* ! Number of handled receipts since attachment. */
+   xnstat_counter_t hits;
+   /* ! Runtime accounting entity */
+   xnstat_exectime_t account;
+   /* ! Accumulated accounting entity */
+   xnstat_exectime_t sum;
+};
 
+typedef struct xnintr {
 #ifdef CONFIG_XENO_OPT_SHIRQ
-struct xnintr *next; /* ! Next object in the IRQ-sharing chain. */
+   /* ! Next object in the IRQ-sharing chain. */
+   struct xnintr *next;
 #endif /* CONFIG_XENO_OPT_SHIRQ */
-
-unsigned unhandled;/* ! Number of consequent unhandled interrupts 
*/
-
-xnisr_t isr;   /* ! Interrupt service routine. */
-
-void *cookie;  /* ! User-defined cookie value. */
-
-int flags; /* ! Creation flags. */
-
-unsigned irq;  /* ! IRQ number. */
-
-xniack_t iack; /* ! Interrupt acknowledge routine. */
-
-const char *name;  /* ! Symbolic name. */
-
-struct {
-   xnstat_counter_t hits;/* ! Number of handled receipts since 
attachment. */
-   xnstat_exectime_t account; /* ! Runtime accounting entity */
-   xnstat_exectime_t sum; /* ! Accumulated accounting entity */
-} stat[NR_CPUS];
-
+   /* ! Number of consequent unhandled interrupts */
+   unsigned int unhandled;
+   /* ! Interrupt service routine. */
+   xnisr_t isr;
+   /* ! User-defined cookie value. */
+   void *cookie;
+   /* ! Creation flags. */
+   int flags;
+   /* ! IRQ number. */
+   unsigned int irq;
+   /* ! Interrupt acknowledge routine. */
+   xniack_t iack;
+   /* ! Symbolic name. */
+   const char *name;
+   /* ! Statistics. */
+   struct xnirqstat *stats;
 } xnintr_t;
 
 typedef struct xnintr_iterator {
diff --git a/kernel/cobalt/intr.c b/kernel/cobalt/intr.c
index 37851ff..7625fb4 100644
--- a/kernel/cobalt/intr.c
+++ b/kernel/cobalt/intr.c
@@ -51,35 +51,38 @@ static int xnintr_list_rev;  /* Modification counter of 
xnintr list */
  * This guarantees that module.c::stat_seq_open() won't get
  * an up-to-date xnintr_list_rev and old xnintr_count. */
 
-static inline void xnintr_stat_counter_inc(void)
+static inline void stat_counter_inc(void)
 {
xnintr_count++;
smp_mb();
xnintr_list_rev++;
 }
 
-static inline void xnintr_stat_counter_dec(void)
+static inline void stat_counter_dec(void)
 {
xnintr_count--;
smp_mb();
xnintr_list_rev++;
 }
 
-static inline void xnintr_sync_stat_references(xnintr_t *intr)
+static inline void sync_stat_references(struct xnintr *intr)
 {
+   struct xnirqstat *statp;
+   struct xnsched *sched;
int cpu;
 
for_each_online_cpu(cpu) {
-   struct xnsched *sched = xnpod_sched_slot(cpu);
+   sched = xnpod_sched_slot(cpu);
+   statp = per_cpu_ptr(intr->stats, cpu);
/* Synchronize on all dangling references to go away. */
-   while (sched->current_account == intr->stat[cpu].account)
+   while (sched->current_account == statp->account)
cpu_relax();
}
 }
 #else
-static inline void xnintr_stat_counter_inc(void) {}
-static inline void xnintr_stat_counter_dec(void) {}
-static inline void xnintr_sync_stat_references(xnintr_t *intr) {}
+static inline void stat_counter_inc(void) {}
+static inline void stat_counter_dec(void) {}
+static inline void sync_stat_references(xnintr_t *intr) {}
 #endif /* CONFIG_XENO_OPT_STATS */
 
 static void xnintr_irq_handler(unsigned irq, void *cookie);
@@ -100,10 +103,12 @@ void xnintr_core_clock_handler(void)
 {
struct xnsched *sched = xnpod_current_sched();
int cpu  __maybe_unused = xnsched_cpu(sched);
+   struct xnirqstat *statp;
xnstat_exectime_t *prev;
 
-   prev = xnstat_exectime_switch(sched, &nktimer.stat[cpu].account);
-   xnstat_counter_inc(&nktimer.stat[cpu].hits);
+   statp = __this_cpu_ptr(nktimer.stats);
+   prev = xnstat_exectime_switch(sched, &statp->account);
+   xnstat_counter_inc(&statp->hits);
 
trace_mark(xn_nucleus, irq_enter, irq %u,
   

[Xenomai-git] Philippe Gerum : cobalt/pod: fix deletion of dormant threads

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 12883e792750f109c15e149a416eed3ca055b155
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=12883e792750f109c15e149a416eed3ca055b155

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 12:47:52 2013 +0200

cobalt/pod: fix deletion of dormant threads

A dormant thread which receives a cancellation request through
xnpod_cancel_thread() must wake up first, for reaching a cancellation
point asap.

In no way shall this thread's resources be dropped over the caller's
context, this would break the basic assumption with the Cobalt core,
that resources must be claimed on behalf of the exiting thread only.

---

 kernel/cobalt/pod.c |   11 +--
 1 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 5ff2a9d..3fd270d 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -650,7 +650,7 @@ static inline int moving_target(struct xnsched *sched, 
struct xnthread *thread)
return ret;
 }
 
-static void cleanup_thread(struct xnthread *thread) /* nklock held, irqs off */
+static inline void cleanup_thread(struct xnthread *thread) /* nklock held, 
irqs off */
 {
	struct xnsched *sched = thread->sched;
 
@@ -764,8 +764,15 @@ void xnpod_cancel_thread(struct xnthread *thread)
 
xnthread_set_info(thread, XNCANCELD);
 
+   /*
+* If @thread is not started yet, fake a start request,
+* raising the kicked condition bit to make sure it will reach
+* xnpod_testcancel_thread() on its wakeup path.
+*/
if (xnthread_test_state(thread, XNDORMANT)) {
-   cleanup_thread(thread);
+   xnthread_set_info(thread, XNKICKED);
+   xnpod_resume_thread(thread, XNDORMANT);
+   xnpod_schedule();
goto unlock_and_exit;
}
 


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git


[Xenomai-git] Philippe Gerum : cobalt/thread: move non-critical cleanup code out of critical section

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 5d4c329d2bf5aadfd6c36d298b72a15414f0bfca
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=5d4c329d2bf5aadfd6c36d298b72a15414f0bfca

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 14:29:34 2013 +0200

cobalt/thread: move non-critical cleanup code out of critical section

Since the thread cleanup code can only run on behalf of the root
domain, and only from the exiting context, we may do a lot of
housekeeping locklessly.

Remove those fully private cleanups from the nklocked section, which
should help reducing the overall latency upon thread exit.

---

 include/cobalt/kernel/thread.h |2 +-
 kernel/cobalt/pod.c|   31 ---
 kernel/cobalt/shadow.c |2 +-
 kernel/cobalt/thread.c |4 +---
 4 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index 28b4296..191c24e 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -324,7 +324,7 @@ void xnthread_init_shadow_tcb(struct xnthread *thread,
 
 void xnthread_init_root_tcb(struct xnthread *thread);
 
-void xnthread_cleanup(struct xnthread *thread);
+void xnthread_deregister(struct xnthread *thread);
 
 char *xnthread_format_status(unsigned long status, char *buf, int size);
 
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 3fd270d..5d911af 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -654,9 +654,6 @@ static inline void cleanup_thread(struct xnthread *thread) 
/* nklock held, irqs
 {
	struct xnsched *sched = thread->sched;
 
-   trace_mark(xn_nucleus, thread_cleanup, thread %p thread_name %s,
-  thread, xnthread_name(thread));
-
	list_del(&thread->glink);
	nkpod->nrthreads--;
	xnvfile_touch_tag(&nkpod->threadlist_tag);
@@ -667,16 +664,8 @@ static inline void cleanup_thread(struct xnthread *thread) 
/* nklock held, irqs
xnthread_clear_state(thread, XNREADY);
}
 
-   xntimer_destroy(&thread->rtimer);
-   xntimer_destroy(&thread->ptimer);
-   xntimer_destroy(&thread->rrbtimer);
	thread->idtag = 0;
 
-   if (thread->selector) {
-   xnselector_destroy(thread->selector);
-   thread->selector = NULL;
-   }
-
if (xnthread_test_state(thread, XNPEND))
xnsynch_forget_sleeper(thread);
 
@@ -695,12 +684,9 @@ static inline void cleanup_thread(struct xnthread *thread) 
/* nklock held, irqs
return;
 
xnsched_forget(thread);
-   xnthread_cleanup(thread);
+   xnthread_deregister(thread);
/* Finalize last since this incurs releasing the TCB. */
xnshadow_finalize(thread);
-
-   if (xnthread_test_state(sched->curr, XNROOT))
-   xnfreesync();
 }
 
 void __xnpod_cleanup_thread(struct xnthread *thread)
@@ -708,9 +694,24 @@ void __xnpod_cleanup_thread(struct xnthread *thread)
spl_t s;
 
XENO_BUGON(NUCLEUS, !ipipe_root_p);
+
+   trace_mark(xn_nucleus, thread_cleanup, thread %p thread_name %s,
+  thread, xnthread_name(thread));
+
+   xntimer_destroy(&thread->rtimer);
+   xntimer_destroy(&thread->ptimer);
+   xntimer_destroy(&thread->rrbtimer);
+
+   if (thread->selector) {
+   xnselector_destroy(thread->selector);
+   thread->selector = NULL;
+   }
+
	xnlock_get_irqsave(&nklock, s);
	cleanup_thread(thread);
+   xnfreesync();
	xnlock_put_irqrestore(&nklock, s);
+
	wake_up(&nkjoinq);
 }
 
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 11776ec..4d0b761 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -1025,7 +1025,7 @@ void xnshadow_finalize(struct xnthread *thread)
 {
struct xnsys_ppd *sys_ppd;
 
-   trace_mark(xn_nucleus, shadow_unmap,
+   trace_mark(xn_nucleus, shadow_finalize,
   thread %p thread_name %s pid %d,
   thread, xnthread_name(thread), xnthread_host_pid(thread));
 
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 5d268a2..4e0ae56 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -229,10 +229,8 @@ void xnthread_init_root_tcb(struct xnthread *thread)
xnarch_init_root_tcb(tcb);
 }
 
-void xnthread_cleanup(struct xnthread *thread)
+void xnthread_deregister(struct xnthread *thread)
 {
-   /* Does not wreck the TCB, only releases the held resources. */
-
if (thread-registry.handle != XN_NO_HANDLE)
xnregistry_remove(thread-registry.handle);
 


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git


[Xenomai-git] Philippe Gerum : cobalt/pod: drop the XNSTARTED bit from the thread state mask

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 748499e90bbc074bf0a1105ca028d83d8ebb397b
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=748499e90bbc074bf0a1105ca028d83d8ebb397b

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 11:36:22 2013 +0200

cobalt/pod: drop the XNSTARTED bit from the thread state mask

Since XNDORMANT now exclusively means never started, we don't need
the XNSTARTED bit anymore.

Strictly speaking, XNDORMANT == !XNSTARTED and conversely.

---

 include/cobalt/uapi/kernel/thread.h |   29 ++---
 kernel/cobalt/pod.c |2 +-
 kernel/cobalt/sched.c   |4 ++--
 kernel/cobalt/shadow.c  |4 ++--
 4 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/include/cobalt/uapi/kernel/thread.h 
b/include/cobalt/uapi/kernel/thread.h
index 7913d82..069507b 100644
--- a/include/cobalt/uapi/kernel/thread.h
+++ b/include/cobalt/uapi/kernel/thread.h
@@ -35,21 +35,20 @@
 #define XNREADY   0x0008 /** Linked to the ready queue. */
 #define XNDORMANT 0x0010 /** Not started yet */
 #define XNZOMBIE  0x0020 /** Zombie thread in deletion process */
-#define XNSTARTED 0x0040 /** Thread has been started */
-#define XNMAPPED  0x0080 /** Thread is mapped to a linux task */
-#define XNRELAX   0x0100 /** Relaxed shadow thread (blocking bit) */
-#define XNMIGRATE 0x0200 /** Thread is currently migrating to another 
CPU. */
-#define XNHELD0x0400 /** Thread is held to process emergency. */
+#define XNMAPPED  0x0040 /** Thread is mapped to a linux task */
+#define XNRELAX   0x0080 /** Relaxed shadow thread (blocking bit) */
+#define XNMIGRATE 0x0100 /** Thread is currently migrating to another 
CPU. */
+#define XNHELD0x0200 /** Thread is held to process emergency. */
 
-#define XNBOOST   0x0800 /** Undergoes a PIP boost */
-#define XNDEBUG   0x1000 /** Hit a debugger breakpoint */
-#define XNLOCK0x2000 /** Holds the scheduler lock (i.e. not 
preemptible) */
-#define XNRRB 0x4000 /** Undergoes a round-robin scheduling */
-#define XNTRAPSW  0x8000 /** Trap execution mode switches */
-#define XNFPU 0x0001 /** Thread uses FPU */
-#define XNROOT0x0002 /** Root thread (that is, Linux/IDLE) */
-#define XNWEAK0x0004 /** Non real-time shadow (from the WEAK class) */
-#define XNUSER0x0008 /** Shadow thread running in userland */
+#define XNBOOST   0x0400 /** Undergoes a PIP boost */
+#define XNDEBUG   0x0800 /** Hit a debugger breakpoint */
+#define XNLOCK0x1000 /** Holds the scheduler lock (i.e. not 
preemptible) */
+#define XNRRB 0x2000 /** Undergoes a round-robin scheduling */
+#define XNTRAPSW  0x4000 /** Trap execution mode switches */
+#define XNFPU 0x8000 /** Thread uses FPU */
+#define XNROOT0x0001 /** Root thread (that is, Linux/IDLE) */
+#define XNWEAK0x0002 /** Non real-time shadow (from the WEAK class) */
+#define XNUSER0x0004 /** Shadow thread running in userland */
 
 /** @} */
 
@@ -91,7 +90,7 @@
  * 'r' - Undergoes round-robin.
  * 't' - Mode switches trapped.
  */
-#define XNTHREAD_STATE_LABELS  SWDRU...X.HbTlrt
+#define XNTHREAD_STATE_LABELS  SWDRU..X.HbTlrt
 
 /**
  * @brief Structure containing thread information.
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 0071d96..0b35cb9 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -545,7 +545,7 @@ int xnpod_start_thread(struct xnthread *thread,
}
 #endif /* CONFIG_SMP */
 
-   xnthread_set_state(thread, (attr-mode  (XNTHREAD_MODE_BITS | XNSUSP)) 
| XNSTARTED);
+   xnthread_set_state(thread, attr-mode  (XNTHREAD_MODE_BITS | XNSUSP));
thread-imode = (attr-mode  XNTHREAD_MODE_BITS);
thread-entry = attr-entry;
thread-cookie = attr-cookie;
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index c353674..342fd00 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -153,7 +153,7 @@ void xnsched_init(struct xnsched *sched, int cpu)
xntimer_set_sched(sched-htimer, sched);
sched-zombie = NULL;
 
-   attr.flags = XNROOT | XNSTARTED | XNFPU;
+   attr.flags = XNROOT | XNFPU;
attr.name = root_name;
attr.personality = generic_personality;
param.idle.prio = XNSCHED_IDLE_PRIO;
@@ -367,7 +367,7 @@ int xnsched_set_policy(struct xnthread *thread,
if (xnthread_test_state(thread, XNREADY))
xnsched_enqueue(thread);
 
-   if (xnthread_test_state(thread, XNSTARTED))
+   if (!xnthread_test_state(thread, XNDORMANT))
xnsched_set_resched(thread-sched);
 
return 0;
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index bfffef1..9efd929 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -1010,7 +1010,7 @@ int xnshadow_map_kernel(struct xnthread *thread, struct 
completion *done)
 * Make 

[Xenomai-git] Philippe Gerum : cobalt/sched: drop delayed thread cleanup via zombie state transition

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 91db1cbd9f2c547d8c1e1365c678327eeaaf8f39
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=91db1cbd9f2c547d8c1e1365c678327eeaaf8f39

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 11:59:30 2013 +0200

cobalt/sched: drop delayed thread cleanup via zombie state transition

Since Xenomai thread deletion now always involves a self-exit from
secondary mode, possibly after a pending cancellation request is
noticed, there is no way a thread could self-delete from primary mode
anymore.

For this reason, delaying resource cleanup until the exiting thread
schedules out from xnpod_schedule() becomes pointless, since it must
have done so earlier, when leaving primary mode prior to running
do_exit() and the taskexit handler. At this point, the exiting thread
runs in mere linux context, and may release any Xenomai-specific
resource.

We drop the mechanism for delaying thread finalization upon
self-deletion, removing all zombie state hooks in the same move.

---

 include/cobalt/kernel/sched.h |   68 +---
 kernel/cobalt/lock.c  |2 +-
 kernel/cobalt/pod.c   |   26 ---
 kernel/cobalt/sched.c |   44 +--
 kernel/cobalt/shadow.c|1 -
 5 files changed, 45 insertions(+), 96 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index be3f2c5..402c385 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -55,51 +55,65 @@ struct xnsched_rt {
  * \brief Scheduling information structure.
  */
 
-typedef struct xnsched {
-
-   unsigned long status;   /*! Scheduler specific status bitmask. 
*/
-   unsigned long lflags;   /*! Scheduler specific local flags 
bitmask. */
-   struct xnthread *curr;  /*! Current thread. */
+struct xnsched {
+   /*! Scheduler specific status bitmask. */
+   unsigned long status;
+   /*! Scheduler specific local flags bitmask. */
+   unsigned long lflags;
+   /*! Current thread. */
+   struct xnthread *curr;
 #ifdef CONFIG_SMP
+   /*! Owner CPU id. */
int cpu;
-   cpumask_t resched;  /*! Mask of CPUs needing rescheduling. 
*/
+   /*! Mask of CPUs needing rescheduling. */
+   cpumask_t resched;
 #endif
-   struct xnsched_rt rt;   /*! Context of built-in real-time 
class. */
+   /*! Context of built-in real-time class. */
+   struct xnsched_rt rt;
 #ifdef CONFIG_XENO_OPT_SCHED_WEAK
-   struct xnsched_weak weak;   /*! Context of weak scheduling class. 
*/
+   /*! Context of weak scheduling class. */
+   struct xnsched_weak weak;
 #endif
 #ifdef CONFIG_XENO_OPT_SCHED_TP
-   struct xnsched_tp tp;   /*! Context of TP class. */
+   /*! Context of TP class. */
+   struct xnsched_tp tp;
 #endif
 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
-   struct xnsched_sporadic pss;/*! Context of sporadic scheduling 
class. */
+   /*! Context of sporadic scheduling class. */
+   struct xnsched_sporadic pss;
 #endif
-   volatile unsigned inesting; /*! Interrupt nesting level. */
-   struct xntimer htimer;  /*! Host timer. */
-   struct xnthread *zombie;
-   struct xnthread rootcb; /*! Root thread control block. */
+   /*! Interrupt nesting level. */
+   volatile unsigned inesting;
+   /*! Host timer. */
+   struct xntimer htimer;
+   /*! Root thread control block. */
+   struct xnthread rootcb;
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_HW_FPU
-   struct xnthread *fpuholder; /*! Thread owning the current FPU 
context. */
+   /*! Thread owning the current FPU context. */
+   struct xnthread *fpuholder;
 #endif
 #ifdef CONFIG_XENO_OPT_WATCHDOG
-   struct xntimer wdtimer; /*! Watchdog timer object. */
-   int wdcount;/*! Watchdog tick count. */
+   /*! Watchdog timer object. */
+   struct xntimer wdtimer;
+   /*! Watchdog tick count. */
+   int wdcount;
 #endif
 #ifdef CONFIG_XENO_OPT_STATS
-   xnticks_t last_account_switch;  /*! Last account switch date (ticks). 
*/
-   xnstat_exectime_t *current_account; /*! Currently active account */
+   /*! Last account switch date (ticks). */
+   xnticks_t last_account_switch;
+   /*! Currently active account */
+   xnstat_exectime_t *current_account;
 #endif
-} xnsched_t;
+};
 
 DECLARE_PER_CPU(struct xnsched, nksched);
 
 union xnsched_policy_param;
 
 struct xnsched_class {
-
void (*sched_init)(struct xnsched *sched);
void (*sched_enqueue)(struct xnthread *thread);
void (*sched_dequeue)(struct xnthread *thread);
@@ -156,7 +170,7 @@ static inline int xnsched_resched_p(struct xnsched *sched)
 /* Set resched flag for the given scheduler. */
 #ifdef CONFIG_SMP
 

[Xenomai-git] Philippe Gerum : cobalt/arch: turn apc and fault statistics into per-cpu data

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 439adcd6e8512e464a82994d6579189771cd1bf9
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=439adcd6e8512e464a82994d6579189771cd1bf9

Author: Philippe Gerum r...@xenomai.org
Date:   Tue Jul 30 19:30:55 2013 +0200

cobalt/arch: turn apc and fault statistics into per-cpu data

---

 include/cobalt/kernel/apc.h|5 ++-
 kernel/cobalt/apc.c|   27 ++-
 .../cobalt/include/asm-generic/xenomai/machine.h   |   12 ++--
 kernel/cobalt/init.c   |3 ++
 kernel/cobalt/procfs.c |4 +-
 kernel/cobalt/shadow.c |   15 +++---
 6 files changed, 41 insertions(+), 25 deletions(-)

diff --git a/include/cobalt/kernel/apc.h b/include/cobalt/kernel/apc.h
index c18bceb..67aa735 100644
--- a/include/cobalt/kernel/apc.h
+++ b/include/cobalt/kernel/apc.h
@@ -37,8 +37,9 @@ void xnapc_free(int apc);
 
 static inline void __xnapc_schedule(int apc)
 {
-   int cpu = ipipe_processor_id();
-   if (!__test_and_set_bit(apc, xnarch_machdata.apc_pending[cpu]))
+   unsigned long *p = &__this_cpu_ptr(xnarch_percpu_machdata)->apc_pending;
+
+   if (!__test_and_set_bit(apc, p))
ipipe_post_irq_root(xnarch_machdata.apc_virq);
 }
 
diff --git a/kernel/cobalt/apc.c b/kernel/cobalt/apc.c
index 25bb33d..b01f0c8 100644
--- a/kernel/cobalt/apc.c
+++ b/kernel/cobalt/apc.c
@@ -44,27 +44,28 @@ static IPIPE_DEFINE_SPINLOCK(apc_lock);
 void apc_dispatch(unsigned int virq, void *arg)
 {
void (*handler)(void *), *cookie;
-   int apc, cpu;
-
-   spin_lock(apc_lock);
-
-   cpu = ipipe_processor_id();
+   unsigned long *p;
+   int apc;
 
/*
-* ! This loop is not protected against a handler becoming
-* unavailable while processing the pending queue; the
-* software must make sure to uninstall all APCs before
-* eventually unloading any module that may contain APC
+* CAUTION: The APC dispatch loop is not protected against a
+* handler becoming unavailable while processing the pending
+* queue; the software must make sure to uninstall all APCs
+* before eventually unloading any module that may contain APC
 * handlers. We keep the handler affinity with the poster's
 * CPU, so that the handler is invoked on the same CPU than
 * the code which called xnapc_schedule().
 */
-   while (xnarch_machdata.apc_pending[cpu]) {
-   apc = ffnz(xnarch_machdata.apc_pending[cpu]);
-   clear_bit(apc, &xnarch_machdata.apc_pending[cpu]);
+   spin_lock(apc_lock);
+
+   /* This is atomic linux context (non-threaded IRQ). */
+   p = __this_cpu_ptr(xnarch_percpu_machdata)-apc_pending;
+   while (*p) {
+   apc = ffnz(*p);
+   clear_bit(apc, p);
handler = xnarch_machdata.apc_table[apc].handler;
cookie = xnarch_machdata.apc_table[apc].cookie;
-   xnarch_machdata.apc_table[apc].hits[cpu]++;
+   __this_cpu_ptr(xnarch_percpu_machdata)->apc_shots[apc]++;
spin_unlock(apc_lock);
handler(cookie);
spin_lock(apc_lock);
diff --git a/kernel/cobalt/include/asm-generic/xenomai/machine.h 
b/kernel/cobalt/include/asm-generic/xenomai/machine.h
index a32e63c..b6b4e8e 100644
--- a/kernel/cobalt/include/asm-generic/xenomai/machine.h
+++ b/kernel/cobalt/include/asm-generic/xenomai/machine.h
@@ -24,6 +24,7 @@
 #endif
 
 #include <linux/ipipe.h>
+#include <linux/percpu.h>
 #include <asm/byteorder.h>
 #include <asm/xenomai/wrappers.h>
 
@@ -40,21 +41,26 @@ struct xnarch_machdesc {
 
 extern struct xnarch_machdesc xnarch_machdesc;
 
+struct xnarch_percpu_machdata {
+   unsigned long apc_pending;
+   unsigned long apc_shots[BITS_PER_LONG];
+   unsigned int faults[IPIPE_NR_FAULTS];
+};
+
+DECLARE_PER_CPU(struct xnarch_percpu_machdata, xnarch_percpu_machdata);
+
 struct xnarch_machdata {
struct ipipe_domain domain;
unsigned long timer_freq;
unsigned long clock_freq;
unsigned int apc_virq;
unsigned long apc_map;
-   unsigned long apc_pending[NR_CPUS];
unsigned int escalate_virq;
struct {
void (*handler)(void *cookie);
void *cookie;
const char *name;
-   unsigned long hits[NR_CPUS];
} apc_table[BITS_PER_LONG];
-   unsigned int faults[NR_CPUS][IPIPE_NR_FAULTS];
 #ifdef CONFIG_SMP
cpumask_t supported_cpus;
 #endif
diff --git a/kernel/cobalt/init.c b/kernel/cobalt/init.c
index 2f3a4da..6604833 100644
--- a/kernel/cobalt/init.c
+++ b/kernel/cobalt/init.c
@@ -58,6 +58,9 @@ module_param_named(disable, disable_arg, ulong, 0444);
 struct xnarch_machdata xnarch_machdata;
 EXPORT_SYMBOL_GPL(xnarch_machdata);
 
+struct xnarch_percpu_machdata 

[Xenomai-git] Philippe Gerum : cobalt/pod: drop PEXEC and FATAL condition bits

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: b78c14c033ca12e976bd188c3f782c578b35e320
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=b78c14c033ca12e976bd188c3f782c578b35e320

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 15:32:14 2013 +0200

cobalt/pod: drop PEXEC and FATAL condition bits

XNPEXEC makes no sense anymore as it used to protect our kernel
handlers from running when no real-time services are available yet.

It turns out that the Cobalt core is now initialized early, and those
handlers are not even installed until the interrupt pipeline is
requested to forward the trap and syscall events, which only
happens...after the system has been initialized.

Finally, exposing the fatal bit as a global state flag, only to test
it within the panic handler was quite overkill.

---

 include/cobalt/kernel/assert.h|4 +-
 include/cobalt/kernel/pod.h   |   14 +-
 kernel/cobalt/arch/blackfin/machine.c |   19 ++-
 kernel/cobalt/pod.c   |   46 +
 kernel/cobalt/registry.c  |3 -
 kernel/cobalt/shadow.c|   92 
 kernel/cobalt/timer.c |7 +--
 kernel/cobalt/vfile.c |8 ---
 8 files changed, 59 insertions(+), 134 deletions(-)

diff --git a/include/cobalt/kernel/assert.h b/include/cobalt/kernel/assert.h
index 6bd00bd..c54b824 100644
--- a/include/cobalt/kernel/assert.h
+++ b/include/cobalt/kernel/assert.h
@@ -46,6 +46,8 @@
 #define CONFIG_XENO_OPT_DEBUG_NUCLEUS 0
 #endif /* CONFIG_XENO_OPT_DEBUG_NUCLEUS */
 
-void xnpod_fatal(const char *format, ...);
+extern void (*nkpanic)(const char *format, ...);
+
+#define xnpod_fatal(__fmt, __args...) nkpanic(__fmt, ##__args)
 
 #endif /* !_COBALT_KERNEL_ASSERT_H */
diff --git a/include/cobalt/kernel/pod.h b/include/cobalt/kernel/pod.h
index c5b4290..d7e0d94 100644
--- a/include/cobalt/kernel/pod.h
+++ b/include/cobalt/kernel/pod.h
@@ -36,9 +36,7 @@
 #include <cobalt/kernel/lock.h>
 
 /* Pod status flags */
-#define XNFATAL  0x0001/* Fatal error in progress */
-#define XNPEXEC  0x0002/* Pod is active (a skin is attached) */
-#define XNCLKLK  0x0004/* All clocks locked */
+#define XNCLKLK  0x0001/* All clocks locked */
 
 #define XNPOD_NORMAL_EXIT  0x0
 #define XNPOD_FATAL_EXIT   0x1
@@ -102,16 +100,6 @@ static inline struct xnsched *xnpod_current_sched(void)
return __this_cpu_ptr(nksched);
 }
 
-static inline int xnpod_active_p(void)
-{
-   return nkpod->status & XNPEXEC;
-}
-
-static inline int xnpod_fatal_p(void)
-{
-   return nkpod->status & XNFATAL;
-}
-
 static inline int xnpod_interrupt_p(void)
 {
	return xnpod_current_sched()->lflags & XNINIRQ;
diff --git a/kernel/cobalt/arch/blackfin/machine.c 
b/kernel/cobalt/arch/blackfin/machine.c
index ef279d6..bdaec5b 100644
--- a/kernel/cobalt/arch/blackfin/machine.c
+++ b/kernel/cobalt/arch/blackfin/machine.c
@@ -28,8 +28,23 @@ static unsigned long mach_blackfin_calibrate(void)
 
 static void schedule_deferred(void)
 {
-   if (xnpod_active_p())
-   xnpod_schedule();
+   /*
+* We have a small race window which turns out to be
+* innocuous, i.e.:
+*
+* mach_setup() ...
+*IRQ/syscall
+*= irq_tail_hook
+*   = xnpod_schedule()
+*...
+* xnpod_init()
+*
+* in which case, we would call xnpod_schedule() for a not yet
+* initialized system. However, we would be covered by the
+* check for XNSCHED in xnpod_schedule(), which basically
+* makes this call a nop.
+*/
+   xnpod_schedule();
 }
 
 static int mach_blackfin_init(void)
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 5d911af..7bb8d07 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -36,6 +36,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/sched.h>
+#include <linux/kernel.h>
 #include <linux/wait.h>
 #include <cobalt/kernel/pod.h>
 #include <cobalt/kernel/timer.h>
@@ -55,6 +56,9 @@
 xnpod_t nkpod_struct;
 EXPORT_SYMBOL_GPL(nkpod_struct);
 
+void (*nkpanic)(const char *format, ...) = panic;
+EXPORT_SYMBOL_GPL(nkpanic);
+
 unsigned long nktimerlat;
 
 cpumask_t nkaffinity = XNPOD_ALL_CPUS;
@@ -133,11 +137,12 @@ static inline void __xnpod_switch_fpu(struct xnsched 
*sched)
 
 #endif /* !CONFIG_XENO_HW_FPU */
 
-void xnpod_fatal(const char *format, ...)
+static void fatal(const char *format, ...)
 {
static char msg_buf[1024];
struct xnthread *thread;
struct xnsched *sched;
+   static int oopsed;
char pbuf[16];
xnticks_t now;
unsigned cpu;
@@ -150,15 +155,15 @@ void xnpod_fatal(const char *format, ...)
 
xnlock_get_irqsave(nklock, s);
 
+   if (oopsed)
+   goto out;
+
+   oopsed = 1;
va_start(ap, format);
vsnprintf(msg_buf, sizeof(msg_buf), format, ap);

[Xenomai-git] Philippe Gerum : cobalt/heap: drop delayed memory release mechanism

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 0cee5a087acd7fef6f52c4c33d89ef244bdff582
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=0cee5a087acd7fef6f52c4c33d89ef244bdff582

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 16:08:08 2013 +0200

cobalt/heap: drop delayed memory release mechanism

We have no more client for xnheap_schedule_free(). Drop this ugly
service, which used to paper over a design issue in the thread
deletion path, which has been fixed lately.

Since this unfortunate code was running while holding the nklock all
the way, this can only help decreasing latency.

---

 include/cobalt/kernel/heap.h |   22 -
 kernel/cobalt/heap.c |   68 --
 kernel/cobalt/pod.c  |4 +--
 3 files changed, 1 insertions(+), 93 deletions(-)

diff --git a/include/cobalt/kernel/heap.h b/include/cobalt/kernel/heap.h
index 8dcbd0e..263dd6a 100644
--- a/include/cobalt/kernel/heap.h
+++ b/include/cobalt/kernel/heap.h
@@ -102,8 +102,6 @@ struct xnheap {
int fcount;
} buckets[XNHEAP_NBUCKETS];
 
-   struct list_head *idleq[NR_CPUS];
-
/* # of active user-space mappings. */
unsigned long numaps;
/* Kernel memory flags (0 if vmalloc()). */
@@ -158,7 +156,6 @@ static inline size_t xnheap_internal_overhead(size_t hsize, 
size_t psize)
 
 #define xnmalloc(size) xnheap_alloc(&kheap,size)
 #define xnfree(ptr)    xnheap_free(&kheap,ptr)
-#define xnfreesync()   xnheap_finalize_free(&kheap)
 
 static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
 {
@@ -241,25 +238,6 @@ int xnheap_test_and_free(struct xnheap *heap,
 int xnheap_free(struct xnheap *heap,
void *block);
 
-void xnheap_schedule_free(struct xnheap *heap,
- void *block,
- struct list_head *link);
-
-void xnheap_finalize_free_inner(struct xnheap *heap,
-   int cpu);
-
-static inline void xnheap_finalize_free(struct xnheap *heap)
-{
-   int cpu = ipipe_processor_id();
-
-   XENO_ASSERT(NUCLEUS,
-   spltest() != 0,
-   xnpod_fatal("%s called in unsafe context", __FUNCTION__));
-
-   if (heap->idleq[cpu])
-   xnheap_finalize_free_inner(heap, cpu);
-}
-
 int xnheap_check_block(struct xnheap *heap,
   void *block);
 
diff --git a/kernel/cobalt/heap.c b/kernel/cobalt/heap.c
index 3b9a51c..478fbff 100644
--- a/kernel/cobalt/heap.c
+++ b/kernel/cobalt/heap.c
@@ -270,7 +270,6 @@ int xnheap_init(struct xnheap *heap,
 {
unsigned long hdrsize, shiftsize, pageshift;
struct xnextent *extent;
-   unsigned int cpu;
spl_t s;
 
/*
@@ -326,9 +325,6 @@ int xnheap_init(struct xnheap *heap,
 	heap->ubytes = 0;
 	heap->maxcont = heap->npages * pagesize;
 
-	for_each_online_cpu(cpu)
-		heap->idleq[cpu] = NULL;
-
 	INIT_LIST_HEAD(&heap->extents);
 	heap->nrextents = 1;
 	xnlock_init(&heap->lock);
@@ -993,70 +989,6 @@ int xnheap_extend(struct xnheap *heap, void *extaddr, 
unsigned long extsize)
 }
 EXPORT_SYMBOL_GPL(xnheap_extend);
 
-/*!
- * \fn int xnheap_schedule_free(struct xnheap *heap, void *block, struct 
list_head *link)
- * \brief Schedule a memory block for release.
- *
- * This routine schedules a block for release by
- * xnheap_finalize_free(). This service is useful to lazily free
- * blocks of heap memory when immediate release is not an option,
- * e.g. when active references are still pending on the object for a
- * short time after the call. xnheap_finalize_free() is expected to be
- * eventually called by the client code at some point in the future
- * when actually freeing the idle objects is deemed safe.
- *
- * @param heap The descriptor address of the heap to release memory
- * to.
- *
- * @param block The address of the region to be returned to the heap.
- *
- * @param link The address of a link member, likely but not
- * necessarily within the released object, which will be used by the
- * heap manager to hold the block in the queue of idle objects.
- *
- * Environments:
- *
- * This service can be called from:
- *
- * - Kernel module initialization/cleanup code
- * - Interrupt service routine
- * - Kernel-based task
- * - User-space task
- *
- * Rescheduling: never.
- */
-
-void xnheap_schedule_free(struct xnheap *heap, void *block, struct list_head 
*link)
-{
-   unsigned int cpu;
-   spl_t s;
-
-	xnlock_get_irqsave(&heap->lock, s);
-	/*
-	 * NOTE: we only need a one-way linked list for remembering
-	 * the idle objects through the 'next' field, so the 'last'
-	 * field of the link is used to point at the beginning of the
-	 * freed memory.
-	 */
-	cpu = ipipe_processor_id();
-	link->prev = block;
-	link->next = heap->idleq[cpu];
-	heap->idleq[cpu] = link;
-	xnlock_put_irqrestore(&heap->lock, s);

[Xenomai-git] Philippe Gerum : cobalt/thread: move finalizer call out of any critical section

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 39395a0c5a4d30674232c36193c9ffaec147610c
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=39395a0c5a4d30674232c36193c9ffaec147610c

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 16:00:50 2013 +0200

cobalt/thread: move finalizer call out of any critical section

There is no point in calling the thread finalization handler while
holding a lock, since it is supposed to release the TCB to some
allocator, which may incur some work.

Besides, the thread has entered an innocuous state long ago before the
finalizer is eventually called, so there is nothing to protect in its
TCB at this point.

Finalizers should do proper locking locally if need be.

---

 kernel/cobalt/pod.c  |6 +++---
 kernel/cobalt/posix/thread.c |6 +++---
 kernel/cobalt/shadow.c   |2 --
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 331dcf1..d90a2f6 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -782,8 +782,6 @@ static inline void cleanup_thread(struct xnthread *thread) 
/* nklock held, irqs
 
xnsched_forget(thread);
xnthread_deregister(thread);
-   /* Finalize last since this incurs releasing the TCB. */
-   xnshadow_finalize(thread);
 }
 
 void __xnpod_cleanup_thread(struct xnthread *thread)
@@ -806,9 +804,11 @@ void __xnpod_cleanup_thread(struct xnthread *thread)
 
 	xnlock_get_irqsave(&nklock, s);
 	cleanup_thread(thread);
-	xnfreesync();
 	xnlock_put_irqrestore(&nklock, s);
 
+   /* Finalize last since this incurs releasing the TCB. */
+   xnshadow_finalize(thread);
+
wake_up(nkjoinq);
 }
 
diff --git a/kernel/cobalt/posix/thread.c b/kernel/cobalt/posix/thread.c
index 7caf10f..ac00826 100644
--- a/kernel/cobalt/posix/thread.c
+++ b/kernel/cobalt/posix/thread.c
@@ -212,18 +212,18 @@ struct xnpersonality *cobalt_thread_exit(struct xnthread 
*curr)
 	cobalt_timer_flush(thread);
 	xnsynch_destroy(&thread->monitor_synch);
 	xnsynch_destroy(&thread->sigwait);
+	list_del(&thread->link);
 
/* We don't stack over any personality, no chaining. */
return NULL;
 }
 
-struct xnpersonality *cobalt_thread_finalize(struct xnthread *zombie) /* 
nklocked, IRQs off */
+struct xnpersonality *cobalt_thread_finalize(struct xnthread *zombie)
 {
struct cobalt_thread *thread;
 
 	thread = container_of(zombie, struct cobalt_thread, threadbase);
-	list_del(&thread->link);
-	xnheap_schedule_free(&kheap, thread, &thread->link);
+	xnfree(thread);
 
return NULL;
 }
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 4d6b9eb..d43753f 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -1032,8 +1032,6 @@ void xnshadow_finalize(struct xnthread *thread)
 
xnthread_run_handler(thread, finalize_thread);
 
-   xnthread_clear_state(thread, XNMAPPED);
-
 	if (xnthread_test_state(thread, XNUSER)) {
 		sys_ppd = xnsys_ppd_get(0);
 		atomic_dec(&sys_ppd->refcnt);


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git


[Xenomai-git] Philippe Gerum : cobalt/pod: privatize inner support routines

2013-07-31 Thread git repository hosting
Module: xenomai-forge
Branch: next
Commit: 1b1098e166c7cb37ce389d916dcfd5481cb4dc2f
URL:
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=1b1098e166c7cb37ce389d916dcfd5481cb4dc2f

Author: Philippe Gerum r...@xenomai.org
Date:   Wed Jul 31 15:53:52 2013 +0200

cobalt/pod: privatize inner support routines

---

 include/cobalt/kernel/pod.h |6 -
 kernel/cobalt/pod.c |  338 ++-
 kernel/cobalt/shadow.c  |   71 +-
 3 files changed, 182 insertions(+), 233 deletions(-)

diff --git a/include/cobalt/kernel/pod.h b/include/cobalt/kernel/pod.h
index d7e0d94..dd7657c 100644
--- a/include/cobalt/kernel/pod.h
+++ b/include/cobalt/kernel/pod.h
@@ -132,10 +132,6 @@ static inline int xnpod_primary_p(void)
 
 int xnpod_init(void);
 
-int xnpod_enable_timesource(void);
-
-void xnpod_disable_timesource(void);
-
 void xnpod_shutdown(int xtype);
 
 int xnpod_init_thread(struct xnthread *thread,
@@ -278,8 +274,6 @@ static inline void xnpod_testcancel_thread(void)
__xnpod_testcancel_thread(curr);
 }
 
-int xnpod_handle_exception(struct ipipe_trap_data *d);
-
 int xnpod_set_thread_periodic(struct xnthread *thread,
  xnticks_t idate,
  xntmode_t timeout_mode,
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 7bb8d07..331dcf1 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -33,7 +33,6 @@
  *@{*/
 
 #include <stdarg.h>
-#include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -217,9 +216,82 @@ static void xnpod_flush_heap(struct xnheap *heap,
free_pages_exact(extaddr, extsize);
 }
 
-/*!
- * \fn int xnpod_init(void)
- * \brief Initialize the core pod.
+static int enable_timesource(void)
+{
+   struct xnsched *sched;
+   int htickval, cpu;
+   spl_t s;
+
+   trace_mark(xn_nucleus, enable_timesource, MARK_NOARGS);
+
+#ifdef CONFIG_XENO_OPT_STATS
+   /*
+* Only for statistical purpose, the timer interrupt is
+* attached by xntimer_grab_hardware().
+*/
+	xnintr_init(&nktimer, "[timer]",
+		    per_cpu(ipipe_percpu.hrtimer_irq, 0), NULL, NULL, 0);
+#endif /* CONFIG_XENO_OPT_STATS */
+
+	nkclock.wallclock_offset =
+		xnclock_get_host_time() - xnclock_read_monotonic(&nkclock);
+
+   for_each_online_cpu(cpu) {
+   if (!xnarch_cpu_supported(cpu))
+   continue;
+
+		htickval = xntimer_grab_hardware(cpu);
+		if (htickval < 0) {
+			while (--cpu >= 0)
+				xntimer_release_hardware(cpu);
+
+			return htickval;
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+
+   /* If the current tick device for the target CPU is
+* periodic, we won't be called back for host tick
+* emulation. Therefore, we need to start a periodic
+* nucleus timer which will emulate the ticking for
+* that CPU, since we are going to hijack the hw clock
+* chip for managing our own system timer.
+*
+* CAUTION:
+*
+* - nucleus timers may be started only _after_ the hw
+* timer has been set up for the target CPU through a
+* call to xntimer_grab_hardware().
+*
+* - we don't compensate for the elapsed portion of
+* the current host tick, since we cannot get this
+* information easily for all CPUs except the current
+* one, and also because of the declining relevance of
+* the jiffies clocksource anyway.
+*
+* - we must not hold the nklock across calls to
+* xntimer_grab_hardware().
+*/
+
+		sched = xnpod_sched_slot(cpu);
+		if (htickval > 1)
+			xntimer_start(&sched->htimer, htickval, htickval,
+				      XN_RELATIVE);
+		else if (htickval == 1)
+			xntimer_start(&sched->htimer, 0, 0, XN_RELATIVE);
+
+#if defined(CONFIG_XENO_OPT_WATCHDOG)
+		xntimer_start(&sched->wdtimer, 10UL, 10UL, XN_RELATIVE);
+		xnsched_reset_watchdog(sched);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+		xnlock_put_irqrestore(&nklock, s);
+   }
+
+   return 0;
+}
+
+/**
+ * @fn int xnpod_init(void)
+ * @brief Initialize the core pod.
  *
  * Initializes the core interface pod which can subsequently be used
  * to start real-time activities. Once the core pod is active,
@@ -230,13 +302,25 @@ static void xnpod_flush_heap(struct xnheap *heap,
  *
  * - -ENOMEM is returned if the memory manager fails to initialize.
  *
+ * - -ENODEV is returned if a failure occurred while configuring the
+ * hardware timer.
+ *
  *