Re: [Xenomai-core] [PATCH 1/2] Introduce xntbase_ns2ticks_ceil

2007-09-07 Thread Philippe Gerum
On Thu, 2007-08-30 at 09:11 +0200, Jan Kiszka wrote:
 This introduces a round-up-variant of xntbase_ns2ticks. It is uninlined
 for the non-trivial case due to its text size. Its only user will be
 RTDM for now, but maybe the POSIX skin can use it as well.
 
 Philippe, if you accept this one, I'll handle the RTDM side myself.
 

Merged, thanks.

 Jan
 plain text document attachment (xntbase_ns2ticks_ceil.patch)
 ---
  include/nucleus/timebase.h |7 +++
  ksrc/nucleus/timebase.c|7 +++
  2 files changed, 14 insertions(+)
 
 Index: xenomai/include/nucleus/timebase.h
 ===
 --- xenomai.orig/include/nucleus/timebase.h
 +++ xenomai/include/nucleus/timebase.h
 @@ -148,6 +148,8 @@ static inline xnticks_t xntbase_ns2ticks
   return xnarch_ulldiv(t, xntbase_get_tickval(base), NULL);
  }
  
 +xnticks_t xntbase_ns2ticks_ceil(xntbase_t *base, xntime_t t);
 +
  static inline int xntbase_master_p(xntbase_t *base)
  {
   return base == &nktbase;
 @@ -247,6 +249,11 @@ static inline xnticks_t xntbase_ns2ticks
   return t;
  }
  
 +static inline xnticks_t xntbase_ns2ticks_ceil(xntbase_t *base, xntime_t t)
 +{
 + return t;
 +}
 +
  static inline int xntbase_master_p(xntbase_t *base)
  {
   return 1;
 Index: xenomai/ksrc/nucleus/timebase.c
 ===
 --- xenomai.orig/ksrc/nucleus/timebase.c
 +++ xenomai/ksrc/nucleus/timebase.c
 @@ -483,6 +483,12 @@ void xntbase_tick(xntbase_t *base)
   xnlock_put_irqrestore(&nklock, s);
  }
  
 +xnticks_t xntbase_ns2ticks_ceil(xntbase_t *base, xntime_t t)
 +{
 + return xnarch_ulldiv(t + xntbase_get_tickval(base) - 1,
 +  xntbase_get_tickval(base), NULL);
 +}
 +
  EXPORT_SYMBOL(xntbase_alloc);
  EXPORT_SYMBOL(xntbase_free);
  EXPORT_SYMBOL(xntbase_update);
 @@ -490,6 +496,7 @@ EXPORT_SYMBOL(xntbase_switch);
  EXPORT_SYMBOL(xntbase_start);
  EXPORT_SYMBOL(xntbase_stop);
  EXPORT_SYMBOL(xntbase_tick);
 +EXPORT_SYMBOL(xntbase_ns2ticks_ceil);
  
  #endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */
  
-- 
Philippe.



___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] [PATCH] fix 2.4-kernel build warning

2007-09-07 Thread Philippe Gerum
On Thu, 2007-08-30 at 08:27 +0200, Jan Kiszka wrote:
 I noticed some warning during a 2.4 build of trunk which may point out
 unexpected side effects:
 

Merged, thanks.

 In file included from pod.c:45:
 /usr/src/linux-2.4.35.1/include/asm/xenomai/bits/pod.h:32:1: warning: 
 xnarch_tsc_to_ns redefined
 In file included from /usr/src/linux-2.4.35.1/include/linux/modversions.h:275,
  from /usr/src/linux-2.4.35.1/include/linux/module.h:22,
  from 
 /usr/src/linux-2.4.35.1/include/asm-generic/xenomai/system.h:30,
  from /usr/src/linux-2.4.35.1/include/asm/xenomai/system.h:28,
  from 
 /usr/src/linux-2.4.35.1/include/xenomai/nucleus/types.h:39,
  from 
 /usr/src/linux-2.4.35.1/include/xenomai/nucleus/queue.h:24,
  from 
 /usr/src/linux-2.4.35.1/include/xenomai/nucleus/timebase.h:29,
  from 
 /usr/src/linux-2.4.35.1/include/xenomai/nucleus/timer.h:26,
  from 
 /usr/src/linux-2.4.35.1/include/xenomai/nucleus/thread.h:25,
  from 
 /usr/src/linux-2.4.35.1/include/xenomai/nucleus/pod.h:34,
  from pod.c:36:
 /usr/src/linux-2.4.35.1/include/linux/modules/pod.ver:4:1: warning: this is 
 the location of the previous definition
 
 However, the attached patch fixes it.
 
 Jan
 plain text document attachment (fix-2.4-redefine-warning.patch)
 ---
  include/asm-arm/bits/pod.h  |2 +-
  include/asm-blackfin/bits/pod.h |2 +-
  include/asm-generic/bits/pod.h  |   10 --
  include/asm-i386/bits/pod.h |2 +-
  include/asm-x86_64/bits/pod.h   |2 +-
  5 files changed, 8 insertions(+), 10 deletions(-)
 
 Index: xenomai/include/asm-arm/bits/pod.h
 ===
 --- xenomai.orig/include/asm-arm/bits/pod.h
 +++ xenomai/include/asm-arm/bits/pod.h
 @@ -30,7 +30,7 @@ long long xnarch_tsc_to_ns(long long ts)
  {
   return xnarch_llmulshft(ts, xnarch_tsc_scale, xnarch_tsc_shift);
  }
 -#define xnarch_tsc_to_ns xnarch_tsc_to_ns
 +#define XNARCH_TSC_TO_NS
  
  #include <asm-generic/xenomai/bits/pod.h>
  
 Index: xenomai/include/asm-blackfin/bits/pod.h
 ===
 --- xenomai.orig/include/asm-blackfin/bits/pod.h
 +++ xenomai/include/asm-blackfin/bits/pod.h
 @@ -27,7 +27,7 @@ long long xnarch_tsc_to_ns(long long ts)
  {
   return xnarch_llmulshft(ts, xnarch_tsc_scale, xnarch_tsc_shift);
  }
 -#define xnarch_tsc_to_ns xnarch_tsc_to_ns
 +#define XNARCH_TSC_TO_NS
  
  #include <asm-generic/xenomai/bits/pod.h>
  
 Index: xenomai/include/asm-generic/bits/pod.h
 ===
 --- xenomai.orig/include/asm-generic/bits/pod.h
 +++ xenomai/include/asm-generic/bits/pod.h
 @@ -273,23 +273,21 @@ unsigned long long xnarch_get_host_time(
  
  EXPORT_SYMBOL(xnarch_get_host_time);
  
 -#ifndef xnarch_tsc_to_ns
 +#ifndef XNARCH_TSC_TO_NS
  long long xnarch_tsc_to_ns(long long ts)
  {
  return xnarch_llimd(ts,1000000000,RTHAL_CPU_FREQ);
  }
 -#define xnarch_tsc_to_ns xnarch_tsc_to_ns
 -#endif /* !xnarch_tsc_to_ns */
 +#endif /* !XNARCH_TSC_TO_NS */
  
  EXPORT_SYMBOL(xnarch_tsc_to_ns);
  
 -#ifndef xnarch_ns_to_tsc
 +#ifndef XNARCH_NS_TO_TSC
  long long xnarch_ns_to_tsc(long long ns)
  {
  return xnarch_llimd(ns,RTHAL_CPU_FREQ,1000000000);
  }
 -#define xnarch_ns_to_tsc xnarch_ns_to_tsc
 -#endif /* !xnarch_ns_to_tsc */
 +#endif /* !XNARCH_NS_TO_TSC */
  
  EXPORT_SYMBOL(xnarch_ns_to_tsc);
  
 Index: xenomai/include/asm-i386/bits/pod.h
 ===
 --- xenomai.orig/include/asm-i386/bits/pod.h
 +++ xenomai/include/asm-i386/bits/pod.h
 @@ -29,7 +29,7 @@ long long xnarch_tsc_to_ns(long long ts)
  {
   return xnarch_llmulshft(ts, xnarch_tsc_scale, xnarch_tsc_shift);
  }
 -#define xnarch_tsc_to_ns xnarch_tsc_to_ns
 +#define XNARCH_TSC_TO_NS
  
  #include <asm-generic/xenomai/bits/pod.h>
  #include <asm/xenomai/switch.h>
 Index: xenomai/include/asm-x86_64/bits/pod.h
 ===
 --- xenomai.orig/include/asm-x86_64/bits/pod.h
 +++ xenomai/include/asm-x86_64/bits/pod.h
 @@ -28,7 +28,7 @@ long long xnarch_tsc_to_ns(long long ts)
  {
   return xnarch_llmulshft(ts, xnarch_tsc_scale, xnarch_tsc_shift);
  }
 -#define xnarch_tsc_to_ns xnarch_tsc_to_ns
 +#define XNARCH_TSC_TO_NS
  
  #include <asm-generic/xenomai/bits/pod.h>
  #include <asm/xenomai/switch.h>
 ___
 Xenomai-core mailing list
 Xenomai-core@gna.org
 https://mail.gna.org/listinfo/xenomai-core
-- 
Philippe.



___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


[Xenomai-core] Summary: Xenomai 2.3.2 and 2.4 lock-ups and OOPSes

2007-09-07 Thread Peter Soetens
Just in case you hooked off the long discussion about the issues we found from
Xenomai 2.3.2 on:

  o We are using the xeno_native skin, create Xeno tasks and semaphores, but 
have strong indications that the crashes are caused by the memory allocation 
scheme of Xenomai in combination with task creation/deletion
  o We found two ways to break Xenomai, causing a 'Killed' (rt_task_delete) 
and causing an OOPS (rt_task_join).
  o They happen on 2.6.20 and 2.6.22 kernels
  o On the 2.3 branch, r2429 works, r2433 causes the faults. The patch is 
small, and in the ChangeLog: 

2007-05-11  Philippe Gerum  [EMAIL PROTECTED]

* include/nucleus/heap.h (xnfreesafe): Use xnpod_current_p() when
checking for deferral.

* include/nucleus/pod.h (xnpod_current_p): Give exec mode
awareness to this predicate, checking for primary/secondary mode
of shadows.

2007-05-11  Gilles Chanteperdrix  [EMAIL PROTECTED]

* ksrc/skins: Always defer thread memory release in deletion hook
by calling xnheap_schedule_free() instead of xnfreesafe().

  o We reverted this patch on HEAD of the 2.3 branch, but got -ENOMEM errors 
during Xenomai resource allocations, indicating that later changes depend on 
this patch. So we use clean HEAD again further on to find the causes:
 o A first test (in Orocos) creates one thread, two semaphores, lets it wait 
on them and cleans up the thread.
 o During rt_task_delete, our program gets 'Killed' (without joinable thread), 
hence a user space problem. However, gdb is of no use, all thread info is 
lost.
 o We made the thread joinable (T_JOINABLE), and then joined. This bypassed 
the Kill on the first run but causes an OOPS the second time the same 
application is started:

Oops:  [#1]
PREEMPT
CPU:0
EIP:0060:[fef4a1f3]Not tainted VLI
EFLAGS: 00010002   (2.6.20.9-ipipe-1.8-08 #2)
EIP is at get_free_range+0x56/0x160 [xeno_nucleus]
eax: f3a81d01   ebx: 0200   ecx: 0101   edx: fef62b00
esi: 0101   edi: 0200   ebp: f0f33ec4   esp: f0f33e98
ds: 007b   es: 007b   ss: 0068
Process NonPeriodicActi (pid: 3020, ti=f0f32000 task=f7ce61b0 
task.ti=f0f32000)
Stack:  0600 fef62b80 f3a81b24 f3a8 fef62ba4 f3a80720 0101
   0600 f0f33f18 f7ce6360 f0f33ee4 fef4a948 fef62b80 f0f33f08 
   0400 f0f33f18 f7ce6360 f0f33f50 ff13e1de 0282 0282 bfab6350
Call Trace:
 [c0103ffb] show_trace_log_lvl+0x1f/0x35
 [c01040bb] show_stack_log_lvl+0xaa/0xcf
 [c01042a9] show_registers+0x1c9/0x392
 [c0104588] die+0x116/0x245
 [c0110fca] do_page_fault+0x287/0x61d
 [c010ea35] __ipipe_handle_exception+0x63/0x136
 [c029466d] error_code+0x79/0x88
 [fef4a948] xnheap_alloc+0x15b/0x17d [xeno_nucleus]
 [ff13e1de] __rt_task_create+0xe0/0x171 [xeno_native]
 [fef5655f] losyscall_event+0xaf/0x170 [xeno_nucleus]
 [c0138804] __ipipe_dispatch_event+0xc0/0x1da
 [c010e90b] __ipipe_syscall_root+0x43/0x10a
 [c0102e79] system_call+0x29/0x41
 ===
Code: 74 61 85 c0 74 5d c7 45 e0 00 00 00 00 8b 4d e4 8b 49 10 89 4d ec 85 c9 
74 38 8b 45 dc 8b 78 0c 89 4d f0 89 ce 89 fb eb 02 89 ce 8b 09 8d 04 3e 39 
c1 0f 94 c2 3b 5d d8 0f 92 c0 01 fb 84 c2 75
EIP: [fef4a1f3] get_free_range+0x56/0x160 [xeno_nucleus] SS:ESP 
0068:f0f33e98
[hard lockup]

  o Our application is also mixing the original RT_TASK struct and return 
value of the rt_task_self() function call when calling rt_ functions. 
Switching between one of those influences the crashing behaviour as well, not 
further investigated.

  o This was reproduced on two different systems (one with SMI workaround 
working)
 
You have the patch that broke things, I hope this gives you a hint on what 
causes our crashes. Know that Orocos as-is has worked with Xenomai from
version 2.0 on.

Peter

-- 
Peter Soetens -- FMTC -- http://www.fmtc.be

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] Make watchdog-timer configurable

2007-09-07 Thread Philippe Gerum
On Tue, 2007-09-04 at 15:12 +0200, Johan Borkhuis wrote:
 I made the following change to Xenomai (version 2.3.2, but it could also 
 be applied to 2.4 as the watchdog code has not been changed).
 

Merged, with forward port to 2.4. Thanks.

 The watchdog timeout is fixed at 4 seconds. For us this is a problem as 
 there are some processes that take more time than this.
 
 I made the following change to allow this to be changed through the 
 Linux kernel configuration. The value is configurable between 1 and 60 
 seconds.
 
 Below is a patchfile that implements this change. Please let me know if 
 you have comments on this, or if there are problems with this change.
 
 
 diff -u -r org/xenomai-2.3.2/ksrc/nucleus/Kconfig 
 xenomai-2.3.2/ksrc/nucleus/Kconfig
 --- org/xenomai-2.3.2/ksrc/nucleus/Kconfig  2007-07-06 
 18:42:14.0 +0200
 +++ xenomai-2.3.2/ksrc/nucleus/Kconfig  2007-09-04 14:13:41.0 +0200
 @@ -172,6 +172,17 @@
 behalf of the timer tick handler, thus is only active after
 the timer has been started.
  
 +config XENO_OPT_WATCHDOG_PERIOD
 +   depends on XENO_OPT_WATCHDOG
  +   int "Watchdog period (s)"
  +   default 4
  +   range 1 60
 +   help
 +
 +Nr of seconds of uninterrupted real-time activity after which
 +the watchdog triggers.
 +
 +
   menu "Timing"
  
  config XENO_OPT_TIMING_PERIODIC
 diff -u -r org/xenomai-2.3.2/ksrc/nucleus/pod.c 
 xenomai-2.3.2/ksrc/nucleus/pod.c
 --- org/xenomai-2.3.2/ksrc/nucleus/pod.c2007-07-07 
 10:18:57.0 +0200
 +++ xenomai-2.3.2/ksrc/nucleus/pod.c2007-09-04 14:06:35.0 +0200
 @@ -2925,7 +2925,7 @@
  return nkpod->svctable.faulthandler(fltinfo);
  }
  
 -#ifdef CONFIG_XENO_OPT_WATCHDOG
  +#if (defined CONFIG_XENO_OPT_WATCHDOG) && (CONFIG_XENO_OPT_WATCHDOG_PERIOD > 0)
  
  /*!
   * @internal
 @@ -2948,7 +2948,7 @@
 return;
 }
  
  -   if (unlikely(++sched->wd_count >= 4)) {
  +   if (unlikely(++sched->wd_count >= CONFIG_XENO_OPT_WATCHDOG_PERIOD)) {
          xnltt_log_event(xeno_ev_watchdog, thread->name);
          xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
                   thread->name);
 
 
 
 Kind regards,
 Johan Borkhuis
 
 
 ___
 Xenomai-core mailing list
 Xenomai-core@gna.org
 https://mail.gna.org/listinfo/xenomai-core
-- 
Philippe.



___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] Summary: Xenomai 2.3.2 and 2.4 lock-ups and OOPSes

2007-09-07 Thread Gilles Chanteperdrix
Philippe Gerum wrote:
  On Fri, 2007-09-07 at 11:27 +0200, Peter Soetens wrote:
   Just in case you hooked off the long discussion about the issues we found 
   from
   Xenomai 2.3.2 on:
   
 o We are using the xeno_native skin, create Xeno tasks and semaphores, 
   but 
   have strong indications that the crashes are caused by the memory 
   allocation 
   scheme of Xenomai in combination with task creation/deletion
 o We found two ways to break Xenomai, causing a 'Killed' 
   (rt_task_delete) 
   and causing an OOPS (rt_task_join).
 o They happen on 2.6.20 and 2.6.22 kernels
 o On the 2.3 branch, r2429 works, r2433 causes the faults. The patch is 
   small, and in the ChangeLog: 
   
   2007-05-11  Philippe Gerum  [EMAIL PROTECTED]
   
   * include/nucleus/heap.h (xnfreesafe): Use xnpod_current_p() when
   checking for deferral.
   
   * include/nucleus/pod.h (xnpod_current_p): Give exec mode
   awareness to this predicate, checking for primary/secondary mode
   of shadows.
   
   2007-05-11  Gilles Chanteperdrix  [EMAIL PROTECTED]
   
   * ksrc/skins: Always defer thread memory release in deletion hook
   by calling xnheap_schedule_free() instead of xnfreesafe().
   
 o We reverted this patch on HEAD of the 2.3 branch, but got -ENOMEM 
   errors 
   during Xenomai resource allocations, indicating that later changes depend 
   on 
   this patch. So we use clean HEAD again further on to find the causes:
o A first test (in Orocos) creates one thread, two semaphores, lets it 
   wait 
   on them and cleans up the thread.
  
  Please point me at the actual Orocos test code that breaks, with the
  hope to get a fairly standalone test case from it; if you do have a
  standalone test case already, this would be even better. I intend to
  address this issue asap.

Before you have a piece of code that causes the crash, I gave a look at
the code involved. The only suspicious thing I see is that the correct
working of native skins thread termination depends on the execution
order of the two deletion hooks, the one in task.c and the one in
syscall.c. As a matter of fact, if the one in task.c is executed before
the one in syscall.c, the task magic is changed and xnshadow_unmap will
never be called. I suspect this is true for all skins, but I do not know
if this could cause a crash.

-- 


Gilles Chanteperdrix.

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core


Re: [Xenomai-core] Summary: Xenomai 2.3.2 and 2.4 lock-ups and OOPSes

2007-09-07 Thread Gilles Chanteperdrix
On 9/7/07, Gilles Chanteperdrix [EMAIL PROTECTED] wrote:
 Philippe Gerum wrote:
   On Fri, 2007-09-07 at 11:27 +0200, Peter Soetens wrote:
Just in case you hooked off the long discussion about the issues we
 found from
Xenomai 2.3.2 on:
   
  o We are using the xeno_native skin, create Xeno tasks and
 semaphores, but
have strong indications that the crashes are caused by the memory
 allocation
scheme of Xenomai in combination with task creation/deletion
  o We found two ways to break Xenomai, causing a 'Killed'
 (rt_task_delete)
and causing an OOPS (rt_task_join).
  o They happen on 2.6.20 and 2.6.22 kernels
  o On the 2.3 branch, r2429 works, r2433 causes the faults. The patch
 is
 small, and in the ChangeLog:
   
2007-05-11  Philippe Gerum  [EMAIL PROTECTED]
   
* include/nucleus/heap.h (xnfreesafe): Use xnpod_current_p() when
checking for deferral.
   
* include/nucleus/pod.h (xnpod_current_p): Give exec mode
awareness to this predicate, checking for primary/secondary mode
of shadows.
   
2007-05-11  Gilles Chanteperdrix  [EMAIL PROTECTED]
   
* ksrc/skins: Always defer thread memory release in deletion hook
by calling xnheap_schedule_free() instead of xnfreesafe().
   
  o We reverted this patch on HEAD of the 2.3 branch, but got -ENOMEM
 errors
during Xenomai resource allocations, indicating that later changes
 depend on
this patch. So we use clean HEAD again further on to find the causes:
 o A first test (in Orocos) creates one thread, two semaphores, lets it
 wait
on them and cleans up the thread.
  
   Please point me at the actual Orocos test code that breaks, with the
   hope to get a fairly standalone test case from it; if you do have a
   standalone test case already, this would be even better. I intend to
   address this issue asap.

 Before you have a piece of code that causes the crash, I gave a look at
 the code involved. The only suspicious thing I see is that the correct
 working of native skins thread termination depends on the execution
 order of the two deletion hooks, the one in task.c and the one in
 syscall.c. As a matter of fact, if the one in task.c is executed before
 the one in syscall.c, the task magic is changed and xnshadow_unmap will
 never be called. I suspect this is true for all skins, but I do not know
 if this could cause a crash.

There are two magics involved, this supposition is wrong.

-- 
   Gilles Chanteperdrix

___
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core