Re: [Xenomai-core] [Adeos-main] New I-pipe patch for ARM needs testing.
BOUIN Alexandre wrote: > Same tests but for AT91SAM9263-EK board : > - one under DD load, latency set to 45µs > - another one under Calibrator load, latency set to 30µs > - and the last under Test_switch load, latency set to 45µs > Xeno-test traces are available at the end of this mail. > > We used kernel 2.6.20.13, Xenomai snapshot from 08/06/2007, > adeos-ipipe-2.6.20-arm-1.7-03.patch included plus 2.6.20-at91.patch. > > A little correction on macb and ipipe patch was needed : > - macb ethernet controller is not detected on startup > - we added some corrections to make 9263 usable under ipipe > - owing to changes in at91 patch (clk are now registered on startup) we > made a little correction for future adeos-ipipe-2.6.21-arm.patch. > > You will find all added patches attached to this mail. > <> <<2.6.20-at91-macb-9263.patch>> > <> > Latencies are a little worse than on other boards. We presume that it's due > to the large number of peripherals. > > Our latency tests on AT91 boards are done ;) That is great, thanks. Your patch will be merged into future patches (ipipe for 2.6.21 or 2.6.22), where support for 9263 was merged. About the latency issue, have you checked that it is not due to the prioritized interrupt controller ? -- Gilles Chanteperdrix. ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [Adeos-main] New I-pipe patch for ARM needs testing.
BOUIN Alexandre wrote: > Same tests but for AT91SAM9263-EK board : > - one under DD load, latency set to 45µs > - another one under Calibrator load, latency set to 30µs > - and the last under Test_switch load, latency set to 45µs > Xeno-test traces are available at the end of this mail. > > We used kernel 2.6.20.13, Xenomai snapshot from 08/06/2007, > adeos-ipipe-2.6.20-arm-1.7-03.patch included plus 2.6.20-at91.patch. > > A little correction on macb and ipipe patch was needed : > - macb ethernet controller is not detected on startup > - we added some corrections to make 9263 usable under ipipe > - owing to changes in at91 patch (clk are now registered on startup) we made > a little correction for future adeos-ipipe-2.6.21-arm.patch. > > You will find all added patches attached to this mail. > <> <<2.6.20-at91-macb-9263.patch>> > <> > Latencies are a little worse than on other boards. We presume that it's due > to the large number of peripherals. Every additional IRQ source can cause additional delays of your benchmarked event. All those IRQs may once in a while line up just in front of your RT-IRQ, delaying it yet a bit more. Jan signature.asc Description: OpenPGP digital signature ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
[Xenomai-core] [PATCH 2/2] run prepare-kernel on defaults
This helps to run prepare-kernel with less explicit switches from automatic build scripts. It adds the "--default" parameter which virtually presses Enter when the user would otherwise be asked for some decision. Would be nice to have in 2.3.x as well. Index: xenomai/scripts/prepare-kernel.sh === --- xenomai.orig/scripts/prepare-kernel.sh +++ xenomai/scripts/prepare-kernel.sh @@ -168,7 +168,7 @@ generate_patch() { } -usage='usage: prepare-kernel --linux= --adeos= [--arch=] [--outpatch= [--filterkvers=y|n] [--filterarch=y|n]] [--forcelink]' +usage='usage: prepare-kernel --linux= --adeos= [--arch=] [--outpatch= [--filterkvers=y|n] [--filterarch=y|n]] [--forcelink] [--default] [--verbose]' me=`basename $0` while test $# -gt 0; do @@ -198,6 +198,9 @@ while test $# -gt 0; do --forcelink) forcelink=1 ;; +--default) +usedefault=1 +;; --verbose) verbose=1 ;; @@ -226,8 +229,10 @@ xenomai_root=`cd $xenomai_root && pwd` default_linux_tree=/lib/modules/`uname -r`/source while test x$linux_tree = x; do - echo -n "Linux tree [default $default_linux_tree]: " - read linux_tree + if test x$usedefault = x; then + echo -n "Linux tree [default $default_linux_tree]: " + read linux_tree + fi if test x$linux_tree = x; then linux_tree=$default_linux_tree fi @@ -274,8 +279,10 @@ fi while : ; do if test x$linux_arch = x; then - echo -n "Target architecture [default $default_linux_arch]: " - read linux_arch + if test x$usedefault = x; then + echo -n "Target architecture [default $default_linux_arch]: " + read linux_arch + fi if test x$linux_arch = x; then linux_arch=$default_linux_arch fi @@ -363,8 +370,10 @@ else default_adeos_patch=/dev/null fi while test x$adeos_patch = x; do - echo -n "Adeos patch [default $default_adeos_patch]: " - read adeos_patch + if test x$usedefault = x; then + echo -n "Adeos patch [default $default_adeos_patch]: " + read adeos_patch + fi if test x$adeos_patch = x; then adeos_patch=$default_adeos_patch fi signature.asc Description: OpenPGP digital signature ___ Xenomai-core 
mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
[Xenomai-core] [PATCH 1/2] fix automatic patch election
Currently, the patch election algorithm does not take the kernel sub-level version into account, thus one gets offered 2.6.20, although the kernel is 2.6.19. This patch fixes it. Should go into 2.3.x as well. Index: xenomai/scripts/prepare-kernel.sh === --- xenomai.orig/scripts/prepare-kernel.sh +++ xenomai/scripts/prepare-kernel.sh @@ -357,7 +357,7 @@ elif test -r $linux_tree/include/linux/a exit 2 else if test x$adeos_patch = x; then - default_adeos_patch=`( ls $xenomai_root/ksrc/arch/$xenomai_arch/patches/adeos-ipipe-$linux_VERSION.$linux_PATCHLEVEL*|sort -r ) 2>/dev/null | head -n1` + default_adeos_patch=`( ls $xenomai_root/ksrc/arch/$xenomai_arch/patches/adeos-ipipe-$linux_VERSION.$linux_PATCHLEVEL.$linux_SUBLEVEL*|sort -r ) 2>/dev/null | head -n1` fi if test x$default_adeos_patch = x; then default_adeos_patch=/dev/null signature.asc Description: OpenPGP digital signature ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [RFC][PATCH] shirq locking rework
Dmitry Adamushko wrote: > On 22/06/07, Jan Kiszka <[EMAIL PROTECTED]> wrote: >> [ ... ] >> >> Only compile-tested under various .configs. Any comment welcome. >> > >> @@ -76,7 +102,7 @@ static inline void xnintr_stat_counter_d >> static void xnintr_irq_handler(unsigned irq, void *cookie) >> { >> xnsched_t *sched = xnpod_current_sched(); >> - xnintr_t *intr = (xnintr_t *)cookie; >> + xnintr_t *intr; >> xnstat_runtime_t *prev; >> xnticks_t start; >> int s; >> @@ -86,6 +112,16 @@ static void xnintr_irq_handler(unsigned >> xnltt_log_event(xeno_ev_ienter, irq); >> >> ++sched->inesting; >> + >> + xnlock_get(&xnirqs[irq].lock); >> + >> +#ifdef CONFIG_SMP >> + /* In SMP case, we have to reload the cookie under the per-IRQ >> lock >> + to avoid racing with xnintr_detach. */ >> + intr = rthal_irq_cookie(&rthal_domain, irq); >> +#else >> + intr = cookie; >> +#endif >> s = intr->isr(intr); > > I guess, 'intr' can be NULL here. Yeah, needs to be caught as well. > > Could you please send me attached (non-inlined) a combo patch on top > of the trunk version (as I see this one seems to be on top of your > previous one)? I'll try to come up with some solution during this > weekend. My patch was an attachment (though I wonder we everyone has so much problems with inlines - broken mailers all over the place), and it was already against trunk (#2654). Philippe merged my preliminary fix this morning. Looking forward to your suggestions! Jan signature.asc Description: OpenPGP digital signature ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [RFC][PATCH] shirq locking rework
On 22/06/07, Jan Kiszka <[EMAIL PROTECTED]> wrote: > [ ... ] > > Only compile-tested under various .configs. Any comment welcome. > > @@ -76,7 +102,7 @@ static inline void xnintr_stat_counter_d > static void xnintr_irq_handler(unsigned irq, void *cookie) > { > xnsched_t *sched = xnpod_current_sched(); > - xnintr_t *intr = (xnintr_t *)cookie; > + xnintr_t *intr; > xnstat_runtime_t *prev; > xnticks_t start; > int s; > @@ -86,6 +112,16 @@ static void xnintr_irq_handler(unsigned > xnltt_log_event(xeno_ev_ienter, irq); > > ++sched->inesting; > + > + xnlock_get(&xnirqs[irq].lock); > + > +#ifdef CONFIG_SMP > + /* In SMP case, we have to reload the cookie under the per-IRQ lock > + to avoid racing with xnintr_detach. */ > + intr = rthal_irq_cookie(&rthal_domain, irq); > +#else > + intr = cookie; > +#endif > s = intr->isr(intr); I guess, 'intr' can be NULL here. Could you please send me attached (non-inlined) a combo patch on top of the trunk version (as I see this one seems to be on top of your previous one)? I'll try to come up with some solution during this weekend. -- Best regards, Dmitry Adamushko ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [RFC][PATCH] shirq locking rework
Jan Kiszka wrote: > Dmitry Adamushko wrote: >> On 21/06/07, Jan Kiszka <[EMAIL PROTECTED]> wrote: [ .. ] surprise-surprise.. sure, one way or another it must be fixed. heh.. too many bugs (and I don't even want to say who's responsible) :-/ >>> I wouldn't accept all the responsibility if I were you. >> I have no problems in this respect. I was just a bit sarcastic with >> the way to say "it's my fault". >> >> >>> It's a sign that the design might be >>> too complex now >> frankly speaking, I don't think it's really complex :) >> >> >>> Things get worse, at least with XENO_OPT_STATS: Due to the runtime >>> statistics collection, we may end up with dangling references to our >>> xnintr object even after xnintr_shirq_unlock(). >>> >>> We would actually need some kind of IRQ_INPROGRESS + synchronize_irq() >>> at i-pipe level. But we have the problem, in contrast to the kernel, >>> that we reschedule from inside the handler (more precisely: at nucleus >>> level), thus synchronize_irq() would not just wait on some simple >>> handler to exit... >> Yeah.. we had already conversations on this topic (I think with >> Philippe) and, if I recall right, that was one of the reasons. That's >> why synchronize_irq() is in the nucleus layer. > > Hmm, then we may be forced to get the cookie out of ipipe's hands again > and put it back into a nucleus irq array, i.e. move the cookie > dereferencing under a nucleus managed per-irq lock. But there is still > the issue with xnsched_t::current_account... > Hell, what a mess... Here is a first attempt to address the remaining issues. Take it with a huge grain of salt, I haven't yet made up my mind if it really solves our races and if it isn't too ugly. Please direct special attention to - xnintr_sync_stat_references (the comment says it all) - xnintr_edge_shirq_handler (unrelated change, but I somehow felt like this is nicer) Only compile-tested under various .configs. Any comment welcome. 
Thanks, Jan Index: xenomai/ksrc/nucleus/intr.c === --- xenomai.orig/ksrc/nucleus/intr.c +++ xenomai/ksrc/nucleus/intr.c @@ -41,6 +41,18 @@ DEFINE_PRIVATE_XNLOCK(intrlock); +typedef struct xnintr_irq { + + DECLARE_XNLOCK(lock); + +#if defined(CONFIG_XENO_OPT_SHIRQ_LEVEL) || defined(CONFIG_XENO_OPT_SHIRQ_EDGE) + xnintr_t *handlers; + int unhandled; +#endif +} cacheline_aligned_in_smp xnintr_irq_t; + +static xnintr_irq_t xnirqs[RTHAL_NR_IRQS]; + #ifdef CONFIG_XENO_OPT_STATS xnintr_t nkclock; /* Only for statistics */ int xnintr_count = 1; /* Number of attached xnintr objects + nkclock */ @@ -63,9 +75,23 @@ static inline void xnintr_stat_counter_d xnarch_memory_barrier(); xnintr_list_rev++; } + +static inline void xnintr_sync_stat_references(xnintr_t *intr) +{ + int cpu; + + for_each_online_cpu(cpu) { + xnsched_t *sched = xnpod_sched_slot(cpu); + + /* I don't feel very well... hacking this. */ + while (sched->current_account == &intr->stat[cpu].account) + cpu_relax(); + } +} #else static inline void xnintr_stat_counter_inc(void) {} static inline void xnintr_stat_counter_dec(void) {} +static inline void xnintr_sync_stat_references(xnintr_t *intr) {} #endif /* CONFIG_XENO_OPT_STATS */ /* @@ -76,7 +102,7 @@ static inline void xnintr_stat_counter_d static void xnintr_irq_handler(unsigned irq, void *cookie) { xnsched_t *sched = xnpod_current_sched(); - xnintr_t *intr = (xnintr_t *)cookie; + xnintr_t *intr; xnstat_runtime_t *prev; xnticks_t start; int s; @@ -86,6 +112,16 @@ static void xnintr_irq_handler(unsigned xnltt_log_event(xeno_ev_ienter, irq); ++sched->inesting; + + xnlock_get(&xnirqs[irq].lock); + +#ifdef CONFIG_SMP + /* In SMP case, we have to reload the cookie under the per-IRQ lock + to avoid racing with xnintr_detach. 
*/ + intr = rthal_irq_cookie(&rthal_domain, irq); +#else + intr = cookie; +#endif s = intr->isr(intr); if (unlikely(s == XN_ISR_NONE)) { @@ -102,6 +138,8 @@ static void xnintr_irq_handler(unsigned intr->unhandled = 0; } + xnlock_put(&xnirqs[irq].lock); + if (s & XN_ISR_PROPAGATE) xnarch_chain_irq(irq); else if (!(s & XN_ISR_NOENABLE)) @@ -161,18 +199,6 @@ void xnintr_clock_handler(void) #if defined(CONFIG_XENO_OPT_SHIRQ_LEVEL) || defined(CONFIG_XENO_OPT_SHIRQ_EDGE) -typedef struct xnintr_shirq { - - xnintr_t *handlers; - int unhandled; -#ifdef CONFIG_SMP - xnlock_t lock; -#endif - -} xnintr_shirq_t; - -static xnintr_shirq_t xnshirqs[RTHAL_NR_IRQS]; - #if defined(CONFIG_XENO_OPT_SHIRQ_LEVEL) /* * Low-level interrupt handler dispatching the user-defined ISRs for @@ -184,7 +210,7 @@ static void xnintr_shirq_handler(unsigne xnsched_t *sched =