Module: xenomai-2.5 Branch: master Commit: f6af9b831c8d6356f56445ca94e980f976da90ce URL: http://git.xenomai.org/?p=xenomai-2.5.git;a=commit;h=f6af9b831c8d6356f56445ca94e980f976da90ce
Author: Philippe Gerum <r...@xenomai.org> Date: Sat Aug 28 16:36:23 2010 +0200 nucleus/shadow: shorten the uninterruptible path to secondary mode Switching a thread from primary to secondary mode entails running a significantly long code path with interrupts off, to hand over the relaxing thread to the Linux scheduler. Investigation on different architectures showed that such a code path was involved most of the time in latency peaks, typically when an interrupt arrives at the very beginning of the migration sequence, and remains blocked until the thread is fully switched out. Having RPI enabled may increase the penalty, since pushing the relaxing thread to the local RPI queue is part of this sequence (rpi_push). Tracing reveals that a significant portion of the uninterruptible sequence is actually spent running the rescheduling procedure (xnpod_schedule). However, nothing requires us to suspend /and/ switch out a relaxing thread atomically; actually, this is even inefficient, since this tends to give a high priority to a thread going for less real-time guarantees, over a real-time activity which could be started by a pending interrupt. This patch introduces a special handling of the XNRELAX bit condition in xnpod_suspend_thread(), so that all locks (smp and local interrupts) are dropped right before switching out the current thread, to open a window for interrupt preemption. Additionally, interrupt management is now shared between xnshadow_relax() and xnpod_suspend_thread(), so that basic assumptions can be made on the current interrupt state, to further reduce interrupt masking. Best cases: - no interrupt will be pending, so the relaxed thread will be switched out immediately. - an interrupt will be pending for the Xenomai domain, performing time-critical duties such as waking up a real-time thread, in which case the latency to handle a real-time event will have been lower. 
Worst case: - an interrupt will be pending for Linux, in which case the rescheduling will be postponed until the interrupt pipeline has logged it (but not dispatched, since we will be running over the high priority Xenomai domain). --- include/asm-generic/system.h | 1 + ksrc/nucleus/pod.c | 19 +++++++++++++++++-- ksrc/nucleus/shadow.c | 24 ++++++++++++++++++++---- 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h index a2c8fb9..4b5ce95 100644 --- a/include/asm-generic/system.h +++ b/include/asm-generic/system.h @@ -83,6 +83,7 @@ typedef unsigned long spl_t; #else /* !CONFIG_SMP */ #define splexit(x) rthal_local_irq_restore(x) #endif /* !CONFIG_SMP */ +#define splmax() rthal_local_irq_disable() #define splnone() rthal_local_irq_enable() #define spltest() rthal_local_irq_test() #define splget(x) rthal_local_irq_flags(x) diff --git a/ksrc/nucleus/pod.c b/ksrc/nucleus/pod.c index 8bfa49f..50b6d01 100644 --- a/ksrc/nucleus/pod.c +++ b/ksrc/nucleus/pod.c @@ -1456,12 +1456,27 @@ void xnpod_suspend_thread(xnthread_t *thread, xnflags_t mask, nkpod->schedhook(thread, mask); #endif /* __XENO_SIM__ */ - if (thread == sched->curr) + if (thread == sched->curr) { + /* + * If the current thread is being relaxed, we must + * have been called from xnshadow_relax(), in which + * case we introduce an opportunity for interrupt + * delivery right before switching context, which + * shortens the uninterruptible code path. This + * particular caller expects us to always return with + * interrupts enabled. + */ + if (mask & XNRELAX) { + xnlock_clear_irqon(&nklock); + __xnpod_schedule(sched); + return; + } /* * If the thread is runnning on another CPU, - * xnpod_schedule will just trigger the IPI. + * xnpod_schedule will trigger the IPI as needed. 
*/ xnpod_schedule(); + } #ifdef CONFIG_XENO_OPT_PERVASIVE /* * Ok, this one is an interesting corner case, which requires diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index fe1ba64..da03f21 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -1138,7 +1138,6 @@ void xnshadow_relax(int notify, int reason) xnthread_t *thread = xnpod_current_thread(); siginfo_t si; int prio; - spl_t s; XENO_BUGON(NUCLEUS, xnthread_test_state(thread, XNROOT)); @@ -1150,13 +1149,30 @@ void xnshadow_relax(int notify, int reason) trace_mark(xn_nucleus, shadow_gorelax, "thread %p thread_name %s", thread, xnthread_name(thread)); - splhigh(s); + /* + * If you intend to change the following interrupt-free + * sequence, /first/ make sure to: + * + * - read commit #d3242401b8 + * + * - check the special handling of XNRELAX in + * xnpod_suspend_thread() when switching out the current + * thread, not to break basic assumptions we do there. + * + * We disable interrupts here to initiate the migration + * sequence, and let xnpod_suspend_thread() enable them back + * before returning to us. + */ + splmax(); rpi_push(thread->sched, thread); schedule_linux_call(LO_WAKEUP_REQ, current, 0); clear_task_nowakeup(current); xnpod_suspend_thread(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL); - splexit(s); - + /* + * As a special case when switching out a relaxed thread, + * interrupts have been re-enabled before returning to us. See + * xnpod_suspend_thread(). + */ if (XENO_DEBUG(NUCLEUS) && rthal_current_domain != rthal_root_domain) xnpod_fatal("xnshadow_relax() failed for thread %s[%d]", thread->name, xnthread_user_pid(thread)); _______________________________________________ Xenomai-git mailing list Xenomai-git@gna.org https://mail.gna.org/listinfo/xenomai-git