Module: xenomai-forge
Branch: master
Commit: 42eb9298e88e422fbd124c3c5288d0aae25cce0a
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=42eb9298e88e422fbd124c3c5288d0aae25cce0a

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Sep 13 18:52:34 2013 +0200

cobalt/thread: rework thread cancellation handling

With the rebasing of Xenomai kernel threads over regular Linux
kthreads, we can no longer guarantee synchronous deletion with
xnthread_cancel(). The killed thread has to reach a cancellation point
later for the request to be actually fulfilled, so that it may wrap up
and exit gracefully Linux-wise.

This is actually safer, because kernel threads may associate data with
the wait context before suspending, and certainly do not want to be
wiped out without being allowed to wrap up first, which could
otherwise leave stale data behind.

e.g. we should allow this:

list_add(&myself, &some_q);
xnthread_sleep_on();
list_del(&myself);
...
xnthread_test_cancel();

To this end, this patch removes all cancellation points from
xnthread_suspend(), expecting Xenomai kernel threads to check for
pending termination requests from within their work loop, via a call
to xnthread_test_cancel(), as sketched below.
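
A minimal, purely illustrative sketch of such a work loop (the myself,
some_q and some_synch names are made up for this example; only the
Xenomai calls are the ones this patch relies on):

        for (;;) {
                /* Cancellation point: does not return once XNCANCELD is set. */
                xnthread_test_cancel();

                xnlock_get_irqsave(&nklock, s);
                list_add(&myself.next, &some_q);
                ret = xnsynch_sleep_on(&some_synch, XN_INFINITE, XN_RELATIVE);
                list_del(&myself.next);   /* always allowed to wrap up */
                xnlock_put_irqrestore(&nklock, s);

                if (ret & (XNRMID|XNBREAK))
                        break;
        }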

In addition, the xnthread_prepare/finish_wait() calls have been
replaced by the xnthread_prepare/complete_wait() pattern, to be used
as follows (a fuller sketch follows the skeleton below):

consumer:
        xnthread_prepare_wait(&wc);
        info = xnsynch_sleep_on(&synch, ...);
        if (info) {
           /* process XNRMID, XNBREAK, XNTIMEO as usual. */
        } else {
             /*
              * Resource obtained. In addition,
              * xnthread_wait_complete_p(&wc) may be called to make
              * sure xnthread_complete_wait(wc) was issued for the
              * wait context, in case forced unblocks may happen
              * independently of XNRMID|XNBREAK|XNTIMEO
              * (not recommended though).
              */
        }

producer:
        thread = xnsynch_wakeup_one_sleeper(&synch);
        wc = xnthread_get_wait_context(thread);
        /* Post resource. */
        xnthread_complete_wait(wc);
        xnsched_run();
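
As a fuller, purely illustrative sketch of the same pattern, modeled
on the cobalt_mqwait_context usage in the mqueue.c hunks below (the
struct my_wait_context type, its msg field and the consume()/post
names are invented for this example; everything else is the API
introduced by this patch):

struct my_wait_context {
        struct xnthread_wait_context wc;   /* must be embedded */
        struct my_msg *msg;                /* payload posted by the producer */
};

consumer:
        struct my_wait_context mwc;

        xnthread_prepare_wait(&mwc.wc);
        info = xnsynch_sleep_on(&synch, timeout, tmode);
        if (info == 0 && xnthread_wait_complete_p(&mwc.wc))
                consume(mwc.msg);          /* producer posted the payload */

producer:
        thread = xnsynch_wakeup_one_sleeper(&synch);
        if (thread) {
                wc = xnthread_get_wait_context(thread);
                mwc = container_of(wc, struct my_wait_context, wc);
                mwc->msg = msg;            /* post the payload */
                xnthread_complete_wait(wc);
        }
        xnsched_run();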

---

 include/cobalt/kernel/shadow.h      |    2 -
 include/cobalt/kernel/synch.h       |    1 +
 include/cobalt/kernel/thread.h      |   24 ++++--
 kernel/cobalt/posix/event.c         |    2 -
 kernel/cobalt/posix/mqueue.c        |   15 +---
 kernel/cobalt/posix/signal.c        |    2 +-
 kernel/cobalt/posix/thread.c        |   26 +-----
 kernel/cobalt/posix/thread.h        |    2 -
 kernel/cobalt/sched.c               |   13 ++-
 kernel/cobalt/shadow.c              |   50 +++++++++--
 kernel/cobalt/thread.c              |  156 ++++++++++++++++++++++-------------
 kernel/drivers/ipc/bufp.c           |   20 -----
 kernel/drivers/ipc/internal.h       |    1 -
 kernel/drivers/testing/switchtest.c |    7 +-
 14 files changed, 181 insertions(+), 140 deletions(-)

diff --git a/include/cobalt/kernel/shadow.h b/include/cobalt/kernel/shadow.h
index 4fdc221..6423332 100644
--- a/include/cobalt/kernel/shadow.h
+++ b/include/cobalt/kernel/shadow.h
@@ -55,8 +55,6 @@ static inline struct xnthread *xnshadow_current(void)
        return ipipe_current_threadinfo()->thread;
 }
 
-#define xnshadow_current_p(thread) (xnshadow_current() == (thread))
-
 static inline struct xnthread *xnshadow_thread(struct task_struct *p)
 {
        return ipipe_task_threadinfo(p)->thread;
diff --git a/include/cobalt/kernel/synch.h b/include/cobalt/kernel/synch.h
index ea93129..181e053 100644
--- a/include/cobalt/kernel/synch.h
+++ b/include/cobalt/kernel/synch.h
@@ -25,6 +25,7 @@
 #include <cobalt/kernel/list.h>
 #include <cobalt/kernel/assert.h>
 #include <cobalt/kernel/timer.h>
+#include <cobalt/uapi/kernel/synch.h>
 
 #define XNSYNCH_CLAIMED 0x10   /* Claimed by other thread(s) w/ PIP */
 
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index feb4da2..3434536 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -28,8 +28,8 @@
 #include <cobalt/kernel/schedparam.h>
 #include <cobalt/kernel/trace.h>
 #include <cobalt/kernel/shadow.h>
+#include <cobalt/kernel/synch.h>
 #include <cobalt/uapi/kernel/thread.h>
-#include <cobalt/uapi/kernel/synch.h>
 #include <asm/xenomai/machine.h>
 #include <asm/xenomai/thread.h>
 
@@ -37,7 +37,6 @@
 #define XNTHREAD_MODE_BITS    (XNLOCK|XNRRB|XNTRAPSW)
 
 struct xnthread;
-struct xnsynch;
 struct xnsched;
 struct xnselector;
 struct xnsched_class;
@@ -58,7 +57,7 @@ struct xnthread_start_attr {
 };
 
 struct xnthread_wait_context {
-       /* anchor object */
+       int posted;
 };
 
 typedef struct xnthread {
@@ -169,6 +168,8 @@ typedef struct xnthread {
        const char *exe_path;   /* Executable path */
        u32 proghash;           /* Hash value for exe_path */
 #endif
+       /** Exit event for joining the thread. */
+       struct xnsynch join_synch;
 } xnthread_t;
 
 #define xnthread_name(thread)               ((thread)->name)
@@ -344,6 +345,18 @@ static inline void xnthread_test_cancel(void)
                __xnthread_test_cancel(curr);
 }
 
+static inline
+void xnthread_complete_wait(struct xnthread_wait_context *wc)
+{
+       wc->posted = 1;
+}
+
+static inline
+int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
+{
+       return wc->posted;
+}
+
 #ifdef CONFIG_XENO_HW_FPU
 void xnthread_switch_fpu(struct xnsched *sched);
 #else
@@ -365,9 +378,6 @@ xnticks_t xnthread_get_period(struct xnthread *thread);
 
 void xnthread_prepare_wait(struct xnthread_wait_context *wc);
 
-void xnthread_finish_wait(struct xnthread_wait_context *wc,
-                         void (*cleanup)(struct xnthread_wait_context *wc));
-
 int xnthread_init(struct xnthread *thread,
                  const struct xnthread_init_attr *attr,
                  struct xnsched_class *sched_class,
@@ -403,7 +413,7 @@ int xnthread_set_slice(struct xnthread *thread,
 
 void xnthread_cancel(struct xnthread *thread);
 
-void xnthread_join(struct xnthread *thread);
+int xnthread_join(struct xnthread *thread);
 
 #ifdef CONFIG_SMP
 int xnthread_migrate(int cpu);
diff --git a/kernel/cobalt/posix/event.c b/kernel/cobalt/posix/event.c
index 0d91f3d..d836866 100644
--- a/kernel/cobalt/posix/event.c
+++ b/kernel/cobalt/posix/event.c
@@ -164,8 +164,6 @@ int cobalt_event_wait(struct cobalt_event_shadow __user *u_evtsh,
        xnthread_prepare_wait(&ewc.wc);
        datp->nwaiters++;
        info = xnsynch_sleep_on(&event->synch, timeout, tmode);
-       xnthread_finish_wait(&ewc.wc, NULL);
-
        if (info & XNRMID) {
                ret = -EIDRM;
                goto out;
diff --git a/kernel/cobalt/posix/mqueue.c b/kernel/cobalt/posix/mqueue.c
index 119805e..27d1c55 100644
--- a/kernel/cobalt/posix/mqueue.c
+++ b/kernel/cobalt/posix/mqueue.c
@@ -620,6 +620,7 @@ cobalt_mq_finish_send(mqd_t fd, cobalt_mq_t *mq, struct cobalt_msg *msg)
                wc = xnthread_get_wait_context(thread);
                mwc = container_of(wc, struct cobalt_mqwait_context, wc);
                mwc->msg = msg;
+               xnthread_complete_wait(wc);
                mq->nodebase.refcount++;
        } else {
                /* Nope, have to go through the queue. */
@@ -714,17 +715,9 @@ cobalt_mq_timedrcv_inner(cobalt_mq_t **mqp, mqd_t fd,
        mq = node2mq(cobalt_desc_node(desc));
        xnthread_prepare_wait(&mwc.wc);
        ret = xnsynch_sleep_on(&mq->receivers, to, tmode);
-       xnthread_finish_wait(&mwc.wc, NULL);
-
-       if (ret == 0) {
-               /* Revalidate the descriptor. */
-               ret = cobalt_desc_get(&desc, fd, COBALT_MQ_MAGIC);
-               if (ret) {
-                       mq_msg_free(mq, msg);
-                       msg = ERR_PTR(-ret);
-               }  else
-                       msg = mwc.msg;
-       } else if (ret & XNRMID)
+       if (ret == 0)
+               msg = mwc.msg;
+       else if (ret & XNRMID)
                msg = ERR_PTR(-EBADF);
        else if (ret & XNTIMEO)
                msg = ERR_PTR(-ETIMEDOUT);
diff --git a/kernel/cobalt/posix/signal.c b/kernel/cobalt/posix/signal.c
index 31c6a5b..40430ac 100644
--- a/kernel/cobalt/posix/signal.c
+++ b/kernel/cobalt/posix/signal.c
@@ -80,6 +80,7 @@ deliver:
        cobalt_copy_siginfo(sigp->si.si_code, swc->si, &sigp->si);
        cobalt_call_extension(signal_deliver, &thread->extref,
                              ret, swc->si, sigp);
+       xnthread_complete_wait(&swc->wc);
        xnsynch_wakeup_one_sleeper(&thread->sigwait);
        list_del(&thread->signext);
 
@@ -255,7 +256,6 @@ wait:
        xnthread_prepare_wait(&swc.wc);
        list_add_tail(&curr->signext, &curr->process->sigwaiters);
        ret = xnsynch_sleep_on(&curr->sigwait, timeout, XN_RELATIVE);
-       xnthread_finish_wait(&swc.wc, NULL);
        if (ret) {
                list_del(&curr->signext);
                ret = ret & XNBREAK ? -EINTR : -EAGAIN;
diff --git a/kernel/cobalt/posix/thread.c b/kernel/cobalt/posix/thread.c
index 0c1243f..c638d5d 100644
--- a/kernel/cobalt/posix/thread.c
+++ b/kernel/cobalt/posix/thread.c
@@ -247,9 +247,6 @@ struct xnpersonality *cobalt_thread_exit(struct xnthread *curr)
        cobalt_signal_flush(thread);
        xnsynch_destroy(&thread->monitor_synch);
        xnsynch_destroy(&thread->sigwait);
-       /* Waiters will receive EIDRM */
-       xnsynch_destroy(&thread->join_synch);
-       xnsched_run();
 
        return NULL;
 }
@@ -506,7 +503,6 @@ static inline int pthread_create(struct cobalt_thread **thread_p,
 
        thread->hkey.u_pth = 0;
        thread->hkey.mm = NULL;
-       xnsynch_init(&thread->join_synch, XNSYNCH_FIFO, NULL);
 
        *thread_p = thread;
 
@@ -1096,32 +1092,18 @@ int cobalt_thread_join(unsigned long pth)
 {
        struct cobalt_local_hkey hkey;
        struct cobalt_thread *thread;
-       int ret;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-
        hkey.u_pth = pth;
        hkey.mm = current->mm;
        thread = thread_lookup(&hkey);
-       if (thread == NULL)
-               ret = -ESRCH;
-       else if (thread == cobalt_current_thread())
-               ret = -EDEADLK;
-       else if (xnsynch_pended_p(&thread->join_synch))
-               ret = -EBUSY;
-       else {
-               xnthread_set_state(&thread->threadbase, XNJOINED);
-               ret = xnsynch_sleep_on(&thread->join_synch,
-                                      XN_INFINITE, XN_RELATIVE);
-               ret = ret & XNBREAK ? -EINTR : 0;
-               if (ret != -EIDRM && thread_lookup(&hkey) == thread)
-                       xnthread_clear_state(&thread->threadbase, XNJOINED);
-       }
-
        xnlock_put_irqrestore(&nklock, s);
 
-       return ret;
+       if (thread == NULL)
+               return -ESRCH;
+
+       return xnthread_join(&thread->threadbase);
 }
 
 int cobalt_thread_stat(pid_t pid,
diff --git a/kernel/cobalt/posix/thread.h b/kernel/cobalt/posix/thread.h
index 978c1c3..39f32c4 100644
--- a/kernel/cobalt/posix/thread.h
+++ b/kernel/cobalt/posix/thread.h
@@ -119,8 +119,6 @@ struct cobalt_thread {
        int monitor_queued;
 
        struct cobalt_local_hkey hkey;
-       /** Exit event for joining the thread. */
-       struct xnsynch join_synch;
 };
 
 struct cobalt_sigwait_context {
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index e88dbbd..2e11826 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -114,8 +114,17 @@ static void watchdog_handler(struct xntimer *timer)
                xnshadow_call_mayday(curr, SIGDEBUG_WATCHDOG);
        } else {
                printk(XENO_WARN "watchdog triggered on CPU #%d -- runaway thread "
-                      "'%s' cancelled\n", xnsched_cpu(sched), xnthread_name(curr));
-               xnthread_cancel(curr);
+                      "'%s' canceled\n", xnsched_cpu(sched), xnthread_name(curr));
+               /*
+                * On behalf of an IRQ handler, xnthread_cancel()
+                * would go half way cancelling the preempted
+                * thread. Therefore we manually raise XNKICKED to
+                * cause the next call to xnthread_suspend() to return
+                * early in XNBREAK condition, and XNCANCELD so that
+                * @thread exits next time it invokes
+                * xnthread_test_cancel().
+                */
+               xnthread_set_info(curr, XNKICKED|XNCANCELD);
        }
 
        xnsched_reset_watchdog(sched);
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 8a9ac9c..e5ad6e3 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -246,7 +246,7 @@ static void request_syscall_restart(struct xnthread *thread,
                        __xn_error_return(regs,
                                          (sysflags & __xn_exec_norestart) ?
                                          -EINTR : -ERESTARTSYS);
-                       notify = !xnthread_test_state(thread, XNDEBUG|XNCANCELD);
+                       notify = !xnthread_test_state(thread, XNDEBUG);
                }
                xnthread_clear_info(thread, XNKICKED);
        }
@@ -705,9 +705,33 @@ static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
                ret = 1;
        }
 
+       /*
+        * CAUTION: we must NOT raise XNBREAK when clearing a forcible
+        * block state, such as XNSUSP, XNHELD. The caller of
+        * xnthread_suspend() we unblock shall proceed as for a normal
+        * return, until it traverses a cancellation point if
+        * XNCANCELD was raised earlier, or calls xnthread_suspend()
+        * which will detect XNKICKED and act accordingly.
+        *
+        * Rationale: callers of xnthread_suspend() may assume that
+        * receiving XNBREAK means that the process that motivated the
+        * blocking did not go to completion. E.g. the wait context
+        * (see xnthread_prepare_wait()) was NOT posted before
+        * xnsynch_sleep_on() returned, leaving no useful data there.
+        * Therefore, in case only XNSUSP remains set for the thread
+        * on entry to force_wakeup(), after XNPEND was lifted earlier
+        * when the wait went to successful completion (i.e. no
+        * timeout), then we want the kicked thread to know that it
+        * did receive the requested resource, not finding XNBREAK in
+        * its state word.
+        *
+        * Callers of xnthread_suspend() may inquire for XNKICKED to
+        * detect forcible unblocks from XNSUSP, XNHELD, if they
+        * should act upon this case specifically.
+        */
        if (xnthread_test_state(thread, XNSUSP|XNHELD)) {
                xnthread_resume(thread, XNSUSP|XNHELD);
-               xnthread_set_info(thread, XNKICKED|XNBREAK);
+               xnthread_set_info(thread, XNKICKED);
        }
 
        return ret;
@@ -745,13 +769,15 @@ void __xnshadow_kick(struct xnthread *thread) /* nklock locked, irqs off */
        xnthread_set_info(thread, XNKICKED);
 
        /*
-        * No need to run a mayday trap if the current thread kicks
-        * itself out of primary mode: it will relax on its way back
-        * to userland via the current syscall epilogue. Otherwise, we
-        * want that thread to enter the mayday trap asap, to call us
-        * back for relaxing.
+        * We may send mayday signals to userland threads only.
+        * However, no need to run a mayday trap if the current thread
+        * kicks itself out of primary mode: it will relax on its way
+        * back to userland via the current syscall
+        * epilogue. Otherwise, we want that thread to enter the
+        * mayday trap asap, to call us back for relaxing.
         */
-       if (thread != xnsched_current_thread())
+       if (thread != xnsched_current_thread() &&
+           xnthread_test_state(thread, XNUSER))
                xnarch_call_mayday(p);
 }
 
@@ -1124,6 +1150,8 @@ int xnshadow_map_kernel(struct xnthread *thread, struct completion *done)
                                 XN_INFINITE, XN_RELATIVE, NULL);
        xnlock_put_irqrestore(&nklock, s);
 
+       xnthread_test_cancel();
+
        xntrace_pid(xnthread_host_pid(thread),
                    xnthread_current_priority(thread));
 
@@ -1238,6 +1266,9 @@ static unsigned long map_mayday_page(struct task_struct *p)
 void xnshadow_call_mayday(struct xnthread *thread, int sigtype)
 {
        struct task_struct *p = xnthread_host_task(thread);
+
+       /* Mayday traps are available to userland threads only. */
+       XENO_BUGON(NUCLEUS, !xnthread_test_state(thread, XNUSER));
        xnthread_set_info(thread, XNKICKED);
        xnshadow_send_sig(thread, SIGDEBUG, sigtype);
        xnarch_call_mayday(p);
@@ -2254,6 +2285,9 @@ static int handle_taskexit_event(struct task_struct *p) /* p == current */
                unlock_timers();
 
        xnthread_run_handler(thread, exit_thread);
+       /* Waiters will receive EIDRM */
+       xnsynch_destroy(&thread->join_synch);
+       xnsched_run();
 
        if (xnthread_test_state(thread, XNUSER)) {
                xnlock_get_irqsave(&nklock, s);
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b68187d..fbc7e84 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -180,6 +180,7 @@ int __xnthread_init(struct xnthread *thread,
        memset(&thread->stat, 0, sizeof(thread->stat));
        thread->selector = NULL;
        INIT_LIST_HEAD(&thread->claimq);
+       xnsynch_init(&thread->join_synch, XNSYNCH_FIFO, NULL);
        /* These will be filled by xnthread_start() */
        thread->imode = 0;
        thread->entry = NULL;
@@ -337,31 +338,15 @@ xnticks_t xnthread_get_period(xnthread_t *thread)
 }
 EXPORT_SYMBOL_GPL(xnthread_get_period);
 
-/* NOTE: caller must provide for locking */
 void xnthread_prepare_wait(struct xnthread_wait_context *wc)
 {
        struct xnthread *curr = xnsched_current_thread();
 
+       wc->posted = 0;
        curr->wcontext = wc;
 }
 EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 
-/* NOTE: caller must provide for locking */
-void xnthread_finish_wait(struct xnthread_wait_context *wc,
-                         void (*cleanup)(struct xnthread_wait_context *wc))
-{
-       struct xnthread *curr = xnsched_current_thread();
-
-       curr->wcontext = NULL;
-
-       if (xnthread_test_info(curr, XNCANCELD)) {
-               if (cleanup)
-                       cleanup(wc);
-               xnthread_cancel(curr);
-       }
-}
-EXPORT_SYMBOL_GPL(xnthread_finish_wait);
-
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
        int ret = 0;
@@ -865,20 +850,19 @@ void xnthread_suspend(xnthread_t *thread, int mask,
        if (thread == sched->curr)
                xnsched_set_resched(sched);
 
-       /* Is the thread ready to run? */
+       /*
+        * If attempting to suspend a runnable thread which is pending
+        * a forced switch to secondary mode, just raise the break
+        * condition and return immediately.
+        *
+        * We may end up suspending a kicked thread that has been
+        * preempted on its relaxing path, which is a perfectly valid
+        * situation: we just ignore the signal notification in
+        * primary mode, and rely on the wakeup call pending for that
+        * task in the root context, to collect and act upon the
+        * pending Linux signal (see handle_sigwake_event()).
+        */
        if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
-               /*
-                * If attempting to suspend a runnable (shadow) thread
-                * which is pending a forced switch to secondary mode,
-                * just raise the break condition and return
-                * immediately.  We may end up suspending a kicked
-                * thread that has been preempted on its relaxing
-                * path, which is a perfectly valid situation: we just
-                * ignore the signal notification in primary mode, and
-                * rely on the wakeup call pending for that task in
-                * the root context, to collect and act upon the
-                * pending Linux signal.
-                */
                if ((mask & XNRELAX) == 0 &&
                    xnthread_test_info(thread, XNKICKED)) {
                        if (wchan) {
@@ -888,7 +872,6 @@ void xnthread_suspend(xnthread_t *thread, int mask,
                        xnthread_clear_info(thread, XNRMID | XNTIMEO);
                        xnthread_set_info(thread, XNBREAK);
                        xnlock_put_irqrestore(&nklock, s);
-                       xnthread_test_cancel();
                        return;
                }
                xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK | XNWAKEN | XNROBBED);
@@ -954,13 +937,6 @@ void xnthread_suspend(xnthread_t *thread, int mask,
                 * xnsched_run will trigger the IPI as required.
                 */
                xnsched_run();
-
-               if (xnthread_test_info(thread, XNCANCELD)) {
-                       xnlock_put_irqrestore(&nklock, s);
-                       xnthread_test_cancel();
-                       /* ... won't return ... */
-                       XENO_BUGON(NUCLEUS, 1);
-               }
                goto unlock_and_exit;
        }
 
@@ -1512,17 +1488,15 @@ EXPORT_SYMBOL_GPL(xnthread_set_slice);
  * the following situations:
  *
  * - @a thread self-cancels by a call to xnthread_cancel().
- * - @a thread calls any blocking Xenomai service that would otherwise
- * lead to a suspension in xnthread_suspend().
- * - @a thread resumes from xnthread_suspend().
  * - @a thread invokes a Linux syscall (user-space shadow only).
  * - @a thread receives a Linux signal (user-space shadow only).
+ * - @a thread explicitly calls xnthread_test_cancel().
  *
  * @param thread The descriptor address of the thread to terminate.
  *
  * Calling context: This service may be called from all runtime modes.
  *
- * Rescheduling: always in case of self-cancellation from primary mode.
+ * Rescheduling: yes.
  */
 void xnthread_cancel(struct xnthread *thread)
 {
@@ -1554,14 +1528,18 @@ void xnthread_cancel(struct xnthread *thread)
        }
 
 check_self_cancel:
-       if (xnshadow_current_p(thread)) {
+       if (xnshadow_current() == thread) {
                xnlock_put_irqrestore(&nklock, s);
                xnthread_test_cancel();
-               /* ... won't return ... */
-               XENO_BUGON(NUCLEUS, 1);
+               /*
+                * May return if called on behalf of an IRQ handler
+                * which has preempted @thread.
+                */
+               return;
        }
 
        __xnshadow_kick(thread);
+       xnsched_run();
 
 unlock_and_exit:
        xnlock_put_irqrestore(&nklock, s);
@@ -1577,40 +1555,88 @@ EXPORT_SYMBOL_GPL(xnthread_cancel);
  * dormant at the time of the call, then xnthread_join() returns
  * immediately.
  *
+ * xnthread_join() adapts to the calling context (primary or
+ * secondary).
+ *
  * @param thread The descriptor address of the thread to join with.
  *
- * Calling context: This service may be called from secondary mode
- * only.
+ * @return 0 is returned on success. Otherwise, the following error
+ * codes indicate the cause of the failure:
+ *
+ * - -EDEADLK is returned if the current thread attempts to join
+ * itself.
+ *
+ * - -EINTR is returned if the current thread was unblocked while
+ *   waiting for @a thread to terminate.
+ *
+ * - -EBUSY indicates that another thread is already waiting for @a
+ *   thread to terminate.
+ *
+ * Calling context: any.
  *
  * Rescheduling: always if @a thread did not terminate yet at the time
  * of the call.
  */
-void xnthread_join(struct xnthread *thread)
+int xnthread_join(struct xnthread *thread)
 {
        unsigned int tag;
        spl_t s;
+       int ret;
+
+       XENO_BUGON(NUCLEUS, xnthread_test_state(thread, XNROOT));
 
        xnlock_get_irqsave(&nklock, s);
 
        tag = thread->idtag;
        if (xnthread_test_info(thread, XNDORMANT) || tag == 0) {
                xnlock_put_irqrestore(&nklock, s);
-               return;
+               return 0;
        }
 
-       xnlock_put_irqrestore(&nklock, s);
-
        trace_mark(xn_nucleus, thread_join, "thread %p thread_name %s",
                   thread, xnthread_name(thread));
 
-       /*
-        * Only a very few threads are likely to terminate within a
-        * short time frame at any point in time, so experiencing a
-        * thundering herd effect due to synchronizing on a single
-        * wait queue is quite unlikely. In any case, we run in
-        * secondary mode.
-        */
-       wait_event(nkjoinq, thread->idtag != tag);
+       if (ipipe_root_p) {
+               if (xnthread_test_state(thread, XNJOINED)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+               xnthread_set_state(thread, XNJOINED);
+               xnlock_put_irqrestore(&nklock, s);
+               /*
+                * Only a very few threads are likely to terminate within a
+                * short time frame at any point in time, so experiencing a
+                * thundering herd effect due to synchronizing on a single
+                * wait queue is quite unlikely. In any case, we run in
+                * secondary mode.
+                */
+               if (wait_event_interruptible(nkjoinq, thread->idtag != tag)) {
+                       xnlock_get_irqsave(&nklock, s);
+                       if (thread->idtag == tag)
+                               xnthread_clear_state(thread, XNJOINED);
+                       ret = -EINTR;
+                       goto out;
+               }
+
+               return 0;
+       }
+
+       if (thread == xnsched_current_thread())
+               ret = -EDEADLK;
+       else if (xnsynch_pended_p(&thread->join_synch))
+               ret = -EBUSY;
+       else {
+               xnthread_set_state(thread, XNJOINED);
+               ret = xnsynch_sleep_on(&thread->join_synch,
+                                      XN_INFINITE, XN_RELATIVE);
+               if ((ret & XNRMID) == 0 && thread->idtag == tag)
+                       xnthread_clear_state(thread, XNJOINED);
+               ret = ret & XNBREAK ? -EINTR : 0;
+       }
+out:
+       xnlock_put_irqrestore(&nklock, s);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(xnthread_join);
 
@@ -1839,10 +1865,22 @@ EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
 
 void __xnthread_test_cancel(struct xnthread *curr)
 {
+       /*
+        * Just in case xnthread_test_cancel() is called from an IRQ
+        * handler, in which case we may not take the exit path.
+        *
+        * NOTE: curr->sched is stable from our POV and can't change
+        * under our feet.
+        */
+       if (curr->sched->lflags & XNINIRQ)
+               return;
+
        if (!xnthread_test_state(curr, XNRELAX))
                xnshadow_relax(0, 0);
 
        do_exit(0);
+       /* ... won't return ... */
+       XENO_BUGON(NUCLEUS, 1);
 }
 EXPORT_SYMBOL_GPL(__xnthread_test_cancel);
 
diff --git a/kernel/drivers/ipc/bufp.c b/kernel/drivers/ipc/bufp.c
index 093785d..6a1117d 100644
--- a/kernel/drivers/ipc/bufp.c
+++ b/kernel/drivers/ipc/bufp.c
@@ -112,21 +112,6 @@ static struct xnpnode_link __bufp_pnode = {
 
 #endif /* !CONFIG_XENO_OPT_VFILE */
 
-static void __bufp_cleanup_handler(struct rtipc_wait_context *wc)
-{
-       struct bufp_wait_context *bufwc;
-       /*
-        * Cancellation request is pending - release the lock we hold,
-        * we'll be vanishing away soon. Granted, we could avoid doing
-        * that, since we know that this particular lock is Xenomai's
-        * nklock, which may be held across rescheduling calls.
-        * Anyway, this illustrates how to use the cleanup handler of
-        * a wait context.
-        */
-       bufwc = container_of(wc, struct bufp_wait_context, wc);
-       rtipc_leave_atomic(bufwc->lockctx);
-}
-
 static int bufp_socket(struct rtipc_private *priv,
                       rtdm_user_info_t *user_info)
 {
@@ -295,12 +280,9 @@ redo:
                 */
                ret = rtdm_event_timedwait(&sk->i_event,
                                           sk->rx_timeout, &toseq);
-               rtipc_finish_wait(&wait.wc, __bufp_cleanup_handler);
-
                if (unlikely(ret))
                        break;
        }
-
 out:
        rtipc_leave_atomic(wait.lockctx);
 
@@ -530,11 +512,9 @@ redo:
                 */
                ret = rtdm_event_timedwait(&rsk->o_event,
                                           sk->tx_timeout, &toseq);
-               rtipc_finish_wait(&wait.wc, __bufp_cleanup_handler);
                if (unlikely(ret))
                        break;
        }
-
 out:
        rtipc_leave_atomic(wait.lockctx);
 
diff --git a/kernel/drivers/ipc/internal.h b/kernel/drivers/ipc/internal.h
index 4aca153..158b36f 100644
--- a/kernel/drivers/ipc/internal.h
+++ b/kernel/drivers/ipc/internal.h
@@ -121,7 +121,6 @@ extern struct xnptree rtipc_ptree;
 
 #define rtipc_wait_context             xnthread_wait_context
 #define rtipc_prepare_wait             xnthread_prepare_wait
-#define rtipc_finish_wait              xnthread_finish_wait
 #define rtipc_get_wait_context         xnthread_get_wait_context
 
 #define rtipc_peek_wait_head(obj)      xnsynch_peek_pendq(&(obj)->synch_base)
diff --git a/kernel/drivers/testing/switchtest.c b/kernel/drivers/testing/switchtest.c
index 2be9a5c..ea36ee6 100644
--- a/kernel/drivers/testing/switchtest.c
+++ b/kernel/drivers/testing/switchtest.c
@@ -392,9 +392,9 @@ struct taskarg {
 static void rtswitch_ktask(void *cookie)
 {
        struct taskarg *arg = (struct taskarg *) cookie;
+       unsigned int fp_val, expected, to, i = 0;
        rtswitch_context_t *ctx = arg->ctx;
        rtswitch_task_t *task = arg->task;
-       unsigned to, i = 0;
 
        to = task->base.index;
 
@@ -423,8 +423,6 @@ static void rtswitch_ktask(void *cookie)
                }
 
                if (task->base.flags & RTTST_SWTEST_USE_FPU) {
-                       unsigned fp_val, expected;
-
                        expected = task->base.index + i * 1000;
                        fp_val = fp_regs_check(fp_features, expected, report);
 
@@ -435,6 +433,9 @@ static void rtswitch_ktask(void *cookie)
                        }
                }
 
+               if (rtdm_task_should_stop())
+                       break;
+
                if (++i == 4000000)
                        i = 0;
        }

