Module: xenomai-forge
Branch: next
Commit: e0791de259582a36e3aa780fe5db34202bc3fd5c
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=e0791de259582a36e3aa780fe5db34202bc3fd5c

Author: Philippe Gerum <r...@xenomai.org>
Date:   Mon May 12 14:36:25 2014 +0200

copperplate/threadobj: introduce remote agent for thread operations

In shared multi-processing mode, cancelling remote threads or updating
their scheduling parameters requires going through an agent thread
running in the process they belong to.

This patch series introduces such a mechanism, so that
threadobj_cancel() and threadobj_set_priority() work transparently,
regardless of whether the target thread runs in the current process.
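
As an illustration only (not part of this patch), deleting a task then
looks the same at the call site whether the target thread is local or
lives in another process of the session. This sketch assumes the
copperplate convention documented in the code (thobj->lock held on
entry, dropped on return):

	threadobj_lock(thobj);
	ret = threadobj_cancel(thobj);	/* lock dropped on return */
	if (ret)
		warning("cancellation request failed, %s", symerror(ret));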

CAUTION: remote operations are implemented by a fully asynchronous
protocol. The status returned to a caller for an operation affecting a
remote thread tells whether the request was successfully sent to the
agent, but does not reflect the final status of that operation in the
remote process. The remote agent will however issue a warning message
if it fails to carry out a request.
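
For instance (hypothetical caller, not taken from this patch), a zero
return from threadobj_set_priority() on a non-local thread only means
the request was queued to the owning process' agent via
sigqueue(SIGAGENT); the priority change itself happens asynchronously:

	threadobj_lock(thobj);
	ret = threadobj_set_priority(thobj, prio); /* drops thobj->lock */
	if (ret)
		/* Request not sent (e.g. -ENOMEM); nothing changed. */
		warning("set_priority request not sent, %s", symerror(ret));
	/* For a remote thread, ret == 0 only confirms delivery; do not
	 * assume the new priority is already in effect. */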

---

 include/boilerplate/ancillaries.h    |    7 +-
 include/boilerplate/compiler.h       |    4 +
 include/cobalt/boilerplate/signal.h  |    2 +
 include/copperplate/heapobj.h        |    9 +
 include/copperplate/threadobj.h      |    1 +
 include/mercury/boilerplate/signal.h |    5 +-
 lib/alchemy/task.c                   |    8 +-
 lib/copperplate/internal.c           |   37 +---
 lib/copperplate/internal.h           |   14 +-
 lib/copperplate/threadobj.c          |  332 ++++++++++++++++++++++++++++------
 lib/copperplate/timerobj.c           |    3 +-
 lib/psos/task.c                      |    3 +-
 lib/vxworks/taskLib.c                |    3 +-
 lib/vxworks/testsuite/task-2.c       |   14 +-
 14 files changed, 343 insertions(+), 99 deletions(-)

diff --git a/include/boilerplate/ancillaries.h b/include/boilerplate/ancillaries.h
index 9d5818f..107a5f8 100644
--- a/include/boilerplate/ancillaries.h
+++ b/include/boilerplate/ancillaries.h
@@ -22,6 +22,7 @@
 #include <time.h>
 #include <pthread.h>
 #include <boilerplate/signal.h>
+#include <boilerplate/compiler.h>
 
 extern struct timespec __init_date;
 
@@ -55,12 +56,12 @@ void __printout(const char *name,
                const char *header,
                const char *fmt, va_list ap);
 
-void __panic(const char *name,
-            const char *fmt, va_list ap);
+void __noreturn __panic(const char *name,
+                       const char *fmt, va_list ap);
 
 void early_panic(const char *fmt, ...);
 
-void panic(const char *fmt, ...);
+void __noreturn panic(const char *fmt, ...);
 
 void __warning(const char *name,
               const char *fmt, va_list ap);
diff --git a/include/boilerplate/compiler.h b/include/boilerplate/compiler.h
index e3fe413..85e8846 100644
--- a/include/boilerplate/compiler.h
+++ b/include/boilerplate/compiler.h
@@ -31,4 +31,8 @@
 #define unlikely(x)    __builtin_expect(!!(x), 0)
 #endif
 
+#ifndef __noreturn
+#define __noreturn     __attribute__((__noreturn__))
+#endif
+
 #endif /* _BOILERPLATE_COMPILER_H */
diff --git a/include/cobalt/boilerplate/signal.h b/include/cobalt/boilerplate/signal.h
index 4807a51..b7d5ffe 100644
--- a/include/cobalt/boilerplate/signal.h
+++ b/include/cobalt/boilerplate/signal.h
@@ -20,6 +20,8 @@
 
 #include <cobalt/signal.h>
 
+#define SIGAGENT       (SIGRTMIN + 12) /* Request to remote agent */
+
 #define SIGSAFE_LOCK_ENTRY(__safelock)                                 \
        do {                                                            \
                push_cleanup_lock(__safelock);                          \
diff --git a/include/copperplate/heapobj.h b/include/copperplate/heapobj.h
index 6b5617c..495d72c 100644
--- a/include/copperplate/heapobj.h
+++ b/include/copperplate/heapobj.h
@@ -185,9 +185,15 @@ extern struct hash_table *__main_catalog;
 extern struct sysgroup *__main_sysgroup;
 
 struct sysgroup_memspec {
+       /** next member in sysgroup list. */
        struct holder next;
 };
 
+struct agent_memspec {
+       /** Agent pid in owner process. */
+       pid_t pid;
+};
+
 static inline void *mainheap_ptr(memoff_t off)
 {
        return off ? (void *)__memptr(__main_heap, off) : NULL;
@@ -315,6 +321,9 @@ char *xnstrdup(const char *ptr);
 struct sysgroup_memspec {
 };
 
+struct agent_memspec {
+};
+
 /*
  * Whether an object is laid in some shared heap. Never if pshared
  * mode is disabled.
diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index fd87829..fa9b226 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -185,6 +185,7 @@ struct threadobj {
        struct traceobj *tracer;
        sem_t *cancel_sem;
        struct sysgroup_memspec memspec;
+       struct agent_memspec agent;
        struct backtrace_data btd;
 };
 
diff --git a/include/mercury/boilerplate/signal.h b/include/mercury/boilerplate/signal.h
index 914c02c..f25c618 100644
--- a/include/mercury/boilerplate/signal.h
+++ b/include/mercury/boilerplate/signal.h
@@ -24,10 +24,11 @@
 #define sigev_notify_thread_id  _sigev_un._tid
 #endif
 
-#define SIGSUSP                (SIGRTMIN + 8)
-#define SIGRESM                (SIGRTMIN + 9)
+#define SIGSUSP                (SIGRTMIN + 8)  /* Suspend request */
+#define SIGRESM                (SIGRTMIN + 9)  /* Resume request */
 #define SIGRELS                (SIGRTMIN + 10) /* Syscall abort */
 #define SIGRRB         (SIGRTMIN + 11) /* Round-robin event */
+#define SIGAGENT       (SIGRTMIN + 12) /* Request to remote agent */
 
 #define SIGSAFE_LOCK_ENTRY(__safelock)                                 \
        do {                                                            \
diff --git a/lib/alchemy/task.c b/lib/alchemy/task.c
index b47b02a..eaedc18 100644
--- a/lib/alchemy/task.c
+++ b/lib/alchemy/task.c
@@ -400,7 +400,8 @@ int rt_task_create(RT_TASK *task, const char *name,
 
        cta.detachstate = mode & T_JOINABLE ?
                PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED;
-       cta.prio = prio;
+       cta.sched.policy = prio ? SCHED_RT : SCHED_OTHER;
+       cta.sched.param.sched_priority = prio;
        cta.prologue = task_prologue_1;
        cta.run = task_entry;
        cta.arg = tcb;
@@ -699,6 +700,7 @@ out:
 int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
 {
        struct threadobj *current = threadobj_current();
+       struct coresched_attributes csa;
        struct alchemy_task *tcb;
        struct service svc;
        pthread_t self;
@@ -736,7 +738,9 @@ int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
        if (task)
                task->thread = self;
 
-       ret = __bt(copperplate_renice_thread(self, prio));
+       csa.policy = prio ? SCHED_RT : SCHED_OTHER;
+       csa.param.sched_priority = prio;
+       ret = __bt(copperplate_renice_local_thread(self, &csa));
 out:
        CANCEL_RESTORE(svc);
 
diff --git a/lib/copperplate/internal.c b/lib/copperplate/internal.c
index 5513b2c..3a12c72 100644
--- a/lib/copperplate/internal.c
+++ b/lib/copperplate/internal.c
@@ -79,15 +79,10 @@ int copperplate_create_thread(struct corethread_attributes *cta,
        return __bt(thread_spawn_epilogue(cta));
 }
 
-int copperplate_renice_thread(pthread_t tid, int prio)
+int copperplate_renice_local_thread(pthread_t tid,
+                                   const struct coresched_attributes *csa)
 {
-       struct sched_param_ex param_ex;
-       int policy;
-
-       param_ex.sched_priority = prio;
-       policy = prio ? SCHED_RT : SCHED_OTHER;
-
-       return __bt(-pthread_setschedparam_ex(tid, policy, &param_ex));
+       return __bt(-pthread_setschedparam_ex(tid, csa->policy, &csa->param));
 }
 
 static inline void prepare_wait_corespec(void)
@@ -102,11 +97,6 @@ static inline void prepare_wait_corespec(void)
        cobalt_thread_harden();
 }
 
-static inline int finish_wait_corespec(struct corethread_attributes *cta)
-{
-       return __bt(copperplate_renice_thread(pthread_self(), cta->prio));
-}
-
 int copperplate_kill_tid(pid_t tid, int sig)
 {
        return __RT(kill(tid, sig)) ? -errno : 0;
@@ -147,15 +137,10 @@ int copperplate_create_thread(struct corethread_attributes *cta,
        return __bt(thread_spawn_epilogue(cta));
 }
 
-int copperplate_renice_thread(pthread_t tid, int prio)
+int copperplate_renice_local_thread(pthread_t tid,
+                                   const struct coresched_attributes *csa)
 {
-       struct sched_param param;
-       int policy;
-
-       param.sched_priority = prio;
-       policy = prio ? SCHED_RT : SCHED_OTHER;
-
-       return __bt(-__RT(pthread_setschedparam(tid, policy, &param)));
+       return __bt(-__RT(pthread_setschedparam(tid, csa->policy, &csa->param)));
 }
 
 static inline void prepare_wait_corespec(void)
@@ -163,11 +148,6 @@ static inline void prepare_wait_corespec(void)
        /* empty */
 }
 
-static inline int finish_wait_corespec(struct corethread_attributes *cta)
-{
-       return __bt(copperplate_renice_thread(pthread_self(), cta->prio));
-}
-
 #endif  /* CONFIG_XENO_MERCURY */
 
 static int thread_spawn_prologue(struct corethread_attributes *cta)
@@ -230,9 +210,9 @@ static void *thread_trampoline(void *arg)
        __RT(sem_post(&cta->__reserved.warm));
        thread_spawn_wait(&released);
        __RT(sem_destroy(&released));
-       ret = finish_wait_corespec(&_cta);
+       ret = __bt(copperplate_renice_local_thread(pthread_self(), &_cta.sched));
        if (ret)
-               warning("core thread prologue failed, %s", symerror(ret));
+               warning("cannot renice core thread, %s", symerror(ret));
 
        return _cta.run(_cta.arg);
 }
@@ -257,7 +237,6 @@ void panic(const char *fmt, ...)
 
        va_start(ap, fmt);
        __panic(thobj ? threadobj_get_name(thobj) : NULL, fmt, ap);
-       va_end(ap);
 }
 
 void warning(const char *fmt, ...)
diff --git a/lib/copperplate/internal.h b/lib/copperplate/internal.h
index fa3e5d1..6a45732 100644
--- a/lib/copperplate/internal.h
+++ b/lib/copperplate/internal.h
@@ -55,10 +55,19 @@ struct shared_heap {
        } buckets[HOBJ_NBUCKETS];
 };
 
+struct coresched_attributes {
+       int policy;
+#ifdef CONFIG_XENO_COBALT
+       struct sched_param_ex param;
+#else
+       struct sched_param param;
+#endif
+};
+
 struct corethread_attributes {
-       int prio;
        size_t stacksize;
        int detachstate;
+       struct coresched_attributes sched;
        int (*prologue)(void *arg);
        void *(*run)(void *arg);
        void *arg;
@@ -82,7 +91,8 @@ int copperplate_kill_tid(pid_t tid, int sig);
 int copperplate_create_thread(struct corethread_attributes *cta,
                              pthread_t *tid);
 
-int copperplate_renice_thread(pthread_t tid, int prio);
+int copperplate_renice_local_thread(pthread_t tid,
+                                   const struct coresched_attributes *csa);
 
 void copperplate_bootstrap_minimal(const char *arg0,
                                   char *mountpt);
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index 042828c..bdffdc9 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -18,6 +18,7 @@
  * Thread object abstraction.
  */
 
+#include <sys/prctl.h>
 #include <signal.h>
 #include <memory.h>
 #include <errno.h>
@@ -47,28 +48,20 @@ union copperplate_wait_union {
 
 static void finalize_thread(void *p);
 
-/*
- * NOTE on cancellation handling: Most traditional RTOSes guarantee
- * that the task/thread delete operation is strictly synchronous,
- * i.e. the deletion service returns to the caller only __after__ the
- * deleted thread entered an innocuous state, i.e. dormant/dead.
- *
- * For this reason, we always wait for the cancelled threads
- * internally (see threadobj_cancel()), which might lead to a priority
- * inversion. This is the price for guaranteeing that
- * threadobj_cancel() returns only after the cancelled thread
- * finalizer has run.
- */
+static int request_setschedparam(struct threadobj *thobj,
+                                const struct coresched_attributes *csa);
+
+static int request_cancel(struct threadobj *thobj);
+
+static int threadobj_agent_prio;
 
 int threadobj_high_prio;
 
 int threadobj_irq_prio;
 
 #ifdef HAVE_TLS
-
 __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
 struct threadobj *__threadobj_current;
-
 #endif
 
 /*
@@ -83,6 +76,159 @@ void threadobj_init_key(void)
                early_panic("failed to allocate TSD key");
 }
 
+#ifdef CONFIG_XENO_PSHARED
+
+static pid_t agent_pid;
+
+#define RMT_SETSCHED   0
+#define RMT_CANCEL     1
+
+struct remote_cancel {
+       pthread_t tid;
+};
+
+struct remote_setsched {
+       pthread_t tid;
+       struct coresched_attributes attr;
+};
+
+struct remote_request {
+       int req;        /* RMT_xx */
+       union {
+               struct remote_cancel cancel;
+               struct remote_setsched setsched;
+       } u;
+};
+
+#ifdef CONFIG_XENO_COBALT
+
+static inline void agent_init_corespec(const char *name)
+{
+       pthread_set_name_np(pthread_self(), name);
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+static inline void agent_init_corespec(const char *name)
+{
+       prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
+}
+
+#endif /* CONFIG_XENO_MERCURY */
+
+static int agent_prologue(void *arg)
+{
+       agent_pid = copperplate_get_tid();
+       agent_init_corespec("remote-agent");
+       threadobj_set_current(THREADOBJ_IRQCONTEXT);
+
+       return 0;
+}
+
+static void *agent_loop(void *arg)
+{
+       struct remote_request *rq;
+       siginfo_t si;
+       sigset_t set;
+       int sig, ret;
+
+       sigemptyset(&set);
+       sigaddset(&set, SIGAGENT);
+
+       for (;;) {
+               sig = __RT(sigwaitinfo(&set, &si));
+               if (sig < 0) {
+                       if (errno == EINTR)
+                               continue;
+                       panic("agent thread cannot wait for request, %s",
+                             symerror(-errno));
+               }
+               rq = si.si_ptr;
+               switch (rq->req) {
+               case RMT_SETSCHED:
+                       ret = copperplate_renice_local_thread(rq->u.setsched.tid,
+                                                             &rq->u.setsched.attr);
+                       break;
+               case RMT_CANCEL:
+                       ret = pthread_cancel(rq->u.cancel.tid);
+                       break;
+               default:
+                       panic("invalid remote request #%d", rq->req);
+               }
+               if (ret)
+                       warning("remote request #%d failed, %s",
+                               rq->req, symerror(ret));
+               xnfree(rq);
+       }
+
+       return NULL;
+}
+
+static inline int send_agent(struct threadobj *thobj,
+                            struct remote_request *rq)
+{
+       union sigval val = { .sival_ptr = rq };
+       /*
+        * XXX: No backtracing, may legitimately fail if the remote
+        * process goes away (hopefully cleanly). However, the request
+        * blocks attached to unprocessed pending signals may leak, as
+        * requests are fully asynchronous. Fortunately, processes
+        * creating user threads are unlikely to ungracefully leave
+        * the session they belong to intentionally.
+        */
+       return __RT(sigqueue(thobj->agent.pid, SIGAGENT, val));
+}
+
+static void start_agent(void)
+{
+       struct corethread_attributes cta;
+       pthread_t tid;
+       sigset_t set;
+       int ret;
+
+       /*
+        * CAUTION: we expect all internal/user threads created by
+        * Copperplate to inherit this signal mask, otherwise
+        * sigqueue(SIGAGENT) might be delivered to the wrong
+        * thread. So make sure the agent support is set up early
+        * enough.
+        */
+       sigemptyset(&set);
+       sigaddset(&set, SIGAGENT);
+       pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+       cta.sched.policy = SCHED_RT;
+       cta.sched.param.sched_priority = threadobj_agent_prio;
+       cta.prologue = agent_prologue;
+       cta.run = agent_loop;
+       cta.arg = NULL;
+       cta.stacksize = PTHREAD_STACK_MIN * 4;
+       cta.detachstate = PTHREAD_CREATE_DETACHED;
+
+       ret = copperplate_create_thread(&cta, &tid);
+       if (ret)
+               panic("failed to start agent thread, %s", symerror(ret));
+}
+
+static void threadobj_set_agent(struct threadobj *thobj)
+{
+       thobj->agent.pid = agent_pid;
+}
+
+#else  /* !CONFIG_XENO_PSHARED */
+
+static inline void start_agent(void)
+{
+       /* No agent in private (process-local) session. */
+}
+
+static inline void threadobj_set_agent(struct threadobj *thobj)
+{
+       /* nop */
+}
+
+#endif /* !CONFIG_XENO_PSHARED */
+
 #ifdef CONFIG_XENO_COBALT
 
 #include "cobalt/internal.h"
@@ -156,6 +302,9 @@ int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
 
        __threadobj_check_locked(thobj);
 
+       if (thobj->status & __THREAD_S_SUSPENDED)
+               return 0;
+
        thobj->status |= __THREAD_S_SUSPENDED;
        if (thobj == threadobj_current()) {
                threadobj_unlock(thobj);
@@ -173,7 +322,7 @@ int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
 
        __threadobj_check_locked(thobj);
 
-       if (thobj == threadobj_current())
+       if ((thobj->status & __THREAD_S_SUSPENDED) == 0)
                return 0;
 
        thobj->status &= ~__THREAD_S_SUSPENDED;
@@ -252,32 +401,37 @@ void __threadobj_set_scheduler(struct threadobj *thobj,
 
 int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock held, dropped */
 {
-       struct sched_param_ex xparam;
-       pthread_t tid = thobj->tid;
-       int policy;
+       struct coresched_attributes csa;
+       int ret;
 
        __threadobj_check_locked(thobj);
 
-       policy = SCHED_RT;
+       csa.policy = SCHED_RT;
        if (prio == 0) {
                thobj->status &= ~__THREAD_S_RR;
-               policy = SCHED_OTHER;
+               csa.policy = SCHED_OTHER;
        } else if (thobj->status & __THREAD_S_RR) {
-               xparam.sched_rr_quantum = thobj->tslice;
-               policy = SCHED_RR;
+               csa.param.sched_rr_quantum = thobj->tslice;
+               csa.policy = SCHED_RR;
        }
 
-       thobj->priority = prio;
-       thobj->policy = policy;
-       threadobj_unlock(thobj);
        /*
-        * XXX: as a side effect, resetting SCHED_RR will refill the
-        * time credit for the target thread with the last quantum
-        * set.
+        * As a side effect, resetting SCHED_RR will refill the time
+        * credit for the target thread with the last quantum set.
         */
-       xparam.sched_priority = prio;
+       csa.param.sched_priority = prio;
+       thobj->priority = prio;
+       thobj->policy = csa.policy;
+
+       if (thobj == threadobj_current()) {
+               threadobj_unlock(thobj);
+               ret = request_setschedparam(thobj, &csa);
+       } else {
+               ret = request_setschedparam(thobj, &csa);
+               threadobj_unlock(thobj);
+       }
 
-       return pthread_setschedparam_ex(tid, policy, &xparam);
+       return __bt(ret);
 }
 
 int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock held */
@@ -376,8 +530,6 @@ int threadobj_stat(struct threadobj *thobj, struct threadobj_stat *p) /* thobj->
 
 #else /* CONFIG_XENO_MERCURY */
 
-#include <sys/prctl.h>
-
 static int threadobj_lock_prio;
 
 static void unblock_sighandler(int sig)
@@ -436,8 +588,15 @@ static inline void pkg_init_corespec(void)
 
        /*
         * We don't have builtin scheduler-lock feature over Mercury,
-        * so we emulate it by reserving the highest priority level of
-        * the SCHED_RT class to disable involuntary preemption.
+        * so we emulate it by reserving the highest thread priority
+        * level from the SCHED_RT class to disable involuntary
+        * preemption.
+        *
+        * NOTE: The remote agent thread will also run with the
+        * highest thread priority level (threadobj_agent_prio) in
+        * shared multi-processing mode, which won't affect any thread
+        * holding the scheduler lock, unless the latter has to block
+        * for some reason, defeating the purpose of such lock anyway.
         */
        threadobj_lock_prio = threadobj_high_prio;
        threadobj_high_prio = threadobj_lock_prio - 1;
@@ -678,9 +837,8 @@ void __threadobj_set_scheduler(struct threadobj *thobj,
 
 int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock held, dropped */
 {
-       pthread_t tid = thobj->tid;
-       struct sched_param param;
-       int policy;
+       struct coresched_attributes csa;
+       int ret;
 
        __threadobj_check_locked(thobj);
 
@@ -696,23 +854,26 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
                return 0;
        }
 
-       policy = SCHED_RT;
+       csa.policy = SCHED_RT;
        if (prio == 0) {
                thobj->status &= ~__THREAD_S_RR;
-               policy = SCHED_OTHER;
+               csa.policy = SCHED_OTHER;
        } else if (thobj->status & __THREAD_S_RR)
-               policy = SCHED_RR;
+               csa.policy = SCHED_RR;
 
+       csa.param.sched_priority = prio;
        thobj->priority = prio;
-       thobj->policy = policy;
-       threadobj_unlock(thobj);
-       /*
-        * Since we released the thread container lock, we now rely on
-        * the pthread interface to recheck the tid for existence.
-        */
-       param.sched_priority = prio;
+       thobj->policy = csa.policy;
 
-       return pthread_setschedparam(tid, policy, &param);
+       if (thobj == threadobj_current()) {
+               threadobj_unlock(thobj);
+               ret = request_setschedparam(thobj, &csa);
+       } else {
+               ret = request_setschedparam(thobj, &csa);
+               threadobj_unlock(thobj);
+       }
+
+       return __bt(ret);
 }
 
 int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock held */
@@ -885,6 +1046,62 @@ int threadobj_stat(struct threadobj *thobj,
 
 #endif /* CONFIG_XENO_MERCURY */
 
+static int request_setschedparam(struct threadobj *thobj,
+                                const struct coresched_attributes *csa)
+{
+#ifdef CONFIG_XENO_PSHARED
+       struct remote_request *rq;
+       int ret;
+
+       if (unlikely(!threadobj_local_p(thobj))) {
+               rq = xnmalloc(sizeof(*rq));
+               if (rq == NULL)
+                       return -ENOMEM;
+
+               rq->req = RMT_SETSCHED;
+               rq->u.setsched.tid = thobj->tid;
+               rq->u.setsched.attr = *csa;
+
+               ret = __bt(send_agent(thobj, rq));
+               if (ret)
+                       xnfree(rq);
+               return ret;
+       }
+#endif
+       return __bt(copperplate_renice_local_thread(thobj->tid, csa));
+}
+
+static int request_cancel(struct threadobj *thobj) /* thobj->lock held, dropped. */
+{
+       pthread_t tid = thobj->tid;
+#ifdef CONFIG_XENO_PSHARED
+       struct remote_request *rq;
+       int ret;
+
+       if (unlikely(!threadobj_local_p(thobj))) {
+               threadobj_unlock(thobj);
+               rq = xnmalloc(sizeof(*rq));
+               if (rq == NULL)
+                       return -ENOMEM;
+
+               rq->req = RMT_CANCEL;
+               rq->u.cancel.tid = tid;
+
+               ret = __bt(send_agent(thobj, rq));
+               if (ret)
+                       xnfree(rq);
+               return ret;
+       }
+#endif
+       threadobj_unlock(thobj);
+
+       /* We might race, glibc will check. */
+
+       pthread_cancel(tid);
+
+       return 0;
+}
+
 void *__threadobj_alloc(size_t tcb_struct_size,
                        size_t wait_union_size,
                        int thobj_offset)
@@ -1125,6 +1342,7 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
        thobj->tid = pthread_self();
        thobj->pid = copperplate_get_tid();
        thobj->errno_pointer = &errno;
+       threadobj_set_agent(thobj);
        backtrace_init_context(&thobj->btd, name);
        ret = threadobj_setup_corespec(thobj);
        if (ret) {
@@ -1154,9 +1372,18 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
        return 0;
 }
 
+/*
+ * Most traditional RTOSes guarantee that the task/thread delete
+ * operation is strictly synchronous, i.e. the deletion service
+ * returns to the caller only __after__ the deleted thread entered an
+ * innocuous state, i.e. dormant/dead.
+ *
+ * For this reason, we always wait until the canceled thread has
+ * finalized (see cancel_sync()), at the expense of a potential
+ * priority inversion affecting the caller of threadobj_cancel().
+ */
 static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
 {
-       pthread_t tid = thobj->tid;
        int oldstate, ret = 0;
        sem_t *sem;
 
@@ -1203,9 +1430,7 @@ static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
 
        threadobj_cancel_2_corespec(thobj);
 
-       threadobj_unlock(thobj);
-
-       pthread_cancel(tid);
+       request_cancel(thobj);
 
        if (sem) {
                do
@@ -1424,8 +1649,9 @@ void threadobj_pkg_init(void)
 {
        threadobj_irq_prio = __RT(sched_get_priority_max(SCHED_RT));
        threadobj_high_prio = threadobj_irq_prio - 1;
+       threadobj_agent_prio = threadobj_high_prio;
 
        pkg_init_corespec();
-
+       start_agent();
        main_overlay();
 }
diff --git a/lib/copperplate/timerobj.c b/lib/copperplate/timerobj.c
index b420986..51f6b41 100644
--- a/lib/copperplate/timerobj.c
+++ b/lib/copperplate/timerobj.c
@@ -157,7 +157,8 @@ static int timerobj_spawn_server(void)
        if (svthread)
                goto out;
 
-       cta.prio = threadobj_irq_prio;
+       cta.sched.policy = SCHED_RT;
+       cta.sched.param.sched_priority = threadobj_irq_prio;
        cta.prologue = server_prologue;
        cta.run = timerobj_server;
        cta.arg = NULL;
diff --git a/lib/psos/task.c b/lib/psos/task.c
index 25ab638..13d78a8 100644
--- a/lib/psos/task.c
+++ b/lib/psos/task.c
@@ -328,7 +328,8 @@ u_long t_create(const char *name, u_long prio,
        idata.priority = cprio;
        threadobj_init(&task->thobj, &idata);
 
-       cta.prio = cprio;
+       cta.sched.policy = SCHED_RT;
+       cta.sched.param.sched_priority = cprio;
        cta.prologue = task_prologue;
        cta.run = task_trampoline;
        cta.arg = task;
diff --git a/lib/vxworks/taskLib.c b/lib/vxworks/taskLib.c
index c47919d..b793c54 100644
--- a/lib/vxworks/taskLib.c
+++ b/lib/vxworks/taskLib.c
@@ -373,7 +373,8 @@ static STATUS __taskInit(struct wind_task *task,
 
        registry_init_file(&task->fsobj, &registry_ops, 0);
 
-       cta.prio = cprio;
+       cta.sched.policy = SCHED_RT;
+       cta.sched.param.sched_priority = cprio;
        cta.prologue = task_prologue;
        cta.run = task_trampoline;
        cta.arg = task;
diff --git a/lib/vxworks/testsuite/task-2.c b/lib/vxworks/testsuite/task-2.c
index 0316df8..17b2bed 100644
--- a/lib/vxworks/testsuite/task-2.c
+++ b/lib/vxworks/testsuite/task-2.c
@@ -6,6 +6,12 @@
 #include <vxworks/taskLib.h>
 #include <vxworks/semLib.h>
 
+static inline void safe_pause(void)
+{
+       for (;;)
+               pause();
+}
+
 static struct traceobj trobj;
 
 static int tseq[] = {
@@ -35,12 +41,10 @@ static void backgroundTask(long a1, long a2, long a3, long a4, long a5,
                count++;
 
        /*
-        * Force a pause so that any pending cancellation is taken
-        * regardless of whether async-cancel is enabled or not.
+        * Enter infinite pause so that any pending cancellation is
+        * taken regardless of whether async-cancel is enabled or not.
         */
-       pause();
-
-       traceobj_mark(&trobj, 3);
+       safe_pause();
 
        traceobj_exit(&trobj);
 }

