Module: xenomai-forge
Branch: master
Commit: 86dc62d1a6e70c48258bbf5f298c409c7704ce90
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=86dc62d1a6e70c48258bbf5f298c409c7704ce90

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun May  5 16:10:03 2013 +0200

copperplate/internal, lib/cobalt: allow specifying detach state of threads

---
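Not part of the commit -- a minimal plain-POSIX sketch of what the new
detachstate argument selects: the caller of copperplate_create_thread()
now decides whether a thread is created joinable or detached. The
spawn() and worker() helpers below are made up for illustration; in the
patch itself rt_task_create() maps T_JOINABLE onto
PTHREAD_CREATE_JOINABLE, while the pSOS, VxWorks and timer-server
threads are created PTHREAD_CREATE_DETACHED. Compile with -pthread.

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
	printf("worker running (%s)\n", (const char *)arg);
	return NULL;
}

/* Hypothetical helper: create a thread with the requested detach state. */
static int spawn(int detachstate, void *(*start)(void *), void *arg,
		 pthread_t *tid)
{
	pthread_attr_t attr;
	int ret;

	pthread_attr_init(&attr);
	/* PTHREAD_CREATE_JOINABLE or PTHREAD_CREATE_DETACHED */
	pthread_attr_setdetachstate(&attr, detachstate);
	ret = pthread_create(tid, &attr, start, arg);
	pthread_attr_destroy(&attr);

	return ret;
}

int main(void)
{
	pthread_t tid;

	/* Joinable: the creator may (and should) reap it. */
	if (spawn(PTHREAD_CREATE_JOINABLE, worker, "joinable", &tid) == 0)
		pthread_join(tid, NULL);

	/* Detached: its resources are released automatically on exit. */
	if (spawn(PTHREAD_CREATE_DETACHED, worker, "detached", &tid) == 0)
		pthread_exit(NULL);	/* let the detached thread finish */

	return 0;
}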

 include/copperplate/threadobj.h |    4 +-
 lib/alchemy/task.c              |   13 ++-
 lib/cobalt/thread.c             |    5 +-
 lib/copperplate/internal.c      |    6 +-
 lib/copperplate/internal.h      |    1 +
 lib/copperplate/threadobj.c     |  168 +++++++++++++++++++++++----------------
 lib/copperplate/timerobj.c      |    1 +
 lib/psos/task.c                 |    6 +-
 lib/vxworks/taskLib.c           |    3 +-
 9 files changed, 127 insertions(+), 80 deletions(-)

diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index 4a53c44..7d8cb9f 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -21,6 +21,7 @@
 
 #include <time.h>
 #include <sched.h>
+#include <semaphore.h>
 #include <pthread.h>
 #include <copperplate/list.h>
 #include <copperplate/lock.h>
@@ -175,6 +176,7 @@ struct threadobj {
        struct timespec tslice;
        pthread_cond_t barrier;
        struct traceobj *tracer;
+       sem_t *cancel_sem;
        struct sysgroup_memspec memspec;
        struct backtrace_data btd;
 };
@@ -274,7 +276,7 @@ void threadobj_init(struct threadobj *thobj,
 
 void threadobj_start(struct threadobj *thobj);
 
-void threadobj_shadow(void);
+void threadobj_shadow(struct threadobj *thobj);
 
 int threadobj_prologue(struct threadobj *thobj,
                       const char *name);
diff --git a/lib/alchemy/task.c b/lib/alchemy/task.c
index bb37ef0..d6b3653 100644
--- a/lib/alchemy/task.c
+++ b/lib/alchemy/task.c
@@ -349,8 +349,8 @@ int rt_task_create(RT_TASK *task, const char *name,
                   int stksize, int prio, int mode)
 {
        struct alchemy_task *tcb;
+       int detachstate, ret;
        struct service svc;
-       int ret;
 
        COPPERPLATE_PROTECT(svc);
 
@@ -361,8 +361,12 @@ int rt_task_create(RT_TASK *task, const char *name,
        /* We want this to be set prior to spawning the thread. */
        tcb->self = *task;
 
+       detachstate = mode & T_JOINABLE ?
+               PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED;
+
        ret = __bt(copperplate_create_thread(prio, task_trampoline, tcb,
-                                            stksize, &tcb->thobj.tid));
+                                            stksize, detachstate,
+                                            &tcb->thobj.tid));
        if (ret)
                delete_tcb(tcb);
 out:
@@ -648,9 +652,8 @@ int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
        if (ret)
                goto out;
 
-       threadobj_lock(&tcb->thobj);
-       threadobj_shadow();     /* We won't wait in prologue. */
-       threadobj_unlock(&tcb->thobj);
+       threadobj_shadow(&tcb->thobj);  /* We won't wait in prologue. */
+
        ret = task_prologue(tcb);
        if (ret) {
                delete_tcb(tcb);
diff --git a/lib/cobalt/thread.c b/lib/cobalt/thread.c
index c21883c..6da99d3 100644
--- a/lib/cobalt/thread.c
+++ b/lib/cobalt/thread.c
@@ -246,10 +246,10 @@ int pthread_create_ex(pthread_t *tid,
                      const pthread_attr_ex_t *attr_ex,
                      void *(*start) (void *), void *arg)
 {
+       int inherit, detachstate, ret;
        struct pthread_iargs iargs;
        struct sched_param param;
        pthread_attr_t attr;
-       int inherit, ret;
        pthread_t ltid;
        size_t stksz;
 
@@ -284,6 +284,7 @@ int pthread_create_ex(pthread_t *tid,
                 */
                pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED);
 
+       pthread_attr_getdetachstate(&attr, &detachstate);
        pthread_attr_getstacksize(&attr, &stksz);
        pthread_attr_setstacksize(&attr, xeno_stacksize(stksz));
 
@@ -306,6 +307,8 @@ int pthread_create_ex(pthread_t *tid,
        ret = iargs.ret;
        if (ret == 0)
                *tid = ltid;
+       else if (detachstate == PTHREAD_CREATE_JOINABLE)
+               pthread_join(ltid, NULL);
 fail:
        __STD(sem_destroy(&iargs.sync));
 
diff --git a/lib/copperplate/internal.c b/lib/copperplate/internal.c
index 1265f3d..0661973 100644
--- a/lib/copperplate/internal.c
+++ b/lib/copperplate/internal.c
@@ -72,6 +72,7 @@ int copperplate_probe_node(unsigned int id)
 int copperplate_create_thread(int prio,
                              void *(*start)(void *arg), void *arg,
                              size_t stacksize,
+                             int detachstate,
                              pthread_t *tid)
 {
        struct sched_param_ex param_ex;
@@ -89,7 +90,7 @@ int copperplate_create_thread(int prio,
        pthread_attr_setschedparam_ex(&attr_ex, &param_ex);
        pthread_attr_setstacksize_ex(&attr_ex, stacksize);
        pthread_attr_setscope_ex(&attr_ex, thread_scope_attribute);
-       pthread_attr_setdetachstate_ex(&attr_ex, PTHREAD_CREATE_JOINABLE);
+       pthread_attr_setdetachstate_ex(&attr_ex, detachstate);
        ret = __bt(-pthread_create_ex(tid, &attr_ex, start, arg));
        pthread_attr_destroy_ex(&attr_ex);
 
@@ -117,6 +118,7 @@ int copperplate_probe_node(unsigned int id)
 int copperplate_create_thread(int prio,
                              void *(*start)(void *arg), void *arg,
                              size_t stacksize,
+                             int detachstate,
                              pthread_t *tid)
 {
        struct sched_param param;
@@ -134,7 +136,7 @@ int copperplate_create_thread(int prio,
        pthread_attr_setschedparam(&attr, &param);
        pthread_attr_setstacksize(&attr, stacksize);
        pthread_attr_setscope(&attr, thread_scope_attribute);
-       pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+       pthread_attr_setdetachstate(&attr, detachstate);
        ret = __bt(-pthread_create(tid, &attr, start, arg));
        pthread_attr_destroy(&attr);
 
diff --git a/lib/copperplate/internal.h b/lib/copperplate/internal.h
index 15a0321..e1fa41c 100644
--- a/lib/copperplate/internal.h
+++ b/lib/copperplate/internal.h
@@ -103,6 +103,7 @@ int copperplate_probe_node(unsigned int id);
 int copperplate_create_thread(int prio,
                              void *(*start)(void *arg), void *arg,
                              size_t stacksize,
+                             int detachstate,
                              pthread_t *tid);
 
 int copperplate_renice_thread(pthread_t tid, int prio);
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index 2e057b5..fb05a13 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -44,7 +44,7 @@ union copperplate_wait_union {
        struct eventobj_wait_struct eventobj_wait;
 };
 
-static void threadobj_finalize(void *p);
+static void finalize_thread(void *p);
 
 /*
  * NOTE on cancellation handling: Most traditional RTOSes guarantee
@@ -52,20 +52,17 @@ static void threadobj_finalize(void *p);
  * i.e. the deletion service returns to the caller only __after__ the
  * deleted thread entered an innocuous state, i.e. dormant/dead.
  *
- * For this reason, we always pthread_join() cancelled threads
- * internally (see threadobj_cancel(), which might lead to a priority
- * inversion. This is more acceptable than not guaranteeing
- * synchronous behavior, which is mandatory to make sure that our
- * thread finalizer has run for the cancelled thread, prior to
- * returning from threadobj_cancel().
+ * For this reason, we always wait for the cancelled threads
+ * internally (see threadobj_cancel()), which might lead to a priority
+ * inversion. This is the price for guaranteeing that
+ * threadobj_cancel() returns only after the cancelled thread
+ * finalizer has run.
  */
 
 int threadobj_high_prio;
 
 int threadobj_irq_prio;
 
-static void cancel_sync(struct threadobj *thobj);
-
 #ifdef HAVE_TLS
 
 __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
@@ -81,7 +78,7 @@ pthread_key_t threadobj_tskey;
 
 static inline void threadobj_init_key(void)
 {
-       if (pthread_key_create(&threadobj_tskey, threadobj_finalize))
+       if (pthread_key_create(&threadobj_tskey, finalize_thread))
                panic("failed to allocate TSD key");
 }
 
@@ -115,29 +112,8 @@ static inline void threadobj_run_corespec(struct threadobj *thobj)
        __cobalt_thread_harden();
 }
 
-/* thobj->lock held on entry, released on return */
-int threadobj_cancel(struct threadobj *thobj)
+static inline void threadobj_cancel_corespec(struct threadobj *thobj) /* thobj->lock held */
 {
-       pthread_t tid;
-
-       __threadobj_check_locked(thobj);
-
-       /*
-        * This basically makes the thread enter a zombie state, since
-        * it won't be reachable by anyone after its magic has been
-        * trashed.
-        */
-       thobj->magic = ~thobj->magic;
-
-       if (thobj == threadobj_current()) {
-               threadobj_unlock(thobj);
-               pthread_exit(NULL);
-       }
-
-       tid = thobj->tid;
-       cancel_sync(thobj);
-       threadobj_unlock(thobj);
-
        /*
         * Send a SIGDEMT signal to demote the target thread, to make
         * sure pthread_cancel() will be effective asap.
@@ -161,10 +137,7 @@ int threadobj_cancel(struct threadobj *thobj)
         * than the caller of threadobj_cancel()), but will receive
         * the following cancellation request asap.
         */
-       __RT(pthread_kill(tid, SIGDEMT));
-       pthread_cancel(tid);
-
-       return __bt(-pthread_join(tid, NULL));
+       __RT(pthread_kill(thobj->tid, SIGDEMT));
 }
 
 int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
@@ -495,32 +468,8 @@ static inline void threadobj_run_corespec(struct threadobj *thobj)
 {
 }
 
-/* thobj->lock held on entry, released on return */
-int threadobj_cancel(struct threadobj *thobj)
+static inline void threadobj_cancel_corespec(struct threadobj *thobj)
 {
-       pthread_t tid;
-
-       __threadobj_check_locked(thobj);
-
-       /*
-        * This basically makes the thread enter a zombie state, since
-        * it won't be reachable by anyone after its magic has been
-        * trashed.
-        */
-       thobj->magic = ~thobj->magic;
-
-       if (thobj == threadobj_current()) {
-               threadobj_unlock(thobj);
-               pthread_exit(NULL);
-       }
-
-       cancel_sync(thobj);
-       tid = thobj->tid;
-       threadobj_unlock(thobj);
-
-       pthread_cancel(tid);
-
-       return __bt(-pthread_join(tid, NULL));
 }
 
 int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
@@ -838,6 +787,7 @@ void threadobj_init(struct threadobj *thobj,
        holder_init(&thobj->wait_link);
        thobj->cnode = __node_id;
        thobj->pid = 0;
+       thobj->cancel_sem = NULL;
 
        /*
         * CAUTION: wait_union and wait_size have been set in
@@ -919,12 +869,12 @@ void threadobj_start(struct threadobj *thobj)     /* thobj->lock held. */
        start_sync(thobj, __THREAD_S_ACTIVE);
 }
 
-void threadobj_shadow(void)
+void threadobj_shadow(struct threadobj *thobj)
 {
-       struct threadobj *current = threadobj_current();
-
-       __threadobj_check_locked(current);
-       current->status |= __THREAD_S_STARTED|__THREAD_S_ACTIVE;
+       assert(thobj != threadobj_current());
+       threadobj_lock(thobj);
+       thobj->status |= __THREAD_S_STARTED|__THREAD_S_ACTIVE;
+       threadobj_unlock(thobj);
 }
 
 void threadobj_wait_start(void) /* current->lock free. */
@@ -975,7 +925,7 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
                 */
                assert(current->magic == 0);
                sysgroup_remove(thread, &current->memspec);
-               threadobj_finalize(current);
+               finalize_thread(current);
                threadobj_free(current);
        } else
                pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
@@ -1016,10 +966,33 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
 
 static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
 {
-       int oldstate;
+       pthread_t tid = thobj->tid;
+       int oldstate, ret = 0;
+       sem_t *sem;
 
        __threadobj_check_locked(thobj);
 
+       /*
+        * We have to allocate the cancel sync sema4 in the main heap
+        * dynamically, so that it always live in valid memory when we
+        * wait on it and the cancelled thread posts it. This has to
+        * be true regardless of whether --enable-pshared is in
+        * effect, or thobj becomes stale after the finalizer has run
+        * (we cannot host this sema4 in thobj for this reason).
+        */
+       sem = xnmalloc(sizeof(*sem));
+       if (sem == NULL)
+               ret = -ENOMEM;
+       else
+               __STD(sem_init(sem, sem_scope_attribute, 0));
+
+       thobj->cancel_sem = sem;
+
+       /*
+        * If the thread to delete is warming up, wait until it
+        * reaches the start barrier before sending the cancellation
+        * signal.
+        */
        while (thobj->status & __THREAD_S_WARMUP) {
                oldstate = thobj->cancel_state;
                __threadobj_tag_unlocked(thobj);
@@ -1028,10 +1001,36 @@ static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
                thobj->cancel_state = oldstate;
        }
 
+       /*
+        * Ok, now we shall raise the abort flag if the thread was not
+        * started yet, to kick it out of the barrier wait. We are
+        * covered by the target thread lock we hold, so we can't race
+        * with threadobj_start().
+        */
        if ((thobj->status & __THREAD_S_STARTED) == 0) {
                thobj->status |= __THREAD_S_ABORTED;
                __RT(pthread_cond_signal(&thobj->barrier));
        }
+
+       threadobj_cancel_corespec(thobj);
+
+       threadobj_unlock(thobj);
+
+       pthread_cancel(tid);
+
+       /*
+        * Not being able to sync up with the cancelled thread is not
+        * considered fatal, despite it's likely bad news for sure, so
+        * that we can keep on cleaning up the mess, hoping for the
+        * best.
+        */
+       if (sem == NULL || __STD(sem_wait(sem)))
+               warning("cannot sync with thread finalizer, %s",
+                       symerror(sem ? -errno : ret));
+       if (sem) {
+               __STD(sem_destroy(sem));
+               xnfree(sem);
+       }
 }
 
 int threadobj_sleep(struct timespec *ts)
@@ -1051,7 +1050,29 @@ int threadobj_sleep(struct timespec *ts)
        return ret;
 }
 
-static void threadobj_finalize(void *p) /* thobj->lock free */
+/* thobj->lock held on entry, released on return */
+int threadobj_cancel(struct threadobj *thobj)
+{
+       __threadobj_check_locked(thobj);
+
+       /*
+        * This basically makes the thread enter a zombie state, since
+        * it won't be reachable by anyone after its magic has been
+        * trashed.
+        */
+       thobj->magic = ~thobj->magic;
+
+       if (thobj == threadobj_current()) {
+               threadobj_unlock(thobj);
+               pthread_exit(NULL);
+       }
+
+       cancel_sync(thobj);
+
+       return 0;
+}
+
+static void finalize_thread(void *p) /* thobj->lock free */
 {
        struct threadobj *thobj = p;
 
@@ -1073,6 +1094,15 @@ static void threadobj_finalize(void *p) /* thobj->lock free */
        backtrace_dump(&thobj->btd);
        backtrace_destroy_context(&thobj->btd);
 
+       if (thobj->cancel_sem)
+               /* Release the killer from threadobj_cancel(). */
+               sem_post(thobj->cancel_sem);
+
+       /*
+        * Careful: once the user-defined finalizer has run, thobj may
+        * be laid on stale memory. Do not refer to its contents.
+        */
+
        if (thobj->finalizer)
                thobj->finalizer(thobj);
 
diff --git a/lib/copperplate/timerobj.c b/lib/copperplate/timerobj.c
index 9277f50..91b483d 100644
--- a/lib/copperplate/timerobj.c
+++ b/lib/copperplate/timerobj.c
@@ -225,6 +225,7 @@ static int timerobj_spawn_server(void)
        ret = __bt(copperplate_create_thread(threadobj_irq_prio,
                                             timerobj_server, NULL,
                                             PTHREAD_STACK_MIN * 16,
+                                            PTHREAD_CREATE_DETACHED,
                                             &svthread));
 
        /* Wait for timer server to initialize. */
diff --git a/lib/psos/task.c b/lib/psos/task.c
index c5b79e2..c3718f8 100644
--- a/lib/psos/task.c
+++ b/lib/psos/task.c
@@ -312,7 +312,8 @@ u_long t_create(const char *name, u_long prio,
        threadobj_init(&task->thobj, &idata);
 
        ret = __bt(copperplate_create_thread(cprio, task_trampoline, task,
-                                            ustack, &task->thobj.tid));
+                                            ustack, PTHREAD_CREATE_DETACHED,
+                                            &task->thobj.tid));
        if (ret) {
                cluster_delobj(&psos_task_table, &task->cobj);
                threadobj_destroy(&task->thobj);
@@ -433,13 +434,16 @@ u_long t_setpri(u_long tid, u_long newprio, u_long *oldprio_r)
 u_long t_delete(u_long tid)
 {
        struct psos_task *task;
+       struct service svc;
        int ret;
 
        task = get_psos_task_or_self(tid, &ret);
        if (task == NULL)
                return ret;
 
+       COPPERPLATE_PROTECT(svc);
        ret = threadobj_cancel(&task->thobj);
+       COPPERPLATE_UNPROTECT(svc);
        if (ret)
                return ERR_OBJDEL;
 
diff --git a/lib/vxworks/taskLib.c b/lib/vxworks/taskLib.c
index b4c2e1f..c891dd5 100644
--- a/lib/vxworks/taskLib.c
+++ b/lib/vxworks/taskLib.c
@@ -369,7 +369,8 @@ static STATUS __taskInit(struct wind_task *task,
        registry_init_file(&task->fsobj, &registry_ops);
 
        ret = __bt(copperplate_create_thread(cprio, task_trampoline, task,
-                                            stacksize, &task->thobj.tid));
+                                            stacksize, PTHREAD_CREATE_DETACHED,
+                                            &task->thobj.tid));
        if (ret) {
                registry_destroy_file(&task->fsobj);
                cluster_delobj(&wind_task_table, &task->cobj);
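Not part of the commit -- a self-contained sketch of the
cancellation/finalization handshake that cancel_sync() and
finalize_thread() implement above: since most skin threads are now
created detached, the canceller cannot pthread_join() them, so it
blocks on a semaphore which the dying thread posts on its way out.
The worker() and finalize() names are made up, and a
pthread_cleanup_push() handler stands in for the TSD destructor and
shared-heap semaphore the real code relies on. Compile with -pthread.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t cancel_sem;

/* Runs in the cancelled thread while it unwinds. */
static void finalize(void *arg)
{
	printf("finalizer ran for %s\n", (const char *)arg);
	sem_post(&cancel_sem);		/* release the canceller */
}

static void *worker(void *arg)
{
	pthread_cleanup_push(finalize, arg);
	for (;;)
		pause();		/* cancellation point */
	pthread_cleanup_pop(0);
	return NULL;
}

int main(void)
{
	pthread_attr_t attr;
	pthread_t tid;

	sem_init(&cancel_sem, 0, 0);

	/* Detached thread: joining it is not an option... */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	pthread_create(&tid, &attr, worker, "worker");
	pthread_attr_destroy(&attr);

	/* Crude: give the worker time to install its cleanup handler. */
	sleep(1);

	/* ...so synchronize on the semaphore its cleanup path posts. */
	pthread_cancel(tid);
	sem_wait(&cancel_sem);
	printf("worker finalizer has run, safe to proceed\n");

	sem_destroy(&cancel_sem);
	return 0;
}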

