Module: xenomai-forge
Branch: master
Commit: d802fc7c6feb61aa86eab696211498ddc2907177
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=d802fc7c6feb61aa86eab696211498ddc2907177

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Apr 24 18:26:00 2013 +0200

copperplate/threadobj: prepare for exporting thread state via sysregd

In order to export copperplate threads via the sysregd interface, we
need more information to be returned by threadobj_stat(), such as the
current timeout value and CPU number, for both cores.

In addition, more run state information is exposed to determine
whether a thread is currently blocked, with or w/o a timeout.

Finally, the costly thread suspend/resume and syncobj wait hooks are
removed, since we can retrieve a thread state on demand, instead of
tracking state changes dynamically.

---

 include/cobalt/pthread.h        |    2 +
 include/copperplate/debug.h     |    2 +-
 include/copperplate/syncobj.h   |    4 -
 include/copperplate/threadobj.h |  125 +++++++++++++---------
 kernel/cobalt/thread.c          |  228 ++++++++++++++++++++------------------
 kernel/cobalt/thread.h          |   11 +--
 lib/alchemy/task.c              |    4 +-
 lib/cobalt/internal.c           |    4 +-
 lib/cobalt/internal.h           |    2 +-
 lib/copperplate/syncobj.c       |   35 +++---
 lib/copperplate/threadobj.c     |  166 ++++++++++++++++-------------
 lib/psos/task.c                 |    2 -
 lib/vxworks/taskInfo.c          |   10 +-
 lib/vxworks/taskLib.c           |   78 +++++---------
 lib/vxworks/taskLib.h           |    2 +
 15 files changed, 346 insertions(+), 329 deletions(-)

diff --git a/include/cobalt/pthread.h b/include/cobalt/pthread.h
index 440056c..802f778 100644
--- a/include/cobalt/pthread.h
+++ b/include/cobalt/pthread.h
@@ -158,12 +158,14 @@ struct cobalt_condattr {
 struct cobalt_cond;
 
 struct cobalt_threadstat {
+       int cpu;
        unsigned long status;
        unsigned long long xtime;
        unsigned long msw;
        unsigned long csw;
        unsigned long xsc;
        unsigned long pf;
+       unsigned long long timeout;
 };
 
 struct cobalt_monitor;
diff --git a/include/copperplate/debug.h b/include/copperplate/debug.h
index c2942b4..ffbeb02 100644
--- a/include/copperplate/debug.h
+++ b/include/copperplate/debug.h
@@ -57,7 +57,7 @@ struct backtrace_data {
        do {                                                            \
                struct threadobj *__thobj = threadobj_current();        \
                if (__thobj == NULL ||                                  \
-                   (__thobj->status & THREADOBJ_DEBUG) != 0)           \
+                   (__thobj->status & __THREAD_S_DEBUG) != 0)          \
                        __debug(__thobj, __fmt, ##__args);              \
        } while (0)
 
diff --git a/include/copperplate/syncobj.h b/include/copperplate/syncobj.h
index 0fe0d85..89e057d 100644
--- a/include/copperplate/syncobj.h
+++ b/include/copperplate/syncobj.h
@@ -33,10 +33,6 @@
 #define SYNCOBJ_SIGNALED       0x2
 #define SYNCOBJ_DRAINWAIT      0x4
 
-/* threadobj->wait_hook(status) */
-#define SYNCOBJ_BLOCK  0x1
-#define SYNCOBJ_RESUME 0x2
-
 #define SYNCOBJ_MAGIC  0xf9f99f9f
 
 struct threadobj;
diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index a35f6ab..4a53c44 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -38,6 +38,8 @@ struct threadobj_corespec {
 };
 
 struct threadobj_stat {
+       /** Current CPU for thread. */
+       int cpu;
        /** Cobalt thread status bits. */
        unsigned long status;
        /** Execution time in primary mode (ns). */
@@ -50,10 +52,22 @@ struct threadobj_stat {
        unsigned long xsc;
        /** Number of page faults. */
        unsigned long pf;
+       /** Current timeout value (ns). */
+       ticks_t timeout;
 };
 
 #define SCHED_RT  SCHED_COBALT
 
+static inline
+void threadobj_save_timeout(struct threadobj_corespec *corespec,
+                           const struct timespec *timeout)
+{
+       /*
+        * We retrieve this information from the nucleus directly via
+        * __cobalt_thread_stat().
+        */
+}
+
 #else  /* CONFIG_XENO_MERCURY */
 
 #include <sys/time.h>
@@ -67,52 +81,68 @@ struct threadobj_corespec {
        struct notifier notifier;
        struct timespec wakeup;
        ticks_t period;
+       /** Timeout reported by sysregd. */
+       struct timespec timeout;
 };
 
 struct threadobj_stat {
+       /** Current CPU for thread. */
+       int cpu;
        /** Mercury thread status bits. */
        unsigned long status;
+       /** Current timeout value (ns). */
+       ticks_t timeout;
 };
 
 #define SCHED_RT  SCHED_FIFO
 
+static inline
+void threadobj_save_timeout(struct threadobj_corespec *corespec,
+                           const struct timespec *timeout)
+{
+       if (timeout)
+               corespec->timeout = *timeout;
+}
+
 #endif /* CONFIG_XENO_MERCURY */
 
-/* threadobj->suspend_hook(event) */
-#define THREADOBJ_SUSPEND      0x1     /* Just suspended. */
-#define THREADOBJ_RESUME       0x2     /* About to resume. */
+/*
+ * threadobj->status, updated with ->lock held.
+ */
+#define __THREAD_S_NOPREEMPT   (1 << 0)        /* Holds the scheduler lock. */
+#define __THREAD_S_RR          (1 << 1)        /* Undergoes round-robin. */
+#define __THREAD_S_STARTED     (1 << 2)        /* threadobj_start() called. */
+#define __THREAD_S_WARMUP      (1 << 3)        /* threadobj_prologue() not called yet. */
+#define __THREAD_S_ABORTED     (1 << 4)        /* Cancelled before start. */
+#define __THREAD_S_LOCKED      (1 << 5)        /* threadobj_lock() granted (debug only). */
+#define __THREAD_S_ACTIVE      (1 << 6)        /* Running user code. */
+#define __THREAD_S_SUSPENDED   (1 << 7)        /* Suspended via threadobj_suspend(). */
+#define __THREAD_S_DEBUG       (1 << 15)       /* Debug mode enabled. */
+/*
+ * threadobj->run_state, locklessly updated by "current", merged
+ * with ->status bits by threadobj_get_status().
+ */
+#define __THREAD_S_RUNNING     0
+#define __THREAD_S_DORMANT     (1 << 8)
+#define __THREAD_S_WAIT                (1 << 9)
+#define __THREAD_S_TIMEDWAIT   (1 << 10)
+#define __THREAD_S_DELAYED     (1 << 11)
 
-/* threadobj->status */
-#define THREADOBJ_SCHEDLOCK    0x1     /* Holds the scheduler lock. */
-#define THREADOBJ_ROUNDROBIN   0x2     /* Undergoes round-robin. */
-#define THREADOBJ_STARTED      0x4     /* threadobj_start() called. */
-#define THREADOBJ_WARMUP       0x8     /* threadobj_prologue() not called yet. */
-#define THREADOBJ_ABORTED      0x10    /* Cancelled before start. */
-#define THREADOBJ_LOCKED       0x20    /* threadobj_lock() granted (debug only). */
-#define THREADOBJ_RUNNING      0x40    /* Running user code. */
-#define THREADOBJ_DEBUG                0x8000  /* Debug mode enabled. */
+/* threadobj mode bits */
+#define __THREAD_M_LOCK                (1 << 0) /* Toggle scheduler lock. */
+#define __THREAD_M_WARNSW      (1 << 1) /* Toggle switch warning bit. */
+#define __THREAD_M_CONFORMING  (1 << 2) /* Switch to conforming mode. */
+#define __THREAD_M_SPARE0      (1 << 16)
+#define __THREAD_M_SPARE1      (1 << 17)
+#define __THREAD_M_SPARE2      (1 << 18)
+#define __THREAD_M_SPARE3      (1 << 19)
+#define __THREAD_M_SPARE4      (1 << 20)
+#define __THREAD_M_SPARE5      (1 << 21)
+#define __THREAD_M_SPARE6      (1 << 22)
+#define __THREAD_M_SPARE7      (1 << 23)
 
 #define THREADOBJ_IRQCONTEXT    ((struct threadobj *)-2UL)
 
-/* threadobj mode bits */
-#define __THREAD_M_LOCK                0x80000000 /* Toggle scheduler lock. */
-#define __THREAD_M_WARNSW      0x40000000 /* Toggle switch warning bit. */
-#define __THREAD_M_CONFORMING  0x20000000 /* Switch to conforming mode. */
-#define __THREAD_M_SPARESTART  0
-#define __THREAD_M_SPARECOUNT  12
-#define __THREAD_M_SPARE0      0x00000001
-#define __THREAD_M_SPARE1      0x00000002
-#define __THREAD_M_SPARE2      0x00000004
-#define __THREAD_M_SPARE3      0x00000008
-#define __THREAD_M_SPARE4      0x00000010
-#define __THREAD_M_SPARE5      0x00000020
-#define __THREAD_M_SPARE6      0x00000040
-#define __THREAD_M_SPARE7      0x00000080
-#define __THREAD_M_SPARE8      0x00000100
-#define __THREAD_M_SPARE9      0x00000200
-#define __THREAD_M_SPARE10     0x00000400
-#define __THREAD_M_SPARE11     0x00000800
-
 struct traceobj;
 struct syncobj;
 
@@ -124,6 +154,7 @@ struct threadobj {
        int schedlock_depth;
        int cancel_state;
        int status;
+       int run_state;
        int policy;
        int priority;
        pid_t cnode;
@@ -131,14 +162,12 @@ struct threadobj {
        char name[32];
 
        void (*finalizer)(struct threadobj *thobj);
-       void (*suspend_hook)(struct threadobj *thobj, int status);
        int *errno_pointer;
        /* Those members belong exclusively to the syncobj code. */
        struct syncobj *wait_sobj;
        struct holder wait_link;
        int wait_status;
        int wait_prio;
-       void (*wait_hook)(struct syncobj *sobj, int status);
        void *wait_union;
        size_t wait_size;
 
@@ -155,8 +184,6 @@ struct threadobj_init_data {
        cpu_set_t affinity;
        int priority;
        void (*finalizer)(struct threadobj *thobj);
-       void (*wait_hook)(struct syncobj *sobj, int status);
-       void (*suspend_hook)(struct threadobj *thobj, int status);
 };
 
 extern int threadobj_high_prio;
@@ -199,18 +226,18 @@ static inline struct threadobj *threadobj_current(void)
 
 static inline void __threadobj_tag_locked(struct threadobj *thobj)
 {
-       thobj->status |= THREADOBJ_LOCKED;
+       thobj->status |= __THREAD_S_LOCKED;
 }
 
 static inline void __threadobj_tag_unlocked(struct threadobj *thobj)
 {
-       assert(thobj->status & THREADOBJ_LOCKED);
-       thobj->status &= ~THREADOBJ_LOCKED;
+       assert(thobj->status & __THREAD_S_LOCKED);
+       thobj->status &= ~__THREAD_S_LOCKED;
 }
 
 static inline void __threadobj_check_locked(struct threadobj *thobj)
 {
-       assert(thobj->status & THREADOBJ_LOCKED);
+       assert(thobj->status & __THREAD_S_LOCKED);
 }
 
 #else /* !__XENO_DEBUG__ */
@@ -247,7 +274,7 @@ void threadobj_init(struct threadobj *thobj,
 
 void threadobj_start(struct threadobj *thobj);
 
-void threadobj_shadow(struct threadobj *thobj);
+void threadobj_shadow(void);
 
 int threadobj_prologue(struct threadobj *thobj,
                       const char *name);
@@ -287,6 +314,8 @@ void threadobj_spin(ticks_t ns);
 int threadobj_stat(struct threadobj *thobj,
                   struct threadobj_stat *stat);
 
+int threadobj_sleep(struct timespec *ts);
+
 #ifdef CONFIG_XENO_PSHARED
 
 static inline int threadobj_local_p(struct threadobj *thobj)
@@ -379,15 +408,6 @@ static inline void threadobj_yield(void)
        __RT(sched_yield());
 }
 
-static inline int threadobj_sleep(struct timespec *ts)
-{
-       /*
-        * XXX: guaranteed to return -EINTR upon threadobj_unblock()
-        * with both Cobalt and Mercury cores.
-        */
-       return -__RT(clock_nanosleep(CLOCK_COPPERPLATE, TIMER_ABSTIME, ts, NULL));
-}
-
 static inline unsigned int threadobj_get_magic(struct threadobj *thobj)
 {
        return thobj->magic;
@@ -406,7 +426,7 @@ static inline int threadobj_get_lockdepth(struct threadobj *thobj)
 
 static inline int threadobj_get_status(struct threadobj *thobj)
 {
-       return thobj->status;
+       return thobj->status|thobj->run_state;
 }
 
 static inline int threadobj_get_errno(struct threadobj *thobj)
@@ -434,4 +454,9 @@ static inline const char *threadobj_get_name(struct threadobj *thobj)
        return thobj->name;
 }
 
+static inline pid_t threadobj_get_pid(struct threadobj *thobj)
+{
+       return thobj->pid;
+}
+
 #endif /* _COPPERPLATE_THREADOBJ_H */
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index d4e81c0..f5360d5 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -60,46 +60,55 @@ static struct xnthread_operations cobalt_thread_ops = {
 
 #define PTHREAD_HSLOTS (1 << 8)        /* Must be a power of 2 */
 
-struct tid_hash {
-       pid_t tid;
-       struct tid_hash *next;
+struct cobalt_hash {
+       pthread_t pthread;
+       pid_t pid;
+       struct cobalt_hkey hkey;
+       struct cobalt_hash *next;
+};
+
+struct pid_hash {
+       pid_t pid;
+       pthread_t pthread;
+       struct pid_hash *next;
 };
 
 static struct cobalt_hash *pthread_table[PTHREAD_HSLOTS];
 
-static struct tid_hash *tid_table[PTHREAD_HSLOTS];
+static struct pid_hash *pid_table[PTHREAD_HSLOTS];
 
 static inline struct cobalt_hash *
-cobalt_thread_hash(const struct cobalt_hkey *hkey, pthread_t k_tid, pid_t h_tid)
+cobalt_thread_hash(const struct cobalt_hkey *hkey, pthread_t pthread, pid_t pid)
 {
        struct cobalt_hash **pthead, *ptslot;
-       struct tid_hash **tidhead, *tidslot;
+       struct pid_hash **pidhead, *pidslot;
        u32 hash;
        void *p;
        spl_t s;
 
-       p = xnmalloc(sizeof(*ptslot) + sizeof(*tidslot));
+       p = xnmalloc(sizeof(*ptslot) + sizeof(*pidslot));
        if (p == NULL)
                return NULL;
 
        ptslot = p;
        ptslot->hkey = *hkey;
-       ptslot->k_tid = k_tid;
-       ptslot->h_tid = h_tid;
+       ptslot->pthread = pthread;
+       ptslot->pid = pid;
        hash = jhash2((u32 *)&ptslot->hkey,
                      sizeof(ptslot->hkey) / sizeof(u32), 0);
        pthead = &pthread_table[hash & (PTHREAD_HSLOTS - 1)];
 
-       tidslot = p + sizeof(*ptslot);
-       tidslot->tid = h_tid;
-       hash = jhash2((u32 *)&h_tid, sizeof(h_tid) / sizeof(u32), 0);
-       tidhead = &tid_table[hash & (PTHREAD_HSLOTS - 1)];
+       pidslot = p + sizeof(*ptslot);
+       pidslot->pid = pid;
+       pidslot->pthread = pthread;
+       hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+       pidhead = &pid_table[hash & (PTHREAD_HSLOTS - 1)];
 
        xnlock_get_irqsave(&nklock, s);
        ptslot->next = *pthead;
        *pthead = ptslot;
-       tidslot->next = *tidhead;
-       *tidhead = tidslot;
+       pidslot->next = *pidhead;
+       *pidhead = pidslot;
        xnlock_put_irqrestore(&nklock, s);
 
        return ptslot;
@@ -108,8 +117,8 @@ cobalt_thread_hash(const struct cobalt_hkey *hkey, pthread_t k_tid, pid_t h_tid)
 static inline void cobalt_thread_unhash(const struct cobalt_hkey *hkey)
 {
        struct cobalt_hash **pttail, *ptslot;
-       struct tid_hash **tidtail, *tidslot;
-       pid_t h_tid;
+       struct pid_hash **pidtail, *pidslot;
+       pid_t pid;
        u32 hash;
        spl_t s;
 
@@ -132,28 +141,28 @@ static inline void cobalt_thread_unhash(const struct cobalt_hkey *hkey)
        }
 
        *pttail = ptslot->next;
-       h_tid = ptslot->h_tid;
-       hash = jhash2((u32 *)&h_tid, sizeof(h_tid) / sizeof(u32), 0);
-       tidtail = &tid_table[hash & (PTHREAD_HSLOTS - 1)];
-       tidslot = *tidtail;
-       while (tidslot && tidslot->tid != h_tid) {
-               tidtail = &tidslot->next;
-               tidslot = *tidtail;
+       pid = ptslot->pid;
+       hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+       pidtail = &pid_table[hash & (PTHREAD_HSLOTS - 1)];
+       pidslot = *pidtail;
+       while (pidslot && pidslot->pid != pid) {
+               pidtail = &pidslot->next;
+               pidslot = *pidtail;
        }
-       /* tidslot must be found here. */
-       XENO_BUGON(POSIX, !(tidslot && tidtail));
-       *tidtail = tidslot->next;
+       /* pidslot must be found here. */
+       XENO_BUGON(POSIX, !(pidslot && pidtail));
+       *pidtail = pidslot->next;
 
        xnlock_put_irqrestore(&nklock, s);
 
        xnfree(ptslot);
-       xnfree(tidslot);
+       xnfree(pidslot);
 }
 
-pthread_t cobalt_thread_find(const struct cobalt_hkey *hkey)
+static pthread_t thread_find(const struct cobalt_hkey *hkey)
 {
        struct cobalt_hash *ptslot;
-       pthread_t k_tid;
+       pthread_t pthread;
        u32 hash;
        spl_t s;
 
@@ -167,11 +176,11 @@ pthread_t cobalt_thread_find(const struct cobalt_hkey *hkey)
               (ptslot->hkey.u_tid != hkey->u_tid || ptslot->hkey.mm != hkey->mm))
                ptslot = ptslot->next;
 
-       k_tid = ptslot ? ptslot->k_tid : NULL;
+       pthread = ptslot ? ptslot->pthread : NULL;
 
        xnlock_put_irqrestore(&nklock, s);
 
-       return k_tid;
+       return pthread;
 }
 
 static void thread_destroy(pthread_t thread)
@@ -818,28 +827,28 @@ int cobalt_thread_setschedparam(unsigned long tid,
        struct sched_param param;
        struct cobalt_hkey hkey;
        int ret, promoted = 0;
-       pthread_t k_tid;
+       pthread_t pthread;
 
        if (__xn_safe_copy_from_user(&param, u_param, sizeof(param)))
                return -EFAULT;
 
        hkey.u_tid = tid;
        hkey.mm = current->mm;
-       k_tid = cobalt_thread_find(&hkey);
+       pthread = thread_find(&hkey);
 
-       if (k_tid == NULL && u_window_offset) {
+       if (pthread == NULL && u_window_offset) {
                /*
                 * If the syscall applies to "current", and the latter
                 * is not a Xenomai thread already, then shadow it.
                 */
-               k_tid = cobalt_thread_shadow(current, &hkey, u_window_offset);
-               if (IS_ERR(k_tid))
-                       return PTR_ERR(k_tid);
+               pthread = cobalt_thread_shadow(current, &hkey, u_window_offset);
+               if (IS_ERR(pthread))
+                       return PTR_ERR(pthread);
 
                promoted = 1;
        }
-       if (k_tid)
-               ret = pthread_setschedparam(k_tid, policy, &param);
+       if (pthread)
+               ret = pthread_setschedparam(pthread, policy, &param);
        else
                /*
                 * target thread is not a real-time thread, and is not current,
@@ -864,24 +873,24 @@ int cobalt_thread_setschedparam_ex(unsigned long tid,
        struct sched_param_ex param;
        struct cobalt_hkey hkey;
        int ret, promoted = 0;
-       pthread_t k_tid;
+       pthread_t pthread;
 
        if (__xn_safe_copy_from_user(&param, u_param, sizeof(param)))
                return -EFAULT;
 
        hkey.u_tid = tid;
        hkey.mm = current->mm;
-       k_tid = cobalt_thread_find(&hkey);
+       pthread = thread_find(&hkey);
 
-       if (k_tid == NULL && u_window_offset) {
-               k_tid = cobalt_thread_shadow(current, &hkey, u_window_offset);
-               if (IS_ERR(k_tid))
-                       return PTR_ERR(k_tid);
+       if (pthread == NULL && u_window_offset) {
+               pthread = cobalt_thread_shadow(current, &hkey, u_window_offset);
+               if (IS_ERR(pthread))
+                       return PTR_ERR(pthread);
 
                promoted = 1;
        }
-       if (k_tid)
-               ret = pthread_setschedparam_ex(k_tid, policy, &param);
+       if (pthread)
+               ret = pthread_setschedparam_ex(pthread, policy, &param);
        else
                ret = -EPERM;
 
@@ -917,9 +926,9 @@ int cobalt_thread_create(unsigned long tid, int policy,
        struct task_struct *p = current;
        struct sched_param_ex param;
        struct cobalt_hkey hkey;
-       pthread_t k_tid = NULL;
+       pthread_t pthread = NULL;
        pthread_attr_t attr;
-       pid_t h_tid;
+       pid_t pid;
        int ret;
 
        if (__xn_safe_copy_from_user(&param, u_param, sizeof(param)))
@@ -944,26 +953,26 @@ int cobalt_thread_create(unsigned long tid, int policy,
        attr.fp = 1;
        attr.name = p->comm;
 
-       ret = pthread_create(&k_tid, &attr);
+       ret = pthread_create(&pthread, &attr);
        if (ret)
                return ret;
 
-       h_tid = task_pid_vnr(p);
-       ret = xnshadow_map_user(&k_tid->threadbase, u_window_offset);
+       pid = task_pid_vnr(p);
+       ret = xnshadow_map_user(&pthread->threadbase, u_window_offset);
        if (ret)
                goto fail;
 
-       if (!cobalt_thread_hash(&hkey, k_tid, h_tid)) {
+       if (!cobalt_thread_hash(&hkey, pthread, pid)) {
                ret = -ENOMEM;
                goto fail;
        }
 
-       k_tid->hkey = hkey;
+       pthread->hkey = hkey;
 
        return 0;
 
 fail:
-       xnpod_cancel_thread(&k_tid->threadbase);
+       xnpod_cancel_thread(&pthread->threadbase);
 
        return ret;
 }
@@ -972,35 +981,35 @@ pthread_t cobalt_thread_shadow(struct task_struct *p,
                               struct cobalt_hkey *hkey,
                               unsigned long __user *u_window_offset)
 {
-       pthread_t k_tid = NULL;
+       pthread_t pthread = NULL;
        pthread_attr_t attr;
-       pid_t h_tid;
+       pid_t pid;
        int ret;
 
        attr = default_thread_attr;
        attr.detachstate = PTHREAD_CREATE_DETACHED;
        attr.name = p->comm;
 
-       ret = pthread_create(&k_tid, &attr);
+       ret = pthread_create(&pthread, &attr);
        if (ret)
                return ERR_PTR(-ret);
 
-       h_tid = task_pid_vnr(p);
-       ret = xnshadow_map_user(&k_tid->threadbase, u_window_offset);
+       pid = task_pid_vnr(p);
+       ret = xnshadow_map_user(&pthread->threadbase, u_window_offset);
        /*
         * From now on, we run in primary mode, so we refrain from
         * calling regular kernel services (e.g. like
         * task_pid_vnr()).
         */
-       if (ret == 0 && !cobalt_thread_hash(hkey, k_tid, h_tid))
+       if (ret == 0 && !cobalt_thread_hash(hkey, pthread, pid))
                ret = -EAGAIN;
 
        if (ret)
-               xnpod_cancel_thread(&k_tid->threadbase);
+               xnpod_cancel_thread(&pthread->threadbase);
        else
-               k_tid->hkey = *hkey;
+               pthread->hkey = *hkey;
 
-       return ret ? ERR_PTR(ret) : k_tid;
+       return ret ? ERR_PTR(ret) : pthread;
 }
 
 int cobalt_thread_make_periodic_np(unsigned long tid,
@@ -1010,11 +1019,11 @@ int cobalt_thread_make_periodic_np(unsigned long tid,
 {
        struct timespec startt, periodt;
        struct cobalt_hkey hkey;
-       pthread_t k_tid;
+       pthread_t pthread;
 
        hkey.u_tid = tid;
        hkey.mm = current->mm;
-       k_tid = cobalt_thread_find(&hkey);
+       pthread = thread_find(&hkey);
 
        if (__xn_safe_copy_from_user(&startt, u_startt, sizeof(startt)))
                return -EFAULT;
@@ -1022,7 +1031,7 @@ int cobalt_thread_make_periodic_np(unsigned long tid,
        if (__xn_safe_copy_from_user(&periodt, u_periodt, sizeof(periodt)))
                return -EFAULT;
 
-       return pthread_make_periodic_np(k_tid, clk_id, &startt, &periodt);
+       return pthread_make_periodic_np(pthread, clk_id, &startt, &periodt);
 }
 
 int cobalt_thread_wait_np(unsigned long __user *u_overruns)
@@ -1057,7 +1066,7 @@ int cobalt_thread_set_name_np(unsigned long tid, const char __user *u_name)
        char name[XNOBJECT_NAME_LEN];
        struct cobalt_hkey hkey;
        struct task_struct *p;
-       pthread_t k_tid;
+       pthread_t pthread;
        spl_t s;
 
        if (__xn_safe_strncpy_from_user(name, u_name,
@@ -1069,40 +1078,40 @@ int cobalt_thread_set_name_np(unsigned long tid, const char __user *u_name)
        hkey.mm = current->mm;
 
        xnlock_get_irqsave(&nklock, s);
-       k_tid = cobalt_thread_find(&hkey);
-       if (k_tid == NULL) {
+       pthread = thread_find(&hkey);
+       if (pthread == NULL) {
                xnlock_put_irqrestore(&nklock, s);
                return -ESRCH;
        }
 
-       p = xnthread_host_task(&k_tid->threadbase);
+       p = xnthread_host_task(&pthread->threadbase);
        get_task_struct(p);
        xnlock_put_irqrestore(&nklock, s);
        strncpy(p->comm, name, sizeof(p->comm));
        p->comm[sizeof(p->comm) - 1] = '\0';
-       snprintf(xnthread_name(&k_tid->threadbase),
+       snprintf(xnthread_name(&pthread->threadbase),
                 XNOBJECT_NAME_LEN - 1, "%s", name);
        put_task_struct(p);
 
        return 0;
 }
 
-int cobalt_thread_probe_np(pid_t h_tid)
+int cobalt_thread_probe_np(pid_t pid)
 {
-       struct tid_hash *tidslot;
+       struct pid_hash *pidslot;
        u32 hash;
        int ret;
        spl_t s;
 
-       hash = jhash2((u32 *)&h_tid, sizeof(h_tid) / sizeof(u32), 0);
+       hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
 
        xnlock_get_irqsave(&nklock, s);
 
-       tidslot = tid_table[hash & (PTHREAD_HSLOTS - 1)];
-       while (tidslot && tidslot->tid != h_tid)
-               tidslot = tidslot->next;
+       pidslot = pid_table[hash & (PTHREAD_HSLOTS - 1)];
+       while (pidslot && pidslot->pid != pid)
+               pidslot = pidslot->next;
 
-       ret = tidslot ? 0 : -ESRCH;
+       ret = pidslot ? 0 : -ESRCH;
 
        xnlock_put_irqrestore(&nklock, s);
 
@@ -1112,13 +1121,13 @@ int cobalt_thread_probe_np(pid_t h_tid)
 int cobalt_thread_kill(unsigned long tid, int sig)
 {
        struct cobalt_hkey hkey;
-       pthread_t k_tid;
+       pthread_t pthread;
        int ret;
 
        hkey.u_tid = tid;
        hkey.mm = current->mm;
-       k_tid = cobalt_thread_find(&hkey);
-       if (k_tid == NULL)
+       pthread = thread_find(&hkey);
+       if (pthread == NULL)
                return -ESRCH;
        /*
         * We have to take care of self-suspension, when the
@@ -1129,7 +1138,7 @@ int cobalt_thread_kill(unsigned long tid, int sig)
         * overkill, since no other signal would require this, so we
         * handle that case locally here.
         */
-       if (sig == SIGSUSP && xnshadow_current_p(&k_tid->threadbase)) {
+       if (sig == SIGSUSP && xnshadow_current_p(&pthread->threadbase)) {
                if (xnpod_root_p()) {
                        ret = xnshadow_harden();
                        if (ret)
@@ -1149,27 +1158,27 @@ int cobalt_thread_kill(unsigned long tid, int sig)
                 * The self-suspension case for shadows was handled at
                 * call site: we must be in primary mode already.
                 */
-               xnpod_suspend_thread(&k_tid->threadbase, XNSUSP,
+               xnpod_suspend_thread(&pthread->threadbase, XNSUSP,
                                     XN_INFINITE, XN_RELATIVE, NULL);
-               if (&k_tid->threadbase == xnpod_current_thread() &&
-                   xnthread_test_info(&k_tid->threadbase, XNBREAK))
+               if (&pthread->threadbase == xnpod_current_thread() &&
+                   xnthread_test_info(&pthread->threadbase, XNBREAK))
                        ret = EINTR;
                break;
 
        case SIGRESM:
-               xnpod_resume_thread(&k_tid->threadbase, XNSUSP);
+               xnpod_resume_thread(&pthread->threadbase, XNSUSP);
                goto resched;
 
        case SIGRELS:
-               xnpod_unblock_thread(&k_tid->threadbase);
+               xnpod_unblock_thread(&pthread->threadbase);
                goto resched;
 
        case SIGKICK:
-               xnshadow_kick(&k_tid->threadbase);
+               xnshadow_kick(&pthread->threadbase);
                goto resched;
 
        case SIGDEMT:
-               xnshadow_demote(&k_tid->threadbase);
+               xnshadow_demote(&pthread->threadbase);
          resched:
                xnpod_schedule();
                break;
@@ -1181,28 +1190,31 @@ int cobalt_thread_kill(unsigned long tid, int sig)
        return 0;
 }
 
-int cobalt_thread_stat(unsigned long tid,
+int cobalt_thread_stat(pid_t pid,
                       struct cobalt_threadstat __user *u_stat)
 {
        struct cobalt_threadstat stat;
-       struct cobalt_hkey hkey;
+       struct pid_hash *pidslot;
        struct xnthread *thread;
-       pthread_t k_tid;
        xnticks_t xtime;
+       u32 hash;
        spl_t s;
 
-       hkey.u_tid = tid;
-       hkey.mm = current->mm;
+       hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
 
        xnlock_get_irqsave(&nklock, s);
 
-       k_tid = cobalt_thread_find(&hkey);
-       if (k_tid == NULL) {
+       pidslot = pid_table[hash & (PTHREAD_HSLOTS - 1)];
+       while (pidslot && pidslot->pid != pid)
+               pidslot = pidslot->next;
+
+       if (pidslot == NULL) {
                xnlock_put_irqrestore(&nklock, s);
                return -ESRCH;
        }
 
-       thread = &k_tid->threadbase;
+       thread = &pidslot->pthread->threadbase;
+       stat.cpu = xnsched_cpu(thread->sched);
        xtime = xnthread_get_exectime(thread);
        if (xnthread_sched(thread)->curr == thread)
                xtime += xnstat_exectime_now() - xnthread_get_lastswitch(thread);
@@ -1212,6 +1224,7 @@ int cobalt_thread_stat(unsigned long tid,
        stat.xsc = xnstat_counter_get(&thread->stat.xsc);
        stat.pf = xnstat_counter_get(&thread->stat.pf);
        stat.status = xnthread_state_flags(thread);
+       stat.timeout = xnthread_get_timeout(thread, xnclock_read_monotonic());
 
        xnlock_put_irqrestore(&nklock, s);
 
@@ -1224,17 +1237,17 @@ int cobalt_thread_getschedparam(unsigned long tid,
 {
        struct sched_param param;
        struct cobalt_hkey hkey;
-       pthread_t k_tid;
+       pthread_t pthread;
        int policy, ret;
 
        hkey.u_tid = tid;
        hkey.mm = current->mm;
-       k_tid = cobalt_thread_find(&hkey);
+       pthread = thread_find(&hkey);
 
-       if (!k_tid)
+       if (!pthread)
                return -ESRCH;
 
-       ret = pthread_getschedparam(k_tid, &policy, &param);
+       ret = pthread_getschedparam(pthread, &policy, &param);
        if (ret)
                return ret;
 
@@ -1250,17 +1263,16 @@ int cobalt_thread_getschedparam_ex(unsigned long tid,
 {
        struct sched_param_ex param;
        struct cobalt_hkey hkey;
-       pthread_t k_tid;
+       pthread_t pthread;
        int policy, ret;
 
        hkey.u_tid = tid;
        hkey.mm = current->mm;
-       k_tid = cobalt_thread_find(&hkey);
-
-       if (!k_tid)
+       pthread = thread_find(&hkey);
+       if (pthread == NULL)
                return -ESRCH;
 
-       ret = pthread_getschedparam_ex(k_tid, &policy, &param);
+       ret = pthread_getschedparam_ex(pthread, &policy, &param);
        if (ret)
                return ret;
 
diff --git a/kernel/cobalt/thread.h b/kernel/cobalt/thread.h
index f737d8d..3c8ee18 100644
--- a/kernel/cobalt/thread.h
+++ b/kernel/cobalt/thread.h
@@ -31,13 +31,6 @@ struct cobalt_hkey {
        struct mm_struct *mm;
 };
 
-struct cobalt_hash {
-       pthread_t k_tid;        /* Xenomai in-kernel (nucleus) tid */
-       pid_t h_tid;            /* Host (linux) tid */
-       struct cobalt_hkey hkey;
-       struct cobalt_hash *next;
-};
-
 typedef struct {
        cobalt_sigset_t mask;
        xnpqueue_t list;
@@ -83,8 +76,6 @@ struct cobalt_thread {
 
 #define thread_name(thread) ((thread)->attr.name)
 
-pthread_t cobalt_thread_find(const struct cobalt_hkey *hkey);
-
 int cobalt_thread_create(unsigned long tid, int policy,
                         struct sched_param_ex __user *u_param,
                         unsigned long __user *u_window_offset);
@@ -108,7 +99,7 @@ int cobalt_thread_probe_np(pid_t h_tid);
 
 int cobalt_thread_kill(unsigned long tid, int sig);
 
-int cobalt_thread_stat(unsigned long tid,
+int cobalt_thread_stat(pid_t pid,
                       struct cobalt_threadstat __user *u_stat);
 
 int cobalt_thread_setschedparam(unsigned long tid,
diff --git a/lib/alchemy/task.c b/lib/alchemy/task.c
index b378c31..8329f67 100644
--- a/lib/alchemy/task.c
+++ b/lib/alchemy/task.c
@@ -245,8 +245,6 @@ static int create_tcb(struct alchemy_task **tcbp, RT_TASK *task,
        tcb->flowgen = 0;
 
        idata.magic = task_magic;
-       idata.wait_hook = NULL;
-       idata.suspend_hook = NULL;
        idata.finalizer = task_finalizer;
        idata.priority = prio;
        threadobj_init(&tcb->thobj, &idata);
@@ -616,7 +614,7 @@ int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
                goto out;
 
        threadobj_lock(&tcb->thobj);
-       threadobj_shadow(&tcb->thobj); /* We won't wait in prologue. */
+       threadobj_shadow();     /* We won't wait in prologue. */
        threadobj_unlock(&tcb->thobj);
        ret = task_prologue(tcb);
        if (ret) {
diff --git a/lib/cobalt/internal.c b/lib/cobalt/internal.c
index 3d76cf2..f144fa5 100644
--- a/lib/cobalt/internal.c
+++ b/lib/cobalt/internal.c
@@ -47,10 +47,10 @@ void __cobalt_thread_harden(void)
                XENOMAI_SYSCALL1(sc_nucleus_migrate, XENOMAI_XENO_DOMAIN);
 }
 
-int __cobalt_thread_stat(pthread_t tid, struct cobalt_threadstat *stat)
+int __cobalt_thread_stat(pid_t pid, struct cobalt_threadstat *stat)
 {
        return XENOMAI_SKINCALL2(__cobalt_muxid,
-                                sc_cobalt_thread_getstat, tid, stat);
+                                sc_cobalt_thread_getstat, pid, stat);
 }
 
 void ___cobalt_prefault(void *p, size_t len)
diff --git a/lib/cobalt/internal.h b/lib/cobalt/internal.h
index a939a2b..665d1a3 100644
--- a/lib/cobalt/internal.h
+++ b/lib/cobalt/internal.h
@@ -18,7 +18,7 @@ void ___cobalt_prefault(void *p, size_t len);
 
 void __cobalt_thread_harden(void);
 
-int __cobalt_thread_stat(pthread_t tid,
+int __cobalt_thread_stat(pid_t pid,
                         struct cobalt_threadstat *stat);
 
 int cobalt_monitor_init(cobalt_monitor_t *mon, int flags);
diff --git a/lib/copperplate/syncobj.c b/lib/copperplate/syncobj.c
index ccd161b..1f47625 100644
--- a/lib/copperplate/syncobj.c
+++ b/lib/copperplate/syncobj.c
@@ -85,7 +85,9 @@ int monitor_wait_grant(struct syncobj *sobj,
 }
 
 static inline
-int monitor_wait_drain(struct syncobj *sobj, const struct timespec *timeout)
+int monitor_wait_drain(struct syncobj *sobj,
+                      struct threadobj *current,
+                      const struct timespec *timeout)
 {
        return cobalt_monitor_wait(&sobj->core.monitor,
                                   COBALT_MONITOR_WAITDRAIN,
@@ -149,7 +151,9 @@ int monitor_wait_grant(struct syncobj *sobj,
 }
 
 static inline
-int monitor_wait_drain(struct syncobj *sobj, const struct timespec *timeout)
+int monitor_wait_drain(struct syncobj *sobj,
+                      struct threadobj *current,
+                      const struct timespec *timeout)
 {
        if (timeout)
                return -pthread_cond_timedwait(&sobj->core.drain_sync,
@@ -429,18 +433,17 @@ struct threadobj *syncobj_peek_drain(struct syncobj *sobj)
        return thobj;
 }
 
-static inline int wait_epilogue(struct syncobj *sobj,
-                               struct syncstate *syns,
-                               struct threadobj *current)
+static int wait_epilogue(struct syncobj *sobj,
+                        struct syncstate *syns,
+                        struct threadobj *current)
 {
+       current->run_state = __THREAD_S_RUNNING;
+
        if (current->wait_sobj) {
                dequeue_waiter(sobj, current);
                current->wait_sobj = NULL;
        }
 
-       if (current->wait_hook)
-               current->wait_hook(sobj, SYNCOBJ_RESUME);
-
        sobj->wait_count--;
        assert(sobj->wait_count >= 0);
 
@@ -469,17 +472,16 @@ int syncobj_wait_grant(struct syncobj *sobj, const struct timespec *timeout,
 
        assert(current != NULL);
 
+       current->run_state = timeout ? __THREAD_S_TIMEDWAIT : __THREAD_S_WAIT;
+       threadobj_save_timeout(&current->core, timeout);
        current->wait_status = 0;
        enqueue_waiter(sobj, current);
        current->wait_sobj = sobj;
        sobj->grant_count++;
        sobj->wait_count++;
 
-       if (current->wait_hook)
-               current->wait_hook(sobj, SYNCOBJ_BLOCK);
-
        /*
-        * XXX: we are guaranteed to be in deferred cancel mode, with
+        * NOTE: we are guaranteed to be in deferred cancel mode, with
         * cancelability disabled (in syncobj_lock); re-enable it
         * before pending on the condvar.
         */
@@ -508,27 +510,26 @@ int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
 
        assert(current != NULL);
 
+       current->run_state = timeout ? __THREAD_S_TIMEDWAIT : __THREAD_S_WAIT;
+       threadobj_save_timeout(&current->core, timeout);
        current->wait_status = SYNCOBJ_DRAINWAIT;
        list_append(&current->wait_link, &sobj->drain_list);
        current->wait_sobj = sobj;
        sobj->drain_count++;
        sobj->wait_count++;
 
-       if (current->wait_hook)
-               current->wait_hook(sobj, SYNCOBJ_BLOCK);
-
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
        assert(state == PTHREAD_CANCEL_DISABLE);
 
        /*
-        * XXX: Since the DRAINED signal is broadcast to all waiters,
+        * NOTE: Since the DRAINED signal is broadcast to all waiters,
         * a race may exist for acting upon it among those
         * threads. Therefore the caller must check that the drain
         * condition is still true before proceeding.
         */
        do {
                __syncobj_tag_unlocked(sobj);
-               ret = monitor_wait_drain(sobj, timeout);
+               ret = monitor_wait_drain(sobj, current, timeout);
                __syncobj_tag_locked(sobj);
        } while (ret == 0 && current->wait_sobj);
 
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index a7a144d..2e057b5 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -169,30 +169,16 @@ int threadobj_cancel(struct threadobj *thobj)
 
 int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
 {
-       struct threadobj *current = threadobj_current();
        pthread_t tid = thobj->tid;
        int ret;
 
        __threadobj_check_locked(thobj);
 
-       /*
-        * XXX: we must guarantee that a THREADOBJ_SUSPEND event is sent
-        * only once the target thread is in an innocuous state,
-        * i.e. about to suspend if current, or suspended
-        * otherwise. This way, the hook routine may always safely
-        * assume that the thread state in userland will not change,
-        * until that thread is resumed.
-        */
-       if (thobj->suspend_hook && thobj == current)
-               thobj->suspend_hook(thobj, THREADOBJ_SUSPEND);
-
+       thobj->status |= __THREAD_S_SUSPENDED;
        threadobj_unlock(thobj);
        ret = __RT(pthread_kill(tid, SIGSUSP));
        threadobj_lock(thobj);
 
-       if (thobj->suspend_hook && thobj != current)
-               thobj->suspend_hook(thobj, THREADOBJ_SUSPEND);
-
        return __bt(-ret);
 }
 
@@ -206,16 +192,7 @@ int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
        if (thobj == threadobj_current())
                return 0;
 
-       /*
-        * XXX: we must guarantee that a THREADOBJ_RESUME event is
-        * sent while the target thread is still in an innocuous
-        * state, prior to being actually resuled. This way, the hook
-        * routine may always safely assume that the thread state in
-        * userland will not change, until that point.
-        */
-       if (thobj->suspend_hook)
-               thobj->suspend_hook(thobj, THREADOBJ_RESUME);
-
+       thobj->status &= ~__THREAD_S_SUSPENDED;
        threadobj_unlock(thobj);
        ret = __RT(pthread_kill(tid, SIGRESM));
        threadobj_lock(thobj);
@@ -232,7 +209,7 @@ int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
        if (thobj->schedlock_depth++ > 0)
                return 0;
 
-       thobj->status |= THREADOBJ_SCHEDLOCK;
+       thobj->status |= __THREAD_S_NOPREEMPT;
        /*
         * In essence, we can't be scheduled out as a result of
         * locking the scheduler, so no need to drop the thread lock
@@ -261,7 +238,7 @@ int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
        if (--thobj->schedlock_depth > 0)
                return 0;
 
-       thobj->status &= ~THREADOBJ_SCHEDLOCK;
+       thobj->status &= ~__THREAD_S_NOPREEMPT;
        threadobj_unlock(thobj);
        ret = pthread_set_mode_np(PTHREAD_LOCK_SCHED, 0, NULL);
        threadobj_lock(thobj);
@@ -279,9 +256,9 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
 
        policy = SCHED_RT;
        if (prio == 0) {
-               thobj->status &= ~THREADOBJ_ROUNDROBIN;
+               thobj->status &= ~__THREAD_S_RR;
                policy = SCHED_OTHER;
-       } else if (thobj->status & THREADOBJ_ROUNDROBIN) {
+       } else if (thobj->status & __THREAD_S_RR) {
                xparam.sched_rr_quantum = thobj->tslice;
                policy = SCHED_RR;
        }
@@ -337,12 +314,12 @@ static int set_rr(struct threadobj *thobj, struct timespec *quantum)
        if (quantum && (quantum->tv_sec || quantum->tv_nsec)) {
                policy = SCHED_RR;
                xparam.sched_rr_quantum = *quantum;
-               thobj->status |= THREADOBJ_ROUNDROBIN;
+               thobj->status |= __THREAD_S_RR;
                thobj->tslice = *quantum;
                xparam.sched_priority = thobj->priority ?: 1;
        } else {
                policy = thobj->policy;
-               thobj->status &= ~THREADOBJ_ROUNDROBIN;
+               thobj->status &= ~__THREAD_S_RR;
                xparam.sched_rr_quantum.tv_sec = 0;
                xparam.sched_rr_quantum.tv_nsec = 0;
                xparam.sched_priority = thobj->priority;
@@ -376,16 +353,18 @@ int threadobj_stat(struct threadobj *thobj, struct threadobj_stat *p) /* thobj->
 
        __threadobj_check_locked(thobj);
 
-       ret = __cobalt_thread_stat(thobj->tid, &stat);
+       ret = __cobalt_thread_stat(thobj->pid, &stat);
        if (ret)
                return __bt(ret);
 
+       p->cpu = stat.cpu;
        p->status = stat.status;
        p->xtime = stat.xtime;
        p->msw = stat.msw;
        p->csw = stat.csw;
        p->xsc = stat.xsc;
        p->pf = stat.pf;
+       p->timeout = stat.timeout;
 
        return 0;
 }
@@ -414,7 +393,7 @@ static void roundrobin_handler(int sig)
         * multiple arbitrary time slices (i.e. vs the kernel
         * pre-defined and fixed one).
         */
-       if (current && (current->status & THREADOBJ_ROUNDROBIN) != 0)
+       if (current && (current->status & __THREAD_S_RR) != 0)
                sched_yield();
 }
 
@@ -446,16 +425,18 @@ static void notifier_callback(const struct notifier *nf)
        current = container_of(nf, struct threadobj, core.notifier);
        assert(current == threadobj_current());
 
-       if (current->suspend_hook) {
-               threadobj_lock(current);
-               current->suspend_hook(current, THREADOBJ_SUSPEND);
-               threadobj_unlock(current);
-               notifier_wait(nf);
-               threadobj_lock(current);
-               current->suspend_hook(current, THREADOBJ_RESUME);
-               threadobj_unlock(current);
-       } else
-               notifier_wait(nf); /* Wait for threadobj_resume(). */
+       /*
+        * In the Mercury case, we mark the thread as suspended only
+        * when the notifier handler is entered, not from
+        * threadobj_suspend().
+        */
+       threadobj_lock(current);
+       current->status |= __THREAD_S_SUSPENDED;
+       threadobj_unlock(current);
+       notifier_wait(nf); /* Wait for threadobj_resume(). */
+       threadobj_lock(current);
+       current->status &= ~__THREAD_S_SUSPENDED;
+       threadobj_unlock(current);
 }
 
 static inline void threadobj_init_corespec(struct threadobj *thobj)
@@ -578,7 +559,7 @@ int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
 
        thobj->core.prio_unlocked = thobj->priority;
        thobj->core.policy_unlocked = thobj->policy;
-       thobj->status |= THREADOBJ_SCHEDLOCK;
+       thobj->status |= __THREAD_S_NOPREEMPT;
        thobj->priority = threadobj_lock_prio;
        thobj->policy = SCHED_RT;
        param.sched_priority = threadobj_lock_prio;
@@ -602,7 +583,7 @@ int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
        if (--thobj->schedlock_depth > 0)
                return 0;
 
-       thobj->status &= ~THREADOBJ_SCHEDLOCK;
+       thobj->status &= ~__THREAD_S_NOPREEMPT;
        thobj->priority = thobj->core.prio_unlocked;
        param.sched_priority = thobj->core.prio_unlocked;
        policy = thobj->core.policy_unlocked;
@@ -626,7 +607,7 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
         * the target thread holds the scheduler lock, but only record
         * the level to set when unlocking.
         */
-       if (thobj->status & THREADOBJ_SCHEDLOCK) {
+       if (thobj->status & __THREAD_S_NOPREEMPT) {
                thobj->core.prio_unlocked = prio;
                thobj->core.policy_unlocked = prio ? SCHED_RT : SCHED_OTHER;
                return 0;
@@ -635,7 +616,7 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
        thobj->priority = prio;
        policy = SCHED_RT;
        if (prio == 0) {
-               thobj->status &= ~THREADOBJ_ROUNDROBIN;
+               thobj->status &= ~__THREAD_S_RR;
                policy = SCHED_OTHER;
        }
 
@@ -658,7 +639,7 @@ int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock h
 
        __threadobj_check_locked(current);
 
-       if (current->status & THREADOBJ_SCHEDLOCK)
+       if (current->status & __THREAD_S_NOPREEMPT)
                old |= __THREAD_M_LOCK;
 
        if (setmask & __THREAD_M_LOCK)
@@ -666,7 +647,7 @@ int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock h
        else if (clrmask & __THREAD_M_LOCK)
                threadobj_unlock_sched(current);
 
-       if (*mode_r)
+       if (mode_r)
                *mode_r = old;
 
        return ret;
@@ -684,13 +665,13 @@ static inline int set_rr(struct threadobj *thobj, struct timespec *quantum)
                value.it_value = *quantum;
                thobj->tslice = *quantum;
 
-               if (thobj->status & THREADOBJ_ROUNDROBIN) {
+               if (thobj->status & __THREAD_S_RR) {
                        /* Changing quantum of ongoing RR. */
                        ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
                        return ret ? __bt(-errno) : 0;
                }
 
-               thobj->status |= THREADOBJ_ROUNDROBIN;
+               thobj->status |= __THREAD_S_RR;
                /*
                 * Switch to SCHED_FIFO policy, assign default prio=1
                 * if coming from SCHED_OTHER. We use a per-thread
@@ -702,9 +683,9 @@ static inline int set_rr(struct threadobj *thobj, struct timespec *quantum)
                if (ret)
                        return __bt(-errno);
        } else {
-               if ((thobj->status & THREADOBJ_ROUNDROBIN) == 0)
+               if ((thobj->status & __THREAD_S_RR) == 0)
                        return 0;
-               thobj->status &= ~THREADOBJ_ROUNDROBIN;
+               thobj->status &= ~__THREAD_S_RR;
                /*
                 * Disarm timer and reset scheduling parameters to
                 * former policy.
@@ -788,8 +769,29 @@ int threadobj_wait_period(struct threadobj *thobj,
 int threadobj_stat(struct threadobj *thobj,
                   struct threadobj_stat *stat) /* thobj->lock held */
 {
+       struct timespec now, delta;
+
        __threadobj_check_locked(thobj);
 
+       stat->cpu = sched_getcpu();
+       if (stat->cpu < 0)
+               stat->cpu = 0;  /* assume uniprocessor on ENOSYS */
+
+       stat->status = threadobj_get_status(thobj);
+
+       if (thobj->run_state & (__THREAD_S_TIMEDWAIT|__THREAD_S_DELAYED)) {
+               __RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+               timespec_sub(&delta, &thobj->core.timeout, &now);
+               stat->timeout = timespec_scalar(&delta);
+               /*
+                * The timeout might fire as we are calculating the
+                * delta: sanitize any negative value as 1.
+                */
+               if ((sticks_t)stat->timeout < 0)
+                       stat->timeout = 1;
+       } else
+               stat->timeout = 0;
+
        return 0;
 }
 
@@ -828,13 +830,12 @@ void threadobj_init(struct threadobj *thobj,
        thobj->tracer = NULL;
        thobj->wait_sobj = NULL;
        thobj->finalizer = idata->finalizer;
-       thobj->wait_hook = idata->wait_hook;
        thobj->schedlock_depth = 0;
-       thobj->status = THREADOBJ_WARMUP;
+       thobj->status = __THREAD_S_WARMUP;
+       thobj->run_state = __THREAD_S_DORMANT;
        thobj->priority = idata->priority;
        thobj->policy = idata->priority ? SCHED_RT : SCHED_OTHER;
        holder_init(&thobj->wait_link);
-       thobj->suspend_hook = idata->suspend_hook;
        thobj->cnode = __node_id;
        thobj->pid = 0;
 
@@ -901,10 +902,10 @@ void threadobj_start(struct threadobj *thobj)     /* thobj->lock held. */
 
        __threadobj_check_locked(thobj);
 
-       if (thobj->status & THREADOBJ_STARTED)
+       if (thobj->status & __THREAD_S_STARTED)
                return;
 
-       thobj->status |= THREADOBJ_STARTED;
+       thobj->status |= __THREAD_S_STARTED;
        __RT(pthread_cond_signal(&thobj->barrier));
 
        if (current && thobj->priority <= current->priority)
@@ -915,13 +916,15 @@ void threadobj_start(struct threadobj *thobj)     /* thobj->lock held. */
         * enters the user code, or aborts prior to reaching that
         * point, whichever comes first.
         */
-       start_sync(thobj, THREADOBJ_RUNNING);
+       start_sync(thobj, __THREAD_S_ACTIVE);
 }
 
-void threadobj_shadow(struct threadobj *thobj)
+void threadobj_shadow(void)
 {
-       __threadobj_check_locked(thobj);
-       thobj->status |= THREADOBJ_STARTED|THREADOBJ_RUNNING;
+       struct threadobj *current = threadobj_current();
+
+       __threadobj_check_locked(current);
+       current->status |= __THREAD_S_STARTED|__THREAD_S_ACTIVE;
 }
 
 void threadobj_wait_start(void) /* current->lock free. */
@@ -930,16 +933,16 @@ void threadobj_wait_start(void) /* current->lock free. */
        int status;
 
        threadobj_lock(current);
-       status = start_sync(current, THREADOBJ_STARTED|THREADOBJ_ABORTED);
+       status = start_sync(current, __THREAD_S_STARTED|__THREAD_S_ABORTED);
        threadobj_unlock(current);
 
        /*
-        * We may have preempted the guy who set THREADOBJ_ABORTED in
+        * We may have preempted the guy who set __THREAD_S_ABORTED in
         * our status before it had a chance to issue pthread_cancel()
         * on us, so we need to go idle into a cancellation point to
         * wait for it: use pause() for this.
         */
-       while (status & THREADOBJ_ABORTED)
+       while (status & __THREAD_S_ABORTED)
                pause();
 }
 
@@ -948,7 +951,8 @@ void threadobj_notify_entry(void) /* current->lock free. */
        struct threadobj *current = threadobj_current();
 
        threadobj_lock(current);
-       current->status |= THREADOBJ_RUNNING;
+       current->status |= __THREAD_S_ACTIVE;
+       current->run_state = __THREAD_S_RUNNING;
        __RT(pthread_cond_signal(&current->barrier));
        threadobj_unlock(current);
 }
@@ -998,7 +1002,7 @@ int threadobj_prologue(struct threadobj *thobj, const char *name)
        sysgroup_add(thread, &thobj->memspec);
 
        threadobj_lock(thobj);
-       thobj->status &= ~THREADOBJ_WARMUP;
+       thobj->status &= ~__THREAD_S_WARMUP;
        __RT(pthread_cond_signal(&thobj->barrier));
        threadobj_unlock(thobj);
 
@@ -1016,7 +1020,7 @@ static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
 
        __threadobj_check_locked(thobj);
 
-       while (thobj->status & THREADOBJ_WARMUP) {
+       while (thobj->status & __THREAD_S_WARMUP) {
                oldstate = thobj->cancel_state;
                __threadobj_tag_unlocked(thobj);
                __RT(pthread_cond_wait(&thobj->barrier, &thobj->lock));
@@ -1024,12 +1028,29 @@ static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
                thobj->cancel_state = oldstate;
        }
 
-       if ((thobj->status & THREADOBJ_STARTED) == 0) {
-               thobj->status |= THREADOBJ_ABORTED;
+       if ((thobj->status & __THREAD_S_STARTED) == 0) {
+               thobj->status |= __THREAD_S_ABORTED;
                __RT(pthread_cond_signal(&thobj->barrier));
        }
 }
 
+int threadobj_sleep(struct timespec *ts)
+{
+       struct threadobj *current = threadobj_current();
+       int ret;
+
+       /*
+        * clock_nanosleep() returns -EINTR upon threadobj_unblock()
+        * with both Cobalt and Mercury cores.
+        */
+       current->run_state = __THREAD_S_DELAYED;
+       threadobj_save_timeout(&current->core, ts);
+       ret = -__RT(clock_nanosleep(CLOCK_COPPERPLATE, TIMER_ABSTIME, ts, NULL));
+       current->run_state = __THREAD_S_RUNNING;
+
+       return ret;
+}
+
 static void threadobj_finalize(void *p) /* thobj->lock free */
 {
        struct threadobj *thobj = p;
@@ -1103,7 +1124,7 @@ int threadobj_set_rr(struct threadobj *thobj, struct timespec *quantum)
         * logic simpler in the Mercury case with respect to tracking
         * the current scheduling parameters.
         */
-       if (thobj->status & THREADOBJ_SCHEDLOCK)
+       if (thobj->status & __THREAD_S_NOPREEMPT)
                return -EINVAL;
 
        return __bt(set_rr(thobj, quantum));
@@ -1143,11 +1164,10 @@ static inline void main_overlay(void)
                panic("failed to allocate main tcb");
 
        idata.magic = 0x0;
-       idata.wait_hook = NULL;
-       idata.suspend_hook = NULL;
        idata.finalizer = NULL;
        idata.priority = 0;
        threadobj_init(tcb, &idata);
+       tcb->status = __THREAD_S_STARTED|__THREAD_S_ACTIVE;
        threadobj_prologue(tcb, "main");
        pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
 }
diff --git a/lib/psos/task.c b/lib/psos/task.c
index c8a2c3b..c5b79e2 100644
--- a/lib/psos/task.c
+++ b/lib/psos/task.c
@@ -307,8 +307,6 @@ u_long t_create(const char *name, u_long prio,
        }
 
        idata.magic = task_magic;
-       idata.wait_hook = NULL;
-       idata.suspend_hook = NULL;
        idata.finalizer = task_finalizer;
        idata.priority = cprio;
        threadobj_init(&task->thobj, &idata);
diff --git a/lib/vxworks/taskInfo.c b/lib/vxworks/taskInfo.c
index b0b53a7..dfc347f 100644
--- a/lib/vxworks/taskInfo.c
+++ b/lib/vxworks/taskInfo.c
@@ -78,10 +78,10 @@ BOOL taskIsReady(TASK_ID task_id)
        if (task == NULL)
                return 0;
 
-       status = task->tcb->status;
+       status = get_task_status(task);
        put_wind_task(task);
 
-       return (status & (WIND_SUSPEND|WIND_DELAY)) == 0;
+       return status == WIND_READY;
 }
 
 BOOL taskIsSuspended(TASK_ID task_id)
@@ -93,11 +93,11 @@ BOOL taskIsSuspended(TASK_ID task_id)
        if (task == NULL)
                return 0;
 
-       status = task->tcb->status;
+       status = threadobj_get_status(&task->thobj);
 
        put_wind_task(task);
 
-       return (status & WIND_SUSPEND) != 0;
+       return (status & __THREAD_S_SUSPENDED) != 0;
 }
 
 STATUS taskGetInfo(TASK_ID task_id, TASK_DESC *desc)
@@ -118,7 +118,7 @@ STATUS taskGetInfo(TASK_ID task_id, TASK_DESC *desc)
        tcb = task->tcb;
        desc->td_tid = task_id;
        desc->td_priority = wind_task_get_priority(task);
-       desc->td_status = tcb->status;
+       desc->td_status = get_task_status(task);
        desc->td_flags = tcb->flags;
        strncpy(desc->td_name, task->name, sizeof(desc->td_name));
        desc->td_entry = tcb->entry;
diff --git a/lib/vxworks/taskLib.c b/lib/vxworks/taskLib.c
index de6b8c9..f5e4553 100644
--- a/lib/vxworks/taskLib.c
+++ b/lib/vxworks/taskLib.c
@@ -92,7 +92,7 @@ struct wind_task *get_wind_task(TASK_ID tid)
         * chance is pthread_mutex_lock() in threadobj_lock()
         * detecting a wrong mutex kind and bailing out.
         *
-        * XXX: threadobj_lock() disables cancellability for the
+        * NOTE: threadobj_lock() disables cancellability for the
         * caller upon success, until the lock is dropped in
         * threadobj_unlock(), so there is no way it may vanish while
         * holding the lock. Therefore we need no cleanup handler
@@ -132,6 +132,21 @@ void put_wind_task(struct wind_task *task)
        threadobj_unlock(&task->thobj);
 }
 
+int get_task_status(struct wind_task *task)
+{
+       int status = threadobj_get_status(&task->thobj), ret = WIND_READY;
+
+       if (status & __THREAD_S_SUSPENDED)
+               ret |= WIND_SUSPEND;
+
+       if (status & (__THREAD_S_WAIT|__THREAD_S_TIMEDWAIT))
+               ret |= WIND_PEND;
+       else if (status & __THREAD_S_DELAYED)
+               ret |= WIND_DELAY;
+
+       return ret;
+}
+
 static void task_finalizer(struct threadobj *thobj)
 {
        struct wind_task *task = container_of(thobj, struct wind_task, thobj);
@@ -149,32 +164,6 @@ static void task_finalizer(struct threadobj *thobj)
        threadobj_free(task);
 }
 
-/*
- * XXX: A wait hook always runs on behalf of the target task, no lock
- * is needed to access the current TCB. A suspend hook may run over
- * any thread context, and always runs with the thread lock held
- * for this reason.
- */
-static void task_wait_hook(struct syncobj *sobj, int status)
-{
-       struct wind_task *task = wind_task_current();
-
-       if (status & SYNCOBJ_BLOCK)
-               task->tcb->status |= WIND_PEND;
-       else
-               task->tcb->status &= ~WIND_PEND;
-}
-
-static void task_suspend_hook(struct threadobj *thobj, int status)
-{
-       struct wind_task *task = container_of(thobj, struct wind_task, thobj);
-
-       if (status & THREADOBJ_SUSPEND)
-               task->tcb->status |= WIND_SUSPEND;
-       else
-               task->tcb->status &= ~WIND_SUSPEND;
-}
-
 #ifdef CONFIG_XENO_REGISTRY
 
 static inline char *task_decode_status(struct wind_task *task, char *buf)
@@ -183,22 +172,18 @@ static inline char *task_decode_status(struct wind_task *task, char *buf)
 
        *buf = '\0';
        status = threadobj_get_status(&task->thobj);
-       if (status & THREADOBJ_SCHEDLOCK)
+       if (status & __THREAD_S_NOPREEMPT)
                strcat(buf, "+sched_lock");
-       if (status & THREADOBJ_ROUNDROBIN)
+       if (status & __THREAD_S_RR)
                strcat(buf, "+sched_rr");
-
-       status = task->tcb->status;
-       if (status == WIND_READY)
+       if (status & __THREAD_S_SUSPENDED)
+               strcat(buf, "+suspended");
+       if (status & (__THREAD_S_WAIT|__THREAD_S_TIMEDWAIT))
+               strcat(buf, "+pending");
+       else if (status & __THREAD_S_DELAYED)
+               strcat(buf, "+delayed");
+       else
                strcat(buf, "+ready");
-       else {
-               if (status & WIND_SUSPEND)
-                       strcat(buf, "+suspended");
-               if (status & WIND_PEND)
-                       strcat(buf, "+pending");
-               if (status & WIND_DELAY)
-                       strcat(buf, "+delayed");
-       }
 
        return buf + 1;
 }
@@ -362,23 +347,12 @@ static STATUS __taskInit(struct wind_task *task,
 
        task->tcb = tcb;
        tcb->opaque = task;
-       /*
-        * CAUTION: tcb->status in only modified by the owner task
-        * (see suspend/resume hooks), or when such task is guaranteed
-        * not to be running, e.g. in taskActivate(). So we do NOT
-        * take any lock specifically for updating it. However, we
-        * know that a memory barrier will be issued shortly after
-        * such updates because of other locking being in effect, so
-        * we don't explicitely have to provide for it.
-        */
        tcb->status = WIND_SUSPEND;
        tcb->safeCnt = 0;
        tcb->flags = flags;
        tcb->entry = entry;
 
        idata.magic = task_magic;
-       idata.wait_hook = task_wait_hook;
-       idata.suspend_hook = task_suspend_hook;
        idata.finalizer = task_finalizer;
        idata.priority = cprio;
        threadobj_init(&task->thobj, &idata);
@@ -864,9 +838,7 @@ STATUS taskDelay(int ticks)
        COPPERPLATE_PROTECT(svc);
 
        clockobj_ticks_to_timeout(&wind_clock, ticks, &rqt);
-       current->tcb->status |= WIND_DELAY;
        ret = threadobj_sleep(&rqt);
-       current->tcb->status &= ~WIND_DELAY;
        if (ret) {
                errno = -ret;
                ret = ERROR;
diff --git a/lib/vxworks/taskLib.h b/lib/vxworks/taskLib.h
index aed8ac0..6a7babf 100644
--- a/lib/vxworks/taskLib.h
+++ b/lib/vxworks/taskLib.h
@@ -96,6 +96,8 @@ struct wind_task *get_wind_task_or_self(TASK_ID tid);
 
 void put_wind_task(struct wind_task *task);
 
+int get_task_status(struct wind_task *task);
+
 extern struct cluster wind_task_table;
 
 extern struct pvlist wind_task_list;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to