To improve the robustness of the fast mutex implementation in POSIX (and
later on in native), it is better to track the mutex owner by handle
instead of by kernel object pointer. This patch therefore changes
__xn_sys_current (xeno_set_current) so that it returns
xnthread_handle(current_thread). It furthermore converts the POSIX mutex
implementation to pick up and store the lock owner as a handle in the
kernel/user-shared mutex word. Finally, it ensures that at least POSIX
threads always have an (anonymous) registry handle assigned.
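
For illustration, the uncontended user space fast path then reduces to a
single compare-and-swap on the shared owner word, with the handle cached
by xeno_set_current() as the lock value. A simplified sketch of the code
added to src/skins/posix/mutex.c (claimed-bit handling and error paths
omitted):

    xnhandle_t cur, owner;

    cur = xeno_get_current();    /* handle of the calling thread */

    owner = xnarch_atomic_cmpxchg(get_ownerp(shadow), XN_NO_HANDLE, cur);
    if (likely(!owner)) {
            /* Uncontended: mutex acquired without entering the kernel. */
            shadow->lockcnt = 1;
            return 0;
    }
    /* Contended or recursive: fall back to the syscall-based slow path. */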

As the value stored in the mutex word is now a plain integer, we can
switch over to xnarch_atomic_t, removing all xnarch_atomic_intptr_t users.
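
Since the owner word now carries a handle rather than a pointer, the
"claimed" marker moves from bit 0 of the owner pointer into one of the
new spare bits of the handle. A minimal sketch of the intended encoding,
using the helpers introduced in nucleus/types.h and posix/cb_lock.h:

    #define __CLAIMED_BIT    XN_HANDLE_SPARE3

    xnhandle_t owner = xnarch_atomic_get(mutex->owner);

    if (xnhandle_test_spare(owner, __CLAIMED_BIT)) {
            /* Waiters may be queued; unlock must go through the kernel. */
    }

    /* Strip the spare bits before comparing against a thread handle. */
    owner = xnhandle_mask_spare(owner);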

---
 include/asm-generic/bits/bind.h    |    9 ++++-
 include/asm-generic/bits/current.h |    5 +-
 include/nucleus/types.h            |   13 +++++++
 ksrc/nucleus/shadow.c              |   31 +++++++++++++++--
 ksrc/skins/posix/Kconfig           |    1 +
 ksrc/skins/posix/cb_lock.h         |   15 ++++++--
 ksrc/skins/posix/cond.c            |   12 +++---
 ksrc/skins/posix/mutex.c           |   21 +++++++----
 ksrc/skins/posix/mutex.h           |   66 +++++++++++++++++++++++++------------
 ksrc/skins/posix/syscall.c         |   11 +++---
 ksrc/skins/posix/thread.c          |    8 ++++
 src/skins/posix/mutex.c            |   27 +++++++--------
 12 files changed, 158 insertions(+), 61 deletions(-)

Index: b/include/asm-generic/bits/current.h
===================================================================
--- a/include/asm-generic/bits/current.h
+++ b/include/asm-generic/bits/current.h
@@ -2,14 +2,15 @@
 #define _XENO_ASM_GENERIC_CURRENT_H
 
 #include <pthread.h>
+#include <nucleus/types.h>
 
 extern pthread_key_t xeno_current_key;
 
 extern void xeno_set_current(void);
 
-static inline void *xeno_get_current(void)
+static inline xnhandle_t xeno_get_current(void)
 {
-       return pthread_getspecific(xeno_current_key);
+       return (xnhandle_t)pthread_getspecific(xeno_current_key);
 }
 
 #endif /* _XENO_ASM_GENERIC_CURRENT_H */
Index: b/include/nucleus/types.h
===================================================================
--- a/include/nucleus/types.h
+++ b/include/nucleus/types.h
@@ -61,6 +61,19 @@ typedef unsigned long xnhandle_t;
 
 #define XN_NO_HANDLE ((xnhandle_t)0)
 
+#define XN_HANDLE_SPARE0       ((xnhandle_t)0x10000000)
+#define XN_HANDLE_SPARE1       ((xnhandle_t)0x20000000)
+#define XN_HANDLE_SPARE2       ((xnhandle_t)0x40000000)
+#define XN_HANDLE_SPARE3       ((xnhandle_t)0x80000000)
+#define XN_HANDLE_SPARE_MASK   ((xnhandle_t)0xf0000000)
+
+#define xnhandle_mask_spare(handle)  ((handle) & ~XN_HANDLE_SPARE_MASK)
+#define xnhandle_test_spare(handle, bits)  (!!((handle) & (bits)))
+#define xnhandle_set_spare(handle, bits) \
+       do { (handle) |= (bits); } while (0)
+#define xnhandle_clear_spare(handle, bits) \
+       do { (handle) &= ~(bits); } while (0)
+
 struct xnintr;
 
 typedef int (*xnisr_t)(struct xnintr *intr);
Index: b/ksrc/nucleus/shadow.c
===================================================================
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -52,6 +52,7 @@
 #include <nucleus/trace.h>
 #include <nucleus/stat.h>
 #include <nucleus/sys_ppd.h>
+#include <nucleus/registry.h>
 #include <asm/xenomai/features.h>
 #include <asm/xenomai/syscall.h>
 #include <asm/xenomai/bits/shadow.h>
@@ -1907,13 +1908,38 @@ static int xnshadow_sys_sem_heap(struct
        return __xn_safe_copy_to_user(us_hinfo, &hinfo, sizeof(*us_hinfo));
 }
 
+#ifdef CONFIG_XENO_OPT_REGISTRY
 static int xnshadow_sys_current(struct pt_regs *regs)
 {
-       xnthread_t * __user *us_current, *cur = xnshadow_thread(current);
-       us_current = (xnthread_t *__user *) __xn_reg_arg1(regs);
+       xnthread_t *cur = xnshadow_thread(current);
+       xnhandle_t cur_handle, __user *us_handle;
+       int err = 0;
+       spl_t s;
+
+       if (cur == NULL)
+               return -EPERM;
+
+       xnlock_get_irqsave(&nklock, s);
+
+       cur_handle = xnthread_handle(cur);
+
+       /* We need at least an anonymous registry entry to obtain a handle. */
+       if (!cur_handle) {
+               err = xnregistry_enter("", cur, &xnthread_handle(cur), NULL);
+               cur_handle = xnthread_handle(cur);
+       }
+
+       xnlock_put_irqrestore(&nklock, s);
+
+       if (err)
+               return err;
+
+       us_handle = (xnhandle_t __user *) __xn_reg_arg1(regs);
 
-       return __xn_safe_copy_to_user(us_current, &cur, sizeof(*us_current));
+       return __xn_safe_copy_to_user(us_handle, &cur_handle,
+                                     sizeof(*us_handle));
 }
+#endif /* CONFIG_XENO_OPT_REGISTRY */
 
 static xnsysent_t __systab[] = {
        [__xn_sys_migrate] = {&xnshadow_sys_migrate, __xn_exec_current},
@@ -1924,7 +1947,9 @@ static xnsysent_t __systab[] = {
        [__xn_sys_barrier] = {&xnshadow_sys_barrier, __xn_exec_lostage},
        [__xn_sys_trace] = {&xnshadow_sys_trace, __xn_exec_any},
        [__xn_sys_sem_heap] = {&xnshadow_sys_sem_heap, __xn_exec_any},
+#ifdef CONFIG_XENO_OPT_REGISTRY
        [__xn_sys_current] = {&xnshadow_sys_current, __xn_exec_any},
+#endif /* CONFIG_XENO_OPT_REGISTRY */
 };
 
 static void *xnshadow_sys_event(int event, void *data)
Index: b/ksrc/skins/posix/cb_lock.h
===================================================================
--- a/ksrc/skins/posix/cb_lock.h
+++ b/ksrc/skins/posix/cb_lock.h
@@ -3,15 +3,22 @@
 
 #include <asm/xenomai/atomic.h>
 #include <nucleus/compiler.h>
+#include <nucleus/types.h>
 
 #ifndef __KERNEL__
 typedef void xnthread_t;
 #endif /* __KERNEL__ */
 
-#define test_claimed(owner) ((long) (owner) & 1)
-#define clear_claimed(owner) ((xnthread_t *) ((long) (owner) & ~1))
-#define set_claimed(owner, bit) \
-        ((xnthread_t *) ((long) clear_claimed(owner) | !!(bit)))
+#define __CLAIMED_BIT          XN_HANDLE_SPARE3
+
+#define test_claimed(owner)    xnhandle_test_spare(owner, __CLAIMED_BIT)
+#define clear_claimed(owner)   xnhandle_mask_spare(owner)
+#define set_claimed(owner, bit) ({ \
+       xnhandle_t __tmp = xnhandle_mask_spare(owner); \
+       if (bit) \
+               xnhandle_set_spare(__tmp, __CLAIMED_BIT); \
+       __tmp; \
+})
 
 #ifdef CONFIG_XENO_FASTSEM
 
Index: b/ksrc/skins/posix/cond.c
===================================================================
--- a/ksrc/skins/posix/cond.c
+++ b/ksrc/skins/posix/cond.c
@@ -230,18 +230,20 @@ static inline int mutex_save_count(xnthr
 
        mutex = shadow->mutex;
 
-       if (clear_claimed(xnarch_atomic_intptr_get(mutex->owner)) != cur)
+       if (clear_claimed(xnarch_atomic_get(mutex->owner)) !=
+           xnthread_handle(cur))
                return EPERM;
 
        *count_ptr = shadow->lockcnt;
 
-       if (likely(xnarch_atomic_intptr_cmpxchg(mutex->owner, cur, NULL) == cur))
+       if (likely(xnarch_atomic_cmpxchg(mutex->owner, xnthread_handle(cur),
+                                        XN_NO_HANDLE) == xnthread_handle(cur)))
                return 0;
 
        owner = xnsynch_wakeup_one_sleeper(&mutex->synchbase);
-       xnarch_atomic_intptr_set
-               (mutex->owner,
-                set_claimed(owner,xnsynch_nsleepers(&mutex->synchbase)));
+       xnarch_atomic_set(mutex->owner,
+                         set_claimed(xnthread_handle(owner),
+                                     xnsynch_nsleepers(&mutex->synchbase)));
 
        /* Do not reschedule here, releasing the mutex and suspension must be
           done atomically in pthread_cond_*wait. */
Index: b/ksrc/skins/posix/mutex.c
===================================================================
--- a/ksrc/skins/posix/mutex.c
+++ b/ksrc/skins/posix/mutex.c
@@ -82,7 +82,7 @@ int pse51_mutex_check_init(struct __shad
 
 int pse51_mutex_init_internal(struct __shadow_mutex *shadow,
                              pse51_mutex_t *mutex,
-                             xnarch_atomic_intptr_t *ownerp,
+                             xnarch_atomic_t *ownerp,
                              const pthread_mutexattr_t *attr)
 {
        xnflags_t synch_flags = XNSYNCH_PRIO | XNSYNCH_NOPIP;
@@ -118,7 +118,7 @@ int pse51_mutex_init_internal(struct __s
        mutex->owner = ownerp;
        mutex->owningq = kq;
        mutex->sleepers = 0;
-       xnarch_atomic_intptr_set(ownerp, NULL);
+       xnarch_atomic_set(ownerp, XN_NO_HANDLE);
 
        xnlock_get_irqsave(&nklock, s);
        appendq(&kq->mutexq, &mutex->link);
@@ -159,7 +159,7 @@ int pthread_mutex_init(pthread_mutex_t *
            &((union __xeno_mutex *)mx)->shadow_mutex;
        DECLARE_CB_LOCK_FLAGS(s);
        pse51_mutex_t *mutex;
-       xnarch_atomic_intptr_t *ownerp;
+       xnarch_atomic_t *ownerp;
        int err;
 
        if (!attr)
@@ -185,9 +185,9 @@ int pthread_mutex_init(pthread_mutex_t *
        if (!mutex)
                return ENOMEM;
 
-       ownerp = (xnarch_atomic_intptr_t *)
+       ownerp = (xnarch_atomic_t *)
                xnheap_alloc(&xnsys_ppd_get(attr->pshared)->sem_heap,
-                            sizeof(xnarch_atomic_intptr_t));
+                            sizeof(xnarch_atomic_t));
        if (!ownerp) {
                xnfree(mutex);
                return EAGAIN;
@@ -266,7 +266,7 @@ int pthread_mutex_destroy(pthread_mutex_
                return EPERM;
        }
 
-       if (xnarch_atomic_intptr_get(mutex->owner)) {
+       if (xnarch_atomic_get(mutex->owner)) {
                cb_write_unlock(&shadow->lock, s);
                return EBUSY;
        }
@@ -290,6 +290,10 @@ int pse51_mutex_timedlock_break(struct _
        spl_t s;
        int err;
 
+       /* We need a valid thread handle for the fast lock. */
+       if (!xnthread_handle(cur))
+               return -EPERM;
+
        err = pse51_mutex_timedlock_internal(cur, shadow, 1, timed, abs_to);
        if (err != -EBUSY)
                goto unlock_and_return;
@@ -392,7 +396,7 @@ int pthread_mutex_trylock(pthread_mutex_
                return -PTR_ERR(owner);
 
        err = EBUSY;
-       if (clear_claimed(owner) == cur) {
+       if (owner == cur) {
                pse51_mutex_t *mutex = shadow->mutex;
 
                if (mutex->attr.type == PTHREAD_MUTEX_RECURSIVE) {
@@ -573,7 +577,8 @@ int pthread_mutex_unlock(pthread_mutex_t
 
        mutex = shadow->mutex;
        
-       if (clear_claimed(xnarch_atomic_intptr_get(mutex->owner)) != cur) {
+       if (clear_claimed(xnarch_atomic_get(mutex->owner)) !=
+           xnthread_handle(cur)) {
                err = EPERM;
                goto out;
        }
Index: b/ksrc/skins/posix/mutex.h
===================================================================
--- a/ksrc/skins/posix/mutex.h
+++ b/ksrc/skins/posix/mutex.h
@@ -34,7 +34,7 @@ union __xeno_mutex {
                xnarch_atomic_t lock;
                union {
                        unsigned owner_offset;
-                       xnarch_atomic_intptr_t *owner;
+                       xnarch_atomic_t *owner;
                };
                struct pse51_mutexattr attr;
 #endif /* CONFIG_XENO_FASTSEM */
@@ -43,6 +43,7 @@ union __xeno_mutex {
 
 #ifdef __KERNEL__
 
+#include <nucleus/registry.h>
 #include <posix/internal.h>
 #include <posix/thread.h>
 #include <posix/cb_lock.h>
@@ -54,7 +55,7 @@ typedef struct pse51_mutex {
 #define link2mutex(laddr)                                               \
        ((pse51_mutex_t *)(((char *)laddr) - offsetof(pse51_mutex_t, link)))
 
-       xnarch_atomic_intptr_t *owner;
+       xnarch_atomic_t *owner;
        pthread_mutexattr_t attr;
        unsigned sleepers;
        pse51_kqueues_t *owningq;
@@ -77,7 +78,7 @@ int pse51_mutex_check_init(struct __shad
 
 int pse51_mutex_init_internal(struct __shadow_mutex *shadow,
                              pse51_mutex_t *mutex,
-                             xnarch_atomic_intptr_t *ownerp,
+                             xnarch_atomic_t *ownerp,
                              const pthread_mutexattr_t *attr);
 
 void pse51_mutex_destroy_internal(pse51_mutex_t *mutex,
@@ -88,6 +89,7 @@ pse51_mutex_trylock_internal(xnthread_t
                             struct __shadow_mutex *shadow, unsigned count)
 {
        pse51_mutex_t *mutex = shadow->mutex;
+       xnhandle_t ownerh;
        xnthread_t *owner;
 
        if (xnpod_unblockable_p())
@@ -101,9 +103,14 @@ pse51_mutex_trylock_internal(xnthread_t
                return ERR_PTR(-EPERM);
 #endif /* XENO_DEBUG(POSIX) */
 
-       owner = xnarch_atomic_intptr_cmpxchg(mutex->owner, NULL, cur);
-       if (unlikely(owner != NULL))
+       ownerh = xnarch_atomic_cmpxchg(mutex->owner, XN_NO_HANDLE,
+                                      xnthread_handle(cur));
+       if (unlikely(ownerh)) {
+               owner = xnregistry_fetch(clear_claimed(ownerh));
+               if (!owner)
+                       return ERR_PTR(-EINVAL);
                return owner;
+       }
 
        shadow->lockcnt = count;
        return NULL;
@@ -118,7 +125,8 @@ static inline int pse51_mutex_timedlock_
 
 {
        pse51_mutex_t *mutex;
-       xnthread_t *owner, *old;
+       xnthread_t *owner;
+       xnhandle_t ownerh, old;
        spl_t s;
        int err;
 
@@ -128,27 +136,42 @@ static inline int pse51_mutex_timedlock_
                return PTR_ERR(owner);
 
        mutex = shadow->mutex;
-       if (clear_claimed(owner) == cur)
+       if (owner == cur)
                return -EBUSY;
 
        /* Set bit 0, so that mutex_unlock will know that the mutex is claimed.
           Hold the nklock, for mutual exclusion with slow mutex_unlock. */
        xnlock_get_irqsave(&nklock, s);
-       while(!test_claimed(owner)) {
-               old = xnarch_atomic_intptr_cmpxchg(mutex->owner,
-                                                  owner, set_claimed(owner, 1));
-               if (likely(old == owner))
+
+       ownerh = xnarch_atomic_get(mutex->owner);
+       while (!test_claimed(ownerh)) {
+               old = xnarch_atomic_cmpxchg(mutex->owner, ownerh,
+                                           set_claimed(ownerh, 1));
+               if (likely(old == ownerh))
                        break;
-               if (old == NULL) {
+               if (old == 0) {
                        /* Owner called fast mutex_unlock
                           (on another cpu) */
                        xnlock_put_irqrestore(&nklock, s);
                        goto retry_lock;
                }
-               owner = old;
+
+               ownerh = old;
+               owner = xnregistry_fetch(clear_claimed(ownerh));
+
+               if (unlikely(!owner)) {
+                       err = -EINVAL;
+                       goto error;
+               }
+       }
+
+       /* Consistency check for owner handle - is the object a thread? */
+       if (unlikely(xnthread_handle(owner) != clear_claimed(ownerh))) {
+               err = -EINVAL;
+               goto error;
        }
 
-       xnsynch_set_owner(&mutex->synchbase, clear_claimed(owner));
+       xnsynch_set_owner(&mutex->synchbase, owner);
        ++mutex->sleepers;
        if (timed)
                xnsynch_sleep_on(&mutex->synchbase, abs_to, XN_REALTIME);
@@ -169,7 +192,8 @@ static inline int pse51_mutex_timedlock_
                goto error;
        }
 
-       xnarch_atomic_intptr_set(mutex->owner,set_claimed(cur, mutex->sleepers));
+       ownerh = set_claimed(xnthread_handle(cur), mutex->sleepers);
+       xnarch_atomic_set(mutex->owner, ownerh);
        shadow->lockcnt = count;
        xnlock_put_irqrestore(&nklock, s);
 
@@ -177,9 +201,9 @@ static inline int pse51_mutex_timedlock_
 
   error:
        if (!mutex->sleepers)
-               xnarch_atomic_intptr_set
+               xnarch_atomic_set
                        (mutex->owner,
-                        clear_claimed(xnarch_atomic_intptr_get(mutex->owner)));
+                        clear_claimed(xnarch_atomic_get(mutex->owner)));
        xnlock_put_irqrestore(&nklock, s);
        return err;
 }
@@ -187,16 +211,18 @@ static inline int pse51_mutex_timedlock_
 static inline void pse51_mutex_unlock_internal(xnthread_t *cur,
                                               pse51_mutex_t *mutex)
 {
+       xnhandle_t ownerh;
        xnthread_t *owner;
        spl_t s;
 
-       if (likely(xnarch_atomic_intptr_cmpxchg(mutex->owner, cur, NULL) == cur))
+       if (likely(xnarch_atomic_cmpxchg(mutex->owner, xnthread_handle(cur),
+                                        XN_NO_HANDLE) == xnthread_handle(cur)))
                return;
 
        xnlock_get_irqsave(&nklock, s);
        owner = xnsynch_wakeup_one_sleeper(&mutex->synchbase);
-       xnarch_atomic_intptr_set(mutex->owner,
-                                set_claimed(owner, mutex->sleepers));
+       ownerh = set_claimed(xnthread_handle(owner), mutex->sleepers);
+       xnarch_atomic_set(mutex->owner, ownerh);
        if (owner)
                xnpod_schedule();
        xnlock_put_irqrestore(&nklock, s);
Index: b/ksrc/skins/posix/syscall.c
===================================================================
--- a/ksrc/skins/posix/syscall.c
+++ b/ksrc/skins/posix/syscall.c
@@ -1060,7 +1060,8 @@ static int __pthread_mutex_unlock(struct
 
        mutex = shadow->mutex;
 
-       if (clear_claimed(xnarch_atomic_intptr_get(mutex->owner)) != cur) {
+       if (clear_claimed(xnarch_atomic_get(mutex->owner)) !=
+           xnthread_handle(cur)) {
                err = -EPERM;
                goto out;
        }
@@ -1119,7 +1120,7 @@ static int __pthread_mutex_init(struct p
        pthread_mutexattr_t locattr, *attr, *uattrp;
        union __xeno_mutex mx, *umx;
        pse51_mutex_t *mutex;
-       xnarch_atomic_intptr_t *ownerp;
+       xnarch_atomic_t *ownerp;
        int err;
 
        umx = (union __xeno_mutex *)__xn_reg_arg1(regs);
@@ -1144,9 +1145,9 @@ static int __pthread_mutex_init(struct p
        if (!mutex)
                return -ENOMEM;
 
-       ownerp = (xnarch_atomic_intptr_t *)
+       ownerp = (xnarch_atomic_t *)
                xnheap_alloc(&xnsys_ppd_get(attr->pshared)->sem_heap,
-                            sizeof(xnarch_atomic_intptr_t));
+                            sizeof(xnarch_atomic_t));
        if (!ownerp) {
                xnfree(mutex);
                return -EAGAIN;
@@ -1185,7 +1186,7 @@ static int __pthread_mutex_destroy(struc
        if (pse51_kqueues(mutex->attr.pshared) != mutex->owningq)
                return -EPERM;
 
-       if (xnarch_atomic_intptr_get(mutex->owner))
+       if (xnarch_atomic_get(mutex->owner))
                return -EBUSY;
 
        pse51_mark_deleted(shadow);
Index: b/src/skins/posix/mutex.c
===================================================================
--- a/src/skins/posix/mutex.c
+++ b/src/skins/posix/mutex.c
@@ -31,12 +31,12 @@ extern int __pse51_muxid;
 
 extern unsigned long xeno_sem_heap[2];
 
-static xnarch_atomic_intptr_t *get_ownerp(struct __shadow_mutex *shadow)
+static xnarch_atomic_t *get_ownerp(struct __shadow_mutex *shadow)
 {
        if (likely(!shadow->attr.pshared))
                return shadow->owner;
        
-       return (xnarch_atomic_intptr_t *) (xeno_sem_heap[1] + shadow->owner_offset);
+       return (xnarch_atomic_t *) (xeno_sem_heap[1] + shadow->owner_offset);
 }
 #endif /* CONFIG_XENO_FASTSEM */
 
@@ -117,7 +117,7 @@ int __wrap_pthread_mutex_init(pthread_mu
 
 #ifdef CONFIG_XENO_FASTSEM
        if (!shadow->attr.pshared)
-               shadow->owner = (xnarch_atomic_intptr_t *)
+               shadow->owner = (xnarch_atomic_t *)
                        (xeno_sem_heap[0] + shadow->owner_offset);
        
        cb_write_unlock(&shadow->lock, s);
@@ -149,7 +149,7 @@ int __wrap_pthread_mutex_lock(pthread_mu
        int err = 0;
 
 #ifdef CONFIG_XENO_FASTSEM
-       xnthread_t *cur, *owner;
+       xnhandle_t cur, owner;
 
        cur = xeno_get_current();
        if (!cur)
@@ -163,7 +163,7 @@ int __wrap_pthread_mutex_lock(pthread_mu
                goto out;
        }
 
-       owner = xnarch_atomic_intptr_cmpxchg(get_ownerp(shadow), NULL, cur);
+       owner = xnarch_atomic_cmpxchg(get_ownerp(shadow), XN_NO_HANDLE, cur);
        if (likely(!owner)) {
                shadow->lockcnt = 1;
                cb_read_unlock(&shadow->lock, s);
@@ -210,7 +210,7 @@ int __wrap_pthread_mutex_timedlock(pthre
        int err = 0;
 
 #ifdef CONFIG_XENO_FASTSEM
-       xnthread_t *cur, *owner;
+       xnhandle_t cur, owner;
 
        cur = xeno_get_current();
        if (!cur)
@@ -224,7 +224,7 @@ int __wrap_pthread_mutex_timedlock(pthre
                goto out;
        }       
 
-       owner = xnarch_atomic_intptr_cmpxchg(get_ownerp(shadow), NULL, cur);
+       owner = xnarch_atomic_cmpxchg(get_ownerp(shadow), XN_NO_HANDLE, cur);
        if (likely(!owner)) {
                shadow->lockcnt = 1;
                cb_read_unlock(&shadow->lock, s);
@@ -271,7 +271,7 @@ int __wrap_pthread_mutex_trylock(pthread
        int err = 0;
 
 #ifdef CONFIG_XENO_FASTSEM
-       xnthread_t *cur, *owner;
+       xnhandle_t cur, owner;
 
        cur = xeno_get_current();
        if (!cur)
@@ -285,7 +285,7 @@ int __wrap_pthread_mutex_trylock(pthread
                goto out;
        }       
 
-       owner = xnarch_atomic_intptr_cmpxchg(get_ownerp(shadow), NULL, cur);
+       owner = xnarch_atomic_cmpxchg(get_ownerp(shadow), XN_NO_HANDLE, cur);
        if (likely(!owner)) {
                shadow->lockcnt = 1;
                cb_read_unlock(&shadow->lock, s);
@@ -325,8 +325,8 @@ int __wrap_pthread_mutex_unlock(pthread_
        int err = 0;
 
 #ifdef CONFIG_XENO_FASTSEM
-       xnarch_atomic_intptr_t *ownerp;
-       xnthread_t *cur;
+       xnarch_atomic_t *ownerp;
+       xnhandle_t cur, owner;
 
        cur = xeno_get_current();
        if (!cur)
@@ -341,7 +341,8 @@ int __wrap_pthread_mutex_unlock(pthread_
        }
 
        ownerp = get_ownerp(shadow);
-       if (unlikely(clear_claimed(xnarch_atomic_intptr_get(ownerp)) != cur)) {
+       owner = clear_claimed(xnarch_atomic_get(ownerp));
+       if (unlikely(owner != cur)) {
                err = -EPERM;
                goto out_err;
        }
@@ -352,7 +353,7 @@ int __wrap_pthread_mutex_unlock(pthread_
                goto out;
        }
 
-       if (likely(xnarch_atomic_intptr_cmpxchg(ownerp, cur, NULL) == cur)) {
+       if (likely(xnarch_atomic_cmpxchg(ownerp, cur, XN_NO_HANDLE) == cur)) {
          out:
                cb_read_unlock(&shadow->lock, s);
                return 0;
Index: b/ksrc/skins/posix/thread.c
===================================================================
--- a/ksrc/skins/posix/thread.c
+++ b/ksrc/skins/posix/thread.c
@@ -28,6 +28,7 @@
  * 
  *@{*/
 
+#include <nucleus/registry.h>
 #include <posix/thread.h>
 #include <posix/cancel.h>
 #include <posix/timer.h>
@@ -229,6 +230,13 @@ int pthread_create(pthread_t *tid,
        appendq(thread->container, &thread->link);
        xnlock_put_irqrestore(&nklock, s);
 
+#ifdef CONFIG_XENO_OPT_REGISTRY
+       /* We need an anonymous registry entry to obtain a handle for fast
+          mutex locking. */
+       xnregistry_enter("", &thread->threadbase,
+                        &xnthread_handle(&thread->threadbase), NULL);
+#endif /* CONFIG_XENO_OPT_REGISTRY */
+
 #ifdef CONFIG_XENO_OPT_PERVASIVE
        thread->hkey.u_tid = 0;
        thread->hkey.mm = NULL;
Index: b/ksrc/skins/posix/Kconfig
===================================================================
--- a/ksrc/skins/posix/Kconfig
+++ b/ksrc/skins/posix/Kconfig
@@ -1,5 +1,6 @@
 menuconfig XENO_SKIN_POSIX
        depends on XENO_OPT_NUCLEUS 
+       select XENO_OPT_REGISTRY if XENO_FASTSEM
        tristate "POSIX API"
        default y
        help
Index: b/include/asm-generic/bits/bind.h
===================================================================
--- a/include/asm-generic/bits/bind.h
+++ b/include/asm-generic/bits/bind.h
@@ -22,7 +22,14 @@ __attribute__ ((weak))
 void xeno_set_current(void)
 {
        void *kthread_cb;
-       XENOMAI_SYSCALL1(__xn_sys_current, &kthread_cb);
+       int err;
+
+       err = XENOMAI_SYSCALL1(__xn_sys_current, &kthread_cb);
+       if (err) {
+               fprintf(stderr, "Xenomai: error obtaining handle for current "
+                       "thread: %s\n", strerror(-err));
+               exit(1);
+       }
        pthread_setspecific(xeno_current_key, kthread_cb);
 }
 

