Module: xenomai-forge
Branch: master
Commit: 4ae748268e0bc01e0ac7ae791054dd6a64e3f989
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=4ae748268e0bc01e0ac7ae791054dd6a64e3f989

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Wed Nov 16 21:06:40 2011 +0100

cobalt: rework deferred condition signalling

Instead of using a spare bit in the mutex owner field to signal
that a condition was signalled, use a separate "flags" field. This
allows reverting the changes on spare bits which increased the cost
of fast synch operations.

---

 include/cobalt/nucleus/synch.h |   81 +++++----------------------------------
 include/cobalt/nucleus/types.h |    9 ++--
 kernel/cobalt/cond.c           |   21 +++++-----
 kernel/cobalt/cond.h           |   18 +++++----
 kernel/cobalt/mutex.c          |   19 +++++----
 kernel/cobalt/mutex.h          |   27 ++++++-------
 kernel/cobalt/nucleus/synch.c  |   22 +++-------
 lib/cobalt/cond.c              |   32 ++++++++-------
 lib/cobalt/mutex.c             |   28 ++++++++------
 9 files changed, 98 insertions(+), 159 deletions(-)

diff --git a/include/cobalt/nucleus/synch.h b/include/cobalt/nucleus/synch.h
index 0794852..fa05dfd 100644
--- a/include/cobalt/nucleus/synch.h
+++ b/include/cobalt/nucleus/synch.h
@@ -42,78 +42,20 @@
 static inline int xnsynch_fast_owner_check(xnarch_atomic_t *fastlock,
                                           xnhandle_t ownerh)
 {
-       return (xnhandle_mask_spares(xnarch_atomic_get(fastlock)) == ownerh) ?
+       return (xnhandle_mask_spare(xnarch_atomic_get(fastlock)) == ownerh) ?
                0 : -EPERM;
 }
 
-static inline int xnsynch_fast_check_spares(xnarch_atomic_t *fastlock,
-                                           unsigned spares)
-{
-       return (xnhandle_test_spares(xnarch_atomic_get(fastlock), spares));
-}
-
-static inline int xnsynch_fast_set_spares(xnarch_atomic_t *fastlock,
-                                         xnhandle_t owner,
-                                         xnhandle_t spares)
-{
-       xnhandle_t cur, old;
-       cur = xnarch_atomic_cmpxchg(fastlock, owner, owner | spares);
-       if (cur != owner) {
-               /* Only the current owner of the fastlock can change
-                  spare bits */
-               if (xnhandle_mask_spares(cur) != owner)
-                       return -EPERM;
-
-               do {
-                       if (xnhandle_test_spares(cur, spares))
-                               return 0;
-
-                       old = cur;
-                       cur = xnarch_atomic_cmpxchg(fastlock,
-                                                   old, old | spares);
-               } while(old != cur);
-       }
-       return 0;
-}
-
-static inline int xnsynch_fast_clear_spares(xnarch_atomic_t *fastlock,
-                                           xnhandle_t owner,
-                                           xnhandle_t spares)
-{
-       xnhandle_t cur, old;
-       cur = xnarch_atomic_cmpxchg(fastlock, owner | spares, owner & ~spares);
-       if (cur != (owner | spares)) {
-               /* Only the current owner of the fastlock can change
-                  spare bits */
-               if (xnhandle_mask_spares(cur) != owner)
-                       return -EPERM;
-
-               do {
-                       if (!xnhandle_test_spares(cur, spares))
-                               return 0;
-
-                       old = cur;
-                       cur = xnarch_atomic_cmpxchg(fastlock,
-                                                   old, old & ~spares);
-               } while(old != cur);
-       }
-       return 0;
-}
-
 static inline int xnsynch_fast_acquire(xnarch_atomic_t *fastlock,
                                       xnhandle_t new_ownerh)
 {
-       xnhandle_t lock_state, old, spares;
+       xnhandle_t lock_state =
+               xnarch_atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
 
-       spares = xnhandle_get_spares(xnarch_atomic_get(fastlock),
-                                    XN_HANDLE_SPARE_MASK);
-       old = XN_NO_HANDLE | spares;
-       lock_state = xnarch_atomic_cmpxchg(fastlock, old, new_ownerh | spares);
-
-       if (likely(lock_state == old))
+       if (likely(lock_state == XN_NO_HANDLE))
                return 0;
 
-       if (xnhandle_mask_spares(lock_state) == new_ownerh)
+       if (xnhandle_mask_spare(lock_state) == new_ownerh)
                return -EBUSY;
 
        return -EAGAIN;
@@ -122,18 +64,16 @@ static inline int xnsynch_fast_acquire(xnarch_atomic_t *fastlock,
 static inline int xnsynch_fast_release(xnarch_atomic_t *fastlock,
                                       xnhandle_t cur_ownerh)
 {
-       xnhandle_t spares = xnhandle_get_spares(xnarch_atomic_get(fastlock),
-                                               XN_HANDLE_SPARE_MASK &
-                                               ~XNSYNCH_FLCLAIM);
-       cur_ownerh |= spares;
-       return (xnarch_atomic_cmpxchg(fastlock, cur_ownerh,
-                                     XN_NO_HANDLE | spares) == cur_ownerh);
+       return (xnarch_atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE) ==
+               cur_ownerh);
 }
 
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
 
 #define XNSYNCH_CLAIMED 0x10   /* Claimed by other thread(s) w/ PIP */
 
+#define XNSYNCH_FLCLAIM XN_HANDLE_SPARE3 /* Corresponding bit in fast lock */
+
 /* Spare flags usable by upper interfaces */
 #define XNSYNCH_SPARE0  0x01000000
 #define XNSYNCH_SPARE1  0x02000000
@@ -187,9 +127,10 @@ typedef struct xnsynch {
        xnsynch_fast_owner_check((synch)->fastlock, xnthread_handle(thread))
 
 #define xnsynch_fast_is_claimed(fastlock) \
-       xnhandle_test_spares(fastlock, XNSYNCH_FLCLAIM)
+       xnhandle_test_spare(fastlock, XNSYNCH_FLCLAIM)
 #define xnsynch_fast_set_claimed(fastlock, enable) \
        (((fastlock) & ~XNSYNCH_FLCLAIM) | ((enable) ? XNSYNCH_FLCLAIM : 0))
+#define xnsynch_fast_mask_claimed(fastlock) ((fastlock & ~XNSYNCH_FLCLAIM))
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/include/cobalt/nucleus/types.h b/include/cobalt/nucleus/types.h
index 57733e3..0432005 100644
--- a/include/cobalt/nucleus/types.h
+++ b/include/cobalt/nucleus/types.h
@@ -70,12 +70,11 @@ typedef unsigned long xnhandle_t;
 #define XN_HANDLE_SPARE3       ((xnhandle_t)0x80000000)
 #define XN_HANDLE_SPARE_MASK   ((xnhandle_t)0xf0000000)
 
-#define xnhandle_mask_spares(handle)  ((handle) & ~XN_HANDLE_SPARE_MASK)
-#define xnhandle_get_spares(handle, bits)   ((handle) & (bits))
-#define xnhandle_test_spares(handle, bits)  (!!xnhandle_get_spares(handle,bits))
-#define xnhandle_set_spares(handle, bits) \
+#define xnhandle_mask_spare(handle)  ((handle) & ~XN_HANDLE_SPARE_MASK)
+#define xnhandle_test_spare(handle, bits)  (!!((handle) & (bits)))
+#define xnhandle_set_spare(handle, bits) \
        do { (handle) |= (bits); } while (0)
-#define xnhandle_clear_spares(handle, bits) \
+#define xnhandle_clear_spare(handle, bits) \
        do { (handle) &= ~(bits); } while (0)
 
 struct xnintr;
diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index a5c6928..8a08d65 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -146,7 +146,7 @@ pthread_cond_init(pthread_cond_t *cnd, const pthread_condattr_t *attr)
        shadow->pending_signals_offset =
                xnheap_mapped_offset(&sys_ppd->sem_heap,
                                     cond->pending_signals);
-       shadow->mutex_ownerp = (xnarch_atomic_t *)~0UL;
+       shadow->mutex_datp = (struct mutex_dat *)~0UL;
 
        shadow->magic = COBALT_COND_MAGIC;
        shadow->cond = cond;
@@ -410,7 +410,7 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
                              struct timespec __user *u_ts)
 {
        xnthread_t *cur = xnshadow_thread(current);
-       xnarch_atomic_t *ownerp;
+       struct mutex_dat *datp;
        struct us_cond_data d;
        cobalt_cond_t *cnd;
        cobalt_mutex_t *mx;
@@ -421,8 +421,8 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
        __xn_get_user(mx, &u_mx->shadow_mutex.mutex);
 
        if (!cnd->mutex) {
-               __xn_get_user(ownerp, &u_mx->shadow_mutex.owner);
-               __xn_put_user(ownerp, &u_cnd->shadow_cond.mutex_ownerp);
+               __xn_get_user(datp, &u_mx->shadow_mutex.dat);
+               __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
        }
 
        if (timed) {
@@ -437,8 +437,8 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
                                                     XN_INFINITE);
 
        if (!cnd->mutex) {
-               ownerp = (xnarch_atomic_t *)~0UL;
-               __xn_put_user(ownerp, &u_cnd->shadow_cond.mutex_ownerp);
+               datp = (struct mutex_dat *)~0UL;
+               __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
        }
 
        switch(err) {
@@ -448,9 +448,8 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
                err = cobalt_cond_timedwait_epilogue(cur, cnd, mx);
 
                if (!cnd->mutex) {
-                       ownerp = (xnarch_atomic_t *)~0UL;
-                       __xn_put_user(ownerp,
-                                     &u_cnd->shadow_cond.mutex_ownerp);
+                       datp = (struct mutex_dat *)~0UL;
+                       __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
                }
                break;
 
@@ -485,8 +484,8 @@ int cobalt_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
        err = cobalt_cond_timedwait_epilogue(cur, cnd, mx);
 
        if (!cnd->mutex) {
-               xnarch_atomic_t *ownerp = (xnarch_atomic_t *)~0UL;
-               __xn_put_user(ownerp, &u_cnd->shadow_cond.mutex_ownerp);
+               struct mutex_dat *datp = (struct mutex_dat *)~0UL;
+               __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
        }
 
        return err;
diff --git a/kernel/cobalt/cond.h b/kernel/cobalt/cond.h
index 0936579..4afce07 100644
--- a/kernel/cobalt/cond.h
+++ b/kernel/cobalt/cond.h
@@ -23,6 +23,7 @@
 #include <pthread.h>
 
 struct cobalt_cond;
+struct mutex_dat;
 
 union __xeno_cond {
        pthread_cond_t native_cond;
@@ -35,8 +36,8 @@ union __xeno_cond {
                        unsigned long *pending_signals;
                };
                union {
-                       unsigned mutex_ownerp_offset;
-                       xnarch_atomic_t *mutex_ownerp;
+                       unsigned mutex_datp_offset;
+                       struct mutex_dat *mutex_datp;
                };
        } shadow_cond;
 };
@@ -75,11 +76,6 @@ static inline int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
        pending_signals = *(cond->pending_signals);
 
        switch(pending_signals) {
-       case ~0UL:
-               need_resched =
-                       xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
-               break;
-
        case 0:
                need_resched = 0;
                break;
@@ -90,8 +86,14 @@ static inline int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
                                break;
                        need_resched = 1;
                }
+               *cond->pending_signals = 0;
+               break;
+
+       case ~0UL:
+               need_resched =
+                       xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
+               *cond->pending_signals = 0;
        }
-       *cond->pending_signals = 0;
 
        return need_resched;
 }
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index d8f13d2..81e2ce7 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -55,7 +55,7 @@ pthread_mutexattr_t cobalt_default_mutex_attr;
 
 static int cobalt_mutex_init_inner(struct __shadow_mutex *shadow,
                                   cobalt_mutex_t *mutex,
-                                  xnarch_atomic_t *ownerp,
+                                  struct mutex_dat *datp,
                                   const pthread_mutexattr_t *attr)
 {
        xnflags_t synch_flags = XNSYNCH_PRIO | XNSYNCH_OWNER;
@@ -77,13 +77,14 @@ static int cobalt_mutex_init_inner(struct __shadow_mutex *shadow,
        shadow->lockcnt = 0;
 
        shadow->attr = *attr;
-       shadow->owner_offset = xnheap_mapped_offset(&sys_ppd->sem_heap, ownerp);
+       shadow->dat_offset = xnheap_mapped_offset(&sys_ppd->sem_heap, datp);
 
        if (attr->protocol == PTHREAD_PRIO_INHERIT)
                synch_flags |= XNSYNCH_PIP;
 
        mutex->magic = COBALT_MUTEX_MAGIC;
-       xnsynch_init(&mutex->synchbase, synch_flags, ownerp);
+       xnsynch_init(&mutex->synchbase, synch_flags, &datp->owner);
+       datp->flags = 0;
        inith(&mutex->link);
        mutex->attr = *attr;
        mutex->owningq = kq;
@@ -238,7 +239,7 @@ int cobalt_mutex_init(union __xeno_mutex __user *u_mx,
                      const pthread_mutexattr_t __user *u_attr)
 {
        pthread_mutexattr_t locattr, *attr;
-       xnarch_atomic_t *ownerp;
+       struct mutex_dat *datp;
        union __xeno_mutex mx;
        cobalt_mutex_t *mutex;
        int err;
@@ -261,17 +262,17 @@ int cobalt_mutex_init(union __xeno_mutex __user *u_mx,
        if (mutex == NULL)
                return -ENOMEM;
 
-       ownerp = xnheap_alloc(&xnsys_ppd_get(attr->pshared)->sem_heap,
-                             sizeof(xnarch_atomic_t));
-       if (ownerp == NULL) {
+       datp = xnheap_alloc(&xnsys_ppd_get(attr->pshared)->sem_heap,
+                            sizeof(*datp));
+       if (datp == NULL) {
                xnfree(mutex);
                return -EAGAIN;
        }
 
-       err = cobalt_mutex_init_inner(&mx.shadow_mutex, mutex, ownerp, attr);
+       err = cobalt_mutex_init_inner(&mx.shadow_mutex, mutex, datp, attr);
        if (err) {
                xnfree(mutex);
-               xnheap_free(&xnsys_ppd_get(attr->pshared)->sem_heap, ownerp);
+               xnheap_free(&xnsys_ppd_get(attr->pshared)->sem_heap, datp);
                return err;
        }
 
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index 6b6d360..e9a4673 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -24,6 +24,13 @@
 
 struct cobalt_mutex;
 
+struct mutex_dat {
+       xnarch_atomic_t owner;
+       unsigned flags;
+
+#define COBALT_MUTEX_COND_SIGNAL 0x00000001
+};
+
 union __xeno_mutex {
        pthread_mutex_t native_mutex;
        struct __shadow_mutex {
@@ -31,12 +38,10 @@ union __xeno_mutex {
                unsigned lockcnt;
                struct cobalt_mutex *mutex;
                union {
-                       unsigned owner_offset;
-                       xnarch_atomic_t *owner;
+                       unsigned dat_offset;
+                       struct mutex_dat *dat;
                };
                struct cobalt_mutexattr attr;
-
-#define COBALT_MUTEX_COND_SIGNAL XN_HANDLE_SPARE2
        } shadow_mutex;
 };
 
@@ -91,6 +96,7 @@ static inline int cobalt_mutex_acquire_unchecked(xnthread_t *cur,
 
 static inline int cobalt_mutex_release(xnthread_t *cur, cobalt_mutex_t *mutex)
 {
+       struct mutex_dat *datp;
        xnholder_t *holder;
        int need_resched;
 
@@ -109,17 +115,10 @@ static inline int cobalt_mutex_release(xnthread_t *cur, cobalt_mutex_t *mutex)
        for (holder = getheadq(&mutex->conds);
             holder; holder = nextq(&mutex->conds, holder)) {
                struct cobalt_cond *cond = mutex_link2cond(holder);
-               if (*(cond->pending_signals)) {
-                       if (xnsynch_nsleepers(&cond->synchbase))
-                               need_resched |=
-                                       cobalt_cond_deferred_signals(cond);
-                       else
-                               *(cond->pending_signals) = 0;
-               }
+               need_resched |= cobalt_cond_deferred_signals(cond);
        }
-       xnsynch_fast_clear_spares(mutex->synchbase.fastlock,
-                                 xnthread_handle(cur),
-                                 COBALT_MUTEX_COND_SIGNAL);
+       datp = container_of(mutex->synchbase.fastlock, struct mutex_dat, owner);
+       datp->flags &= ~COBALT_MUTEX_COND_SIGNAL;
        need_resched |= xnsynch_release(&mutex->synchbase) != NULL;
 
        return need_resched;
diff --git a/kernel/cobalt/nucleus/synch.c b/kernel/cobalt/nucleus/synch.c
index e23ee10..f65b99b 100644
--- a/kernel/cobalt/nucleus/synch.c
+++ b/kernel/cobalt/nucleus/synch.c
@@ -402,7 +402,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                          xntmode_t timeout_mode)
 {
        struct xnthread *thread = xnpod_current_thread(), *owner;
-       xnhandle_t threadh = xnthread_handle(thread), fastlock, old, spares;
+       xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
        xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
        spl_t s;
 
@@ -412,12 +412,9 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
 
       redo:
 
-       spares = xnhandle_get_spares(xnarch_atomic_get(lockp),
-                                            XN_HANDLE_SPARE_MASK);
-       old = XN_NO_HANDLE | spares;
-       fastlock = xnarch_atomic_cmpxchg(lockp, old, threadh | spares);
+       fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);
 
-       if (likely(fastlock == old)) {
+       if (likely(fastlock == XN_NO_HANDLE)) {
                if (xnthread_test_state(thread, XNOTHER))
                        xnthread_inc_rescnt(thread);
                xnthread_clear_info(thread,
@@ -453,7 +450,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                fastlock = old;
        } while (!xnsynch_fast_is_claimed(fastlock));
 
-       owner = xnthread_lookup(xnhandle_mask_spares(fastlock));
+       owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));
 
        if (!owner) {
                /* The handle is broken, therefore pretend that the synch
@@ -525,11 +522,9 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
 
                /* We are the new owner, update the fastlock
                   accordingly. */
-               threadh |= xnhandle_get_spares(xnarch_atomic_get(lockp),
-                                              XN_HANDLE_SPARE_MASK);
-               threadh =
-                       xnsynch_fast_set_claimed(threadh,
-                                                xnsynch_pended_p(synch));
+               if (xnsynch_pended_p(synch))
+                       threadh =
+                               xnsynch_fast_set_claimed(threadh, 1);
                xnarch_atomic_set(lockp, threadh);
        }
 
@@ -698,9 +693,6 @@ xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
        }
 
        lockp = xnsynch_fastlock(synch);
-       newownerh |= xnhandle_get_spares(xnarch_atomic_get(lockp),
-                                        XN_HANDLE_SPARE_MASK
-                                        & ~XNSYNCH_FLCLAIM);
        xnarch_atomic_set(lockp, newownerh);
 
        xnlock_put_irqrestore(&nklock, s);
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index cdb4ca3..d70f538 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -38,16 +38,16 @@ static unsigned long *get_signalsp(struct __shadow_cond *shadow)
                                 + shadow->pending_signals_offset);
 }
 
-static xnarch_atomic_t *get_mutex_ownerp(struct __shadow_cond *shadow)
+static struct mutex_dat *get_mutex_datp(struct __shadow_cond *shadow)
 {
-       if (shadow->mutex_ownerp == (xnarch_atomic_t *)~0UL)
+       if (shadow->mutex_datp == (struct mutex_dat *)~0UL)
                return NULL;
 
        if (likely(!shadow->attr.pshared))
-               return shadow->mutex_ownerp;
+               return shadow->mutex_datp;
 
-       return (xnarch_atomic_t *)(xeno_sem_heap[1]
-                                  + shadow->mutex_ownerp_offset);
+       return (struct mutex_dat *)(xeno_sem_heap[1]
+                                   + shadow->mutex_datp_offset);
 }
 
 int __wrap_pthread_condattr_init(pthread_condattr_t *attr)
@@ -212,7 +212,7 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
        struct __shadow_cond *shadow =
                &((union __xeno_cond *)cond)->shadow_cond;
        unsigned long *pending_signals;
-       xnarch_atomic_t *mutex_ownerp;
+       struct mutex_dat *mutex_datp;
        xnhandle_t cur;
 
        cur = xeno_get_current();
@@ -222,12 +222,13 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
        if (shadow->magic != COBALT_COND_MAGIC)
                return EINVAL;
 
-       mutex_ownerp = get_mutex_ownerp(shadow);
-       if (mutex_ownerp) {
-               if (xnsynch_fast_set_spares(mutex_ownerp, cur,
-                                           COBALT_MUTEX_COND_SIGNAL) < 0)
+       mutex_datp = get_mutex_datp(shadow);
+       if (mutex_datp) {
+               if (xnsynch_fast_owner_check(&mutex_datp->owner, cur) < 0)
                        return EPERM;
 
+               mutex_datp->flags |= COBALT_MUTEX_COND_SIGNAL;
+
                pending_signals = get_signalsp(shadow);
                if (*pending_signals != ~0UL)
                        ++(*pending_signals);
@@ -241,7 +242,7 @@ int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
        struct __shadow_cond *shadow =
                &((union __xeno_cond *)cond)->shadow_cond;
        unsigned long *pending_signals;
-       xnarch_atomic_t *mutex_ownerp;
+       struct mutex_dat *mutex_datp;
        xnhandle_t cur;
 
        cur = xeno_get_current();
@@ -251,12 +252,13 @@ int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
        if (shadow->magic != COBALT_COND_MAGIC)
                return EINVAL;
 
-       mutex_ownerp = get_mutex_ownerp(shadow);
-       if (mutex_ownerp) {
-               if (xnsynch_fast_set_spares(mutex_ownerp, cur,
-                                           COBALT_MUTEX_COND_SIGNAL) < 0)
+       mutex_datp = get_mutex_datp(shadow);
+       if (mutex_datp) {
+               if (xnsynch_fast_owner_check(&mutex_datp->owner, cur) < 0)
                        return EPERM;
 
+               mutex_datp->flags |= COBALT_MUTEX_COND_SIGNAL;
+
                pending_signals = get_signalsp(shadow);
                *get_signalsp(shadow) = ~0UL;
        }
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index e8d0bca..e2ea7a0 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -30,12 +30,17 @@ extern int __cobalt_muxid;
 
 extern unsigned long xeno_sem_heap[2];
 
-static xnarch_atomic_t *get_ownerp(struct __shadow_mutex *shadow)
+static struct mutex_dat *get_datp(struct __shadow_mutex *shadow)
 {
        if (likely(!shadow->attr.pshared))
-               return shadow->owner;
+               return shadow->dat;
+
+       return (struct mutex_dat *)(xeno_sem_heap[1] + shadow->dat_offset);
+}
 
-       return (xnarch_atomic_t *)(xeno_sem_heap[1] + shadow->owner_offset);
+static xnarch_atomic_t *get_ownerp(struct __shadow_mutex *shadow)
+{
+       return &get_datp(shadow)->owner;
 }
 
 int __wrap_pthread_mutexattr_init(pthread_mutexattr_t *attr)
@@ -107,8 +112,8 @@ int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
        err = -XENOMAI_SKINCALL2(__cobalt_muxid,__cobalt_mutex_init,shadow,attr);
 
        if (!shadow->attr.pshared)
-               shadow->owner = (xnarch_atomic_t *)
-                       (xeno_sem_heap[0] + shadow->owner_offset);
+               shadow->dat = (struct mutex_dat *)
+                       (xeno_sem_heap[0] + shadow->dat_offset);
 
        return err;
 }
@@ -300,8 +305,8 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
-       xnarch_atomic_t *ownerp;
-       xnhandle_t cur, owner;
+       struct mutex_dat *datp;
+       xnhandle_t cur;
        int err;
 
        if (unlikely(shadow->magic != COBALT_MUTEX_MAGIC))
@@ -311,9 +316,8 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       ownerp = get_ownerp(shadow);
-       owner = xnarch_atomic_get(ownerp);
-       if (xnhandle_mask_spares(owner) != cur)
+       datp = get_datp(shadow);
+       if (xnsynch_fast_owner_check(&datp->owner, cur) != 0)
                return EPERM;
 
        if (shadow->lockcnt > 1) {
@@ -321,13 +325,13 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
                return 0;
        }
 
-       if ((owner & COBALT_MUTEX_COND_SIGNAL))
+       if ((datp->flags & COBALT_MUTEX_COND_SIGNAL))
                goto do_syscall;
 
        if (unlikely(xeno_get_current_mode() & XNOTHER))
                goto do_syscall;
 
-       if (likely(xnsynch_fast_release(ownerp, cur)))
+       if (likely(xnsynch_fast_release(&datp->owner, cur)))
                return 0;
 
 do_syscall:


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to