Module: xenomai-forge
Branch: master
Commit: 7fdbd7579d7780929fd4432e941100b0f8055458
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=7fdbd7579d7780929fd4432e941100b0f8055458

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Dec 15 22:18:05 2011 +0100

copperplate/syncobj, lib: fix multiple races caused by deletion

This patch is a major rework of the syncobj deletion code. Basically,
deleting a synchronization object under heavy use just did not work,
triggering a variety of deadly races.

While at it, several syncobj API services were also reworked (in name
and/or purpose) to match the GRANT/DRAIN semantics introduced by the
Cobalt monitors (still relevant for Mercury as well).

---
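
For the record, here is a minimal, schematic sketch of how a caller might use
the GRANT side of the reworked API after this change. It is not taken from the
tree: the my_resource wrapper, its "available" flag and the two helpers are
hypothetical, and the per-thread wait descriptor handling
(threadobj_prepare_wait() / threadobj_finish_wait()) performed by the real
callers in lib/alchemy and lib/psos is omitted for brevity.

#include <errno.h>
#include <time.h>

#include <copperplate/syncobj.h>

/* Hypothetical resource guarded by a syncobj (not part of this patch). */
struct my_resource {
        struct syncobj sobj;
        int available;
};

/* Consumer side: sleep on the GRANT condition until the resource is handed over. */
static int my_resource_take(struct my_resource *r, const struct timespec *timeout)
{
        struct syncstate syns;
        int ret;

        ret = syncobj_lock(&r->sobj, &syns);
        if (ret)
                return ret;     /* -EINVAL: a deletion is in progress. */

        while (!r->available) {
                /* Formerly syncobj_pend(); spurious wakeups are handled internally. */
                ret = syncobj_wait_grant(&r->sobj, timeout, &syns);
                if (ret == -EIDRM)
                        return ret;     /* Object deleted, lock already dropped. */
                if (ret)                /* -ETIMEDOUT, or -EINTR if flushed. */
                        goto out;
        }
        r->available = 0;
        ret = 0;
out:
        syncobj_unlock(&r->sobj, &syns);

        return ret;
}

/* Producer side: publish the resource, then wake up the leading waiter if any. */
static int my_resource_give(struct my_resource *r)
{
        struct syncstate syns;
        int ret;

        ret = syncobj_lock(&r->sobj, &syns);
        if (ret)
                return ret;

        r->available = 1;
        /* Formerly syncobj_post(); directed wakeup of a single waiter. */
        syncobj_grant_one(&r->sobj);

        syncobj_unlock(&r->sobj, &syns);

        return 0;
}

Note that on -EIDRM the syncobj layer has already dropped the object lock on
behalf of the deleted object, which is why the sketch returns without calling
syncobj_unlock() in that branch, mirroring the updated callers in this patch.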

 include/copperplate/syncobj.h |   79 ++++----
 lib/alchemy/buffer.c          |   22 +-
 lib/alchemy/event.c           |    8 +-
 lib/alchemy/heap.c            |    8 +-
 lib/alchemy/queue.c           |   16 +-
 lib/alchemy/task.c            |   18 +-
 lib/copperplate/cluster.c     |   12 +-
 lib/copperplate/semobj.c      |    6 +-
 lib/copperplate/syncobj.c     |  435 ++++++++++++++++++++---------------------
 lib/copperplate/threadobj.c   |    2 +-
 lib/psos/queue.c              |    8 +-
 lib/psos/rn.c                 |    6 +-
 lib/psos/task.c               |    4 +-
 lib/vxworks/msgQLib.c         |   10 +-
 lib/vxworks/semLib.c          |    6 +-
 15 files changed, 318 insertions(+), 322 deletions(-)

diff --git a/include/copperplate/syncobj.h b/include/copperplate/syncobj.h
index e3f0b01..55f3c50 100644
--- a/include/copperplate/syncobj.h
+++ b/include/copperplate/syncobj.h
@@ -29,13 +29,9 @@
 #define SYNCOBJ_LOCKED 0x2
 
 /* threadobj->wait_status */
-#define SYNCOBJ_DELETED                0x1
-#define SYNCOBJ_FLUSHED                0x2
-#define SYNCOBJ_BROADCAST      0x4
-#define SYNCOBJ_DRAINING       0x8
-
-#define SYNCOBJ_RELEASE_MASK   \
-       (SYNCOBJ_DELETED|SYNCOBJ_FLUSHED|SYNCOBJ_BROADCAST)
+#define SYNCOBJ_FLUSHED                0x1
+#define SYNCOBJ_SIGNALED       0x2
+#define SYNCOBJ_DRAINWAIT      0x4
 
 /* threadobj->wait_hook(status) */
 #define SYNCOBJ_BLOCK  0x1
@@ -67,9 +63,9 @@ struct syncobj_corespec {
 struct syncobj {
        unsigned int magic;
        int flags;
-       int release_count;
-       struct list pend_list;
-       int pend_count;
+       int wait_count;
+       struct list grant_list;
+       int grant_count;
        struct list drain_list;
        int drain_count;
        struct syncobj_corespec core;
@@ -77,10 +73,10 @@ struct syncobj {
 };
 
 #define syncobj_for_each_waiter(sobj, pos)             \
-       list_for_each_entry(pos, &(sobj)->pend_list, wait_link)
+       list_for_each_entry(pos, &(sobj)->grant_list, wait_link)
 
 #define syncobj_for_each_waiter_safe(sobj, pos, tmp)   \
-       list_for_each_entry_safe(pos, tmp, &(sobj)->pend_list, wait_link)
+       list_for_each_entry_safe(pos, tmp, &(sobj)->grant_list, wait_link)
 
 void __syncobj_cleanup_wait(struct syncobj *sobj,
                            struct threadobj *thobj);
@@ -123,18 +119,25 @@ static inline void __syncobj_check_locked(struct syncobj *sobj)
 extern "C" {
 #endif
 
+int __syncobj_broadcast_drain(struct syncobj *sobj, int reason);
+
+int __syncobj_broadcast_grant(struct syncobj *sobj, int reason);
+
 void syncobj_init(struct syncobj *sobj, int flags,
                  fnref_type(void (*)(struct syncobj *sobj)) finalizer);
 
-int syncobj_pend(struct syncobj *sobj,
+int syncobj_wait_grant(struct syncobj *sobj,
                 const struct timespec *timeout,
                 struct syncstate *syns);
 
-struct threadobj *syncobj_post(struct syncobj *sobj);
+struct threadobj *syncobj_grant_one(struct syncobj *sobj);
+
+void syncobj_grant_to(struct syncobj *sobj,
+                     struct threadobj *thobj);
 
-struct threadobj *syncobj_peek_at_pend(struct syncobj *sobj);
+struct threadobj *syncobj_peek_grant(struct syncobj *sobj);
 
-struct threadobj *syncobj_peek_at_drain(struct syncobj *sobj);
+struct threadobj *syncobj_peek_drain(struct syncobj *sobj);
 
 int syncobj_lock(struct syncobj *sobj,
                 struct syncstate *syns);
@@ -146,54 +149,60 @@ int syncobj_wait_drain(struct syncobj *sobj,
                       const struct timespec *timeout,
                       struct syncstate *syns);
 
-int __syncobj_signal_drain(struct syncobj *sobj);
+int syncobj_flush(struct syncobj *sobj);
+
+int syncobj_destroy(struct syncobj *sobj,
+                   struct syncstate *syns);
 
-static inline int syncobj_pended_p(struct syncobj *sobj)
+void syncobj_uninit(struct syncobj *sobj);
+
+static inline int syncobj_grant_wait_p(struct syncobj *sobj)
 {
        __syncobj_check_locked(sobj);
 
-       return !list_empty(&sobj->pend_list);
+       return !list_empty(&sobj->grant_list);
 }
 
-static inline int syncobj_pend_count(struct syncobj *sobj)
+static inline int syncobj_count_grant(struct syncobj *sobj)
 {
        __syncobj_check_locked(sobj);
 
-       return sobj->pend_count;
+       return sobj->grant_count;
 }
 
-static inline int syncobj_drain_count(struct syncobj *sobj)
+static inline int syncobj_count_drain(struct syncobj *sobj)
 {
        __syncobj_check_locked(sobj);
 
        return sobj->drain_count;
 }
 
-void syncobj_requeue_waiter(struct syncobj *sobj, struct threadobj *thobj);
-
-void syncobj_wakeup_waiter(struct syncobj *sobj, struct threadobj *thobj);
-
-int syncobj_flush(struct syncobj *sobj, int reason);
+static inline int syncobj_drain(struct syncobj *sobj)
+{
+       int ret = 0;
 
-int syncobj_destroy(struct syncobj *sobj,
-                   struct syncstate *syns);
+       __syncobj_check_locked(sobj);
 
-void syncobj_uninit(struct syncobj *sobj);
+       if (sobj->drain_count > 0)
+               ret = __syncobj_broadcast_drain(sobj, SYNCOBJ_SIGNALED);
 
-#ifdef __cplusplus
+       return ret;
 }
-#endif
 
-static inline int syncobj_signal_drain(struct syncobj *sobj)
+static inline int syncobj_grant_all(struct syncobj *sobj)
 {
        int ret = 0;
 
        __syncobj_check_locked(sobj);
 
-       if (sobj->drain_count > 0)
-               ret = __syncobj_signal_drain(sobj);
+       if (sobj->grant_count > 0)
+               ret = __syncobj_broadcast_grant(sobj, SYNCOBJ_SIGNALED);
 
        return ret;
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _COPPERPLATE_SYNCOBJ_H */
diff --git a/lib/alchemy/buffer.c b/lib/alchemy/buffer.c
index 5447d71..5356793 100644
--- a/lib/alchemy/buffer.c
+++ b/lib/alchemy/buffer.c
@@ -201,13 +201,13 @@ redo:
                 * drain, if we freed enough room for the leading one
                 * to post its message.
                 */
-               thobj = syncobj_peek_at_drain(&bcb->sobj);
+               thobj = syncobj_peek_drain(&bcb->sobj);
                if (thobj == NULL)
                        goto done;
 
                wait = threadobj_get_wait(thobj);
                if (wait->size + bcb->fillsz <= bcb->bufsz)
-                       syncobj_signal_drain(&bcb->sobj);
+                       syncobj_drain(&bcb->sobj);
 
                goto done;
        wait:
@@ -223,7 +223,7 @@ redo:
                 * pathological use of the buffer. We must allow for a
                 * short read to prevent a deadlock.
                 */
-               if (bcb->fillsz > 0 && syncobj_drain_count(&bcb->sobj)) {
+               if (bcb->fillsz > 0 && syncobj_count_drain(&bcb->sobj)) {
                        len = bcb->fillsz;
                        goto redo;
                }
@@ -233,7 +233,7 @@ redo:
 
                wait->size = len;
 
-               ret = syncobj_pend(&bcb->sobj, abs_timeout, &syns);
+               ret = syncobj_wait_grant(&bcb->sobj, abs_timeout, &syns);
                if (ret) {
                        if (ret == -EIDRM)
                                goto out;
@@ -321,13 +321,13 @@ ssize_t rt_buffer_write_timed(RT_BUFFER *bf,
                 * Wake up all threads waiting for input, if we
                 * accumulated enough data to feed the leading one.
                 */
-               thobj = syncobj_peek_at_pend(&bcb->sobj);
+               thobj = syncobj_peek_grant(&bcb->sobj);
                if (thobj == NULL)
                        goto done;
 
                wait = threadobj_get_wait(thobj);
                if (wait->size <= bcb->fillsz)
-                       syncobj_flush(&bcb->sobj, SYNCOBJ_BROADCAST);
+                       syncobj_grant_all(&bcb->sobj);
 
                goto done;
        wait:
@@ -356,8 +356,8 @@ ssize_t rt_buffer_write_timed(RT_BUFFER *bf,
                 * the burden: this is an error condition, we just
                 * have to mitigate its effect, avoiding a deadlock.
                 */
-               if (bcb->fillsz > 0 && syncobj_pend_count(&bcb->sobj))
-                       syncobj_flush(&bcb->sobj, SYNCOBJ_BROADCAST);
+               if (bcb->fillsz > 0 && syncobj_count_grant(&bcb->sobj))
+                       syncobj_grant_all(&bcb->sobj);
 
                ret = syncobj_wait_drain(&bcb->sobj, abs_timeout, &syns);
                if (ret) {
@@ -393,7 +393,7 @@ int rt_buffer_clear(RT_BUFFER *bf)
        bcb->wroff = 0;
        bcb->rdoff = 0;
        bcb->fillsz = 0;
-       syncobj_signal_drain(&bcb->sobj);
+       syncobj_drain(&bcb->sobj);
 
        put_alchemy_buffer(bcb, &syns);
 out:
@@ -415,8 +415,8 @@ int rt_buffer_inquire(RT_BUFFER *bf, RT_BUFFER_INFO *info)
        if (bcb == NULL)
                goto out;
 
-       info->iwaiters = syncobj_pend_count(&bcb->sobj);
-       info->owaiters = syncobj_drain_count(&bcb->sobj);
+       info->iwaiters = syncobj_count_grant(&bcb->sobj);
+       info->owaiters = syncobj_count_drain(&bcb->sobj);
        info->totalmem = bcb->bufsz;
        info->availmem = bcb->bufsz - bcb->fillsz;
        strcpy(info->name, bcb->name);
diff --git a/lib/alchemy/event.c b/lib/alchemy/event.c
index c637290..27b51fa 100644
--- a/lib/alchemy/event.c
+++ b/lib/alchemy/event.c
@@ -148,7 +148,7 @@ int rt_event_wait_timed(RT_EVENT *event,
        wait->mask = mask;
        wait->mode = mode;
 
-       ret = syncobj_pend(&evcb->sobj, abs_timeout, &syns);
+       ret = syncobj_wait_grant(&evcb->sobj, abs_timeout, &syns);
        if (ret) {
                if (ret == -EIDRM) {
                        threadobj_finish_wait();
@@ -184,7 +184,7 @@ int rt_event_signal(RT_EVENT *event, unsigned long mask)
 
        evcb->value |= mask;
 
-       if (!syncobj_pended_p(&evcb->sobj))
+       if (!syncobj_grant_wait_p(&evcb->sobj))
                goto done;
 
        syncobj_for_each_waiter_safe(&evcb->sobj, thobj, tmp) {
@@ -193,7 +193,7 @@ int rt_event_signal(RT_EVENT *event, unsigned long mask)
                testval = wait->mode & EV_ANY ? bits : mask;
                if (bits && bits == testval) {
                        wait->mask = bits;
-                       syncobj_wakeup_waiter(&evcb->sobj, thobj);
+                       syncobj_grant_to(&evcb->sobj, thobj);
                }
        }
 done:
@@ -244,7 +244,7 @@ int rt_event_inquire(RT_EVENT *event, RT_EVENT_INFO *info)
                goto out;
 
        info->value = evcb->value;
-       info->nwaiters = syncobj_pend_count(&evcb->sobj);
+       info->nwaiters = syncobj_count_grant(&evcb->sobj);
        strcpy(info->name, evcb->name);
 
        put_alchemy_event(evcb, &syns);
diff --git a/lib/alchemy/heap.c b/lib/alchemy/heap.c
index 96e42ee..543c109 100644
--- a/lib/alchemy/heap.c
+++ b/lib/alchemy/heap.c
@@ -172,7 +172,7 @@ int rt_heap_alloc_timed(RT_HEAP *heap,
        wait = threadobj_prepare_wait(struct alchemy_heap_wait);
        wait->size = size;
 
-       ret = syncobj_pend(&hcb->sobj, abs_timeout, &syns);
+       ret = syncobj_wait_grant(&hcb->sobj, abs_timeout, &syns);
        if (ret) {
                if (ret == -EIDRM) {
                        threadobj_finish_wait();
@@ -217,7 +217,7 @@ int rt_heap_free(RT_HEAP *heap, void *block)
 
        heapobj_free(&hcb->hobj, block);
 
-       if (!syncobj_pended_p(&hcb->sobj))
+       if (!syncobj_grant_wait_p(&hcb->sobj))
                goto done;
        /*
         * We might be releasing a block large enough to satisfy
@@ -227,7 +227,7 @@ int rt_heap_free(RT_HEAP *heap, void *block)
                wait = threadobj_get_wait(thobj);
                wait->ptr = heapobj_alloc(&hcb->hobj, wait->size);
                if (wait->ptr)
-                       syncobj_wakeup_waiter(&hcb->sobj, thobj);
+                       syncobj_grant_to(&hcb->sobj, thobj);
        }
 done:
        put_alchemy_heap(hcb, &syns);
@@ -250,7 +250,7 @@ int rt_heap_inquire(RT_HEAP *heap, RT_HEAP_INFO *info)
        if (hcb == NULL)
                goto out;
 
-       info->nwaiters = syncobj_pend_count(&hcb->sobj);
+       info->nwaiters = syncobj_count_grant(&hcb->sobj);
        info->heapsize = hcb->size;
        info->usablemem = heapobj_size(&hcb->hobj);
        info->usedmem = heapobj_inquire(&hcb->hobj);
diff --git a/lib/alchemy/queue.c b/lib/alchemy/queue.c
index 0dcd7ce..b05dfcf 100644
--- a/lib/alchemy/queue.c
+++ b/lib/alchemy/queue.c
@@ -246,7 +246,7 @@ int rt_queue_send(RT_QUEUE *queue,
        ret = 0;  /* # of tasks unblocked. */
 
        do {
-               waiter = syncobj_post(&qcb->sobj);
+               waiter = syncobj_grant_one(&qcb->sobj);
                if (waiter == NULL)
                        break;
                wait = threadobj_get_wait(waiter);
@@ -300,7 +300,7 @@ int rt_queue_write(RT_QUEUE *queue,
        if (qcb == NULL)
                goto out;
 
-       waiter = syncobj_peek_at_pend(&qcb->sobj);
+       waiter = syncobj_peek_grant(&qcb->sobj);
        if (waiter && threadobj_local_p(waiter)) {
                /*
                 * Fast path for local threads already waiting for
@@ -317,13 +317,13 @@ int rt_queue_write(RT_QUEUE *queue,
                if (size > 0)
                        memcpy(wait->userbuf, buf, size);
                wait->usersz = size;
-               syncobj_wakeup_waiter(&qcb->sobj, waiter);
+               syncobj_grant_to(&qcb->sobj, waiter);
                ret = 1;
                goto done;
        }
 
 enqueue:
-       nwaiters = syncobj_pend_count(&qcb->sobj);
+       nwaiters = syncobj_count_grant(&qcb->sobj);
        if (nwaiters == 0 && (mode & Q_BROADCAST) != 0)
                goto done;
 
@@ -350,7 +350,7 @@ enqueue:
        }
 
        do {
-               waiter = syncobj_post(&qcb->sobj);
+               waiter = syncobj_grant_one(&qcb->sobj);
                if (waiter == NULL)
                        break;
                wait = threadobj_get_wait(waiter);
@@ -406,7 +406,7 @@ wait:
        wait = threadobj_prepare_wait(struct alchemy_queue_wait);
        wait->usersz = 0;
 
-       ret = syncobj_pend(&qcb->sobj, abs_timeout, &syns);
+       ret = syncobj_wait_grant(&qcb->sobj, abs_timeout, &syns);
        if (ret) {
                if (ret == -EIDRM) {
                        threadobj_finish_wait();
@@ -470,7 +470,7 @@ wait:
        wait->usersz = size;
        wait->msg = NULL;
 
-       ret = syncobj_pend(&qcb->sobj, abs_timeout, &syns);
+       ret = syncobj_wait_grant(&qcb->sobj, abs_timeout, &syns);
        if (ret) {
                if (ret == -EIDRM) {
                        threadobj_finish_wait();
@@ -545,7 +545,7 @@ int rt_queue_inquire(RT_QUEUE *queue, RT_QUEUE_INFO *info)
        if (qcb == NULL)
                goto out;
 
-       info->nwaiters = syncobj_pend_count(&qcb->sobj);
+       info->nwaiters = syncobj_count_grant(&qcb->sobj);
        info->nmessages = qcb->mcount;
        info->mode = qcb->mode;
        info->qlimit = qcb->limit;
diff --git a/lib/alchemy/task.c b/lib/alchemy/task.c
index e627a33..daa9026 100644
--- a/lib/alchemy/task.c
+++ b/lib/alchemy/task.c
@@ -341,7 +341,7 @@ int rt_task_delete(RT_TASK *task)
        }
 
        while (tcb->safecount) {
-               ret = syncobj_pend(&tcb->sobj_safe, NULL, &syns);
+               ret = syncobj_wait_grant(&tcb->sobj_safe, NULL, &syns);
                if (ret) {
                        if (ret == -EIDRM)
                                goto out;
@@ -735,7 +735,7 @@ ssize_t rt_task_send_timed(RT_TASK *task,
                goto out;
 
        if (alchemy_poll_mode(abs_timeout)) {
-               if (!syncobj_drain_count(&tcb->sobj_msg)) {
+               if (!syncobj_count_drain(&tcb->sobj_msg)) {
                        ret = -EWOULDBLOCK;
                        goto done;
                }
@@ -762,10 +762,10 @@ ssize_t rt_task_send_timed(RT_TASK *task,
                wait->reply.size = 0;
        }
 
-       if (syncobj_drain_count(&tcb->sobj_msg))
-               syncobj_signal_drain(&tcb->sobj_msg);
+       if (syncobj_count_drain(&tcb->sobj_msg))
+               syncobj_drain(&tcb->sobj_msg);
 
-       ret = syncobj_pend(&tcb->sobj_msg, abs_timeout, &syns);
+       ret = syncobj_wait_grant(&tcb->sobj_msg, abs_timeout, &syns);
        if (ret) {
                threadobj_finish_wait();
                if (ret == -EIDRM)
@@ -802,7 +802,7 @@ int rt_task_receive_timed(RT_TASK_MCB *mcb_r,
 
        __bt(syncobj_lock(&current->sobj_msg, &syns));
 
-       while (!syncobj_pended_p(&current->sobj_msg)) {
+       while (!syncobj_grant_wait_p(&current->sobj_msg)) {
                if (alchemy_poll_mode(abs_timeout)) {
                        ret = -EWOULDBLOCK;
                        goto done;
@@ -810,7 +810,7 @@ int rt_task_receive_timed(RT_TASK_MCB *mcb_r,
                syncobj_wait_drain(&current->sobj_msg, abs_timeout, &syns);
        }
 
-       thobj = syncobj_peek_at_pend(&current->sobj_msg);
+       thobj = syncobj_peek_grant(&current->sobj_msg);
        wait = threadobj_get_wait(thobj);
        mcb_s = &wait->request;
 
@@ -858,7 +858,7 @@ int rt_task_reply(int flowid, RT_TASK_MCB *mcb_s)
        __bt(syncobj_lock(&current->sobj_msg, &syns));
 
        ret = -ENXIO;
-       if (!syncobj_pended_p(&current->sobj_msg))
+       if (!syncobj_grant_wait_p(&current->sobj_msg))
                goto done;
 
        syncobj_for_each_waiter(&current->sobj_msg, thobj) {
@@ -869,7 +869,7 @@ int rt_task_reply(int flowid, RT_TASK_MCB *mcb_s)
        goto done;
  reply:
        size = mcb_s ? mcb_s->size : 0;
-       syncobj_wakeup_waiter(&current->sobj_msg, thobj);
+       syncobj_grant_to(&current->sobj_msg, thobj);
        mcb_r = &wait->reply;
 
        /*
diff --git a/lib/copperplate/cluster.c b/lib/copperplate/cluster.c
index 70d8bd4..47d2b54 100644
--- a/lib/copperplate/cluster.c
+++ b/lib/copperplate/cluster.c
@@ -232,7 +232,7 @@ int syncluster_addobj(struct syncluster *sc, const char *name,
        if (ret)
                goto out;
 
-       if (!syncobj_pended_p(sc->sobj))
+       if (!syncobj_grant_wait_p(sc->sobj))
                goto out;
        /*
         * Wake up all threads waiting for this key to appear in the
@@ -241,7 +241,7 @@ int syncluster_addobj(struct syncluster *sc, const char *name,
        syncobj_for_each_waiter_safe(sc->sobj, thobj, tmp) {
                wait = threadobj_get_wait(thobj);
                if (*wait->name == *name && strcmp(wait->name, name) == 0)
-                       syncobj_wakeup_waiter(sc->sobj, thobj);
+                       syncobj_grant_to(sc->sobj, thobj);
        }
 out:
        syncobj_unlock(sc->sobj, &syns);
@@ -297,7 +297,7 @@ int syncluster_findobj(struct syncluster *sc,
                        wait = threadobj_prepare_wait(struct syncluster_wait_struct);
                        wait->name = name;
                }
-               ret = syncobj_pend(sc->sobj, timeout, &syns);
+               ret = syncobj_wait_grant(sc->sobj, timeout, &syns);
                if (ret) {
                        if (ret == -EIDRM)
                                goto out;
@@ -397,7 +397,7 @@ int pvsyncluster_addobj(struct pvsyncluster *sc, const char *name,
        if (ret)
                goto out;
 
-       if (!syncobj_pended_p(&sc->sobj))
+       if (!syncobj_grant_wait_p(&sc->sobj))
                goto out;
        /*
         * Wake up all threads waiting for this key to appear in the
@@ -406,7 +406,7 @@ int pvsyncluster_addobj(struct pvsyncluster *sc, const char *name,
        syncobj_for_each_waiter_safe(&sc->sobj, thobj, tmp) {
                wait = threadobj_get_wait(thobj);
                if (*wait->name == *name && strcmp(wait->name, name) == 0)
-                       syncobj_wakeup_waiter(&sc->sobj, thobj);
+                       syncobj_grant_to(&sc->sobj, thobj);
        }
 out:
        syncobj_unlock(&sc->sobj, &syns);
@@ -462,7 +462,7 @@ int pvsyncluster_findobj(struct pvsyncluster *sc,
                        wait = threadobj_prepare_wait(struct syncluster_wait_struct);
                        wait->name = name;
                }
-               ret = syncobj_pend(&sc->sobj, timeout, &syns);
+               ret = syncobj_wait_grant(&sc->sobj, timeout, &syns);
                if (ret) {
                        if (ret == -EIDRM)
                                goto out;
diff --git a/lib/copperplate/semobj.c b/lib/copperplate/semobj.c
index ef90a96..de89ed9 100644
--- a/lib/copperplate/semobj.c
+++ b/lib/copperplate/semobj.c
@@ -169,7 +169,7 @@ int semobj_post(struct semobj *smobj)
                return ret;
 
        if (++smobj->core.value <= 0)
-               syncobj_post(&smobj->core.sobj);
+               syncobj_grant_one(&smobj->core.sobj);
        else if (smobj->core.flags & SEMOBJ_PULSE)
                smobj->core.value = 0;
 
@@ -189,7 +189,7 @@ int semobj_broadcast(struct semobj *smobj)
 
        if (smobj->core.value < 0) {
                smobj->core.value = 0;
-               syncobj_flush(&smobj->core.sobj, SYNCOBJ_BROADCAST);
+               syncobj_grant_all(&smobj->core.sobj);
        }
 
        syncobj_unlock(&smobj->core.sobj, &syns);
@@ -221,7 +221,7 @@ int semobj_wait(struct semobj *smobj, const struct timespec *timeout)
                goto done;
        }
 
-       ret = syncobj_pend(&smobj->core.sobj, timeout, &syns);
+       ret = syncobj_wait_grant(&smobj->core.sobj, timeout, &syns);
        if (ret) {
                /*
                 * -EIDRM means that the semaphore has been deleted,
diff --git a/lib/copperplate/syncobj.c b/lib/copperplate/syncobj.c
index c3323ea..dbd8f5f 100644
--- a/lib/copperplate/syncobj.c
+++ b/lib/copperplate/syncobj.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Philippe Gerum <r...@xenomai.org>.
+ * Copyright (C) 2008-2011 Philippe Gerum <r...@xenomai.org>.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -98,12 +98,6 @@ void monitor_grant(struct syncobj *sobj, struct threadobj *thobj)
 }
 
 static inline
-void monitor_drain(struct syncobj *sobj)
-{
-       cobalt_monitor_drain(&sobj->core.monitor);
-}
-
-static inline
 void monitor_drain_all(struct syncobj *sobj)
 {
        cobalt_monitor_drain_all(&sobj->core.monitor);
@@ -118,6 +112,7 @@ static inline void syncobj_init_corespec(struct syncobj *sobj)
 
 static inline void syncobj_cleanup_corespec(struct syncobj *sobj)
 {
+       /* We hold the gate lock while destroying. */
        int ret = cobalt_monitor_destroy(&sobj->core.monitor);
        assert(ret == 0);
        (void)ret;
@@ -169,12 +164,6 @@ void monitor_grant(struct syncobj *sobj, struct threadobj *thobj)
 }
 
 static inline
-void monitor_drain(struct syncobj *sobj)
-{
-       pthread_cond_signal(&sobj->core.drain_sync);
-}
-
-static inline
 void monitor_drain_all(struct syncobj *sobj)
 {
        pthread_cond_broadcast(&sobj->core.drain_sync);
@@ -207,6 +196,7 @@ static inline void syncobj_init_corespec(struct syncobj *sobj)
 
 static inline void syncobj_cleanup_corespec(struct syncobj *sobj)
 {
+       monitor_exit(sobj);
        pthread_cond_destroy(&sobj->core.drain_sync);
        pthread_mutex_destroy(&sobj->core.lock);
 }
@@ -217,11 +207,11 @@ void syncobj_init(struct syncobj *sobj, int flags,
                  fnref_type(void (*)(struct syncobj *sobj)) finalizer)
 {
        sobj->flags = flags;
-       list_init(&sobj->pend_list);
+       list_init(&sobj->grant_list);
        list_init(&sobj->drain_list);
-       sobj->pend_count = 0;
+       sobj->grant_count = 0;
        sobj->drain_count = 0;
-       sobj->release_count = 0;
+       sobj->wait_count = 0;
        sobj->finalizer = finalizer;
        sobj->magic = SYNCOBJ_MAGIC;
        syncobj_init_corespec(sobj);
@@ -237,21 +227,25 @@ int syncobj_lock(struct syncobj *sobj, struct syncstate *syns)
         * This magic prevents concurrent locking while a deletion is
         * in progress, waiting for the release count to drop to zero.
         */
-       if (sobj->magic != SYNCOBJ_MAGIC)
-               return -EINVAL;
-
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 
        ret = monitor_enter(sobj);
-       if (ret) {
-               pthread_setcancelstate(oldstate, NULL);
-               return ret;
+       if (ret)
+               goto fail;
+
+       /* Check for an ongoing deletion. */
+       if (sobj->magic != SYNCOBJ_MAGIC) {
+               monitor_exit(sobj);
+               ret = -EINVAL;
+               goto fail;
        }
 
        syns->state = oldstate;
        __syncobj_tag_locked(sobj);
-
        return 0;
+fail:
+       pthread_setcancelstate(oldstate, NULL);
+       return ret;
 }
 
 void syncobj_unlock(struct syncobj *sobj, struct syncstate *syns)
@@ -261,62 +255,75 @@ void syncobj_unlock(struct syncobj *sobj, struct syncstate *syns)
        pthread_setcancelstate(syns->state, NULL);
 }
 
-static void syncobj_test_finalize(struct syncobj *sobj,
-                                 struct syncstate *syns)
+static void __syncobj_finalize(struct syncobj *sobj)
 {
        void (*finalizer)(struct syncobj *sobj);
-       int relcount;
-
-       relcount = --sobj->release_count;
-       monitor_exit(sobj);
-
-       if (relcount == 0) {
-               syncobj_cleanup_corespec(sobj);
-               fnref_get(finalizer, sobj->finalizer);
-               if (finalizer)
-                       finalizer(sobj);
-       } else
-               assert(relcount > 0);
 
        /*
-        * Cancelability reset is postponed until here, so that we
-        * can't be wiped off asynchronously before the object is
-        * fully finalized, albeit we exited the monitor earlier to
-        * allow deletion.
+        * Cancelability is still disabled or we are running over the
+        * thread finalizer, therefore we can't be wiped off in the
+        * middle of the finalization process.
         */
-       pthread_setcancelstate(syns->state, NULL);
+       syncobj_cleanup_corespec(sobj);
+       fnref_get(finalizer, sobj->finalizer);
+       if (finalizer)
+               finalizer(sobj);
 }
 
-int __syncobj_signal_drain(struct syncobj *sobj)
+int __syncobj_broadcast_grant(struct syncobj *sobj, int reason)
 {
-       /* Release one thread waiting for the object to drain. */
-       --sobj->drain_count;
-       monitor_drain(sobj);
+       struct threadobj *thobj;
+       int ret;
+
+       assert(!list_empty(&sobj->grant_list));
+
+       do {
+               thobj = list_pop_entry(&sobj->grant_list,
+                                      struct threadobj, wait_link);
+               thobj->wait_status |= reason;
+               thobj->wait_sobj = NULL;
+               monitor_grant(sobj, thobj);
+       } while (!list_empty(&sobj->grant_list));
+
+       ret = sobj->grant_count;
+       sobj->grant_count = 0;
 
-       return 1;
+       return ret;
 }
 
-/*
- * NOTE: we don't use POSIX cleanup handlers in syncobj_pend() and
- * syncobj_wait() on purpose: these may have a significant impact on
- * latency due to I-cache misses on low-end hardware (e.g. ~6 us on
- * MPC5200), particularly when unwinding the cancel frame. So the
- * cleanup handler below is called by the threadobj finalizer instead
- * when appropriate, since we have enough internal information to
- * handle this situation.
- */
-void __syncobj_cleanup_wait(struct syncobj *sobj, struct threadobj *thobj)
+int __syncobj_broadcast_drain(struct syncobj *sobj, int reason)
 {
-       /*
-        * We don't care about resetting the original cancel type
-        * saved in the syncstate struct since we are there precisely
-        * because the caller got cancelled.
-        */
-       list_remove(&thobj->wait_link);
-       if (thobj->wait_status & SYNCOBJ_DRAINING)
-               sobj->drain_count--;
+       struct threadobj *thobj;
+       int ret;
 
-       monitor_exit(sobj);
+       assert(!list_empty(&sobj->drain_list));
+
+       do {
+               thobj = list_pop_entry(&sobj->drain_list,
+                                      struct threadobj, wait_link);
+               thobj->wait_sobj = NULL;
+               thobj->wait_status |= reason;
+       } while (!list_empty(&sobj->drain_list));
+
+       monitor_drain_all(sobj);
+
+       ret = sobj->drain_count;
+       sobj->drain_count = 0;
+
+       return ret;
+}
+
+int syncobj_flush(struct syncobj *sobj)
+{
+       __syncobj_check_locked(sobj);
+
+       if (sobj->grant_count > 0)
+               __syncobj_broadcast_grant(sobj, SYNCOBJ_FLUSHED);
+
+       if (sobj->drain_count > 0)
+               __syncobj_broadcast_drain(sobj, SYNCOBJ_FLUSHED);
+
+       return sobj->wait_count;
 }
 
 static inline void enqueue_waiter(struct syncobj *sobj,
@@ -325,141 +332,145 @@ static inline void enqueue_waiter(struct syncobj *sobj,
        struct threadobj *__thobj;
 
        thobj->wait_prio = threadobj_get_priority(thobj);
-       sobj->pend_count++;
-       if ((sobj->flags & SYNCOBJ_PRIO) == 0 || list_empty(&sobj->pend_list)) {
-               list_append(&thobj->wait_link, &sobj->pend_list);
+       if ((sobj->flags & SYNCOBJ_PRIO) == 0 || list_empty(&sobj->grant_list)) {
+               list_append(&thobj->wait_link, &sobj->grant_list);
                return;
        }
 
-       list_for_each_entry_reverse(__thobj, &sobj->pend_list, wait_link) {
+       list_for_each_entry_reverse(__thobj, &sobj->grant_list, wait_link) {
                if (thobj->wait_prio <= __thobj->wait_prio)
                        break;
        }
        ath(&__thobj->wait_link, &thobj->wait_link);
 }
 
-int syncobj_pend(struct syncobj *sobj, const struct timespec *timeout,
-                struct syncstate *syns)
+static inline void dequeue_waiter(struct syncobj *sobj,
+                                 struct threadobj *thobj)
 {
-       struct threadobj *current = threadobj_current();
-       int ret, state;
-
-       __syncobj_check_locked(sobj);
-
-       assert(current != NULL);
-
-       current->wait_status = 0;
-       enqueue_waiter(sobj, current);
-       current->wait_sobj = sobj;
+       list_remove(&thobj->wait_link);
+       if (thobj->wait_status & SYNCOBJ_DRAINWAIT)
+               sobj->drain_count--;
+       else
+               sobj->grant_count--;
 
-       if (current->wait_hook)
-               current->wait_hook(sobj, SYNCOBJ_BLOCK);
+       assert(sobj->wait_count > 0);
+}
 
+/*
+ * NOTE: we don't use POSIX cleanup handlers in syncobj_wait_grant() and
+ * syncobj_wait() on purpose: these may have a significant impact on
+ * latency due to I-cache misses on low-end hardware (e.g. ~6 us on
+ * MPC5200), particularly when unwinding the cancel frame. So the
+ * cleanup handler below is called by the threadobj finalizer instead
+ * when appropriate, since we have enough internal information to
+ * handle this situation.
+ */
+void __syncobj_cleanup_wait(struct syncobj *sobj, struct threadobj *thobj)
+{
        /*
-        * XXX: we are guaranteed to be in deferred cancel mode, with
-        * cancelability disabled (in syncobj_lock); enable
-        * cancelability before pending on the condvar.
-        */
-       pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
-       /*
-        * Catch spurious unlocked calls: this must be a blatant bug
-        * in the calling code, don't even try to continue
-        * (syncobj_lock() required first).
+        * We don't care about resetting the original cancel type
+        * saved in the syncstate struct since we are there precisely
+        * because the caller got cancelled while sleeping on the
+        * GRANT/DRAIN condition.
         */
-       assert(state == PTHREAD_CANCEL_DISABLE);
-
-       do {
-               __syncobj_tag_unlocked(sobj);
-               ret = monitor_wait_grant(sobj, current, timeout);
-               __syncobj_tag_locked(sobj);
-               /* Check for spurious wake up. */
-       } while (ret == 0 && current->wait_sobj);
-
-       pthread_setcancelstate(state, NULL);
+       dequeue_waiter(sobj, thobj);
 
-       if (current->wait_hook)
-               current->wait_hook(sobj, SYNCOBJ_RESUME);
-
-       if (ret) {
-               current->wait_sobj = NULL;
-               list_remove(&current->wait_link);
-       } else if (current->wait_status & SYNCOBJ_DELETED) {
-               syncobj_test_finalize(sobj, syns);
-               ret = -EIDRM;
-       } else if (current->wait_status & SYNCOBJ_RELEASE_MASK) {
-               --sobj->release_count;
-               assert(sobj->release_count >= 0);
-               if (current->wait_status & SYNCOBJ_FLUSHED)
-                       ret = -EINTR;
+       if (--sobj->wait_count == 0 && sobj->magic != SYNCOBJ_MAGIC) {
+               __syncobj_finalize(sobj);
+               return;
        }
 
-       return ret;
+       monitor_exit(sobj);
 }
 
-void syncobj_requeue_waiter(struct syncobj *sobj, struct threadobj *thobj)
+struct threadobj *syncobj_grant_one(struct syncobj *sobj)
 {
+       struct threadobj *thobj;
+
        __syncobj_check_locked(sobj);
 
-       list_remove(&thobj->wait_link);
-       enqueue_waiter(sobj, thobj);
+       if (list_empty(&sobj->grant_list))
+               return NULL;
+
+       thobj = list_pop_entry(&sobj->grant_list, struct threadobj, wait_link);
+       thobj->wait_status |= SYNCOBJ_SIGNALED;
+       thobj->wait_sobj = NULL;
+       sobj->grant_count--;
+       monitor_grant(sobj, thobj);
+
+       return thobj;
 }
 
-void syncobj_wakeup_waiter(struct syncobj *sobj, struct threadobj *thobj)
+void syncobj_grant_to(struct syncobj *sobj, struct threadobj *thobj)
 {
        __syncobj_check_locked(sobj);
 
        list_remove(&thobj->wait_link);
+       thobj->wait_status |= SYNCOBJ_SIGNALED;
        thobj->wait_sobj = NULL;
-       sobj->pend_count--;
+       sobj->grant_count--;
        monitor_grant(sobj, thobj);
 }
 
-struct threadobj *syncobj_post(struct syncobj *sobj)
+struct threadobj *syncobj_peek_grant(struct syncobj *sobj)
 {
        struct threadobj *thobj;
 
        __syncobj_check_locked(sobj);
 
-       if (list_empty(&sobj->pend_list))
+       if (list_empty(&sobj->grant_list))
                return NULL;
 
-       thobj = list_pop_entry(&sobj->pend_list, struct threadobj, wait_link);
-       thobj->wait_sobj = NULL;
-       sobj->pend_count--;
-       monitor_grant(sobj, thobj);
-
+       thobj = list_first_entry(&sobj->grant_list, struct threadobj,
+                                wait_link);
        return thobj;
 }
 
-struct threadobj *syncobj_peek_at_pend(struct syncobj *sobj)
+struct threadobj *syncobj_peek_drain(struct syncobj *sobj)
 {
        struct threadobj *thobj;
 
        __syncobj_check_locked(sobj);
 
-       if (list_empty(&sobj->pend_list))
+       if (list_empty(&sobj->drain_list))
                return NULL;
 
-       thobj = list_first_entry(&sobj->pend_list, struct threadobj,
+       thobj = list_first_entry(&sobj->drain_list, struct threadobj,
                                 wait_link);
        return thobj;
 }
 
-struct threadobj *syncobj_peek_at_drain(struct syncobj *sobj)
+static inline int wait_epilogue(struct syncobj *sobj,
+                               struct syncstate *syns,
+                               struct threadobj *current)
 {
-       struct threadobj *thobj;
+       if (current->wait_sobj) {
+               dequeue_waiter(sobj, current);
+               current->wait_sobj = NULL;
+       }
 
-       __syncobj_check_locked(sobj);
+       if (current->wait_hook)
+               current->wait_hook(sobj, SYNCOBJ_RESUME);
 
-       if (list_empty(&sobj->drain_list))
-               return NULL;
+       sobj->wait_count--;
+       assert(sobj->wait_count >= 0);
 
-       thobj = list_first_entry(&sobj->drain_list, struct threadobj,
-                                wait_link);
-       return thobj;
+       if (sobj->magic != SYNCOBJ_MAGIC) {
+               if (sobj->wait_count == 0)
+                       __syncobj_finalize(sobj);
+               else
+                       monitor_exit(sobj);
+               pthread_setcancelstate(syns->state, NULL);
+               return -EIDRM;
+       }
+
+       if (current->wait_status & SYNCOBJ_FLUSHED)
+               return -EINTR;
+
+       return 0;
 }
 
-int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
+int syncobj_wait_grant(struct syncobj *sobj, const struct timespec *timeout,
                       struct syncstate *syns)
 {
        struct threadobj *current = threadobj_current();
@@ -469,98 +480,72 @@ int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
 
        assert(current != NULL);
 
-       /*
-        * XXX: syncobj_wait_drain() behaves slightly differently than
-        * syncobj_pend(), in that we don't process spurious wakeups
-        * internally, leaving it to the caller. We do this because a
-        * drain sync is broadcast so we can't be 100% sure whether
-        * the wait condition actually disappeared for all waiters.
-        *
-        * (e.g. in case the drain signal notifies about a single
-        * resource being released, only one waiter will be satisfied,
-        * albeit all waiters will compete to get that resource - this
-        * means that all waiters but one will get a spurious wakeup).
-        *
-        * On the other hand, syncobj_pend() only unblocks on a
-        * directed wakeup signal to the waiting thread, so we can
-        * check whether such signal has existed prior to exiting the
-        * wait loop (i.e. testing current->wait_sobj for NULL).
-        */
-       current->wait_status = SYNCOBJ_DRAINING;
-       list_append(&current->wait_link, &sobj->drain_list);
+       current->wait_status = 0;
+       enqueue_waiter(sobj, current);
        current->wait_sobj = sobj;
-       sobj->drain_count++;
-
-       pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
-       assert(state == PTHREAD_CANCEL_DISABLE);
+       sobj->grant_count++;
+       sobj->wait_count++;
 
        if (current->wait_hook)
                current->wait_hook(sobj, SYNCOBJ_BLOCK);
 
        /*
-        * XXX: The caller must check for spurious wakeups, in case
-        * the drain condition became false again before it resumes.
+        * XXX: we are guaranteed to be in deferred cancel mode, with
+        * cancelability disabled (in syncobj_lock); re-enable it
+        * before pending on the condvar.
         */
-       __syncobj_tag_unlocked(sobj);
-       ret = monitor_wait_drain(sobj, timeout);
-       __syncobj_tag_locked(sobj);
-
-       pthread_setcancelstate(state, NULL);
-
-       current->wait_status &= ~SYNCOBJ_DRAINING;
-       if (current->wait_status == 0) { /* not flushed? */
-               current->wait_sobj = NULL;
-               list_remove(&current->wait_link);
-       }
+       pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
+       assert(state == PTHREAD_CANCEL_DISABLE);
 
-       if (current->wait_hook)
-               current->wait_hook(sobj, SYNCOBJ_RESUME);
+       do {
+               __syncobj_tag_unlocked(sobj);
+               ret = monitor_wait_grant(sobj, current, timeout);
+               __syncobj_tag_locked(sobj);
+               /* Check for spurious wake up. */
+       } while (ret == 0 && current->wait_sobj);
 
-       if (current->wait_status & SYNCOBJ_DELETED) {
-               syncobj_test_finalize(sobj, syns);
-               ret = -EIDRM;
-       } else if (current->wait_status & SYNCOBJ_RELEASE_MASK) {
-               --sobj->release_count;
-               assert(sobj->release_count >= 0);
-               if (current->wait_status & SYNCOBJ_FLUSHED)
-                       ret = -EINTR;
-       }
+       pthread_setcancelstate(state, NULL);
 
-       return ret;
+       return wait_epilogue(sobj, syns, current) ?: ret;
 }
 
-int syncobj_flush(struct syncobj *sobj, int reason)
+int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
+                      struct syncstate *syns)
 {
-       struct threadobj *thobj;
+       struct threadobj *current = threadobj_current();
+       int ret, state;
 
        __syncobj_check_locked(sobj);
 
-       /* Must have a valid release flag set. */
-       assert(reason & SYNCOBJ_RELEASE_MASK);
+       assert(current != NULL);
 
-       while (!list_empty(&sobj->pend_list)) {
-               thobj = list_pop_entry(&sobj->pend_list,
-                                      struct threadobj, wait_link);
-               thobj->wait_status |= reason;
-               thobj->wait_sobj = NULL;
-               monitor_grant(sobj, thobj);
-               sobj->release_count++;
-       }
-       sobj->pend_count = 0;
-
-       if (sobj->drain_count > 0) {
-               do {
-                       thobj = list_pop_entry(&sobj->drain_list,
-                                              struct threadobj, wait_link);
-                       thobj->wait_sobj = NULL;
-                       thobj->wait_status |= reason;
-               } while (!list_empty(&sobj->drain_list));
-               sobj->release_count += sobj->drain_count;
-               sobj->drain_count = 0;
-               monitor_drain_all(sobj);
-       }
+       current->wait_status = SYNCOBJ_DRAINWAIT;
+       list_append(&current->wait_link, &sobj->drain_list);
+       current->wait_sobj = sobj;
+       sobj->drain_count++;
+       sobj->wait_count++;
+
+       if (current->wait_hook)
+               current->wait_hook(sobj, SYNCOBJ_BLOCK);
+
+       pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
+       assert(state == PTHREAD_CANCEL_DISABLE);
+
+       /*
+        * XXX: Since the DRAINED signal is broadcast to all waiters,
+        * a race may exist for acting upon it among those
+        * threads. Therefore the caller must check that the drain
+        * condition is still true before proceeding.
+        */
+       do {
+               __syncobj_tag_unlocked(sobj);
+               ret = monitor_wait_drain(sobj, timeout);
+               __syncobj_tag_locked(sobj);
+       } while (ret == 0 && current->wait_sobj);
 
-       return sobj->release_count;
+       pthread_setcancelstate(state, NULL);
+
+       return wait_epilogue(sobj, syns, current) ?: ret;
 }
 
 int syncobj_destroy(struct syncobj *sobj, struct syncstate *syns)
@@ -570,19 +555,21 @@ int syncobj_destroy(struct syncobj *sobj, struct syncstate *syns)
        __syncobj_check_locked(sobj);
 
        sobj->magic = ~SYNCOBJ_MAGIC;
-       ret = syncobj_flush(sobj, SYNCOBJ_DELETED);
-       if (ret == 0) {
-               /* No thread awaken - we may dispose immediately. */
-               sobj->release_count = 1;
-               syncobj_test_finalize(sobj, syns);
-       } else
+       ret = syncobj_flush(sobj);
+       if (ret) {
                syncobj_unlock(sobj, syns);
+               return ret;
+       }
 
-       return ret;
+       /* No thread awaken - we may dispose immediately. */
+       __syncobj_finalize(sobj);
+       pthread_setcancelstate(syns->state, NULL);
+
+       return 0;
 }
 
 void syncobj_uninit(struct syncobj *sobj)
 {
-       assert(sobj->release_count == 0);
+       assert(sobj->wait_count == 0);
        syncobj_cleanup_corespec(sobj);
 }
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index 7dac280..49d3dfe 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -1090,7 +1090,7 @@ int threadobj_unblock(struct threadobj *thobj) /* thobj->lock held */
         * FIXME: racy. We can't assume thobj->wait_sobj is stable.
         */
        if (thobj->wait_sobj)   /* Remove PEND (+DELAY timeout) */
-               syncobj_flush(thobj->wait_sobj, SYNCOBJ_FLUSHED);
+               syncobj_flush(thobj->wait_sobj);
        else
                /* Remove standalone DELAY */
                ret = -__RT(pthread_kill(tid, SIGRELS));
diff --git a/lib/psos/queue.c b/lib/psos/queue.c
index beea6bc..386d43c 100644
--- a/lib/psos/queue.c
+++ b/lib/psos/queue.c
@@ -239,7 +239,7 @@ static u_long __q_send_inner(struct psos_queue *q, unsigned long flags,
        struct msgholder *msg;
        u_long maxbytes;
 
-       thobj = syncobj_peek_at_pend(&q->sobj);
+       thobj = syncobj_peek_grant(&q->sobj);
        if (thobj && threadobj_local_p(thobj)) {
                /* Fast path: direct copy to the receiver's buffer. */
                wait = threadobj_get_wait(thobj);
@@ -282,7 +282,7 @@ static u_long __q_send_inner(struct psos_queue *q, unsigned long flags,
        }
 done:
        if (thobj)
-               syncobj_wakeup_waiter(&q->sobj, thobj);
+               syncobj_grant_to(&q->sobj, thobj);
 
        return SUCCESS;
 }
@@ -375,7 +375,7 @@ static u_long __q_broadcast(u_long qid, u_long flags,
 
        /* Release all pending tasks atomically. */
        *count_r = 0;
-       while (syncobj_pended_p(&q->sobj)) {
+       while (syncobj_grant_wait_p(&q->sobj)) {
                ret = __q_send_inner(q, flags, buffer, bytes);
                if (ret)
                        break;
@@ -454,7 +454,7 @@ retry:
        wait->ptr = buffer;
        wait->size = msglen;
 
-       ret = syncobj_pend(&q->sobj, timespec, &syns);
+       ret = syncobj_wait_grant(&q->sobj, timespec, &syns);
        if (ret == -EIDRM) {
                ret = ERR_QKILLD;
                goto out;
diff --git a/lib/psos/rn.c b/lib/psos/rn.c
index 23bd747..5f6fe42 100644
--- a/lib/psos/rn.c
+++ b/lib/psos/rn.c
@@ -261,7 +261,7 @@ starve:
        wait->ptr = NULL;
        wait->size = size;
 
-       ret = syncobj_pend(&rn->sobj, timespec, &syns);
+       ret = syncobj_wait_grant(&rn->sobj, timespec, &syns);
        if (ret == -ETIMEDOUT)
                ret = ERR_TIMEOUT;
        /*
@@ -311,7 +311,7 @@ u_long rn_retseg(u_long rnid, void *segaddr)
        heapobj_free(&rn->hobj, segaddr);
        rn->busynr--;
 
-       if (!syncobj_pended_p(&rn->sobj))
+       if (!syncobj_grant_wait_p(&rn->sobj))
                goto done;
 
        syncobj_for_each_waiter_safe(&rn->sobj, thobj, tmp) {
@@ -324,7 +324,7 @@ u_long rn_retseg(u_long rnid, void *segaddr)
                        rn->busynr++;
                        rn->usedmem += heapobj_validate(&rn->hobj, seg);
                        wait->ptr = seg;
-                       syncobj_wakeup_waiter(&rn->sobj, thobj);
+                       syncobj_grant_to(&rn->sobj, thobj);
                }
        }
 done:
diff --git a/lib/psos/task.c b/lib/psos/task.c
index ca62c51..ecce2d7 100644
--- a/lib/psos/task.c
+++ b/lib/psos/task.c
@@ -616,7 +616,7 @@ u_long ev_receive(u_long events, u_long flags,
                timespec = NULL;
 
        for (;;) {
-               ret = syncobj_pend(&current->sobj, timespec, &syns);
+               ret = syncobj_wait_grant(&current->sobj, timespec, &syns);
                if (ret == -ETIMEDOUT) {
                        ret = ERR_TIMEOUT;
                        break;
@@ -658,7 +658,7 @@ u_long ev_send(u_long tid, u_long events)
         * it up immediately and let it confirm whether the condition
         * is now satisfied.
         */
-       syncobj_post(&task->sobj);
+       syncobj_grant_one(&task->sobj);
 
        syncobj_unlock(&task->sobj, &syns);
 out:
diff --git a/lib/vxworks/msgQLib.c b/lib/vxworks/msgQLib.c
index df88540..e90f1f3 100644
--- a/lib/vxworks/msgQLib.c
+++ b/lib/vxworks/msgQLib.c
@@ -194,7 +194,7 @@ retry:
                if (nbytes > 0)
                        memcpy(buffer, msg + 1, nbytes);
                heapobj_free(&mq->pool, msg);
-               syncobj_signal_drain(&mq->sobj);
+               syncobj_drain(&mq->sobj);
                goto done;
        }
 
@@ -213,7 +213,7 @@ retry:
        wait->ptr = buffer;
        wait->size = maxNBytes;
 
-       ret = syncobj_pend(&mq->sobj, timespec, &syns);
+       ret = syncobj_wait_grant(&mq->sobj, timespec, &syns);
        if (ret == -EIDRM) {
                errno = S_objLib_OBJ_DELETED;
                goto out;
@@ -225,7 +225,7 @@ retry:
        nbytes = wait->size;
        if (nbytes == -1L)      /* No direct copy? */
                goto retry;
-       syncobj_signal_drain(&mq->sobj);
+       syncobj_drain(&mq->sobj);
 done:
        syncobj_unlock(&mq->sobj, &syns);
 out:
@@ -268,7 +268,7 @@ STATUS msgQSend(MSG_Q_ID msgQId, const char *buffer, UINT bytes,
                goto fail;
        }
 
-       thobj = syncobj_peek_at_pend(&mq->sobj);
+       thobj = syncobj_peek_grant(&mq->sobj);
        if (thobj && threadobj_local_p(thobj)) {
                /* Fast path: direct copy to the receiver's buffer. */
                wait = threadobj_get_wait(thobj);
@@ -346,7 +346,7 @@ enqueue:
        }
 done:
        if (thobj)      /* Wakeup waiter. */
-               syncobj_wakeup_waiter(&mq->sobj, thobj);
+               syncobj_grant_to(&mq->sobj, thobj);
 
        ret = OK;
 fail:
diff --git a/lib/vxworks/semLib.c b/lib/vxworks/semLib.c
index 90fd381..39887ff 100644
--- a/lib/vxworks/semLib.c
+++ b/lib/vxworks/semLib.c
@@ -86,7 +86,7 @@ static STATUS xsem_take(struct wind_sem *sem, int timeout)
        } else
                timespec = NULL;
 
-       ret = syncobj_pend(&sem->u.xsem.sobj, timespec, &syns);
+       ret = syncobj_wait_grant(&sem->u.xsem.sobj, timespec, &syns);
        if (ret == -EIDRM) {
                ret = S_objLib_OBJ_DELETED;
                goto out;
@@ -124,7 +124,7 @@ static STATUS xsem_give(struct wind_sem *sem)
                        /* No wrap around. */
                        ret = S_semLib_INVALID_OPERATION;
        } else if (++sem->u.xsem.value <= 0)
-               syncobj_post(&sem->u.xsem.sobj);
+               syncobj_grant_one(&sem->u.xsem.sobj);
 
        syncobj_unlock(&sem->u.xsem.sobj, &syns);
 out:
@@ -146,7 +146,7 @@ static STATUS xsem_flush(struct wind_sem *sem)
                goto out;
        }
 
-       syncobj_flush(&sem->u.xsem.sobj, SYNCOBJ_FLUSHED);
+       syncobj_flush(&sem->u.xsem.sobj);
 
        syncobj_unlock(&sem->u.xsem.sobj, &syns);
 out:

