Module: xenomai-gch
Branch: for-forge
Commit: 31a900e3a63c4b4c8026d998d5562ddc547ea560
URL:    
http://git.xenomai.org/?p=xenomai-gch.git;a=commit;h=31a900e3a63c4b4c8026d998d5562ddc547ea560

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Sat Nov 12 22:43:13 2011 +0100

cobalt: implement deferred condition variables signalling

When signalling a condition variable, our previous and naive implementation
woke up waiting threads at the time of the pthread_cond_signal call. This has
an issue: if the woken-up thread has a higher priority than the signalling thread,
there will be one context switch to the woken thread, which will try to grab
the mutex associated with the condition variable, go to sleep, and another
context switch will resume the signalling thread, which ultimately will release
the mutex to let the high priority thread run.

This new implementation instead defers the actual waking up of the waiting
thread to the time of the mutex unlock. This avoids a system call for
pthread_cond_signal, and only needs one context switch to wake up a high
priority thread.

---

 include/cobalt/nucleus/synch.h |   47 +++++++++++++
 include/cobalt/pthread.h       |    8 --
 kernel/cobalt/cond.c           |  141 ++++++++++++++++++++++++++--------------
 kernel/cobalt/cond.h           |   55 +++++++++++++++-
 kernel/cobalt/internal.h       |   20 +++---
 kernel/cobalt/mutex.c          |    2 +
 kernel/cobalt/mutex.h          |   53 +++++++++++++++-
 kernel/cobalt/nucleus/synch.c  |    4 +-
 kernel/cobalt/syscall.c        |   81 ++++++++++++++++++-----
 lib/cobalt/cond.c              |   99 ++++++++++++++++++++++++++--
 lib/cobalt/mutex.c             |    5 +-
 11 files changed, 421 insertions(+), 94 deletions(-)

diff --git a/include/cobalt/nucleus/synch.h b/include/cobalt/nucleus/synch.h
index 3cb6e3f..c6d942e 100644
--- a/include/cobalt/nucleus/synch.h
+++ b/include/cobalt/nucleus/synch.h
@@ -53,6 +53,53 @@ static inline int xnsynch_fast_check_spares(xnarch_atomic_t 
*fastlock,
        return (xnhandle_test_spares(xnarch_atomic_get(fastlock), spares));
 }
 
+static inline int xnsynch_fast_set_spares(xnarch_atomic_t *fastlock,
+                                         xnhandle_t owner,
+                                         xnhandle_t spares)
+{
+       xnhandle_t cur, old;
+       cur = xnarch_atomic_cmpxchg(fastlock, owner, owner | spares);
+       if (cur != owner) {
+               /* Only the current owner of the fastlock can change
+                  spare bits */
+               if (xnhandle_mask_spares(cur) != owner)
+                       return -EPERM;
+
+               do {
+                       if (xnhandle_test_spares(cur, spares))
+                               return 0;
+
+                       old = cur;
+                       cur = xnarch_atomic_cmpxchg(fastlock,
+                                                   old, old | spares);
+               } while(old != cur);
+       }
+       return 0;
+}
+
+static inline int xnsynch_fast_clear_spares(xnarch_atomic_t *fastlock,
+                                           xnhandle_t owner,
+                                           xnhandle_t spares)
+{
+       xnhandle_t cur, old;
+       cur = xnarch_atomic_cmpxchg(fastlock, owner | spares, owner & ~spares);
+       if (cur != (owner | spares)) {
+               /* Only the current owner of the fastlock can change
+                  spare bits */
+               if (xnhandle_mask_spares(cur) != owner)
+                       return -EPERM;
+
+               do {
+                       if (!xnhandle_test_spares(cur, spares))
+                               return 0;
+
+                       old = cur;
+                       cur = xnarch_atomic_cmpxchg(fastlock,
+                                                   old, old & ~spares);
+               } while(old != cur);
+       }
+       return 0;
+}
 
 static inline int xnsynch_fast_acquire(xnarch_atomic_t *fastlock,
                                       xnhandle_t new_ownerh)
diff --git a/include/cobalt/pthread.h b/include/cobalt/pthread.h
index acfda12..e526f34 100644
--- a/include/cobalt/pthread.h
+++ b/include/cobalt/pthread.h
@@ -169,14 +169,6 @@ struct cobalt_condattr {
 
 struct cobalt_cond;
 
-union __xeno_cond {
-       pthread_cond_t native_cond;
-       struct __shadow_cond {
-               unsigned magic;
-               struct cobalt_cond *cond;
-       } shadow_cond;
-};
-
 struct cobalt_threadstat {
        unsigned long status;
        unsigned long long xtime;
diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index 1de5a57..0d6637b 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -47,22 +47,10 @@
  *
  *@{*/
 
+#include <nucleus/sys_ppd.h>
 #include "mutex.h"
 #include "cond.h"
 
-typedef struct cobalt_cond {
-       unsigned magic;
-       xnsynch_t synchbase;
-       xnholder_t link;        /* Link in cobalt_condq */
-
-#define link2cond(laddr)                                                \
-    ((cobalt_cond_t *)(((char *)laddr) - offsetof(cobalt_cond_t, link)))
-
-       pthread_condattr_t attr;
-       struct cobalt_mutex *mutex;
-       cobalt_kqueues_t *owningq;
-} cobalt_cond_t;
-
 static pthread_condattr_t default_cond_attr;
 
 static void cond_destroy_internal(cobalt_cond_t * cond, cobalt_kqueues_t *q)
@@ -76,6 +64,10 @@ static void cond_destroy_internal(cobalt_cond_t * cond, 
cobalt_kqueues_t *q)
           xnpod_schedule(). */
        xnsynch_destroy(&cond->synchbase);
        xnlock_put_irqrestore(&nklock, s);
+#ifdef CONFIG_XENO_FASTSYNCH
+       xnheap_free(&xnsys_ppd_get(cond->attr.pshared)->sem_heap,
+                   cond->pending_signals);
+#endif /* CONFIG_XENO_FASTSYNCH */
        xnfree(cond);
 }
 
@@ -108,6 +100,7 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
 {
        struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
        xnflags_t synch_flags = XNSYNCH_PRIO | XNSYNCH_NOPIP;
+       struct xnsys_ppd *sys_ppd;
        cobalt_cond_t *cond;
        xnqueue_t *condq;
        spl_t s;
@@ -120,11 +113,23 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
        if (!cond)
                return ENOMEM;
 
+#ifdef CONFIG_XENO_FASTSYNCH
+       sys_ppd = xnsys_ppd_get(attr->pshared);
+       cond->pending_signals = (unsigned long *)
+               xnheap_alloc(&sys_ppd->sem_heap,
+                            sizeof(*(cond->pending_signals)));
+       if (!cond->pending_signals) {
+               err = EAGAIN;
+               goto err_free_cond;
+       }
+       *(cond->pending_signals) = 0;
+#endif /* CONFIG_XENO_FASTSYNCH */
+
        xnlock_get_irqsave(&nklock, s);
 
        if (attr->magic != COBALT_COND_ATTR_MAGIC) {
                err = EINVAL;
-               goto error;
+               goto err_free_pending_signals;
        }
 
        condq = &cobalt_kqueues(attr->pshared)->condq;
@@ -136,10 +141,18 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
                        if (holder == &shadow->cond->link) {
                                /* cond is already in the queue. */
                                err = EBUSY;
-                               goto error;
+                               goto err_free_pending_signals;
                        }
        }
 
+#ifdef CONFIG_XENO_FASTSYNCH
+       shadow->attr = *attr;
+       shadow->pending_signals_offset =
+               xnheap_mapped_offset(&sys_ppd->sem_heap,
+                                    cond->pending_signals);
+       shadow->mutex_ownerp = (xnarch_atomic_t *)~0UL;
+#endif /* CONFIG_XENO_FASTSYNCH */
+
        shadow->magic = COBALT_COND_MAGIC;
        shadow->cond = cond;
 
@@ -156,8 +169,14 @@ int pthread_cond_init(pthread_cond_t * cnd, const 
pthread_condattr_t * attr)
 
        return 0;
 
-  error:
+  err_free_pending_signals:
        xnlock_put_irqrestore(&nklock, s);
+#ifdef CONFIG_XENO_FASTSYNCH
+       xnheap_free(&xnsys_ppd_get(cond->attr.pshared)->sem_heap,
+                   cond->pending_signals);
+  err_free_cond:
+       xnfree(cond);
+#endif
        return err;
 }
 
@@ -218,34 +237,6 @@ int pthread_cond_destroy(pthread_cond_t * cnd)
        return 0;
 }
 
-/* must be called with nklock locked, interrupts off.
-
-   Note: this function is very similar to mutex_unlock_internal() in mutex.c.
-*/
-static inline int mutex_save_count(xnthread_t *cur,
-                                  struct __shadow_mutex *shadow,
-                                  unsigned *count_ptr)
-{
-       cobalt_mutex_t *mutex;
-
-       mutex = shadow->mutex;
-       if (!cobalt_obj_active(shadow, COBALT_MUTEX_MAGIC, struct 
__shadow_mutex)
-           || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct 
cobalt_mutex))
-                return EINVAL;
-
-       if (xnsynch_owner_check(&mutex->synchbase, cur) != 0)
-               return EPERM;
-
-       *count_ptr = shadow->lockcnt;
-
-       xnsynch_release(&mutex->synchbase);
-
-       /* Do not reschedule here, releasing the mutex and suspension must be
-          done atomically in pthread_cond_*wait. */
-
-       return 0;
-}
-
 int cobalt_cond_timedwait_prologue(xnthread_t *cur,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex,
@@ -282,15 +273,32 @@ int cobalt_cond_timedwait_prologue(xnthread_t *cur,
                goto unlock_and_return;
        }
 
+       if (mutex->attr.pshared != cond->attr.pshared) {
+               err = EINVAL;
+               goto unlock_and_return;
+       }
+
        /* Unlock mutex, with its previous recursive lock count stored
           in "*count_ptr". */
-       err = mutex_save_count(cur, mutex, count_ptr);
-       if (err)
+       err = cobalt_mutex_release(cur, mutex, count_ptr);
+       if (err < 0)
                goto unlock_and_return;
 
+       /* err == 1 means a reschedule is needed, but do not
+          reschedule here, releasing the mutex and suspension must be
+          done atomically in pthread_cond_*wait. */
+
        /* Bind mutex to cond. */
-       if (cond->mutex == NULL)
+       if (cond->mutex == NULL) {
                cond->mutex = mutex->mutex;
+               inith(&cond->mutex_link);
+               appendq(&mutex->mutex->conds, &cond->mutex_link);
+#ifdef CONFIG_XENO_FASTSYNCH
+               /* In case of previous calls to pthread_cond_signal
+                  without any waiting thread */
+               *(cond->pending_signals) = 0;
+#endif /* CONFIG_XENO_FASTSYNCH */
+       }
 
        /* Wait for another thread to signal the condition. */
        if (timed)
@@ -349,8 +357,10 @@ int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
        /* Unbind mutex and cond, if no other thread is waiting, if the job was
           not already done. */
        if (!xnsynch_nsleepers(&cond->synchbase)
-           && cond->mutex == mutex->mutex)
+           && cond->mutex == mutex->mutex) {
                cond->mutex = NULL;
+               removeq(&mutex->mutex->conds, &cond->mutex_link);
+       }
 
        thread_cancellation_point(cur);
 
@@ -395,6 +405,8 @@ int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
  * @return an error number if:
  * - EPERM, the caller context is invalid;
  * - EINVAL, the specified condition variable or mutex is invalid;
+ * - EINVAL, the specified condition variable and mutex process-shared
+ * attribute mismatch;
  * - EPERM, the specified condition variable is not process-shared and does not
  *   belong to the current process;
  * - EINVAL, another thread is currently blocked on @a cnd using another mutex
@@ -463,6 +475,8 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t 
* mx)
  * - EPERM, the specified condition variable is not process-shared and does not
  *   belong to the current process;
  * - EINVAL, the specified condition variable, mutex or timeout is invalid;
+ * - EINVAL, the specified condition variable and mutex process-shared
+ * attribute mismatch;
  * - EINVAL, another thread is currently blocked on @a cnd using another mutex
  *   than @a mx;
  * - EPERM, the specified mutex is not owned by the caller;
@@ -627,6 +641,37 @@ void cobalt_condq_cleanup(cobalt_kqueues_t *q)
        xnlock_put_irqrestore(&nklock, s);
 }
 
+#ifdef CONFIG_XENO_FASTSYNCH
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
+{
+       unsigned long pending_signals;
+       int need_resched, i;
+
+       pending_signals = *(cond->pending_signals);
+
+       switch(pending_signals) {
+       case ~0UL:
+               need_resched =
+                       xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
+               break;
+
+       case 0:
+               need_resched = 0;
+               break;
+
+       default:
+               for(i = 0, need_resched = 0; i < pending_signals; i++)
+                       need_resched |=
+                               xnsynch_wakeup_one_sleeper(&cond->synchbase)
+                               != NULL;
+       }
+
+       *cond->pending_signals = 0;
+
+       return need_resched;
+}
+#endif /* CONFIG_XENO_FASTSYNCH */
+
 void cobalt_cond_pkg_init(void)
 {
        initq(&cobalt_global_kqueues.condq);
diff --git a/kernel/cobalt/cond.h b/kernel/cobalt/cond.h
index 385e2f8..cbce569 100644
--- a/kernel/cobalt/cond.h
+++ b/kernel/cobalt/cond.h
@@ -20,8 +20,53 @@
 #ifndef _POSIX_COND_H
 #define _POSIX_COND_H
 
-#include <cobalt/posix.h>
-#include "mutex.h"
+#include <pthread.h>
+
+struct cobalt_cond;
+
+union __xeno_cond {
+       pthread_cond_t native_cond;
+       struct __shadow_cond {
+               unsigned magic;
+#ifdef CONFIG_XENO_FASTSYNCH
+               struct cobalt_condattr attr;
+               union {
+                       unsigned pending_signals_offset;
+                       unsigned long *pending_signals;
+               };
+               union {
+                       unsigned mutex_ownerp_offset;
+                       xnarch_atomic_t *mutex_ownerp;
+               };
+#endif /* CONFIG_XENO_FASTSYNCH */
+               struct cobalt_cond *cond;
+       } shadow_cond;
+};
+
+#if defined(__KERNEL__) || defined(__XENO_SIM__)
+
+#include "internal.h"
+
+struct __shadow_mutex;
+
+typedef struct cobalt_cond {
+       unsigned magic;
+       xnsynch_t synchbase;
+       xnholder_t link;        /* Link in cobalt_condq */
+
+#define link2cond(laddr)                                                \
+    ((cobalt_cond_t *)(((char *)laddr) - offsetof(cobalt_cond_t, link)))
+
+       xnholder_t mutex_link;
+
+#define mutex_link2cond(laddr)                                         \
+    ((cobalt_cond_t *)(((char *)laddr) - offsetof(cobalt_cond_t, mutex_link)))
+
+       unsigned long *pending_signals;
+       pthread_condattr_t attr;
+       struct cobalt_mutex *mutex;
+       cobalt_kqueues_t *owningq;
+} cobalt_cond_t;
 
 int cobalt_cond_timedwait_prologue(xnthread_t *cur,
                                  struct __shadow_cond *shadow,
@@ -34,10 +79,16 @@ int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
                                  struct __shadow_cond *shadow,
                                  struct __shadow_mutex *mutex, unsigned count);
 
+#ifdef CONFIG_XENO_FASTSYNCH
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
+#endif /* CONFIG_XENO_FASTSYNCH */
+
 void cobalt_condq_cleanup(cobalt_kqueues_t *q);
 
 void cobalt_cond_pkg_init(void);
 
 void cobalt_cond_pkg_cleanup(void);
 
+#endif /* __KERNEL__ */
+
 #endif /* !_POSIX_COND_H */
diff --git a/kernel/cobalt/internal.h b/kernel/cobalt/internal.h
index 4de6425..9c64204 100644
--- a/kernel/cobalt/internal.h
+++ b/kernel/cobalt/internal.h
@@ -39,16 +39,16 @@
 #define COBALT_MUTEX_MAGIC       COBALT_MAGIC(03)
 #define COBALT_MUTEX_ATTR_MAGIC  (COBALT_MAGIC(04) & ((1 << 24) - 1))
 #define COBALT_COND_MAGIC        COBALT_MAGIC(05)
-#define COBALT_COND_ATTR_MAGIC   (COBALT_MAGIC(05) & ((1 << 24) - 1))
-#define COBALT_SEM_MAGIC         COBALT_MAGIC(06)
-#define COBALT_KEY_MAGIC         COBALT_MAGIC(07)
-#define COBALT_ONCE_MAGIC        COBALT_MAGIC(08)
-#define COBALT_MQ_MAGIC          COBALT_MAGIC(09)
-#define COBALT_MQD_MAGIC         COBALT_MAGIC(0A)
-#define COBALT_INTR_MAGIC        COBALT_MAGIC(0B)
-#define COBALT_NAMED_SEM_MAGIC   COBALT_MAGIC(0C)
-#define COBALT_TIMER_MAGIC       COBALT_MAGIC(0D)
-#define COBALT_SHM_MAGIC         COBALT_MAGIC(0E)
+#define COBALT_COND_ATTR_MAGIC   (COBALT_MAGIC(06) & ((1 << 24) - 1))
+#define COBALT_SEM_MAGIC         COBALT_MAGIC(07)
+#define COBALT_KEY_MAGIC         COBALT_MAGIC(08)
+#define COBALT_ONCE_MAGIC        COBALT_MAGIC(09)
+#define COBALT_MQ_MAGIC          COBALT_MAGIC(0A)
+#define COBALT_MQD_MAGIC         COBALT_MAGIC(0B)
+#define COBALT_INTR_MAGIC        COBALT_MAGIC(0C)
+#define COBALT_NAMED_SEM_MAGIC   COBALT_MAGIC(0D)
+#define COBALT_TIMER_MAGIC       COBALT_MAGIC(0E)
+#define COBALT_SHM_MAGIC         COBALT_MAGIC(0F)
 
 #define COBALT_MIN_PRIORITY      XNSCHED_LOW_PRIO
 #define COBALT_MAX_PRIORITY      XNSCHED_HIGH_PRIO
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index 5ae4cbb..4d43f2a 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -49,6 +49,7 @@
 
 #include <nucleus/sys_ppd.h>
 #include "mutex.h"
+#include "cond.h"
 
 pthread_mutexattr_t cobalt_default_mutex_attr;
 
@@ -117,6 +118,7 @@ int cobalt_mutex_init_internal(struct __shadow_mutex 
*shadow,
        inith(&mutex->link);
        mutex->attr = *attr;
        mutex->owningq = kq;
+       initq(&mutex->conds);
 
        xnlock_get_irqsave(&nklock, s);
        appendq(&kq->mutexq, &mutex->link);
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index a61b40c..3e58cd7 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -19,8 +19,8 @@
 #ifndef _POSIX_MUTEX_H
 #define _POSIX_MUTEX_H
 
-#include <asm/xenomai/atomic.h>
 #include <pthread.h>
+#include <asm/xenomai/atomic.h>
 
 struct cobalt_mutex;
 
@@ -37,6 +37,8 @@ union __xeno_mutex {
                        xnarch_atomic_t *owner;
                };
                struct cobalt_mutexattr attr;
+
+#define COBALT_MUTEX_COND_SIGNAL XN_HANDLE_SPARE2
 #endif /* CONFIG_XENO_FASTSYNCH */
        } shadow_mutex;
 };
@@ -45,6 +47,7 @@ union __xeno_mutex {
 
 #include "internal.h"
 #include "thread.h"
+#include "cond.h"
 #include "cb_lock.h"
 
 typedef struct cobalt_mutex {
@@ -55,6 +58,8 @@ typedef struct cobalt_mutex {
 #define link2mutex(laddr)                                               \
        ((cobalt_mutex_t *)(((char *)laddr) - offsetof(cobalt_mutex_t, link)))
 
+       xnqueue_t conds;
+
        pthread_mutexattr_t attr;
        cobalt_kqueues_t *owningq;
 } cobalt_mutex_t;
@@ -126,6 +131,52 @@ static inline int 
cobalt_mutex_timedlock_internal(xnthread_t *cur,
        return 0;
 }
 
+static inline int cobalt_mutex_release(xnthread_t *cur,
+                                      struct __shadow_mutex *shadow,
+                                      unsigned *count_ptr)
+{
+       cobalt_mutex_t *mutex;
+       xnholder_t *holder;
+       int need_resched;
+
+       mutex = shadow->mutex;
+       if (!cobalt_obj_active(shadow, COBALT_MUTEX_MAGIC, struct 
__shadow_mutex)
+           || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct 
cobalt_mutex))
+                return -EINVAL;
+
+       if (mutex->owningq != cobalt_kqueues(mutex->attr.pshared))
+               return -EPERM;
+
+       if (xnsynch_owner_check(&mutex->synchbase, cur) != 0)
+               return -EPERM;
+
+       if (count_ptr)
+               *count_ptr = shadow->lockcnt;
+
+       need_resched = 0;
+#ifdef CONFIG_XENO_FASTSYNCH
+       for (holder = getheadq(&mutex->conds);
+            holder; holder = nextq(&mutex->conds, holder)) {
+               struct cobalt_cond *cond = mutex_link2cond(holder);
+               if (*(cond->pending_signals)) {
+                       if (xnsynch_nsleepers(&cond->synchbase))
+                               need_resched |=
+                                       cobalt_cond_deferred_signals(cond);
+                       else
+                               *(cond->pending_signals) = 0;
+               }
+       }
+       xnsynch_fast_clear_spares(mutex->synchbase.fastlock,
+                                 xnthread_handle(cur),
+                                 COBALT_MUTEX_COND_SIGNAL);
+#endif /* CONFIG_XENO_FASTSYNCH */
+       need_resched |= xnsynch_release(&mutex->synchbase) != NULL;
+
+       return need_resched;
+       /* Do not reschedule here, releasing the mutex and suspension must be
+          done atomically in pthread_cond_*wait. */
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* !_POSIX_MUTEX_H */
diff --git a/kernel/cobalt/nucleus/synch.c b/kernel/cobalt/nucleus/synch.c
index 9c8a8fb..b111056 100644
--- a/kernel/cobalt/nucleus/synch.c
+++ b/kernel/cobalt/nucleus/synch.c
@@ -545,9 +545,9 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t 
timeout,
                        xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
                        /* We are the new owner, update the fastlock
                           accordingly. */
-                       threah |= xnhandle_get_spares(xnarch_atomic_get(lockp),
+                       threadh |= xnhandle_get_spares(xnarch_atomic_get(lockp),
                                                       XN_HANDLE_SPARE_MASK);
-                       threadh;
+                       threadh =
                                xnsynch_fast_set_claimed(threadh,
                                                         
xnsynch_pended_p(synch));
                        xnarch_atomic_set(lockp, threadh);
diff --git a/kernel/cobalt/syscall.c b/kernel/cobalt/syscall.c
index 140a12d..eea3c79 100644
--- a/kernel/cobalt/syscall.c
+++ b/kernel/cobalt/syscall.c
@@ -1233,7 +1233,7 @@ static int __pthread_mutex_unlock(union __xeno_mutex 
__user *u_mx)
 
        return err;
 }
-#else /* !CONFIG_XENO_FASTSYNCH */
+#else /* CONFIG_XENO_FASTSYNCH */
 static int __pthread_mutex_check_init(union __xeno_mutex __user *u_mx,
                                      const pthread_mutexattr_t __user *u_attr)
 {
@@ -1326,6 +1326,9 @@ static int __pthread_mutex_destroy(union __xeno_mutex 
__user *u_mx)
                                     XN_NO_HANDLE) != 0)
                return -EBUSY;
 
+       if (countq(&mutex->conds))
+               return -EBUSY;
+
        cobalt_mark_deleted(shadow);
        cobalt_mutex_destroy_internal(mutex, mutex->owningq);
 
@@ -1447,6 +1450,8 @@ static int __pthread_mutex_timedlock(union __xeno_mutex 
__user *u_mx,
 static int __pthread_mutex_unlock(union __xeno_mutex __user *u_mx)
 {
        union __xeno_mutex mx;
+       int need_resched;
+       spl_t s;
 
        if (xnpod_root_p())
                return -EPERM;
@@ -1456,12 +1461,21 @@ static int __pthread_mutex_unlock(union __xeno_mutex 
__user *u_mx)
                                     offsetof(struct __shadow_mutex, lock)))
                return -EFAULT;
 
-       if (xnsynch_release(&mx.shadow_mutex.mutex->synchbase))
+       xnlock_get_irqsave(&nklock, s);
+       need_resched = cobalt_mutex_release(xnpod_current_thread(),
+                                           &mx.shadow_mutex, NULL);
+       if (need_resched < 0) {
+               xnlock_put_irqrestore(&nklock, s);
+               return need_resched;
+       }
+
+       if (need_resched)
                xnpod_schedule();
+       xnlock_put_irqrestore(&nklock, s);
 
        return 0;
 }
-#endif /* !CONFIG_XENO_FASTSYNCH */
+#endif /* CONFIG_XENO_FASTSYNCH */
 
 static int __pthread_condattr_init(pthread_condattr_t __user *u_attr)
 {
@@ -1630,24 +1644,26 @@ static int __pthread_cond_wait_prologue(union 
__xeno_cond __user *u_cnd,
 
        if (__xn_safe_copy_from_user(&mx.shadow_mutex,
                                     &u_mx->shadow_mutex,
+                                    sizeof(mx.shadow_mutex)))
+               return -EFAULT;
+
 #ifdef CONFIG_XENO_FASTSYNCH
-                                    offsetof(struct __shadow_mutex, lock)
-#else /* !CONFIG_XENO_FASTSYNCH */
-                                    sizeof(mx.shadow_mutex)
-#endif /* !CONFIG_XENO_FASTSYNCH */
-                                    ))
+       cnd.shadow_cond.mutex_ownerp = mx.shadow_mutex.owner;
+       if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
+                                  &cnd.shadow_cond.mutex_ownerp,
+                                  sizeof(cnd.shadow_cond.mutex_ownerp)))
                return -EFAULT;
+#endif /* CONFIG_XENO_FASTSYNCH */
 
        if (timed) {
-               if (__xn_safe_copy_from_user(&ts, u_ts, sizeof(ts)))
-                       return -EFAULT;
-
-               err = cobalt_cond_timedwait_prologue(cur,
-                                                   &cnd.shadow_cond,
-                                                   &mx.shadow_mutex,
-                                                   &d.count,
-                                                   timed,
-                                                   ts2ns(&ts) + 1);
+               err = __xn_safe_copy_from_user(&ts, u_ts, sizeof(ts))?EFAULT:0;
+               if (!err)
+                       err = cobalt_cond_timedwait_prologue(cur,
+                                                            &cnd.shadow_cond,
+                                                            &mx.shadow_mutex,
+                                                            &d.count,
+                                                            timed,
+                                                            ts2ns(&ts) + 1);
        } else
                err = cobalt_cond_timedwait_prologue(cur,
                                                    &cnd.shadow_cond,
@@ -1655,12 +1671,33 @@ static int __pthread_cond_wait_prologue(union 
__xeno_cond __user *u_cnd,
                                                    &d.count,
                                                    timed, XN_INFINITE);
 
+#ifdef CONFIG_XENO_FASTSYNCH
+       if (!cnd.shadow_cond.cond->mutex) {
+               cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
+               if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
+                                          &cnd.shadow_cond.mutex_ownerp,
+                                          
sizeof(cnd.shadow_cond.mutex_ownerp)))
+                       return -EFAULT;
+       }
+#endif /* CONFIG_XENO_FASTSYNCH */
+
        switch(err) {
        case 0:
        case ETIMEDOUT:
                perr = d.err = err;
                err = -cobalt_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
                                                    &mx.shadow_mutex, d.count);
+
+#ifdef CONFIG_XENO_FASTSYNCH
+               if (!cnd.shadow_cond.cond->mutex) {
+                       cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
+                       if 
(__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
+                                                  
&cnd.shadow_cond.mutex_ownerp,
+                                                  
sizeof(cnd.shadow_cond.mutex_ownerp)))
+                               return -EFAULT;
+               }
+#endif /* CONFIG_XENO_FASTSYNCH */
+
                if (err == 0 &&
                    __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
                                           &mx.shadow_mutex.lockcnt,
@@ -1708,6 +1745,16 @@ static int __pthread_cond_wait_epilogue(union 
__xeno_cond __user *u_cnd,
        err = cobalt_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
                                            &mx.shadow_mutex, count);
 
+#ifdef CONFIG_XENO_FASTSYNCH
+       if (!cnd.shadow_cond.cond->mutex) {
+               cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
+               if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
+                                          &cnd.shadow_cond.mutex_ownerp,
+                                          
sizeof(cnd.shadow_cond.mutex_ownerp)))
+                       return -EFAULT;
+       }
+#endif /* CONFIG_XENO_FASTSYNCH */
+
        if (err == 0
            && __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
                                      &mx.shadow_mutex.lockcnt,
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index 1457c0e..df03c18 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -17,13 +17,43 @@
  */
 
 #include <errno.h>
-#include <cobalt/syscall.h>
 #include <pthread.h>
+#include <nucleus/synch.h>
+#include <cobalt/syscall.h>
 #include <kernel/cobalt/mutex.h>
+#include <kernel/cobalt/cond.h>
 #include <kernel/cobalt/cb_lock.h>
+#include <asm-generic/bits/current.h>
 
 extern int __cobalt_muxid;
 
+#ifdef CONFIG_XENO_FASTSYNCH
+#define COBALT_COND_MAGIC 0x86860505
+
+extern unsigned long xeno_sem_heap[2];
+
+static unsigned long *get_signalsp(struct __shadow_cond *shadow)
+{
+       if (likely(!shadow->attr.pshared))
+               return shadow->pending_signals;
+
+       return (unsigned long *)(xeno_sem_heap[1]
+                                + shadow->pending_signals_offset);
+}
+
+static xnarch_atomic_t *get_mutex_ownerp(struct __shadow_cond *shadow)
+{
+       if (shadow->mutex_ownerp == (xnarch_atomic_t *)~0UL)
+               return NULL;
+
+       if (likely(!shadow->attr.pshared))
+               return shadow->mutex_ownerp;
+
+       return (xnarch_atomic_t *)(xeno_sem_heap[1]
+                                  + shadow->mutex_ownerp_offset);
+}
+#endif /* CONFIG_XENO_FASTSYNCH */
+
 int __wrap_pthread_condattr_init(pthread_condattr_t *attr)
 {
        return -XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_condattr_init, attr);
@@ -64,11 +94,19 @@ int __wrap_pthread_condattr_setpshared(pthread_condattr_t 
*attr, int pshared)
 int __wrap_pthread_cond_init(pthread_cond_t * cond,
                             const pthread_condattr_t * attr)
 {
-       union __xeno_cond *_cond = (union __xeno_cond *)cond;
+       struct __shadow_cond *shadow =
+               &((union __xeno_cond *)cond)->shadow_cond;
        int err;
 
        err = -XENOMAI_SKINCALL2(__cobalt_muxid,
-                                __cobalt_cond_init, &_cond->shadow_cond, attr);
+                                __cobalt_cond_init, shadow, attr);
+#ifdef CONFIG_XENO_FASTSYNCH
+       if (!err && !shadow->attr.pshared) {
+               shadow->pending_signals = (unsigned long *)
+                       (xeno_sem_heap[0] + shadow->pending_signals_offset);
+       }
+#endif /* CONFIG_XENO_FASTSYNCH */
+
        return err;
 }
 
@@ -180,16 +218,67 @@ int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
 
 int __wrap_pthread_cond_signal(pthread_cond_t * cond)
 {
-       union __xeno_cond *_cond = (union __xeno_cond *)cond;
+       struct __shadow_cond *shadow =
+               &((union __xeno_cond *)cond)->shadow_cond;
+#ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long *pending_signals;
+       xnarch_atomic_t *mutex_ownerp;
+       xnhandle_t cur;
+
+       cur = xeno_get_current();
+       if (cur == XN_NO_HANDLE)
+               return EPERM;
+
+       if (shadow->magic != COBALT_COND_MAGIC)
+               return EINVAL;
+
+       mutex_ownerp = get_mutex_ownerp(shadow);
+       if (mutex_ownerp) {
+               if (xnsynch_fast_set_spares(mutex_ownerp, cur,
+                                           COBALT_MUTEX_COND_SIGNAL) < 0)
+                       return EPERM;
 
+               pending_signals = get_signalsp(shadow);
+               if (*pending_signals != ~0UL)
+                       ++(*pending_signals);
+       }
+
+       return 0;
+#else /* !CONFIG_XENO_FASTSYNCH */
        return -XENOMAI_SKINCALL1(__cobalt_muxid,
                                 __cobalt_cond_signal, shadow);
+#endif /* !CONFIG_XENO_FASTSYNCH */
 }
 
 int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
 {
-       union __xeno_cond *_cond = (union __xeno_cond *)cond;
+       struct __shadow_cond *shadow =
+               &((union __xeno_cond *)cond)->shadow_cond;
+#ifdef CONFIG_XENO_FASTSYNCH
+       unsigned long *pending_signals;
+       xnarch_atomic_t *mutex_ownerp;
+       xnhandle_t cur;
+
+       cur = xeno_get_current();
+       if (cur == XN_NO_HANDLE)
+               return EPERM;
+
+       if (shadow->magic != COBALT_COND_MAGIC)
+               return EINVAL;
+
+       mutex_ownerp = get_mutex_ownerp(shadow);
+       if (mutex_ownerp) {
+               if (xnsynch_fast_set_spares(mutex_ownerp, cur,
+                                           COBALT_MUTEX_COND_SIGNAL) < 0)
+                       return EPERM;
+
+               pending_signals = get_signalsp(shadow);
+               *pending_signals = ~0UL;
+       }
 
+       return 0;
+#else /* !CONFIG_XENO_FASTSYNCH */
        return -XENOMAI_SKINCALL1(__cobalt_muxid,
                                 __cobalt_cond_broadcast, shadow);
+#endif /* !CONFIG_XENO_FASTSYNCH */
 }
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index 94bd488..99fdce7 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -37,7 +37,7 @@ static xnarch_atomic_t *get_ownerp(struct __shadow_mutex 
*shadow)
        if (likely(!shadow->attr.pshared))
                return shadow->owner;
 
-       return (xnarch_atomic_t *) (xeno_sem_heap[1] + shadow->owner_offset);
+       return (xnarch_atomic_t *)(xeno_sem_heap[1] + shadow->owner_offset);
 }
 #endif /* CONFIG_XENO_FASTSYNCH */
 
@@ -386,6 +386,9 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
        if (unlikely(xeno_get_current_mode() & XNOTHER))
                goto do_syscall;
 
+       if (unlikely(xnsynch_fast_check_spares(ownerp, 
COBALT_MUTEX_COND_SIGNAL)))
+               goto do_syscall;
+
        if (likely(xnsynch_fast_release(ownerp, cur))) {
          out:
                cb_read_unlock(&shadow->lock, s);


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to