Module: xenomai-gch
Branch: for-forge
Commit: 325e8314c72c80bcd903981760bb813672f3495e
URL:    http://git.xenomai.org/?p=xenomai-gch.git;a=commit;h=325e8314c72c80bcd903981760bb813672f3495e

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Mon Nov 14 22:38:01 2011 +0100

cobalt: move condvars syscalls to kernel/cobalt/cond.c

---

 include/cobalt/pthread.h |   16 --
 include/cobalt/syscall.h |    3 +-
 kernel/cobalt/cond.c     |  440 ++++++++++++++++++----------------------------
 kernel/cobalt/cond.h     |   26 ++--
 kernel/cobalt/mutex.c    |    5 +-
 kernel/cobalt/mutex.h    |   21 +--
 kernel/cobalt/syscall.c  |  222 +-----------------------
 lib/cobalt/cond.c        |   60 ++++---
 8 files changed, 231 insertions(+), 562 deletions(-)

diff --git a/include/cobalt/pthread.h b/include/cobalt/pthread.h
index e526f34..aa3cb4b 100644
--- a/include/cobalt/pthread.h
+++ b/include/cobalt/pthread.h
@@ -337,22 +337,6 @@ int pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared);
 
 int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared);
 
-int pthread_cond_init(pthread_cond_t *cond,
-                     const pthread_condattr_t *attr);
-
-int pthread_cond_destroy(pthread_cond_t *cond);
-
-int pthread_cond_wait(pthread_cond_t *cond,
-                     pthread_mutex_t *mutex);
-
-int pthread_cond_timedwait(pthread_cond_t *cond,
-                          pthread_mutex_t *mutex,
-                          const struct timespec *abstime);
-
-int pthread_cond_signal(pthread_cond_t *cond);
-
-int pthread_cond_broadcast(pthread_cond_t *cond);
-
 int pthread_cancel(pthread_t thread);
 
 void pthread_cleanup_push(void (*routine)(void *),
diff --git a/include/cobalt/syscall.h b/include/cobalt/syscall.h
index ab1d16e..e98e553 100644
--- a/include/cobalt/syscall.h
+++ b/include/cobalt/syscall.h
@@ -52,8 +52,7 @@
 #define __cobalt_cond_destroy          26
 #define __cobalt_cond_wait_prologue    27
 #define __cobalt_cond_wait_epilogue    28
-#define __cobalt_cond_signal           29
-#define __cobalt_cond_broadcast        30
+
 #define __cobalt_mq_open               31
 #define __cobalt_mq_close              32
 #define __cobalt_mq_unlink             33
diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index 6f16481..e9bfe51 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -94,7 +94,8 @@ static void cond_destroy_internal(cobalt_cond_t * cond, cobalt_kqueues_t *q)
  * Specification.</a>
  *
  */
-int pthread_cond_init(pthread_cond_t * cnd, const pthread_condattr_t * attr)
+static int
+pthread_cond_init(pthread_cond_t *cnd, const pthread_condattr_t *attr)
 {
        struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
        xnflags_t synch_flags = XNSYNCH_PRIO | XNSYNCH_NOPIP;
@@ -194,7 +195,7 @@ int pthread_cond_init(pthread_cond_t * cnd, const pthread_condattr_t * attr)
  * Specification.</a>
  *
  */
-int pthread_cond_destroy(pthread_cond_t * cnd)
+static int pthread_cond_destroy(pthread_cond_t * cnd)
 {
        struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
        cobalt_cond_t *cond;
@@ -229,18 +230,16 @@ int pthread_cond_destroy(pthread_cond_t * cnd)
        return 0;
 }
 
-int cobalt_cond_timedwait_prologue(xnthread_t *cur,
-                                 struct __shadow_cond *shadow,
-                                 struct __shadow_mutex *mutex,
-                                 unsigned *count_ptr,
-                                 int timed,
-                                 xnticks_t abs_to)
+static int cobalt_cond_timedwait_prologue(xnthread_t *cur,
+                                         cobalt_cond_t *cond,
+                                         cobalt_mutex_t *mutex,
+                                         int timed,
+                                         xnticks_t abs_to)
 {
-       cobalt_cond_t *cond;
        spl_t s;
        int err;
 
-       if (!shadow || !mutex)
+       if (!cond || !mutex)
                return EINVAL;
 
        if (xnpod_unblockable_p())
@@ -250,29 +249,27 @@ int cobalt_cond_timedwait_prologue(xnthread_t *cur,
 
        thread_cancellation_point(cur);
 
-       cond = shadow->cond;
-
        /* If another thread waiting for cond does not use the same mutex */
-       if (!cobalt_obj_active(shadow, COBALT_COND_MAGIC, struct __shadow_cond)
-           || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)
-           || (cond->mutex && cond->mutex != mutex->mutex)) {
-               err = EINVAL;
+       if (!cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)
+           || (cond->mutex && cond->mutex != mutex)) {
+               err = -EINVAL;
                goto unlock_and_return;
        }
 
+#if XENO_DEBUG(NUCLEUS)
        if (cond->owningq != cobalt_kqueues(cond->attr.pshared)) {
-               err = EPERM;
+               err = -EPERM;
                goto unlock_and_return;
        }
+#endif
 
        if (mutex->attr.pshared != cond->attr.pshared) {
-               err = EINVAL;
+               err = -EINVAL;
                goto unlock_and_return;
        }
 
-       /* Unlock mutex, with its previous recursive lock count stored
-          in "*count_ptr". */
-       err = cobalt_mutex_release(cur, mutex, count_ptr);
+       /* Unlock mutex. */
+       err = cobalt_mutex_release(cur, mutex);
        if (err < 0)
                goto unlock_and_return;
 
@@ -282,9 +279,9 @@ int cobalt_cond_timedwait_prologue(xnthread_t *cur,
 
        /* Bind mutex to cond. */
        if (cond->mutex == NULL) {
-               cond->mutex = mutex->mutex;
+               cond->mutex = mutex;
                inith(&cond->mutex_link);
-               appendq(&mutex->mutex->conds, &cond->mutex_link);
+               appendq(&mutex->conds, &cond->mutex_link);
        }
 
        /* Wait for another thread to signal the condition. */
@@ -313,9 +310,9 @@ int cobalt_cond_timedwait_prologue(xnthread_t *cur,
        err = 0;
 
        if (xnthread_test_info(cur, XNBREAK))
-               err = EINTR;
+               err = -EINTR;
        else if (xnthread_test_info(cur, XNTIMEO))
-               err = ETIMEDOUT;
+               err = -ETIMEDOUT;
 
       unlock_and_return:
        xnlock_put_irqrestore(&nklock, s);
@@ -323,30 +320,26 @@ int cobalt_cond_timedwait_prologue(xnthread_t *cur,
        return err;
 }
 
-int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
-                                 struct __shadow_cond *shadow,
-                                 struct __shadow_mutex *mutex, unsigned count)
+static int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
+                                         cobalt_cond_t *cond,
+                                         cobalt_mutex_t *mutex)
 {
-       cobalt_cond_t *cond;
        int err;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
 
-       cond = shadow->cond;
-
-       err = cobalt_mutex_timedlock_internal(cur, mutex, count, 0, XN_INFINITE);
+       err = cobalt_mutex_timedlock_internal(cur, mutex, 0, XN_INFINITE);
 
        if (err == -EINTR)
                goto unlock_and_return;
 
-
        /* Unbind mutex and cond, if no other thread is waiting, if the job was
           not already done. */
        if (!xnsynch_nsleepers(&cond->synchbase)
-           && cond->mutex == mutex->mutex) {
+           && cond->mutex == mutex) {
                cond->mutex = NULL;
-               removeq(&mutex->mutex->conds, &cond->mutex_link);
+               removeq(&mutex->conds, &cond->mutex_link);
        }
 
        thread_cancellation_point(cur);
@@ -357,237 +350,176 @@ int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
        return err;
 }
 
-/**
- * Wait on a condition variable.
- *
- * This service atomically unlocks the mutex @a mx, and block the calling thread
- * until the condition variable @a cnd is signalled using pthread_cond_signal()
- * or pthread_cond_broadcast(). When the condition is signaled, this service
- * re-acquire the mutex before returning.
- *
- * Spurious wakeups occur if a signal is delivered to the blocked thread, so, an
- * application should not assume that the condition changed upon successful
- * return from this service.
- *
- * Even if the mutex @a mx is recursive and its recursion count is greater than
- * one on entry, it is unlocked before blocking the caller, and the recursion
- * count is restored once the mutex is re-acquired by this service before
- * returning.
- *
- * Once a thread is blocked on a condition variable, a dynamic binding is formed
- * between the condition vairable @a cnd and the mutex @a mx; if another thread
- * calls this service specifying @a cnd as a condition variable but another
- * mutex than @a mx, this service returns immediately with the EINVAL status.
- *
- * This service is a cancellation point for Xenomai POSIX skin threads
- * (created with the pthread_create() service). When such a thread is cancelled
- * while blocked in a call to this service, the mutex @a mx is re-acquired
- * before the cancellation cleanup handlers are called.
- *
- * @param cnd the condition variable to wait for;
- *
- * @param mx the mutex associated with @a cnd.
- *
- * @return 0 on success,
- * @return an error number if:
- * - EPERM, the caller context is invalid;
- * - EINVAL, the specified condition variable or mutex is invalid;
- * - EINVAL, the specified condition variable and mutex process-shared
- * attribute mismatch;
- * - EPERM, the specified condition variable is not process-shared and does not
- *   belong to the current process;
- * - EINVAL, another thread is currently blocked on @a cnd using another mutex
- *   than @a mx;
- * - EPERM, the specified mutex is not owned by the caller.
- *
- * @par Valid contexts:
- * - Xenomai kernel-space thread;
- * - Xenomai user-space thread (switches to primary mode).
- *
- * @see
- * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_wait.html">
- * Specification.</a>
- *
- */
-int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
 {
-       struct __shadow_cond *cond = &((union __xeno_cond *)cnd)->shadow_cond;
-       struct __shadow_mutex *mutex =
-           &((union __xeno_mutex *)mx)->shadow_mutex;
-       xnthread_t *cur = xnpod_current_thread();
-       unsigned count;
-       int err;
+       unsigned long pending_signals;
+       int need_resched, i;
 
-       err = cobalt_cond_timedwait_prologue(cur, cond, mutex,
-                                           &count, 0, XN_INFINITE);
+       pending_signals = *(cond->pending_signals);
+
+       switch(pending_signals) {
+       case ~0UL:
+               need_resched =
+                       xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
+               break;
+
+       case 0:
+               need_resched = 0;
+               break;
+
+       default:
+               for(i = 0, need_resched = 0; i < pending_signals; i++)
+                       need_resched |=
+                               xnsynch_wakeup_one_sleeper(&cond->synchbase)
+                               != NULL;
+       }
 
-       if (!err || err == EINTR)
-               while (-EINTR == cobalt_cond_timedwait_epilogue(cur, cond,
-                                                              mutex, count))
-                       ;
+       *cond->pending_signals = 0;
 
-       return err != EINTR ? err : 0;
+       return need_resched;
 }
 
-/**
- * Wait a bounded time on a condition variable.
- *
- * This service is equivalent to pthread_cond_wait(), except that the calling
- * thread remains blocked on the condition variable @a cnd only until the
- * timeout specified by @a abstime expires.
- *
- * The timeout @a abstime is expressed as an absolute value of the @a clock
- * attribute passed to pthread_cond_init(). By default, @a CLOCK_REALTIME is
- * used.
- *
- * @param cnd the condition variable to wait for;
- *
- * @param mx the mutex associated with @a cnd;
- *
- * @param abstime the timeout, expressed as an absolute value of the clock
- * attribute passed to pthread_cond_init().
- *
- * @return 0 on success,
- * @return an error number if:
- * - EPERM, the caller context is invalid;
- * - EPERM, the specified condition variable is not process-shared and does not
- *   belong to the current process;
- * - EINVAL, the specified condition variable, mutex or timeout is invalid;
- * - EINVAL, the specified condition variable and mutex process-shared
- * attribute mismatch;
- * - EINVAL, another thread is currently blocked on @a cnd using another mutex
- *   than @a mx;
- * - EPERM, the specified mutex is not owned by the caller;
- * - ETIMEDOUT, the specified timeout expired.
- *
- * @par Valid contexts:
- * - Xenomai kernel-space thread;
- * - Xenomai user-space thread (switches to primary mode).
- *
- * @see
- * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_timedwait.html">
- * Specification.</a>
- *
- */
-int pthread_cond_timedwait(pthread_cond_t * cnd,
-                          pthread_mutex_t * mx, const struct timespec *abstime)
+int cobalt_cond_init(union __xeno_cond __user *u_cnd,
+                    const pthread_condattr_t __user *u_attr)
 {
-       struct __shadow_cond *cond = &((union __xeno_cond *)cnd)->shadow_cond;
-       struct __shadow_mutex *mutex =
-           &((union __xeno_mutex *)mx)->shadow_mutex;
-       xnthread_t *cur = xnpod_current_thread();
-       unsigned count;
+       pthread_condattr_t locattr, *attr;
+       union __xeno_cond cnd;
        int err;
 
-       err = cobalt_cond_timedwait_prologue(cur, cond, mutex, &count, 1,
-                                           ts2ns(abstime) + 1);
+       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
+                                    &u_cnd->shadow_cond,
+                                    sizeof(cnd.shadow_cond)))
+               return -EFAULT;
+
+       if (u_attr) {
+               if (__xn_safe_copy_from_user(&locattr,
+                                            u_attr, sizeof(locattr)))
+                       return -EFAULT;
 
-       if (!err || err == EINTR || err == ETIMEDOUT)
-               while (-EINTR == cobalt_cond_timedwait_epilogue(cur, cond,
-                                                              mutex, count))
-                       ;
+               attr = &locattr;
+       } else
+               attr = NULL;
 
-       return err != EINTR ? err : 0;
+       /* Use the caller-provided attributes when given, defaults otherwise. */
+       err = pthread_cond_init(&cnd.native_cond, attr);
+
+       if (err)
+               return -err;
+
+       return __xn_safe_copy_to_user(&u_cnd->shadow_cond,
+                                     &cnd.shadow_cond, sizeof(u_cnd->shadow_cond));
 }
 
-/**
- * Signal a condition variable.
- *
- * This service unblocks one thread blocked on the condition variable @a cnd.
- *
- * If more than one thread is blocked on the specified condition variable, the
- * highest priority thread is unblocked.
- *
- * @param cnd the condition variable to be signalled.
- *
- * @return 0 on succes,
- * @return an error number if:
- * - EINVAL, the condition variable is invalid;
- * - EPERM, the condition variable is not process-shared and does not belong to
- *   the current process.
- *
- * @see
- * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_signal.html.">
- * Specification.</a>
- *
- */
-int pthread_cond_signal(pthread_cond_t * cnd)
+int cobalt_cond_destroy(union __xeno_cond __user *u_cnd)
 {
-       struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
-       cobalt_cond_t *cond;
-       spl_t s;
+       union __xeno_cond cnd;
+       int err;
 
-       xnlock_get_irqsave(&nklock, s);
+       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
+                                    &u_cnd->shadow_cond,
+                                    sizeof(cnd.shadow_cond)))
+               return -EFAULT;
 
-       cond = shadow->cond;
-       if (!cobalt_obj_active(shadow, COBALT_COND_MAGIC, struct __shadow_cond)
-           || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) {
-               xnlock_put_irqrestore(&nklock, s);
-               return EINVAL;
+       err = pthread_cond_destroy(&cnd.native_cond);
+       if (err)
+               return -err;
+
+       return __xn_safe_copy_to_user(&u_cnd->shadow_cond,
+                                     &cnd.shadow_cond, sizeof(u_cnd->shadow_cond));
+}
+
+struct us_cond_data {
+       int err;
+};
+
+/* pthread_cond_wait_prologue(cond, mutex, uerr, timed, timeout) */
+int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
+                             union __xeno_mutex __user *u_mx,
+                             int *u_err,
+                             unsigned int timed,
+                             struct timespec __user *u_ts)
+{
+       xnthread_t *cur = xnshadow_thread(current);
+       xnarch_atomic_t *ownerp;
+       struct us_cond_data d;
+       cobalt_cond_t *cnd;
+       cobalt_mutex_t *mx;
+       struct timespec ts;
+       int err, perr = 0;
+
+       __xn_get_user(cnd, &u_cnd->shadow_cond.cond);
+       __xn_get_user(mx, &u_mx->shadow_mutex.mutex);
+
+       if (!cnd->mutex) {
+               __xn_get_user(ownerp, &u_mx->shadow_mutex.owner);
+               __xn_put_user(ownerp, &u_cnd->shadow_cond.mutex_ownerp);
        }
 
-#if XENO_DEBUG(POSIX)
-       if (cond->owningq != cobalt_kqueues(cond->attr.pshared)) {
-               xnlock_put_irqrestore(&nklock, s);
-               return EPERM;
+       if (timed) {
+               err = __xn_safe_copy_from_user(&ts, u_ts, sizeof(ts))?-EFAULT:0;
+               if (!err)
+                       err = cobalt_cond_timedwait_prologue(cur,
+                                                            cnd, mx, timed,
+                                                            ts2ns(&ts) + 1);
+       } else
+               err = cobalt_cond_timedwait_prologue(cur, cnd,
+                                                    mx, timed,
+                                                    XN_INFINITE);
+
+       if (!cnd->mutex) {
+               ownerp = (xnarch_atomic_t *)~0UL;
+               __xn_put_user(ownerp, &u_cnd->shadow_cond.mutex_ownerp);
        }
-#endif /* XENO_DEBUG(POSIX) */
 
-       /* FIXME: If the mutex associated with cnd is owned by the current
-          thread, we could postpone rescheduling until pthread_mutex_unlock is
-          called, this would save two useless context switches. */
-       if (xnsynch_wakeup_one_sleeper(&cond->synchbase) != NULL)
-               xnpod_schedule();
+       switch(err) {
+       case 0:
+       case -ETIMEDOUT:
+               perr = d.err = err;
+               err = cobalt_cond_timedwait_epilogue(cur, cnd, mx);
+
+               if (!cnd->mutex) {
+                       ownerp = (xnarch_atomic_t *)~0UL;
+                       __xn_put_user(ownerp,
+                                     &u_cnd->shadow_cond.mutex_ownerp);
+               }
+               break;
 
-       xnlock_put_irqrestore(&nklock, s);
+       case -EINTR:
+               perr = err;
+               d.err = 0;      /* epilogue should return 0. */
+               break;
 
-       return 0;
+       default:
+               /* Silence gcc; this branch covers a case that should
+                  never happen. */
+               d.err = EINVAL;
+       }
+
+       if (err == -EINTR)
+               __xn_put_user(d.err, u_err);
+
+       return err == 0 ? perr : err;
 }
 
-/**
- * Broadcast a condition variable.
- *
- * This service unblocks all threads blocked on the condition variable @a cnd.
- *
- * @param cnd the condition variable to be signalled.
- *
- * @return 0 on succes,
- * @return an error number if:
- * - EINVAL, the condition variable is invalid;
- * - EPERM, the condition variable is not process-shared and does not belong to
- *   the current process.
- *
- * @see
- * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_broadcast.html">
- * Specification.</a>
- *
- */
-int pthread_cond_broadcast(pthread_cond_t * cnd)
+int cobalt_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
+                             union __xeno_mutex __user *u_mx)
 {
-       struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
-       cobalt_cond_t *cond;
-       spl_t s;
+       xnthread_t *cur = xnshadow_thread(current);
+       cobalt_cond_t *cnd;
+       cobalt_mutex_t *mx;
+       int err;
 
-       xnlock_get_irqsave(&nklock, s);
+       __xn_get_user(cnd, &u_cnd->shadow_cond.cond);
+       __xn_get_user(mx, &u_mx->shadow_mutex.mutex);
 
-       cond = shadow->cond;
-       if (!cobalt_obj_active(shadow, COBALT_COND_MAGIC, struct __shadow_cond)
-           || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) {
-               xnlock_put_irqrestore(&nklock, s);
-               return EINVAL;
-       }
+       err = cobalt_cond_timedwait_epilogue(cur, cnd, mx);
 
-       if (cond->owningq != cobalt_kqueues(cond->attr.pshared)) {
-               xnlock_put_irqrestore(&nklock, s);
-               return EPERM;
+       if (!cnd->mutex) {
+               xnarch_atomic_t *ownerp = (xnarch_atomic_t *)~0UL;
+               __xn_put_user(ownerp, &u_cnd->shadow_cond.mutex_ownerp);
        }
 
-       if (xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED)
-               xnpod_schedule();
-
-       xnlock_put_irqrestore(&nklock, s);
-
-       return 0;
+       return err;
 }
 
 void cobalt_condq_cleanup(cobalt_kqueues_t *q)
@@ -610,35 +542,6 @@ void cobalt_condq_cleanup(cobalt_kqueues_t *q)
        xnlock_put_irqrestore(&nklock, s);
 }
 
-int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
-{
-       unsigned long pending_signals;
-       int need_resched, i;
-
-       pending_signals = *(cond->pending_signals);
-
-       switch(pending_signals) {
-       case ~0UL:
-               need_resched =
-                       xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
-               break;
-
-       case 0:
-               need_resched = 0;
-               break;
-
-       default:
-               for(i = 0, need_resched = 0; i < pending_signals; i++)
-                       need_resched |=
-                               xnsynch_wakeup_one_sleeper(&cond->synchbase)
-                               != NULL;
-       }
-
-       *cond->pending_signals = 0;
-
-       return need_resched;
-}
-
 void cobalt_cond_pkg_init(void)
 {
        initq(&cobalt_global_kqueues.condq);
@@ -651,10 +554,3 @@ void cobalt_cond_pkg_cleanup(void)
 }
 
 /*@}*/
-
-EXPORT_SYMBOL_GPL(pthread_cond_init);
-EXPORT_SYMBOL_GPL(pthread_cond_destroy);
-EXPORT_SYMBOL_GPL(pthread_cond_wait);
-EXPORT_SYMBOL_GPL(pthread_cond_timedwait);
-EXPORT_SYMBOL_GPL(pthread_cond_signal);
-EXPORT_SYMBOL_GPL(pthread_cond_broadcast);
diff --git a/kernel/cobalt/cond.h b/kernel/cobalt/cond.h
index c4849c9..8d77ad9 100644
--- a/kernel/cobalt/cond.h
+++ b/kernel/cobalt/cond.h
@@ -29,6 +29,7 @@ union __xeno_cond {
        struct __shadow_cond {
                unsigned magic;
                struct cobalt_condattr attr;
+               struct cobalt_cond *cond;
                union {
                        unsigned pending_signals_offset;
                        unsigned long *pending_signals;
@@ -37,7 +38,6 @@ union __xeno_cond {
                        unsigned mutex_ownerp_offset;
                        xnarch_atomic_t *mutex_ownerp;
                };
-               struct cobalt_cond *cond;
        } shadow_cond;
 };
 
@@ -46,6 +46,7 @@ union __xeno_cond {
 #include "internal.h"
 
 struct __shadow_mutex;
+union __xeno_mutex;
 
 typedef struct cobalt_cond {
        unsigned magic;
@@ -66,18 +67,21 @@ typedef struct cobalt_cond {
        cobalt_kqueues_t *owningq;
 } cobalt_cond_t;
 
-int cobalt_cond_timedwait_prologue(xnthread_t *cur,
-                                 struct __shadow_cond *shadow,
-                                 struct __shadow_mutex *mutex,
-                                 unsigned *count_ptr,
-                                 int timed,
-                                 xnticks_t to);
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
+
+int cobalt_cond_init(union __xeno_cond __user *u_cnd,
+                    const pthread_condattr_t __user *u_attr);
 
-int cobalt_cond_timedwait_epilogue(xnthread_t *cur,
-                                 struct __shadow_cond *shadow,
-                                 struct __shadow_mutex *mutex, unsigned count);
+int cobalt_cond_destroy(union __xeno_cond __user *u_cnd);
 
-int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
+int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
+                             union __xeno_mutex __user *u_mx,
+                             int *u_err,
+                             unsigned int timed,
+                             struct timespec __user *u_ts);
+
+int cobalt_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
+                             union __xeno_mutex __user *u_mx);
 
 void cobalt_condq_cleanup(cobalt_kqueues_t *q);
 
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index 320bcf2..b204b31 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -261,7 +261,10 @@ int cobalt_mutex_timedlock_break(struct __shadow_mutex *shadow,
        if (xnthread_handle(cur) == XN_NO_HANDLE)
                return -EPERM;
 
-       err = cobalt_mutex_timedlock_internal(cur, shadow, 1, timed, abs_to);
+       err = cobalt_mutex_timedlock_internal(cur, shadow->mutex, timed, abs_to);
+       if (!err)
+               shadow->lockcnt = 1;
+
        if (err != -EBUSY)
                goto out;
 
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index 82b8c34..5f523ab 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -85,19 +85,15 @@ void cobalt_mutex_destroy_internal(cobalt_mutex_t *mutex,
 
 /* must be called with nklock locked, interrupts off. */
 static inline int cobalt_mutex_timedlock_internal(xnthread_t *cur,
-                                                struct __shadow_mutex *shadow,
-                                                unsigned count,
+                                                cobalt_mutex_t *mutex,
                                                 int timed,
                                                 xnticks_t abs_to)
 
 {
-       cobalt_mutex_t *mutex = shadow->mutex;
-
        if (xnpod_unblockable_p())
                return -EPERM;
 
-       if (!cobalt_obj_active(shadow, COBALT_MUTEX_MAGIC, struct __shadow_mutex)
-           || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
+       if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
                return -EINVAL;
 
 #if XENO_DEBUG(POSIX)
@@ -122,22 +118,16 @@ static inline int cobalt_mutex_timedlock_internal(xnthread_t *cur,
                        return -EINVAL;
        }
 
-       shadow->lockcnt = count;
-
        return 0;
 }
 
 static inline int cobalt_mutex_release(xnthread_t *cur,
-                                      struct __shadow_mutex *shadow,
-                                      unsigned *count_ptr)
+                                      cobalt_mutex_t *mutex)
 {
-       cobalt_mutex_t *mutex;
        xnholder_t *holder;
        int need_resched;
 
-       mutex = shadow->mutex;
-       if (!cobalt_obj_active(shadow, COBALT_MUTEX_MAGIC, struct 
__shadow_mutex)
-           || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct 
cobalt_mutex))
+       if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
                 return -EINVAL;
 
 #if XENO_DEBUG(POSIX)
@@ -148,9 +138,6 @@ static inline int cobalt_mutex_release(xnthread_t *cur,
        if (xnsynch_owner_check(&mutex->synchbase, cur) != 0)
                return -EPERM;
 
-       if (count_ptr)
-               *count_ptr = shadow->lockcnt;
-
        need_resched = 0;
        for (holder = getheadq(&mutex->conds);
             holder; holder = nextq(&mutex->conds, holder)) {
diff --git a/kernel/cobalt/syscall.c b/kernel/cobalt/syscall.c
index 6ca6979..7664ece 100644
--- a/kernel/cobalt/syscall.c
+++ b/kernel/cobalt/syscall.c
@@ -1277,7 +1277,7 @@ static int __pthread_mutex_unlock(union __xeno_mutex __user *u_mx)
 
        xnlock_get_irqsave(&nklock, s);
        err = cobalt_mutex_release(xnpod_current_thread(),
-                                  &mx.shadow_mutex, NULL);
+                                  mx.shadow_mutex.mutex);
        if (err < 0)
                goto out;
 
@@ -1383,215 +1383,6 @@ static int __pthread_condattr_setpshared(pthread_condattr_t __user *u_attr,
        return __xn_safe_copy_to_user(u_attr, &attr, sizeof(*u_attr));
 }
 
-static int __pthread_cond_init(union __xeno_cond __user *u_cnd,
-                              const pthread_condattr_t __user *u_attr)
-{
-       pthread_condattr_t locattr, *attr;
-       union __xeno_cond cnd;
-       int err;
-
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
-               return -EFAULT;
-
-       if (u_attr) {
-               if (__xn_safe_copy_from_user(&locattr,
-                                            u_attr, sizeof(locattr)))
-                       return -EFAULT;
-
-               attr = &locattr;
-       } else
-               attr = NULL;
-
-       /* Always use default attribute. */
-       err = pthread_cond_init(&cnd.native_cond, attr);
-
-       if (err)
-               return -err;
-
-       return __xn_safe_copy_to_user(&u_cnd->shadow_cond,
-                                     &cnd.shadow_cond, sizeof(u_cnd->shadow_cond));
-}
-
-static int __pthread_cond_destroy(union __xeno_cond __user *u_cnd)
-{
-       union __xeno_cond cnd;
-       int err;
-
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
-               return -EFAULT;
-
-       err = pthread_cond_destroy(&cnd.native_cond);
-       if (err)
-               return -err;
-
-       return __xn_safe_copy_to_user(&u_cnd->shadow_cond,
-                                     &cnd.shadow_cond, sizeof(u_cnd->shadow_cond));
-}
-
-struct us_cond_data {
-       unsigned count;
-       int err;
-};
-
-/* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */
-static int __pthread_cond_wait_prologue(union __xeno_cond __user *u_cnd,
-                                       union __xeno_mutex __user *u_mx,
-                                       struct us_cond_data __user *u_d,
-                                       unsigned int timed,
-                                       struct timespec __user *u_ts)
-{
-       xnthread_t *cur = xnshadow_thread(current);
-       struct us_cond_data d;
-       union __xeno_cond cnd;
-       union __xeno_mutex mx;
-       struct timespec ts;
-       int err, perr = 0;
-
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
-               return -EFAULT;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
-               return -EFAULT;
-
-       if (!cnd.shadow_cond.cond->mutex) {
-               cnd.shadow_cond.mutex_ownerp = mx.shadow_mutex.owner;
-               if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
-                                          &cnd.shadow_cond.mutex_ownerp,
-                                          sizeof(cnd.shadow_cond.mutex_ownerp)))
-                       return -EFAULT;
-       }
-
-       if (timed) {
-               err = __xn_safe_copy_from_user(&ts, u_ts, sizeof(ts))?EFAULT:0;
-               if (!err)
-                       err = cobalt_cond_timedwait_prologue(cur,
-                                                            &cnd.shadow_cond,
-                                                            &mx.shadow_mutex,
-                                                            &d.count,
-                                                            timed,
-                                                            ts2ns(&ts) + 1);
-       } else
-               err = cobalt_cond_timedwait_prologue(cur,
-                                                   &cnd.shadow_cond,
-                                                   &mx.shadow_mutex,
-                                                   &d.count,
-                                                   timed, XN_INFINITE);
-
-       if (!cnd.shadow_cond.cond->mutex) {
-               cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
-               if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
-                                          &cnd.shadow_cond.mutex_ownerp,
-                                          sizeof(cnd.shadow_cond.mutex_ownerp)))
-                       return -EFAULT;
-       }
-
-       switch(err) {
-       case 0:
-       case ETIMEDOUT:
-               perr = d.err = err;
-               err = -cobalt_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
-                                                   &mx.shadow_mutex, d.count);
-
-               if (!cnd.shadow_cond.cond->mutex) {
-                       cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
-                       if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
-                                                  &cnd.shadow_cond.mutex_ownerp,
-                                                  sizeof(cnd.shadow_cond.mutex_ownerp)))
-                               return -EFAULT;
-               }
-
-               if (err == 0 &&
-                   __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
-                                          &mx.shadow_mutex.lockcnt,
-                                          sizeof(u_mx->shadow_mutex.lockcnt)))
-                       return -EFAULT;
-               break;
-
-       case EINTR:
-               perr = err;
-               d.err = 0;      /* epilogue should return 0. */
-               break;
-       }
-
-       if (err == EINTR
-           &&__xn_safe_copy_to_user(u_d, &d, sizeof(d)))
-                       return -EFAULT;
-
-       return err == 0 ? -perr : -err;
-}
-
-static int __pthread_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
-                                       union __xeno_mutex __user *u_mx,
-                                       unsigned int count)
-{
-       xnthread_t *cur = xnshadow_thread(current);
-       union __xeno_cond cnd;
-       union __xeno_mutex mx;
-       int err;
-
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
-               return -EFAULT;
-
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    offsetof(struct __shadow_mutex, owner)
-                                    ))
-               return -EFAULT;
-
-       err = cobalt_cond_timedwait_epilogue(cur, &cnd.shadow_cond,
-                                           &mx.shadow_mutex, count);
-
-       if (!cnd.shadow_cond.cond->mutex) {
-               cnd.shadow_cond.mutex_ownerp = (xnarch_atomic_t *)~0UL;
-               if (__xn_safe_copy_to_user(&u_cnd->shadow_cond.mutex_ownerp,
-                                          &cnd.shadow_cond.mutex_ownerp,
-                                          sizeof(cnd.shadow_cond.mutex_ownerp)))
-                       return -EFAULT;
-       }
-
-       if (err == 0
-           && __xn_safe_copy_to_user(&u_mx->shadow_mutex.lockcnt,
-                                     &mx.shadow_mutex.lockcnt,
-                                     sizeof(u_mx->shadow_mutex.lockcnt)))
-               return -EFAULT;
-
-       return err;
-}
-
-static int __pthread_cond_signal(union __xeno_cond __user *u_cnd)
-{
-       union __xeno_cond cnd;
-
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
-               return -EFAULT;
-
-       return -pthread_cond_signal(&cnd.native_cond);
-}
-
-static int __pthread_cond_broadcast(union __xeno_cond __user *u_cnd)
-{
-       union __xeno_cond cnd;
-
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
-               return -EFAULT;
-
-       return -pthread_cond_broadcast(&cnd.native_cond);
-}
-
 /* mq_open(name, oflags, mode, attr, ufd) */
 static int __mq_open(const char __user *u_name,
                     int oflags,
@@ -2504,12 +2295,11 @@ static struct xnsysent __systab[] = {
        SKINCALL_DEF(__cobalt_mutex_timedlock, __pthread_mutex_timedlock, primary),
        SKINCALL_DEF(__cobalt_mutex_trylock, __pthread_mutex_trylock, primary),
        SKINCALL_DEF(__cobalt_mutex_unlock, __pthread_mutex_unlock, nonrestartable),
-       SKINCALL_DEF(__cobalt_cond_init, __pthread_cond_init, any),
-       SKINCALL_DEF(__cobalt_cond_destroy, __pthread_cond_destroy, any),
-       SKINCALL_DEF(__cobalt_cond_wait_prologue, __pthread_cond_wait_prologue, nonrestartable),
-       SKINCALL_DEF(__cobalt_cond_wait_epilogue, __pthread_cond_wait_epilogue, primary),
-       SKINCALL_DEF(__cobalt_cond_signal, __pthread_cond_signal, any),
-       SKINCALL_DEF(__cobalt_cond_broadcast, __pthread_cond_broadcast, any),
+       SKINCALL_DEF(__cobalt_cond_init, cobalt_cond_init, any),
+       SKINCALL_DEF(__cobalt_cond_destroy, cobalt_cond_destroy, any),
+       SKINCALL_DEF(__cobalt_cond_wait_prologue, cobalt_cond_wait_prologue, nonrestartable),
+       SKINCALL_DEF(__cobalt_cond_wait_epilogue, cobalt_cond_wait_epilogue, primary),
+
        SKINCALL_DEF(__cobalt_mq_open, __mq_open, lostage),
        SKINCALL_DEF(__cobalt_mq_close, __mq_close, lostage),
        SKINCALL_DEF(__cobalt_mq_unlink, __mq_unlink, lostage),
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index a1c31a8..cdb4ca3 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -17,7 +17,6 @@
  */
 
 #include <errno.h>
-#include <pthread.h>
 #include <nucleus/synch.h>
 #include <cobalt/syscall.h>
 #include <kernel/cobalt/mutex.h>
@@ -95,14 +94,14 @@ int __wrap_pthread_cond_init(pthread_cond_t * cond,
                &((union __xeno_cond *)cond)->shadow_cond;
        int err;
 
-       err = -XENOMAI_SKINCALL2(__cobalt_muxid,
+       err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                 __cobalt_cond_init, shadow, attr);
        if (!err && !shadow->attr.pshared) {
                shadow->pending_signals = (unsigned long *)
                        (xeno_sem_heap[0] + shadow->pending_signals_offset);
        }
 
-       return err;
+       return -err;
 }
 
 int __wrap_pthread_cond_destroy(pthread_cond_t * cond)
@@ -126,12 +125,13 @@ static void __pthread_cond_cleanup(void *data)
        int err;
 
        do {
-               err = -XENOMAI_SKINCALL3(__cobalt_muxid,
+               err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                        __cobalt_cond_wait_epilogue,
                                        &c->cond->shadow_cond,
-                                       &c->mutex->shadow_mutex,
-                                       c->count);
-       } while (err == EINTR);
+                                       &c->mutex->shadow_mutex);
+       } while (err == -EINTR);
+
+       c->mutex->shadow_mutex.lockcnt = c->count;
 }
 
 int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
@@ -144,27 +144,30 @@ int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
+       c.count = c.mutex->shadow_mutex.lockcnt;
+
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 
-       err = -XENOMAI_SKINCALL5(__cobalt_muxid,
+       err = XENOMAI_SKINCALL5(__cobalt_muxid,
                                 __cobalt_cond_wait_prologue,
                                 &c.cond->shadow_cond,
-                                &c.mutex->shadow_mutex, &c.count, 0, NULL);
+                                &c.mutex->shadow_mutex, &c.err, 0, NULL);
 
        pthread_setcanceltype(oldtype, NULL);
 
        pthread_cleanup_pop(0);
 
-       while (err == EINTR)
-               err = -XENOMAI_SKINCALL3(__cobalt_muxid,
+       while (err == -EINTR)
+               err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                         __cobalt_cond_wait_epilogue,
                                         &c.cond->shadow_cond,
-                                        &c.mutex->shadow_mutex,
-                                        c.count);
+                                        &c.mutex->shadow_mutex);
+
+       c.mutex->shadow_mutex.lockcnt = c.count;
 
        pthread_testcancel();
 
-       return err ?: c.err;
+       return -err ?: -c.err;
 }
 
 int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
@@ -179,26 +182,29 @@ int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
 
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
+       c.count = c.mutex->shadow_mutex.lockcnt;
+
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 
-       err = -XENOMAI_SKINCALL5(__cobalt_muxid,
-                                __cobalt_cond_wait_prologue,
-                                &c.cond->shadow_cond,
-                                &c.mutex->shadow_mutex, &c.count, 1, abstime);
+       err = XENOMAI_SKINCALL5(__cobalt_muxid,
+                               __cobalt_cond_wait_prologue,
+                               &c.cond->shadow_cond,
+                               &c.mutex->shadow_mutex, &c.err, 1, abstime);
        pthread_setcanceltype(oldtype, NULL);
 
        pthread_cleanup_pop(0);
 
-       while (err == EINTR)
-               err = -XENOMAI_SKINCALL3(__cobalt_muxid,
-                                        __cobalt_cond_wait_epilogue,
-                                        &c.cond->shadow_cond,
-                                        &c.mutex->shadow_mutex,
-                                        c.count);
+       while (err == -EINTR)
+               err = XENOMAI_SKINCALL2(__cobalt_muxid,
+                                       __cobalt_cond_wait_epilogue,
+                                       &c.cond->shadow_cond,
+                                       &c.mutex->shadow_mutex);
+
+       c.mutex->shadow_mutex.lockcnt = c.count;
 
        pthread_testcancel();
 
-       return err ?: c.err;
+       return -err ?: -c.err;
 }
 
 int __wrap_pthread_cond_signal(pthread_cond_t * cond)
@@ -220,7 +226,7 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
        if (mutex_ownerp) {
                if (xnsynch_fast_set_spares(mutex_ownerp, cur,
                                            COBALT_MUTEX_COND_SIGNAL) < 0)
-                       return -EPERM;
+                       return EPERM;
 
                pending_signals = get_signalsp(shadow);
                if (*pending_signals != ~0UL)
@@ -249,7 +255,7 @@ int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
        if (mutex_ownerp) {
                if (xnsynch_fast_set_spares(mutex_ownerp, cur,
                                            COBALT_MUTEX_COND_SIGNAL) < 0)
-                       return -EPERM;
+                       return EPERM;
 
                pending_signals = get_signalsp(shadow);
                *get_signalsp(shadow) = ~0UL;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to