The new implementation. It avoids a syscall in user-space in the common case:
when a mutex is free when locking it, and when it was not claimed when
unlocking it.

Note that this change entails a change in behaviour of the mutexes: before this
change, locking a mutex caused the calling thread to switch to primary mode.
Now, a thread locking a mutex without a syscall remains in the same mode.

---
 Makefile.am |    2
 cond.c      |   21 ++++
 mutex.c     |  262 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 3 files changed, 260 insertions(+), 25 deletions(-)

Index: src/skins/posix/mutex.c
===================================================================
--- src/skins/posix/mutex.c     (revision 3718)
+++ src/skins/posix/mutex.c     (working copy)
@@ -18,9 +18,14 @@
 
 #include <errno.h>
 #include <posix/syscall.h>
+#include <posix/cb_lock.h>
 #include <pthread.h>
 
+#define PSE51_MUTEX_MAGIC (0x86860303)
+
 extern int __pse51_muxid;
+extern pthread_key_t pse51_cur_key;
+extern unsigned long pse51_shared_map[2];
 
 int __wrap_pthread_mutexattr_init(pthread_mutexattr_t *attr)
 {
@@ -73,66 +78,279 @@ int __wrap_pthread_mutexattr_setpshared(
                                  __pse51_mutexattr_setpshared, attr, pshared);
 }
 
-int __wrap_pthread_mutex_init(pthread_mutex_t * mutex,
-                             const pthread_mutexattr_t * attr)
+static atomic_ptr_t *get_ownerp(struct __shadow_mutex *shadow)
+{
+       if (likely(!shadow->attr.pshared))
+               return shadow->owner;
+       
+       return (atomic_ptr_t *) (pse51_shared_map[1] + shadow->owner_offset);
+}
+
+int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
+                             const pthread_mutexattr_t *attr)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
+       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
        int err;
 
-       err = -XENOMAI_SKINCALL2(__pse51_muxid,
-                                __pse51_mutex_init,&_mutex->shadow_mutex,attr);
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
+               goto checked;
+
+       err = -XENOMAI_SKINCALL2(__pse51_muxid,__pse51_check_init,shadow,attr);
+
+       if (err) {
+               cb_read_unlock(&shadow->lock, s);
+               return err;
+       }
+
+  checked:
+       cb_force_write_lock(&shadow->lock, s);
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
+
+       err = -XENOMAI_SKINCALL2(__pse51_muxid,__pse51_mutex_init,shadow,attr);
+
+       if (!shadow->attr.pshared)
+               shadow->owner = (atomic_ptr_t *)
+                       (pse51_shared_map[0] + shadow->owner_offset);
+       
+       cb_write_unlock(&shadow->lock, s);
+
        return err;
 }
 
-int __wrap_pthread_mutex_destroy(pthread_mutex_t * mutex)
+int __wrap_pthread_mutex_destroy(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
+       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       int err;
 
-       return -XENOMAI_SKINCALL1(__pse51_muxid,
-                                 __pse51_mutex_destroy, &_mutex->shadow_mutex);
+       if (unlikely(cb_try_write_lock(&shadow->lock, s)))
+               return EINVAL;
+
+       err = -XENOMAI_SKINCALL1(__pse51_muxid, __pse51_mutex_destroy, shadow);
+
+       cb_write_unlock(&shadow->lock, s);
+
+       return err;
 }
 
-int __wrap_pthread_mutex_lock(pthread_mutex_t * mutex)
+int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       int err;
+       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       xnthread_t *cur, *owner;
+       int err = 0;
+
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+       cur = pthread_getspecific(pse51_cur_key);
+       if (!cur)
+               return EPERM;
+
+       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
+               return EINVAL;
+
+       if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       owner = atomic_ptr_cmpxchg(get_ownerp(shadow), NULL, cur);
+       if (likely(!owner)) {
+               shadow->lockcnt = 1;
+               cb_read_unlock(&shadow->lock, s);
+               return 0;
+       }
+
+       if (clear_claimed(owner) == cur)
+               switch(shadow->attr.type) {
+               case PTHREAD_MUTEX_NORMAL:
+                       break;
+
+               case PTHREAD_MUTEX_ERRORCHECK:
+                       err = -EDEADLK;
+                       goto out;
+
+               case PTHREAD_MUTEX_RECURSIVE:
+                       if (shadow->lockcnt == UINT_MAX) {
+                               err = -EAGAIN;
+                               goto out;
+                       }
+
+                       ++shadow->lockcnt;
+                       goto out;
+               }
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
 
        do {
-               err = XENOMAI_SKINCALL1(__pse51_muxid,
-                                       __pse51_mutex_lock,
-                                       &_mutex->shadow_mutex);
+               err = XENOMAI_SKINCALL1(__pse51_muxid,__pse51_mutex_lock,shadow);
        } while (err == -EINTR);
 
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+  out:
+       cb_read_unlock(&shadow->lock, s);
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
+
        return -err;
 }
 
-int __wrap_pthread_mutex_timedlock(pthread_mutex_t * mutex,
+int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
                                   const struct timespec *to)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       int err;
+       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       xnthread_t *cur, *owner;
+       int err = 0;
+
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+       cur = pthread_getspecific(pse51_cur_key);
+       if (!cur)
+               return EPERM;
+
+       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
+               return EINVAL;
+
+       if (shadow->magic != PSE51_MUTEX_MAGIC) {
+               err = -EINVAL;
+               goto out;
+       }       
+
+       owner = atomic_ptr_cmpxchg(get_ownerp(shadow), NULL, cur);
+       if (likely(!owner)) {
+               shadow->lockcnt = 1;
+               cb_read_unlock(&shadow->lock, s);
+               return 0;
+       }
+
+       if (clear_claimed(owner) == cur)
+               switch(shadow->attr.type) {
+               case PTHREAD_MUTEX_NORMAL:
+                       break;
+
+               case PTHREAD_MUTEX_ERRORCHECK:
+                       err = -EDEADLK;
+                       goto out;
+
+               case PTHREAD_MUTEX_RECURSIVE:
+                       if (shadow->lockcnt == UINT_MAX) {
+                               err = -EAGAIN;
+                               goto out;
+                       }
+
+                       ++shadow->lockcnt;
+                       goto out;
+               }
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
 
        do {
                err = XENOMAI_SKINCALL2(__pse51_muxid,
-                                       __pse51_mutex_timedlock,
-                                       &_mutex->shadow_mutex, to);
+                                       __pse51_mutex_timedlock, shadow, to);
        } while (err == -EINTR);
 
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+  out:
+       cb_read_unlock(&shadow->lock, s);
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
+
        return -err;
 }
 
-int __wrap_pthread_mutex_trylock(pthread_mutex_t * mutex)
+int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
+       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       xnthread_t *cur, *owner;
+       int err = 0;
+
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+       cur = pthread_getspecific(pse51_cur_key);
+       if (!cur)
+               return EPERM;
+
+       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
+               return EINVAL;
+
+       if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
+               err = -EINVAL;
+               goto out;
+       }       
+
+       owner = atomic_ptr_cmpxchg(get_ownerp(shadow), NULL, cur);
+       if (likely(!owner)) {
+               shadow->lockcnt = 1;
+               cb_read_unlock(&shadow->lock, s);
+               return 0;
+       }
+
+       err = -EBUSY;
+       if (clear_claimed(owner) == cur
+           && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE)
+               if (shadow->lockcnt == UINT_MAX)
+                       err = -EAGAIN;
+               else {
+                       ++shadow->lockcnt;
+                       err = 0;
+               }
+
+  out:
+       cb_read_unlock(&shadow->lock, s);
+
+       return -err;
+
+#else /* !XNARCH_HAVE_US_ATOMIC_CMPXCHG */
+
+       return -XENOMAI_SKINCALL1(__pse51_muxid, __pse51_mutex_trylock, shadow);
 
-       return -XENOMAI_SKINCALL1(__pse51_muxid,
-                                 __pse51_mutex_trylock, &_mutex->shadow_mutex);
+#endif /* !XNARCH_HAVE_US_ATOMIC_CMPXCHG */
 }
 
-int __wrap_pthread_mutex_unlock(pthread_mutex_t * mutex)
+int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
+       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
 
-       return -XENOMAI_SKINCALL1(__pse51_muxid,
-                                 __pse51_mutex_unlock, &_mutex->shadow_mutex);
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+       atomic_ptr_t *ownerp;
+       xnthread_t *cur;
+       int err = 0;
+
+       cur = pthread_getspecific(pse51_cur_key);
+       if (!cur)
+               return EPERM;
+
+       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
+               return EINVAL;
+
+       if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
+               err = -EINVAL;
+               goto out_err;
+       }
+
+       ownerp = get_ownerp(shadow);
+       if (unlikely(clear_claimed(atomic_ptr_read(ownerp)) != cur)) {
+               err = -EPERM;
+               goto out_err;
+       }
+
+       err = 0;
+       if (shadow->lockcnt > 1) {
+               --shadow->lockcnt;
+               goto out;
+       }
+
+       if (likely(atomic_ptr_cmpxchg(ownerp, cur, NULL) == cur)) {
+         out:
+               cb_read_unlock(&shadow->lock, s);
+               return 0;
+       }
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
+
+       err = XENOMAI_SKINCALL1(__pse51_muxid, __pse51_mutex_unlock, shadow);
+
+#ifdef XNARCH_HAVE_US_ATOMIC_CMPXCHG
+  out_err:
+       cb_read_unlock(&shadow->lock, s);
+#endif /* XNARCH_HAVE_US_ATOMIC_CMPXCHG */
+
+       return -err;
 }
Index: src/skins/posix/cond.c
===================================================================
--- src/skins/posix/cond.c      (revision 3718)
+++ src/skins/posix/cond.c      (working copy)
@@ -19,6 +19,7 @@
 #include <errno.h>
 #include <posix/syscall.h>
 #include <pthread.h>
+#include <posix/cb_lock.h>
 
 extern int __pse51_muxid;
 
@@ -95,7 +96,7 @@ static void __pthread_cond_cleanup(void 
                          c->count);
 }
 
-int __wrap_pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
+int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
        struct pse51_cond_cleanup_t c = {
                .cond = (union __xeno_cond *)cond,
@@ -103,6 +104,9 @@ int __wrap_pthread_cond_wait(pthread_con
        };
        int err, oldtype;
 
+       if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
+               return EINVAL;
+
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -118,11 +122,15 @@ int __wrap_pthread_cond_wait(pthread_con
 
        pthread_cleanup_pop(0);
 
-       if (err)
+       if (err) {
+               cb_read_unlock(&c.mutex->shadow_mutex.lock, s);
                return err;
+       }
 
        __pthread_cond_cleanup(&c);
 
+       cb_read_unlock(&c.mutex->shadow_mutex.lock, s);
+
        pthread_testcancel();
 
        return 0;
@@ -138,6 +146,9 @@ int __wrap_pthread_cond_timedwait(pthrea
        };
        int err, oldtype;
 
+       if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
+               return EINVAL;
+
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -153,11 +164,15 @@ int __wrap_pthread_cond_timedwait(pthrea
 
        pthread_cleanup_pop(0);
 
-       if (err && err != ETIMEDOUT)
+       if (err && err != ETIMEDOUT) {
+               cb_read_unlock(&c.mutex->shadow_mutex.lock, s);         
                return err;
+       }
 
        __pthread_cond_cleanup(&c);
 
+       cb_read_unlock(&c.mutex->shadow_mutex.lock, s);
+
        pthread_testcancel();
 
        return err;
Index: src/skins/posix/Makefile.am
===================================================================
--- src/skins/posix/Makefile.am (revision 3718)
+++ src/skins/posix/Makefile.am (working copy)
@@ -2,6 +2,8 @@ includedir = $(prefix)/include/posix
 
 lib_LTLIBRARIES = libpthread_rt.la
 
+CPPFLAGS+=-I$(top_srcdir)/ksrc/skins
+
 libpthread_rt_la_LDFLAGS = -version-info 1:0:0 -lpthread
 
 libpthread_rt_la_SOURCES = \


-- 


                                            Gilles.

_______________________________________________
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core

Reply via email to