Module: xenomai-head
Branch: master
Commit: e768b24bd0173acefce3505d3c408766ce6cfa68
URL:    http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=e768b24bd0173acefce3505d3c408766ce6cfa68

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Sun Aug 29 16:17:35 2010 +0200

posix: add a magic to internal structures.

These structures may still be referenced by a process's child if they
were destroyed by the parent after the fork, so use a magic number in
order to be able to detect this case.
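
For illustration, the pattern is: the init path stamps the internal object
with the skin's magic, every operation validates both the user-visible
shadow and the internal object it points to, and destroy inverts both
magics so a stale descriptor (for instance one inherited across fork()) is
rejected with EINVAL instead of being used blindly. The sketch below is a
hypothetical, user-space reduction of that idea; the demo_* names, the
magic value and the malloc()-based setup are invented for the example,
whereas the real code uses the PSE51 skin's pse51_obj_active() /
pse51_mark_deleted() helpers and its own allocators.

        #include <stdio.h>
        #include <stdlib.h>

        #define DEMO_COND_MAGIC  0x86860505u    /* arbitrary demo value */

        struct demo_cond {
                unsigned magic;          /* stamped on init, inverted on destroy */
                /* ... synchronization state would live here ... */
        };

        struct demo_shadow_cond {
                unsigned magic;          /* user-visible descriptor magic */
                struct demo_cond *cond;  /* internal object the descriptor points to */
        };

        static int demo_cond_init(struct demo_shadow_cond *shadow)
        {
                struct demo_cond *cond = malloc(sizeof(*cond));

                if (!cond)
                        return -1;

                cond->magic = DEMO_COND_MAGIC;
                shadow->magic = DEMO_COND_MAGIC;
                shadow->cond = cond;
                return 0;
        }

        static void demo_cond_destroy(struct demo_shadow_cond *shadow)
        {
                /* Invert both magics; the demo keeps the memory around so a
                   stale user of the old descriptor reads ~MAGIC, not garbage. */
                shadow->cond->magic = ~DEMO_COND_MAGIC;
                shadow->magic = ~DEMO_COND_MAGIC;
        }

        static int demo_cond_signal(struct demo_shadow_cond *shadow)
        {
                struct demo_cond *cond = shadow->cond;

                /* Check the descriptor AND the internal object, as the patch does. */
                if (shadow->magic != DEMO_COND_MAGIC || cond->magic != DEMO_COND_MAGIC)
                        return -1;      /* EINVAL in the real skin */

                /* ... wake up one waiter ... */
                return 0;
        }

        int main(void)
        {
                struct demo_shadow_cond c;

                if (demo_cond_init(&c))
                        return 1;
                printf("signal before destroy: %d\n", demo_cond_signal(&c)); /* 0 */
                demo_cond_destroy(&c);
                printf("signal after destroy:  %d\n", demo_cond_signal(&c)); /* -1 */
                free(c.cond);
                return 0;
        }

Nothing above is part of the patch itself; it only restates why the second
pse51_obj_active() check on the internal structure is needed in addition to
the existing check on the shadow.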

---

 ksrc/skins/posix/cond.c  |   37 +++++++++++++++++------------
 ksrc/skins/posix/mutex.c |   30 ++++++++++++++---------
 ksrc/skins/posix/mutex.h |    4 ++-
 ksrc/skins/posix/sem.c   |   57 ++++++++++++++++++++++++++-------------------
 src/skins/posix/mutex.c  |    6 ++--
 5 files changed, 79 insertions(+), 55 deletions(-)

diff --git a/ksrc/skins/posix/cond.c b/ksrc/skins/posix/cond.c
index f86e85e..769eb4b 100644
--- a/ksrc/skins/posix/cond.c
+++ b/ksrc/skins/posix/cond.c
@@ -51,6 +51,7 @@
 #include <posix/cond.h>
 
 typedef struct pse51_cond {
+       unsigned magic;
        xnsynch_t synchbase;
        xnholder_t link;        /* Link in pse51_condq */
 
@@ -101,7 +102,7 @@ static void cond_destroy_internal(pse51_cond_t * cond, pse51_kqueues_t *q)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_init.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_cond_init(pthread_cond_t * cnd, const pthread_condattr_t * attr)
 {
@@ -142,6 +143,7 @@ int pthread_cond_init(pthread_cond_t * cnd, const pthread_condattr_t * attr)
        shadow->magic = PSE51_COND_MAGIC;
        shadow->cond = cond;
 
+       cond->magic = PSE51_COND_MAGIC;
        xnsynch_init(&cond->synchbase, synch_flags, NULL);
        inith(&cond->link);
        cond->attr = *attr;
@@ -179,7 +181,7 @@ int pthread_cond_init(pthread_cond_t * cnd, const pthread_condattr_t * attr)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_destroy.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_cond_destroy(pthread_cond_t * cnd)
 {
@@ -189,12 +191,13 @@ int pthread_cond_destroy(pthread_cond_t * cnd)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)) {
+       cond = shadow->cond;
+       if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)
+           || !pse51_obj_active(cond, PSE51_COND_MAGIC, struct pse51_cond)) {
                xnlock_put_irqrestore(&nklock, s);
                return EINVAL;
        }
 
-       cond = shadow->cond;
        if (cond->owningq != pse51_kqueues(cond->attr.pshared)) {
                xnlock_put_irqrestore(&nklock, s);
                return EPERM;
@@ -206,6 +209,7 @@ int pthread_cond_destroy(pthread_cond_t * cnd)
        }
 
        pse51_mark_deleted(shadow);
+       pse51_mark_deleted(cond);
 
        xnlock_put_irqrestore(&nklock, s);
 
@@ -224,10 +228,10 @@ static inline int mutex_save_count(xnthread_t *cur,
 {
        pse51_mutex_t *mutex;
 
-       if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex))
-                return EINVAL;
-
        mutex = shadow->mutex;
+       if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex)
+           || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC, struct pse51_mutex))
+                return EINVAL;
 
        if (xnsynch_owner_check(&mutex->synchbase, cur) != 0)
                return EPERM;
@@ -267,6 +271,7 @@ int pse51_cond_timedwait_prologue(xnthread_t *cur,
 
        /* If another thread waiting for cond does not use the same mutex */
        if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)
+           || !pse51_obj_active(cond, PSE51_COND_MAGIC, struct pse51_cond)
            || (cond->mutex && cond->mutex != mutex->mutex)) {
                err = EINVAL;
                goto unlock_and_return;
@@ -403,7 +408,7 @@ int pse51_cond_timedwait_epilogue(xnthread_t *cur,
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_wait.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
 {
@@ -470,7 +475,7 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_timedwait.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_cond_timedwait(pthread_cond_t * cnd,
                           pthread_mutex_t * mx, const struct timespec *abstime)
@@ -521,7 +526,7 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_signal.html.">
  * Specification.</a>
- * 
+ *
  */
 int pthread_cond_signal(pthread_cond_t * cnd)
 {
@@ -531,12 +536,13 @@ int pthread_cond_signal(pthread_cond_t * cnd)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)) {
+       cond = shadow->cond;
+       if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)
+           || !pse51_obj_active(cond, PSE51_COND_MAGIC, struct pse51_cond)) {
                xnlock_put_irqrestore(&nklock, s);
                return EINVAL;
        }
 
-       cond = shadow->cond;
 #if XENO_DEBUG(POSIX)
        if (cond->owningq != pse51_kqueues(cond->attr.pshared)) {
                xnlock_put_irqrestore(&nklock, s);
@@ -571,7 +577,7 @@ int pthread_cond_signal(pthread_cond_t * cnd)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_broadcast.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_cond_broadcast(pthread_cond_t * cnd)
 {
@@ -581,12 +587,13 @@ int pthread_cond_broadcast(pthread_cond_t * cnd)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)) {
+       cond = shadow->cond;
+       if (!pse51_obj_active(shadow, PSE51_COND_MAGIC, struct __shadow_cond)
+           || !pse51_obj_active(cond, PSE51_COND_MAGIC, struct pse51_cond)) {
                xnlock_put_irqrestore(&nklock, s);
                return EINVAL;
        }
 
-       cond = shadow->cond;
        if (cond->owningq != pse51_kqueues(cond->attr.pshared)) {
                xnlock_put_irqrestore(&nklock, s);
                return EPERM;
diff --git a/ksrc/skins/posix/mutex.c b/ksrc/skins/posix/mutex.c
index 79dee73..f4ceb87 100644
--- a/ksrc/skins/posix/mutex.c
+++ b/ksrc/skins/posix/mutex.c
@@ -112,6 +112,7 @@ int pse51_mutex_init_internal(struct __shadow_mutex *shadow,
        if (attr->protocol == PTHREAD_PRIO_INHERIT)
                synch_flags |= XNSYNCH_PIP;
 
+       mutex->magic = PSE51_MUTEX_MAGIC;
        xnsynch_init(&mutex->synchbase, synch_flags, ownerp);
        inith(&mutex->link);
        mutex->attr = *attr;
@@ -148,7 +149,7 @@ int pse51_mutex_init_internal(struct __shadow_mutex *shadow,
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_init.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_mutex_init(pthread_mutex_t *mx, const pthread_mutexattr_t *attr)
 {
@@ -246,7 +247,7 @@ void pse51_mutex_destroy_internal(pse51_mutex_t *mutex,
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_destroy.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_mutex_destroy(pthread_mutex_t * mx)
 {
@@ -258,12 +259,13 @@ int pthread_mutex_destroy(pthread_mutex_t * mx)
        if (unlikely(cb_try_write_lock(&shadow->lock, s)))
                return EBUSY;
 
-       if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex)) {
+       mutex = shadow->mutex;
+       if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex)
+           || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC, struct pse51_mutex)) {
                cb_write_unlock(&shadow->lock, s);
                return EINVAL;
        }
 
-       mutex = shadow->mutex;
        if (pse51_kqueues(mutex->attr.pshared) != mutex->owningq) {
                cb_write_unlock(&shadow->lock, s);
                return EPERM;
@@ -280,10 +282,11 @@ int pthread_mutex_destroy(pthread_mutex_t * mx)
        }
 
        pse51_mark_deleted(shadow);
+       pse51_mark_deleted(mutex);
        cb_write_unlock(&shadow->lock, s);
 
        pse51_mutex_destroy_internal(mutex, pse51_kqueues(mutex->attr.pshared));
-       
+
        return 0;
 }
 
@@ -352,7 +355,7 @@ int pse51_mutex_timedlock_break(struct __shadow_mutex *shadow,
 
   unlock_and_return:
        return err;
-               
+
 }
 
 /**
@@ -381,7 +384,7 @@ int pse51_mutex_timedlock_break(struct __shadow_mutex *shadow,
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_trylock.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_mutex_trylock(pthread_mutex_t *mx)
 {
@@ -399,7 +402,9 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
                return EINVAL;
 
        if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC,
-                             struct __shadow_mutex)) {
+                             struct __shadow_mutex)
+           || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC,
+                                struct pse51_mutex)) {
                err = EINVAL;
                goto unlock_and_return;
        }
@@ -585,7 +590,7 @@ int pthread_mutex_timedlock(pthread_mutex_t * mx, const struct timespec *to)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_unlock.html">
  * Specification.</a>
- * 
+ *
  */
 int pthread_mutex_unlock(pthread_mutex_t * mx)
 {
@@ -602,14 +607,15 @@ int pthread_mutex_unlock(pthread_mutex_t * mx)
        if (unlikely(cb_try_read_lock(&shadow->lock, s)))
                return EINVAL;
 
+       mutex = shadow->mutex;
+
        if (!pse51_obj_active(shadow,
-                             PSE51_MUTEX_MAGIC, struct __shadow_mutex)) {
+                             PSE51_MUTEX_MAGIC, struct __shadow_mutex)
+           || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC, struct pse51_mutex)) {
                err = EINVAL;
                goto out;
        }
 
-       mutex = shadow->mutex;
-
        err = -xnsynch_owner_check(&mutex->synchbase, cur);
        if (err)
                goto out;
diff --git a/ksrc/skins/posix/mutex.h b/ksrc/skins/posix/mutex.h
index cb1b1ff..3bd2544 100644
--- a/ksrc/skins/posix/mutex.h
+++ b/ksrc/skins/posix/mutex.h
@@ -48,6 +48,7 @@ union __xeno_mutex {
 #include <posix/cb_lock.h>
 
 typedef struct pse51_mutex {
+       unsigned magic;
        xnsynch_t synchbase;
        xnholder_t link;            /* Link in pse51_mutexq */
 
@@ -94,7 +95,8 @@ static inline int pse51_mutex_timedlock_internal(xnthread_t *cur,
        if (xnpod_unblockable_p())
                return -EPERM;
 
-       if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex))
+       if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex)
+           || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC, struct pse51_mutex))
                return -EINVAL;
 
 #if XENO_DEBUG(POSIX)
diff --git a/ksrc/skins/posix/sem.c b/ksrc/skins/posix/sem.c
index 9ef7d90..1637422 100644
--- a/ksrc/skins/posix/sem.c
+++ b/ksrc/skins/posix/sem.c
@@ -39,6 +39,7 @@
 #include <posix/sem.h>
 
 typedef struct pse51_sem {
+       unsigned magic;
        xnsynch_t synchbase;
        xnholder_t link;        /* Link in semq */
 
@@ -84,7 +85,7 @@ static void sem_destroy_inner(pse51_sem_t * sem, pse51_kqueues_t *q)
        if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED)
                xnpod_schedule();
        xnlock_put_irqrestore(&nklock, s);
-       
+
        if (sem->is_named)
                xnfree(sem2named_sem(sem));
        else
@@ -97,6 +98,7 @@ static int pse51_sem_init_inner(pse51_sem_t * sem, int pshared, unsigned value)
        if (value > (unsigned)SEM_VALUE_MAX)
                return EINVAL;
 
+       sem->magic = PSE51_SEM_MAGIC;
        inith(&sem->link);
        appendq(&pse51_kqueues(pshared)->semq, &sem->link);
        xnsynch_init(&sem->synchbase, XNSYNCH_PRIO, NULL);
@@ -134,7 +136,7 @@ static int pse51_sem_init_inner(pse51_sem_t * sem, int pshared, unsigned value)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_init.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_init(sem_t * sm, int pshared, unsigned value)
 {
@@ -153,7 +155,7 @@ int sem_init(sem_t * sm, int pshared, unsigned value)
        xnlock_get_irqsave(&nklock, s);
 
        semq = &pse51_kqueues(pshared)->semq;
-       
+
        if (shadow->magic == PSE51_SEM_MAGIC
            || shadow->magic == PSE51_NAMED_SEM_MAGIC
            || shadow->magic == ~PSE51_NAMED_SEM_MAGIC) {
@@ -207,7 +209,7 @@ int sem_init(sem_t * sm, int pshared, unsigned value)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_destroy.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_destroy(sem_t * sm)
 {
@@ -215,7 +217,8 @@ int sem_destroy(sem_t * sm)
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-       if (shadow->magic != PSE51_SEM_MAGIC) {
+       if (shadow->magic != PSE51_SEM_MAGIC
+           || shadow->sem->magic != PSE51_SEM_MAGIC) {
                thread_set_errno(EINVAL);
                goto error;
        }
@@ -226,10 +229,11 @@ int sem_destroy(sem_t * sm)
        }
 
        pse51_mark_deleted(shadow);
+       pse51_mark_deleted(shadow->sem);
        xnlock_put_irqrestore(&nklock, s);
 
        sem_destroy_inner(shadow->sem, pse51_kqueues(shadow->sem->pshared));
-       
+
        return 0;
 
       error:
@@ -280,7 +284,7 @@ int sem_destroy(sem_t * sm)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_open.html">
  * Specification.</a>
- * 
+ *
  */
 sem_t *sem_open(const char *name, int oflags, ...)
 {
@@ -303,7 +307,7 @@ sem_t *sem_open(const char *name, int oflags, ...)
                named_sem = node2sem(node);
                goto got_sem;
        }
-       
+
        named_sem = (nsem_t *) xnmalloc(sizeof(*named_sem));
        if (!named_sem) {
                err = ENOSPC;
@@ -328,7 +332,7 @@ sem_t *sem_open(const char *name, int oflags, ...)
        err = pse51_node_add(&named_sem->nodebase, name, PSE51_NAMED_SEM_MAGIC);
        if (err && err != EEXIST)
                goto err_put_lock;
-       
+
        if (err == EEXIST) {
                err = pse51_node_get(&node, name, PSE51_NAMED_SEM_MAGIC, oflags);
                if (err)
@@ -380,7 +384,7 @@ sem_t *sem_open(const char *name, int oflags, ...)
  * @see
  * <a 
href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_close.html";>
  * Specification.</a>
- * 
+ *
  */
 int sem_close(sem_t * sm)
 {
@@ -391,7 +395,8 @@ int sem_close(sem_t * sm)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (shadow->magic != PSE51_NAMED_SEM_MAGIC) {
+       if (shadow->magic != PSE51_NAMED_SEM_MAGIC
+           || shadow->sem->magic != PSE51_SEM_MAGIC) {
                err = EINVAL;
                goto error;
        }
@@ -406,6 +411,7 @@ int sem_close(sem_t * sm)
        if (pse51_node_removed_p(&named_sem->nodebase)) {
                /* unlink was called, and this semaphore is no longer referenced. */
                pse51_mark_deleted(shadow);
+               pse51_mark_deleted(&named_sem->sembase);
                xnlock_put_irqrestore(&nklock, s);
 
                sem_destroy_inner(&named_sem->sembase, pse51_kqueues(1));
@@ -448,7 +454,7 @@ int sem_close(sem_t * sm)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_unlink.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_unlink(const char *name)
 {
@@ -468,7 +474,7 @@ int sem_unlink(const char *name)
 
        if (pse51_node_removed_p(&named_sem->nodebase)) {
                xnlock_put_irqrestore(&nklock, s);
-               
+
                sem_destroy_inner(&named_sem->sembase, pse51_kqueues(1));
        } else
                xnlock_put_irqrestore(&nklock, s);
@@ -487,8 +493,9 @@ static inline int sem_trywait_internal(struct __shadow_sem *shadow)
 {
        pse51_sem_t *sem;
 
-       if (shadow->magic != PSE51_SEM_MAGIC
-           && shadow->magic != PSE51_NAMED_SEM_MAGIC)
+       if ((shadow->magic != PSE51_SEM_MAGIC
+            && shadow->magic != PSE51_NAMED_SEM_MAGIC)
+           || shadow->sem->magic != PSE51_SEM_MAGIC)
                return EINVAL;
 
        sem = shadow->sem;
@@ -525,7 +532,7 @@ static inline int sem_trywait_internal(struct __shadow_sem *shadow)
  * * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_trywait.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_trywait(sem_t * sm)
 {
@@ -615,7 +622,7 @@ static inline int sem_timedwait_internal(struct __shadow_sem *shadow,
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_wait.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_wait(sem_t * sm)
 {
@@ -665,7 +672,7 @@ int sem_wait(sem_t * sm)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_timedwait.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_timedwait(sem_t * sm, const struct timespec *abs_timeout)
 {
@@ -711,7 +718,7 @@ int sem_timedwait(sem_t * sm, const struct timespec *abs_timeout)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_post.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_post(sem_t * sm)
 {
@@ -721,8 +728,9 @@ int sem_post(sem_t * sm)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (shadow->magic != PSE51_SEM_MAGIC
-           && shadow->magic != PSE51_NAMED_SEM_MAGIC) {
+       if ((shadow->magic != PSE51_SEM_MAGIC
+            && shadow->magic != PSE51_NAMED_SEM_MAGIC)
+           || shadow->sem->magic != PSE51_SEM_MAGIC) {
                thread_set_errno(EINVAL);
                goto error;
        }
@@ -778,7 +786,7 @@ int sem_post(sem_t * sm)
  * @see
  * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_getvalue.html">
  * Specification.</a>
- * 
+ *
  */
 int sem_getvalue(sem_t * sm, int *value)
 {
@@ -788,8 +796,9 @@ int sem_getvalue(sem_t * sm, int *value)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (shadow->magic != PSE51_SEM_MAGIC
-           && shadow->magic != PSE51_NAMED_SEM_MAGIC) {
+       if ((shadow->magic != PSE51_SEM_MAGIC
+            && shadow->magic != PSE51_NAMED_SEM_MAGIC)
+           || shadow->sem->magic != PSE51_SEM_MAGIC) {
                xnlock_put_irqrestore(&nklock, s);
                thread_set_errno(EINVAL);
                return -1;
diff --git a/src/skins/posix/mutex.c b/src/skins/posix/mutex.c
index 4e7e35b..08eb441 100644
--- a/src/skins/posix/mutex.c
+++ b/src/skins/posix/mutex.c
@@ -36,7 +36,7 @@ static xnarch_atomic_t *get_ownerp(struct __shadow_mutex *shadow)
 {
        if (likely(!shadow->attr.pshared))
                return shadow->owner;
-       
+
        return (xnarch_atomic_t *) (xeno_sem_heap[1] + shadow->owner_offset);
 }
 #endif /* CONFIG_XENO_FASTSYNCH */
@@ -120,7 +120,7 @@ int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
        if (!shadow->attr.pshared)
                shadow->owner = (xnarch_atomic_t *)
                        (xeno_sem_heap[0] + shadow->owner_offset);
-       
+
        cb_write_unlock(&shadow->lock, s);
 #endif /* CONFIG_XENO_FASTSYNCH */
 
@@ -226,7 +226,7 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
        if (shadow->magic != PSE51_MUTEX_MAGIC) {
                err = -EINVAL;
                goto out;
-       }       
+       }
 
        if (likely(!(xeno_get_current_mode() & XNRELAX))) {
                err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

