Module: xenomai-forge
Branch: master
Commit: ab75a7adf7c9cb8a972aa25438c647f75b7c7814
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=ab75a7adf7c9cb8a972aa25438c647f75b7c7814

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Dec 14 15:36:23 2011 +0100

copperplate/syncobj, threadobj: catch unsafe (unlocked) calls in debug mode

---

 include/copperplate/syncobj.h   |   44 ++++++++++++++++++++++++++++++
 include/copperplate/threadobj.h |   56 +++++++++++++++++++++++++++++++++++++-
 lib/copperplate/syncobj.c       |   24 ++++++++++++++++
 lib/copperplate/threadobj.c     |   44 +++++++++++++++++++++++++++---
 4 files changed, 162 insertions(+), 6 deletions(-)
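
The pattern is a debug-only lock tag: while the object's serialization lock is held, a LOCKED bit is kept in its flag word, and every accessor that is only valid under that lock asserts on the bit. Unlocked callers then abort immediately in debug builds, while the helpers compile down to empty inlines otherwise. A minimal stand-alone sketch of the idea, with made-up names and a generic DEBUG switch standing in for __XENO_DEBUG__ (not Xenomai code):

#include <assert.h>
#include <pthread.h>

#define OBJ_LOCKED	0x1	/* set in obj->flags while obj->lock is held */

struct obj {
	pthread_mutex_t lock;
	int flags;
	int count;
};

#ifdef DEBUG
static inline void __obj_tag_locked(struct obj *o)   { o->flags |= OBJ_LOCKED; }
static inline void __obj_tag_unlocked(struct obj *o) { assert(o->flags & OBJ_LOCKED); o->flags &= ~OBJ_LOCKED; }
static inline void __obj_check_locked(struct obj *o) { assert(o->flags & OBJ_LOCKED); }
#else /* !DEBUG */
static inline void __obj_tag_locked(struct obj *o)   { (void)o; }
static inline void __obj_tag_unlocked(struct obj *o) { (void)o; }
static inline void __obj_check_locked(struct obj *o) { (void)o; }
#endif

static inline void obj_lock(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	__obj_tag_locked(o);		/* tag once the lock is acquired */
}

static inline void obj_unlock(struct obj *o)
{
	__obj_tag_unlocked(o);		/* untag before the lock is dropped */
	pthread_mutex_unlock(&o->lock);
}

static inline int obj_count(struct obj *o)
{
	__obj_check_locked(o);		/* caller must hold o->lock */
	return o->count;
}

Blocking services that release the lock internally must untag before sleeping and retag on wakeup, which is exactly what the __syncobj_tag_unlocked()/__syncobj_tag_locked() pairs around monitor_wait_grant(), monitor_wait_drain() and pthread_cond_wait() do in the hunks below.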

diff --git a/include/copperplate/syncobj.h b/include/copperplate/syncobj.h
index 3c5fa8b..5112c02 100644
--- a/include/copperplate/syncobj.h
+++ b/include/copperplate/syncobj.h
@@ -26,6 +26,7 @@
 /* syncobj->flags */
 #define SYNCOBJ_FIFO   0x0
 #define SYNCOBJ_PRIO   0x1
+#define SYNCOBJ_LOCKED 0x2
 
 /* threadobj->wait_status */
 #define SYNCOBJ_DELETED                0x1
@@ -80,6 +81,41 @@ struct syncobj {
 
 void __syncobj_cleanup_wait(struct syncobj *sobj,
                            struct threadobj *thobj);
+
+#ifdef __XENO_DEBUG__
+
+static inline void __syncobj_tag_locked(struct syncobj *sobj)
+{
+       sobj->flags |= SYNCOBJ_LOCKED;
+}
+
+static inline void __syncobj_tag_unlocked(struct syncobj *sobj)
+{
+       assert(sobj->flags & SYNCOBJ_LOCKED);
+       sobj->flags &= ~SYNCOBJ_LOCKED;
+}
+
+static inline void __syncobj_check_locked(struct syncobj *sobj)
+{
+       assert(sobj->flags & SYNCOBJ_LOCKED);
+}
+
+#else /* !__XENO_DEBUG__ */
+
+static inline void __syncobj_tag_locked(struct syncobj *sobj)
+{
+}
+
+static inline void __syncobj_tag_unlocked(struct syncobj *sobj)
+{
+}
+
+static inline void __syncobj_check_locked(struct syncobj *sobj)
+{
+}
+
+#endif /* !__XENO_DEBUG__ */
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -111,16 +147,22 @@ int __syncobj_signal_drain(struct syncobj *sobj);
 
 static inline int syncobj_pended_p(struct syncobj *sobj)
 {
+       __syncobj_check_locked(sobj);
+
        return !list_empty(&sobj->pend_list);
 }
 
 static inline int syncobj_pend_count(struct syncobj *sobj)
 {
+       __syncobj_check_locked(sobj);
+
        return sobj->pend_count;
 }
 
 static inline int syncobj_drain_count(struct syncobj *sobj)
 {
+       __syncobj_check_locked(sobj);
+
        return sobj->drain_count;
 }
 
@@ -143,6 +185,8 @@ static inline int syncobj_signal_drain(struct syncobj *sobj)
 {
        int ret = 0;
 
+       __syncobj_check_locked(sobj);
+
        if (sobj->drain_count > 0)
                ret = __syncobj_signal_drain(sobj);
 
diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index 38043e0..1e77e2e 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -84,6 +84,7 @@ struct threadobj_stat {
 #define THREADOBJ_STARTED      0x4     /* threadobj_start() called. */
 #define THREADOBJ_WARMUP       0x8     /* threadobj_prologue() not called yet. */
 #define THREADOBJ_ABORTED      0x10    /* cancelled before start. */
+#define THREADOBJ_LOCKED       0x20    /* threadobj_lock() granted (debug only). */
 #define THREADOBJ_DEBUG                0x8000  /* Debug mode enabled. */
 
 #define THREADOBJ_IRQCONTEXT    ((struct threadobj *)-2UL)
@@ -187,6 +188,40 @@ static inline struct threadobj *threadobj_current(void)
 
 #endif /* !HAVE_TLS */
 
+#ifdef __XENO_DEBUG__
+
+static inline void __threadobj_tag_locked(struct threadobj *thobj)
+{
+       thobj->status |= THREADOBJ_LOCKED;
+}
+
+static inline void __threadobj_tag_unlocked(struct threadobj *thobj)
+{
+       assert(thobj->status & THREADOBJ_LOCKED);
+       thobj->status &= ~THREADOBJ_LOCKED;
+}
+
+static inline void __threadobj_check_locked(struct threadobj *thobj)
+{
+       assert(thobj->status & THREADOBJ_LOCKED);
+}
+
+#else /* !__XENO_DEBUG__ */
+
+static inline void __threadobj_tag_locked(struct threadobj *thobj)
+{
+}
+
+static inline void __threadobj_tag_unlocked(struct threadobj *thobj)
+{
+}
+
+static inline void __threadobj_check_locked(struct threadobj *thobj)
+{
+}
+
+#endif /* !__XENO_DEBUG__ */
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -283,16 +318,33 @@ static inline int threadobj_get_priority(struct threadobj *thobj)
 
 static inline int threadobj_lock(struct threadobj *thobj)
 {
-       return write_lock_safe(&thobj->lock, thobj->cancel_state);
+       int ret;
+
+       ret = write_lock_safe(&thobj->lock, thobj->cancel_state);
+       if (ret)
+               return ret;
+
+       __threadobj_tag_locked(thobj);
+
+       return 0;
 }
 
 static inline int threadobj_trylock(struct threadobj *thobj)
 {
-       return write_trylock_safe(&thobj->lock, thobj->cancel_state);
+       int ret;
+
+       ret = write_trylock_safe(&thobj->lock, thobj->cancel_state);
+       if (ret)
+               return ret;
+
+       __threadobj_tag_locked(thobj);
+
+       return 0;
 }
 
 static inline int threadobj_unlock(struct threadobj *thobj)
 {
+       __threadobj_tag_unlocked(thobj);
        return write_unlock_safe(&thobj->lock, thobj->cancel_state);
 }
 
diff --git a/lib/copperplate/syncobj.c b/lib/copperplate/syncobj.c
index b1ad823..a70a7d2 100644
--- a/lib/copperplate/syncobj.c
+++ b/lib/copperplate/syncobj.c
@@ -241,12 +241,14 @@ int syncobj_lock(struct syncobj *sobj, struct syncstate *syns)
        }
 
        syns->state = oldstate;
+       __syncobj_tag_locked(sobj);
 
        return 0;
 }
 
 void syncobj_unlock(struct syncobj *sobj, struct syncstate *syns)
 {
+       __syncobj_tag_unlocked(sobj);
        monitor_exit(sobj);
        pthread_setcancelstate(syns->state, NULL);
 }
@@ -334,6 +336,8 @@ int syncobj_pend(struct syncobj *sobj, const struct timespec *timeout,
        struct threadobj *current = threadobj_current();
        int ret, state;
 
+       __syncobj_check_locked(sobj);
+
        assert(current != NULL);
 
        current->wait_status = 0;
@@ -357,7 +361,9 @@ int syncobj_pend(struct syncobj *sobj, const struct timespec *timeout,
        assert(state == PTHREAD_CANCEL_DISABLE);
 
        do {
+               __syncobj_tag_unlocked(sobj);
                ret = monitor_wait_grant(sobj, current, timeout);
+               __syncobj_tag_locked(sobj);
                /* Check for spurious wake up. */
        } while (ret == 0 && current->wait_sobj);
 
@@ -384,12 +390,16 @@ int syncobj_pend(struct syncobj *sobj, const struct timespec *timeout,
 
 void syncobj_requeue_waiter(struct syncobj *sobj, struct threadobj *thobj)
 {
+       __syncobj_check_locked(sobj);
+
        list_remove(&thobj->wait_link);
        enqueue_waiter(sobj, thobj);
 }
 
 void syncobj_wakeup_waiter(struct syncobj *sobj, struct threadobj *thobj)
 {
+       __syncobj_check_locked(sobj);
+
        list_remove(&thobj->wait_link);
        thobj->wait_sobj = NULL;
        sobj->pend_count--;
@@ -400,6 +410,8 @@ struct threadobj *syncobj_post(struct syncobj *sobj)
 {
        struct threadobj *thobj;
 
+       __syncobj_check_locked(sobj);
+
        if (list_empty(&sobj->pend_list))
                return NULL;
 
@@ -415,6 +427,8 @@ struct threadobj *syncobj_peek_at_pend(struct syncobj *sobj)
 {
        struct threadobj *thobj;
 
+       __syncobj_check_locked(sobj);
+
        if (list_empty(&sobj->pend_list))
                return NULL;
 
@@ -427,6 +441,8 @@ struct threadobj *syncobj_peek_at_drain(struct syncobj *sobj)
 {
        struct threadobj *thobj;
 
+       __syncobj_check_locked(sobj);
+
        if (list_empty(&sobj->drain_list))
                return NULL;
 
@@ -441,6 +457,8 @@ int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
        struct threadobj *current = threadobj_current();
        int ret, state;
 
+       __syncobj_check_locked(sobj);
+
        assert(current != NULL);
 
        /*
@@ -475,7 +493,9 @@ int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
         * XXX: The caller must check for spurious wakeups, in case
         * the drain condition became false again before it resumes.
         */
+       __syncobj_tag_unlocked(sobj);
        ret = monitor_wait_drain(sobj, timeout);
+       __syncobj_tag_locked(sobj);
 
        pthread_setcancelstate(state, NULL);
 
@@ -505,6 +525,8 @@ int syncobj_flush(struct syncobj *sobj, int reason)
 {
        struct threadobj *thobj;
 
+       __syncobj_check_locked(sobj);
+
        /* Must have a valid release flag set. */
        assert(reason & SYNCOBJ_RELEASE_MASK);
 
@@ -537,6 +559,8 @@ int syncobj_destroy(struct syncobj *sobj, struct syncstate *syns)
 {
        int ret;
 
+       __syncobj_check_locked(sobj);
+
        ret = syncobj_flush(sobj, SYNCOBJ_DELETED);
        if (ret == 0) {
                /* No thread awaken - we may dispose immediately. */
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index d04de92..79db06f 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -123,6 +123,8 @@ int threadobj_cancel(struct threadobj *thobj)
 {
        pthread_t tid;
 
+       __threadobj_check_locked(thobj);
+
        /*
         * This basically makes the thread enter a zombie state, since
         * it won't be reachable by anyone after its magic has been
@@ -174,6 +176,8 @@ int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
        pthread_t tid = thobj->tid;
        int ret;
 
+       __threadobj_check_locked(thobj);
+
        /*
         * XXX: we must guarantee that a THREADOBJ_SUSPEND event is sent
         * only once the target thread is in an innocuous state,
@@ -200,6 +204,8 @@ int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
        pthread_t tid = thobj->tid;
        int ret;
 
+       __threadobj_check_locked(thobj);
+
        if (thobj == threadobj_current())
                return 0;
 
@@ -222,6 +228,8 @@ int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
 
 int threadobj_lock_sched(struct threadobj *thobj) /* thobj->lock held */
 {
+       __threadobj_check_locked(thobj);
+
        assert(thobj == threadobj_current());
 
        if (thobj->schedlock_depth++ > 0)
@@ -240,6 +248,8 @@ int threadobj_unlock_sched(struct threadobj *thobj) /* thobj->lock held */
 {
        int ret;
 
+       __threadobj_check_locked(thobj);
+
        assert(thobj == threadobj_current());
 
        /*
@@ -268,6 +278,8 @@ int threadobj_set_priority(struct threadobj *thobj, int prio) /* thobj->lock hel
        pthread_t tid = thobj->tid;
        int ret, policy;
 
+       __threadobj_check_locked(thobj);
+
        thobj->priority = prio;
        policy = SCHED_RT;
        if (prio == 0) {
@@ -296,6 +308,8 @@ int threadobj_set_mode(struct threadobj *thobj,
 {
        int ret, __clrmask = 0, __setmask = 0;
 
+       __threadobj_check_locked(thobj);
+
        if (setmask & __THREAD_M_LOCK)
                __setmask |= PTHREAD_LOCK_SCHED;
        else if (clrmask & __THREAD_M_LOCK)
@@ -352,8 +366,10 @@ int threadobj_set_rr(struct threadobj *thobj, struct timespec *quantum)
 {                              /* thobj->lock held if valid */
        int ret;
 
-       if (thobj)
+       if (thobj) {
+               __threadobj_check_locked(thobj);
                return __bt(set_rr(thobj, quantum));
+       }
 
        global_rr = (quantum != NULL);
        if (global_rr)
@@ -417,6 +433,8 @@ int threadobj_stat(struct threadobj *thobj, struct threadobj_stat *p) /* thobj->
        struct cobalt_threadstat stat;
        int ret;
 
+       __threadobj_check_locked(thobj);
+
        ret = __cobalt_thread_stat(thobj->tid, &stat);
        if (ret)
                return __bt(ret);
@@ -520,6 +538,8 @@ int threadobj_cancel(struct threadobj *thobj)
 {
        pthread_t tid;
 
+       __threadobj_check_locked(thobj);
+
        /*
         * This basically makes the thread enter a zombie state, since
         * it won't be reachable by anyone after its magic has been
@@ -555,6 +575,8 @@ int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
 
 int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
 {
+       __threadobj_check_locked(thobj);
+
        if (thobj == threadobj_current())
                return 0;
 
@@ -644,6 +666,8 @@ int threadobj_set_mode(struct threadobj *thobj,
 {
        int ret = 0, old = 0;
 
+       __threadobj_check_locked(thobj);
+
        if (thobj->status & THREADOBJ_SCHEDLOCK)
                old |= __THREAD_M_LOCK;
 
@@ -826,6 +850,8 @@ int threadobj_wait_period(struct threadobj *thobj,
 int threadobj_stat(struct threadobj *thobj,
                   struct threadobj_stat *stat) /* thobj->lock held */
 {
+       __threadobj_check_locked(thobj);
+
        return 0;
 }
 
@@ -894,6 +920,8 @@ void threadobj_init(struct threadobj *thobj,
 
 void threadobj_start(struct threadobj *thobj)  /* thobj->lock held. */
 {
+       __threadobj_check_locked(thobj);
+
        if (thobj->status & THREADOBJ_STARTED)
                return;
 
@@ -912,9 +940,11 @@ void threadobj_wait_start(struct threadobj *thobj) /* thobj->lock free. */
         * syncstate argument to each thread locking operation, we
         * hold the cancel state of the locker directly into the
         * locked thread, prior to disabling cancellation for the
-        * calling thread. However, this means that we must save the
-        * currently saved state on the stack prior to calling any
-        * service which releases that lock implicitly, such as
+        * calling thread.
+        *
+        * However, this means that we must save some state
+        * information on the stack prior to calling any service which
+        * releases that lock implicitly, such as
         * pthread_cond_wait(). Failing to do so would introduce the
         * possibility for the saved state to be overwritten by
         * another thread which managed to grab the lock after
@@ -926,7 +956,9 @@ void threadobj_wait_start(struct threadobj *thobj) /* thobj->lock free. */
                if (status & (THREADOBJ_STARTED|THREADOBJ_ABORTED))
                        break;
                oldstate = thobj->cancel_state;
+               __threadobj_tag_unlocked(thobj);
                __RT(pthread_cond_wait(&thobj->barrier, &thobj->lock));
+               __threadobj_tag_locked(thobj);
                thobj->cancel_state = oldstate;
        }
 
@@ -994,6 +1026,8 @@ static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
 {
        int oldstate;
 
+       __threadobj_check_locked(thobj);
+
        while (thobj->status & THREADOBJ_WARMUP) {
                oldstate = thobj->cancel_state;
                __RT(pthread_cond_wait(&thobj->barrier, &thobj->lock));
@@ -1047,6 +1081,8 @@ int threadobj_unblock(struct threadobj *thobj) /* thobj->lock held */
        pthread_t tid = thobj->tid;
        int ret = 0;
 
+       __threadobj_check_locked(thobj);
+
        /*
         * FIXME: racy. We can't assume thobj->wait_sobj is stable.
         */
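
For reference, this is how a caller of the checked syncobj accessors looks once the patch is in; an illustrative sketch only, with a made-up wrapper name and trimmed error handling. With __XENO_DEBUG__ enabled, invoking syncobj_pend_count() outside the lock/unlock pair trips the assert() in __syncobj_check_locked(); in non-debug builds the helpers are empty inlines and cost nothing.

#include <copperplate/syncobj.h>

static int peek_pend_count(struct syncobj *sobj)
{
	struct syncstate syns;
	int count;

	if (syncobj_lock(sobj, &syns))		/* sets SYNCOBJ_LOCKED in debug mode */
		return -1;

	count = syncobj_pend_count(sobj);	/* __syncobj_check_locked() passes */
	syncobj_unlock(sobj, &syns);		/* clears the tag, drops the lock */

	return count;
}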

