Module: xenomai-3
Branch: next
Commit: 97e32440e9675ba91cdf80b320a35979b935dd8c
URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=97e32440e9675ba91cdf80b320a35979b935dd8c

Author: Philippe Gerum <r...@xenomai.org>
Date:   Tue Jun 30 20:36:25 2015 +0200

cobalt/thread: generalize usage of ->lock_count for preemption control

XNLOCK is uselessly mirroring only part of the information
->lock_count conveys.

We only need to keep XNLOCK as a mode bit in the ABI between the
Cobalt core and lib/cobalt for switching the lock from user-space,
using ->lock_count internally for testing the current preemption
state.

---

 include/cobalt/kernel/sched.h       |    5 +++--
 include/cobalt/kernel/thread.h      |    2 +-
 include/cobalt/uapi/kernel/thread.h |    5 ++---
 kernel/cobalt/sched-rt.c            |    9 ++++-----
 kernel/cobalt/sched.c               |   10 +++-------
 kernel/cobalt/thread.c              |   25 +++++++++++++++++--------
 6 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 1a2c6eb..ecd9605 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -359,7 +359,7 @@ static inline int xnsched_interrupt_p(void)
 
 static inline int xnsched_locked_p(void)
 {
-       return xnthread_test_state(xnsched_current_thread(), XNLOCK);
+       return xnsched_current_thread()->lock_count > 0;
 }
 
 static inline int xnsched_root_p(void)
@@ -526,7 +526,8 @@ static inline void xnsched_tick(struct xnsched *sched)
         */
        if (sched_class == curr->base_class &&
            sched_class->sched_tick &&
-           xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
+           xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
+               curr->lock_count == 0)
                sched_class->sched_tick(sched);
 }
 
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index 45a9e6f..cc2e32c 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -38,7 +38,7 @@
  * @{
  */
 #define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNMIGRATE|XNHELD)
-#define XNTHREAD_MODE_BITS    (XNLOCK|XNRRB|XNWARN|XNTRAPLB)
+#define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
 
 struct xnthread;
 struct xnsched;
diff --git a/include/cobalt/uapi/kernel/thread.h b/include/cobalt/uapi/kernel/thread.h
index eba9c01..6c714f7 100644
--- a/include/cobalt/uapi/kernel/thread.h
+++ b/include/cobalt/uapi/kernel/thread.h
@@ -39,10 +39,9 @@
 #define XNRELAX   0x00000080 /**< Relaxed shadow thread (blocking bit) */
 #define XNMIGRATE 0x00000100 /**< Thread is currently migrating to another CPU. */
 #define XNHELD    0x00000200 /**< Thread is held to process emergency. */
-
 #define XNBOOST   0x00000400 /**< Undergoes a PIP boost */
 #define XNSSTEP   0x00000800 /**< Single-stepped by debugger */
-#define XNLOCK    0x00001000 /**< Holds the scheduler lock (i.e. not preemptible) */
+#define XNLOCK    0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state) */
 #define XNRRB     0x00002000 /**< Undergoes a round-robin scheduling */
 #define XNWARN    0x00004000 /**< Issue SIGDEBUG on error detection */
 #define XNFPU     0x00008000 /**< Thread uses FPU */
@@ -50,7 +49,7 @@
 #define XNWEAK    0x00020000 /**< Non real-time shadow (from the WEAK class) */
 #define XNUSER    0x00040000 /**< Shadow thread running in userland */
 #define XNJOINED  0x00080000 /**< Another thread waits for joining this thread */
-#define XNTRAPLB  0x00100000 /**< Trap lock break (i.e. may not sleep with XNLOCK) */
+#define XNTRAPLB  0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock) */
 #define XNDEBUG   0x00200000 /**< User-level debugging enabled */
 
 /** @} */
diff --git a/kernel/cobalt/sched-rt.c b/kernel/cobalt/sched-rt.c
index 3bd9308..1973863 100644
--- a/kernel/cobalt/sched-rt.c
+++ b/kernel/cobalt/sched-rt.c
@@ -73,11 +73,10 @@ static void xnsched_rt_rotate(struct xnsched *sched,
         * before we were called. The same goes if the current thread
         * holds the scheduler lock.
         */
-       if (thread == curr &&
-           xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNLOCK))
-               return;
-
-       xnsched_putback(thread);
+       if (thread != curr ||
+           (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS) &&
+            curr->lock_count == 0))
+               xnsched_putback(thread);
 }
 
 void xnsched_rt_tick(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index d394ce8..bda61a6 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -252,7 +252,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
                 * Do not preempt the current thread if it holds the
                 * scheduler lock.
                 */
-               if (xnthread_test_state(curr, XNLOCK)) {
+               if (curr->lock_count > 0) {
                        xnsched_set_self_resched(sched);
                        return curr;
                }
@@ -324,10 +324,8 @@ void ___xnsched_lock(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr;
 
-       if (curr->lock_count++ == 0) {
+       if (curr->lock_count++ == 0)
                sched->lflags |= XNINLOCK;
-               xnthread_set_state(curr, XNLOCK);
-       }
 }
 EXPORT_SYMBOL_GPL(___xnsched_lock);
 
@@ -339,7 +337,6 @@ void ___xnsched_unlock(struct xnsched *sched)
                return;
 
        if (--curr->lock_count == 0) {
-               xnthread_clear_state(curr, XNLOCK);
                xnthread_clear_info(curr, XNLBALERT);
                sched->lflags &= ~XNINLOCK;
                xnsched_run();
@@ -352,7 +349,6 @@ void ___xnsched_unlock_fully(struct xnsched *sched)
        struct xnthread *curr = sched->curr;
 
        curr->lock_count = 0;
-       xnthread_clear_state(curr, XNLOCK);
        xnthread_clear_info(curr, XNLBALERT);
        sched->lflags &= ~XNINLOCK;
        xnsched_run();
@@ -882,7 +878,7 @@ out:
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule;
 
-       if (curr->lock_count)
+       if (curr->lock_count > 0)
                sched->lflags |= XNINLOCK;
 
        xnlock_put_irqrestore(&nklock, s);
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 46aca95..07a39b6 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -673,6 +673,8 @@ int xnthread_start(struct xnthread *thread,
        xnthread_set_state(thread, attr->mode & (XNTHREAD_MODE_BITS | XNSUSP));
        thread->entry = attr->entry;
        thread->cookie = attr->cookie;
+       if (attr->mode & XNLOCK)
+               thread->lock_count = 1;
 
        trace_cobalt_thread_start(thread);
 
@@ -737,28 +739,33 @@ EXPORT_SYMBOL_GPL(xnthread_start);
  */
 int xnthread_set_mode(int clrmask, int setmask)
 {
+       int oldmode, lock_count;
        struct xnthread *curr;
-       int oldmode;
        spl_t s;
 
        primary_mode_only();
 
        xnlock_get_irqsave(&nklock, s);
-
        curr = xnsched_current_thread();
        oldmode = xnthread_get_state(curr) & XNTHREAD_MODE_BITS;
+       lock_count = curr->lock_count;
        xnthread_clear_state(curr, clrmask & XNTHREAD_MODE_BITS);
        xnthread_set_state(curr, setmask & XNTHREAD_MODE_BITS);
        trace_cobalt_thread_set_mode(curr);
 
-       if (xnthread_test_state(curr, XNLOCK)) {
-               if ((oldmode & XNLOCK) == 0)
+       if (setmask & XNLOCK) {
+               if (lock_count == 0)
                        __xnsched_lock();
-       } else if (oldmode & XNLOCK)
-               __xnsched_unlock_fully();
+       } else if (clrmask & XNLOCK) {
+               if (lock_count > 0)
+                       __xnsched_unlock_fully();
+       }
 
        xnlock_put_irqrestore(&nklock, s);
 
+       if (lock_count > 0)
+               oldmode |= XNLOCK;
+
        return oldmode;
 }
 EXPORT_SYMBOL_GPL(xnthread_set_mode);
@@ -856,7 +863,8 @@ void xnthread_suspend(struct xnthread *thread, int mask,
                        if (xnthread_test_info(thread, XNKICKED))
                                goto abort;
                        if (thread == sched->curr &&
-                           (oldstate & (XNTRAPLB|XNLOCK)) == (XNTRAPLB|XNLOCK))
+                           thread->lock_count > 0 &&
+                           (oldstate & XNTRAPLB) != 0)
                                goto lock_break;
                }
                xnthread_clear_info(thread,
@@ -1813,7 +1821,8 @@ int __xnthread_set_schedparam(struct xnthread *thread,
         * - we currently hold the scheduler lock, so we don't want
         * any round-robin effect to take place.
         */
-       if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY|XNLOCK))
+       if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY) &&
+           thread->lock_count == 0)
                xnsched_putback(thread);
 
        return ret;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://xenomai.org/mailman/listinfo/xenomai-git

Reply via email to