Module: xenomai-forge
Branch: master
Commit: afac4dbd3c98abfac3edfb2814f37b16d88185ed
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=afac4dbd3c98abfac3edfb2814f37b16d88185ed

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sat Aug 31 17:46:23 2013 +0200

cobalt/thread: fix XNLOCK handling in xnthread_set_mode()

We were not triggering a rescheduling pass when XNLOCK was cleared from a
thread's mode bits, so the scheduler lock was dropped without rescheduling.
Introduce ___xnsched_unlock_fully() to release the lock entirely (zeroing
the lock count, clearing XNLOCK and XNINLOCK) and call xnsched_run().

---

 include/cobalt/kernel/sched.h |   11 +++++++++++
 kernel/cobalt/sched.c         |   11 +++++++++++
 kernel/cobalt/thread.c        |   38 ++++++++++++++++++++++++--------------
 3 files changed, 46 insertions(+), 14 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index eb1bb0c..21160e0 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -293,6 +293,8 @@ void ___xnsched_lock(struct xnsched *sched);
 
 void ___xnsched_unlock(struct xnsched *sched);
 
+void ___xnsched_unlock_fully(struct xnsched *sched);
+
 static inline void __xnsched_lock(void)
 {
        struct xnsched *sched;
@@ -311,6 +313,15 @@ static inline void __xnsched_unlock(void)
        ___xnsched_unlock(sched);
 }
 
+static inline void __xnsched_unlock_fully(void)
+{
+       struct xnsched *sched;
+
+       barrier();
+       sched = xnsched_current();
+       ___xnsched_unlock_fully(sched);
+}
+
 static inline void xnsched_lock(void)
 {
        struct xnsched *sched;
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 2f4f022..9359584 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -302,6 +302,17 @@ void ___xnsched_unlock(struct xnsched *sched)
 }
 EXPORT_SYMBOL_GPL(___xnsched_unlock);
 
+void ___xnsched_unlock_fully(struct xnsched *sched)
+{
+       struct xnthread *curr = sched->curr;
+
+       xnthread_lock_count(curr) = 0;
+       xnthread_clear_state(curr, XNLOCK);
+       sched->lflags &= ~XNINLOCK;
+       xnsched_run();
+}
+EXPORT_SYMBOL_GPL(___xnsched_unlock_fully);
+
 /* Must be called with nklock locked, interrupts off. */
 void xnsched_putback(struct xnthread *thread)
 {
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 1f9ed50..eb2dd1f 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -717,11 +717,12 @@ EXPORT_SYMBOL_GPL(xnthread_start);
  * @param setmask The new thread mode. The following flags may be set
  * in this bitmask:
  *
- * - XNLOCK causes the thread to lock the scheduler.  The target
- * thread will have to call the xnsched_unlock() service to unlock
- * the scheduler or clear the XNLOCK bit forcibly using this
- * service. A non-preemptible thread may still block, in which case,
- * the lock is reasserted when the thread is scheduled back in.
+ * - XNLOCK makes @a thread non-preemptible by other threads when
+ * running on a CPU.  A non-preemptible thread may still block, in
+ * which case, the lock is reasserted when the thread is scheduled
+ * back in. If @a thread is current, the scheduler is immediately
+ * locked, otherwise such lock will take effect next time @a thread
+ * resumes on a CPU.
  *
  * - XNTRAPSW causes the thread to receive a SIGDEBUG signal when it
  * switches to secondary mode. This is a debugging aid for detecting
@@ -734,8 +735,8 @@ EXPORT_SYMBOL_GPL(xnthread_start);
  * - Kernel-based task
  * - User-space task in primary mode.
  *
- * Rescheduling: never, therefore, the caller should reschedule if
- * XNLOCK has been passed into @a clrmask.
+ * Rescheduling: possible as a result of unlocking the scheduler
+ * (XNLOCK present in @a clrmask).
  *
  * @note Setting @a clrmask and @a setmask to zero leads to a nop,
  * only returning the previous mode if @a mode_r is a valid address.
@@ -756,13 +757,22 @@ int xnthread_set_mode(xnthread_t *thread, int clrmask, int setmask)
        xnthread_clear_state(thread, clrmask & XNTHREAD_MODE_BITS);
        xnthread_set_state(thread, setmask & XNTHREAD_MODE_BITS);
 
-       if (curr == thread) {
-               if (!(oldmode & XNLOCK)) {
-                       if (xnthread_test_state(thread, XNLOCK))
-                               /* Actually grab the scheduler lock. */
-                               xnsched_lock();
-               } else if (!xnthread_test_state(thread, XNLOCK))
-                       xnthread_lock_count(thread) = 0;
+       /*
+        * Marking the thread as (non-)preemptible requires special
+        * handling, depending on whether @thread is current.
+        */
+       if (xnthread_test_state(thread, XNLOCK)) {
+               if ((oldmode & XNLOCK) == 0) {
+                       if (thread == curr)
+                               __xnsched_lock();
+                       else
+                               xnthread_lock_count(curr) = 1;
+               }
+       } else if (oldmode & XNLOCK) {
+               if (thread == curr)
+                       __xnsched_unlock_fully(); /* Will resched. */
+               else
+                       xnthread_lock_count(curr) = 0;
        }
 
        xnlock_put_irqrestore(&nklock, s);


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to