Module: xenomai-2.6
Branch: master
Commit: fae534b67cd593974387442d46a4816c3cc113fc
URL:    http://git.xenomai.org/?p=xenomai-2.6.git;a=commit;h=fae534b67cd593974387442d46a4816c3cc113fc

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Thu Dec  6 00:03:42 2012 +0100

nucleus/sched: allow disabling preemption temporarily

Some constructs require disabling thread preemption temporarily (e.g.
RTDM_EXECUTE_ATOMICALLY). We introduce a lightweight mechanism for this
purpose, based on a new scheduler status flag (XNINLOCK). The per-thread
persistent scheduler lock feature (XNLOCK) is rebased over the new
mechanism.
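
A minimal usage sketch (not part of the commit message; it assumes only
the xnpod_lock_sched()/xnpod_unlock_sched() helpers declared in pod.h
below) of the pattern that constructs like RTDM_EXECUTE_ATOMICALLY
rely on:

    /*
     * Run a short critical section with Xenomai preemption disabled.
     * xnpod_lock_sched() grabs the nklock, raises the calling
     * thread's lock count and sets XNINLOCK on its scheduler;
     * xnpod_unlock_sched() drops the count, clears both flags and
     * reschedules once the count returns to zero.
     */
    xnpod_lock_sched();
    /* ... work that must not be preempted by other Xenomai threads ... */
    xnpod_unlock_sched();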

---

 include/nucleus/pod.h   |   30 +++++++++++++++---------------
 include/nucleus/sched.h |    1 +
 ksrc/nucleus/pod.c      |   23 ++++++++++++++++++-----
 3 files changed, 34 insertions(+), 20 deletions(-)

diff --git a/include/nucleus/pod.h b/include/nucleus/pod.h
index 8a7f23e..fd09dd6 100644
--- a/include/nucleus/pod.h
+++ b/include/nucleus/pod.h
@@ -281,54 +281,54 @@ static inline void xnpod_schedule(void)
                return;
 #else /* !XENO_DEBUG(NUCLEUS) */
        if (testbits(sched->status | sched->lflags,
-                    XNKCOUT|XNINIRQ|XNINSW|XNRESCHED) != XNRESCHED)
+                    XNKCOUT|XNINIRQ|XNINSW|XNRESCHED|XNINLOCK) != XNRESCHED)
                return;
 #endif /* !XENO_DEBUG(NUCLEUS) */
 
        __xnpod_schedule(sched);
 }
 
-void ___xnpod_lock_sched(struct xnthread *curr);
+void ___xnpod_lock_sched(xnsched_t *sched);
 
-void ___xnpod_unlock_sched(struct xnthread *curr);
+void ___xnpod_unlock_sched(xnsched_t *sched);
 
 static inline void __xnpod_lock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
 
        barrier();
-       curr = xnpod_current_thread();
-       ___xnpod_lock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_lock_sched(sched);
 }
 
 static inline void __xnpod_unlock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
 
        barrier();
-       curr = xnpod_current_thread();
-       ___xnpod_unlock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_unlock_sched(sched);
 }
 
 static inline void xnpod_lock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-       curr = xnpod_current_thread();
-       ___xnpod_lock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_lock_sched(sched);
        xnlock_put_irqrestore(&nklock, s);
 }
 
 static inline void xnpod_unlock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-       curr = xnpod_current_thread();
-       ___xnpod_unlock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_unlock_sched(sched);
        xnlock_put_irqrestore(&nklock, s);
 }
 
diff --git a/include/nucleus/sched.h b/include/nucleus/sched.h
index 59dece9..b9d0f01 100644
--- a/include/nucleus/sched.h
+++ b/include/nucleus/sched.h
@@ -42,6 +42,7 @@
 #define XNINTCK                0x40000000      /* In master tick handler context */
 #define XNINSW         0x20000000      /* In context switch */
 #define XNRESCHED      0x10000000      /* Needs rescheduling */
+#define XNINLOCK       0x08000000      /* Scheduler locked */
 
 /* Sched local flags */
 #define XNHTICK                0x00008000      /* Host tick pending  */
diff --git a/ksrc/nucleus/pod.c b/ksrc/nucleus/pod.c
index af53f6e..cf6c9de 100644
--- a/ksrc/nucleus/pod.c
+++ b/ksrc/nucleus/pod.c
@@ -1200,6 +1200,7 @@ void xnpod_delete_thread(xnthread_t *thread)
                 * thread zombie state to go through the rescheduling
                 * procedure then actually destroy the thread object.
                 */
+               __clrbits(sched->status, XNINLOCK);
                xnsched_set_resched(sched);
                xnpod_schedule();
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
@@ -1452,6 +1453,7 @@ void xnpod_suspend_thread(xnthread_t *thread, xnflags_t mask,
 #endif /* __XENO_SIM__ */
 
        if (thread == sched->curr) {
+               __clrbits(sched->status, XNINLOCK);
                /*
                 * If the current thread is being relaxed, we must
                 * have been called from xnshadow_relax(), in which
@@ -2302,7 +2304,6 @@ reschedule:
        xnpod_run_hooks(&nkpod->tswitchq, curr, "SWITCH");
 
       signal_unlock_and_exit:
-
        if (xnthread_signaled_p(curr))
                xnpod_dispatch_signals();
 
@@ -2310,6 +2311,9 @@ reschedule:
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule;
 
+       if (xnthread_lock_count(curr))
+               __setbits(sched->status, XNINLOCK);
+
        xnlock_put_irqrestore(&nklock, s);
 
        return;
@@ -2336,18 +2340,27 @@ reschedule:
 }
 EXPORT_SYMBOL_GPL(__xnpod_schedule);
 
-void ___xnpod_lock_sched(struct xnthread *curr)
+void ___xnpod_lock_sched(xnsched_t *sched)
 {
-       if (xnthread_lock_count(curr)++ == 0)
+       struct xnthread *curr = sched->curr;
+
+       if (xnthread_lock_count(curr)++ == 0) {
+               __setbits(sched->status, XNINLOCK);
                xnthread_set_state(curr, XNLOCK);
+       }
 }
 EXPORT_SYMBOL_GPL(___xnpod_lock_sched);
 
-void ___xnpod_unlock_sched(struct xnthread *curr)
+void ___xnpod_unlock_sched(xnsched_t *sched)
 {
+       struct xnthread *curr = sched->curr;
+       XENO_ASSERT(NUCLEUS, xnthread_lock_count(curr) > 0,
+                   xnpod_fatal("Unbalanced lock/unlock");
+                   );
+
        if (--xnthread_lock_count(curr) == 0) {
                xnthread_clear_state(curr, XNLOCK);
-               xnsched_set_self_resched(curr->sched);
+               __clrbits(sched->status, XNINLOCK);
                xnpod_schedule();
        }
 }
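
For reference, the nesting behaviour the new XENO_ASSERT enforces (a
hypothetical call sequence built only from the helpers above, not a
caller found in this commit):

    xnpod_lock_sched();    /* count 0 -> 1: sets XNLOCK and XNINLOCK */
    xnpod_lock_sched();    /* count 1 -> 2: flags already set */
    xnpod_unlock_sched();  /* count 2 -> 1: still locked */
    xnpod_unlock_sched();  /* count 1 -> 0: clears both flags, reschedules */
    /*
     * A further xnpod_unlock_sched() here would trip the new
     * "Unbalanced lock/unlock" fatal assertion when nucleus debugging
     * is enabled.
     */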

