Module: xenomai-forge
Branch: master
Commit: 40981d4a43111e3c96fc58deef6ae92ee8e9ee64
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=40981d4a43111e3c96fc58deef6ae92ee8e9ee64

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Thu Dec  6 00:03:42 2012 +0100

nucleus/sched: allow disabling preemption temporarily

Some constructs require disabling thread preemption temporarily (e.g.
RTDM_EXECUTE_ATOMICALLY). We introduce a lightweight mechanism for this
purpose based on a core scheduler state flag. The per-thread persistent
scheduler lock feature (XNLOCK) is rebased over the new mechanism.

---

 include/cobalt/nucleus/pod.h   |   30 +++++++++++++++---------------
 include/cobalt/nucleus/sched.h |    1 +
 kernel/cobalt/nucleus/pod.c    |   23 ++++++++++++++++++-----
 3 files changed, 34 insertions(+), 20 deletions(-)

diff --git a/include/cobalt/nucleus/pod.h b/include/cobalt/nucleus/pod.h
index 00fc5ad..b9728dd 100644
--- a/include/cobalt/nucleus/pod.h
+++ b/include/cobalt/nucleus/pod.h
@@ -270,54 +270,54 @@ static inline void xnpod_schedule(void)
                return;
 #else /* !XENO_DEBUG(NUCLEUS) */
        if (testbits(sched->status | sched->lflags,
-                    XNKCOUT|XNINIRQ|XNINSW|XNRESCHED) != XNRESCHED)
+                    XNKCOUT|XNINIRQ|XNINSW|XNRESCHED|XNINLOCK) != XNRESCHED)
                return;
 #endif /* !XENO_DEBUG(NUCLEUS) */
 
        __xnpod_schedule(sched);
 }
 
-void ___xnpod_lock_sched(struct xnthread *curr);
+void ___xnpod_lock_sched(xnsched_t *sched);
 
-void ___xnpod_unlock_sched(struct xnthread *curr);
+void ___xnpod_unlock_sched(xnsched_t *sched);
 
 static inline void __xnpod_lock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
 
        barrier();
-       curr = xnpod_current_thread();
-       ___xnpod_lock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_lock_sched(sched);
 }
 
 static inline void __xnpod_unlock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
 
        barrier();
-       curr = xnpod_current_thread();
-       ___xnpod_unlock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_unlock_sched(sched);
 }
 
 static inline void xnpod_lock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-       curr = xnpod_current_thread();
-       ___xnpod_lock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_lock_sched(sched);
        xnlock_put_irqrestore(&nklock, s);
 }
 
 static inline void xnpod_unlock_sched(void)
 {
-       struct xnthread *curr;
+       xnsched_t *sched;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
-       curr = xnpod_current_thread();
-       ___xnpod_unlock_sched(curr);
+       sched = xnpod_current_sched();
+       ___xnpod_unlock_sched(sched);
        xnlock_put_irqrestore(&nklock, s);
 }
 
diff --git a/include/cobalt/nucleus/sched.h b/include/cobalt/nucleus/sched.h
index b032608..185537f 100644
--- a/include/cobalt/nucleus/sched.h
+++ b/include/cobalt/nucleus/sched.h
@@ -42,6 +42,7 @@
 #define XNINTCK                0x40000000      /* In master tick handler context */
 #define XNINSW         0x20000000      /* In context switch */
 #define XNRESCHED      0x10000000      /* Needs rescheduling */
+#define XNINLOCK       0x08000000      /* Scheduler locked */
 
 /* Sched local flags */
 #define XNHTICK                0x00008000      /* Host tick pending  */
diff --git a/kernel/cobalt/nucleus/pod.c b/kernel/cobalt/nucleus/pod.c
index 9a548fb..e7c619e 100644
--- a/kernel/cobalt/nucleus/pod.c
+++ b/kernel/cobalt/nucleus/pod.c
@@ -1087,6 +1087,7 @@ void xnpod_delete_thread(xnthread_t *thread)
                 * thread zombie state to go through the rescheduling
                 * procedure then actually destroy the thread object.
                 */
+               __clrbits(sched->status, XNINLOCK);
                xnsched_set_resched(sched);
                xnpod_schedule();
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
@@ -1325,6 +1326,7 @@ void xnpod_suspend_thread(xnthread_t *thread, xnflags_t mask,
                thread->wchan = wchan;
 
        if (thread == sched->curr) {
+               __clrbits(sched->status, XNINLOCK);
                /*
                 * If the current thread is being relaxed, we must
                 * have been called from xnshadow_relax(), in which
@@ -2151,7 +2153,6 @@ reschedule:
        xnpod_run_hooks(&nkpod->tswitchq, curr, "SWITCH");
 
       signal_unlock_and_exit:
-
        if (xnthread_signaled_p(curr))
                xnpod_dispatch_signals();
 
@@ -2159,6 +2160,9 @@ reschedule:
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule;
 
+       if (xnthread_lock_count(curr))
+               __setbits(sched->status, XNINLOCK);
+
        xnlock_put_irqrestore(&nklock, s);
 
        return;
@@ -2187,18 +2191,27 @@ reschedule:
 }
 EXPORT_SYMBOL_GPL(__xnpod_schedule);
 
-void ___xnpod_lock_sched(struct xnthread *curr)
+void ___xnpod_lock_sched(xnsched_t *sched)
 {
-       if (xnthread_lock_count(curr)++ == 0)
+       struct xnthread *curr = sched->curr;
+
+       if (xnthread_lock_count(curr)++ == 0) {
+               __setbits(sched->status, XNINLOCK);
                xnthread_set_state(curr, XNLOCK);
+       }
 }
 EXPORT_SYMBOL_GPL(___xnpod_lock_sched);
 
-void ___xnpod_unlock_sched(struct xnthread *curr)
+void ___xnpod_unlock_sched(xnsched_t *sched)
 {
+       struct xnthread *curr = sched->curr;
+       XENO_ASSERT(NUCLEUS, xnthread_lock_count(curr) > 0,
+                   xnpod_fatal("Unbalanced lock/unlock");
+                   );
+
        if (--xnthread_lock_count(curr) == 0) {
                xnthread_clear_state(curr, XNLOCK);
-               xnsched_set_self_resched(curr->sched);
+               __clrbits(sched->status, XNINLOCK);
                xnpod_schedule();
        }
 }


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to