Module: xenomai-forge
Branch: next
Commit: 02fc08ee6cf8b69410575c922daea6b9044af5db
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=02fc08ee6cf8b69410575c922daea6b9044af5db

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Dec 29 12:38:09 2013 +0100

cobalt/sched: drop sched flag debugging feature

This feature was aimed at detecting changes in the scheduler state
which were not covered by the XNRESCHED flag. Such a situation would
be interpreted as some code affecting the run queue outside of the
legitimate routines.

Since a pick_next() handler may reassess the need for switching
contexts based on information specific, or even internal, to the
policy module, XNRESCHED may not always reflect the latest scheduler
state change.

---

 include/cobalt/kernel/sched.h |   25 +++++++++++--------------
 kernel/cobalt/sched.c         |   30 ++++++------------------------
 2 files changed, 17 insertions(+), 38 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 870de29..83fa25f 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -264,35 +264,32 @@ static inline int xnsched_run(void)
         * primary domain is needed, we won't use critical scheduler
         * information before we actually run in primary mode;
         * therefore we can first test the scheduler status then
-        * escalate.  Running in the primary domain means that no
-        * Linux-triggered CPU migration may occur from that point
-        * either. Finally, since migration is always a self-directed
-        * operation for Xenomai threads, we can safely read the
-        * scheduler state bits without holding the nklock.
+        * escalate.
+        *
+        * Running in the primary domain means that no Linux-triggered
+        * CPU migration may occur from that point either. Finally,
+        * since migration is always a self-directed operation for
+        * Xenomai threads, we can safely read the scheduler state
+        * bits without holding the nklock.
         *
         * Said differently, if we race here because of a CPU
         * migration, it must have been Linux-triggered because we run
         * in secondary mode; in which case we will escalate to the
         * primary domain, then unwind the current call frame without
         * running the rescheduling procedure in
-        * __xnsched_run(). Therefore, the scheduler pointer will
-        * be either valid, or unused.
+        * __xnsched_run(). Therefore, the scheduler slot
+        * (i.e. "sched") will be either valid, or unused.
         */
        sched = xnsched_current();
        smp_rmb();
        /*
         * No immediate rescheduling is possible if an ISR context is
-        * active, or if we are caught in the middle of a unlocked
-        * context switch.
+        * active, the current thread holds the scheduler lock, or if
+        * we are caught in the middle of an unlocked context switch.
         */
-#if XENO_DEBUG(NUCLEUS)
-       if ((sched->status|sched->lflags) & (XNINIRQ|XNINSW|XNINLOCK))
-               return 0;
-#else /* !XENO_DEBUG(NUCLEUS) */
        if (((sched->status|sched->lflags) &
             (XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
                return 0;
-#endif /* !XENO_DEBUG(NUCLEUS) */
 
        return __xnsched_run(sched);
 }
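Note how the surviving test folds two conditions into a single
compare: rescheduling proceeds only if XNRESCHED is raised and none
of XNINIRQ, XNINSW or XNINLOCK is. A standalone illustration, using
made-up bit values rather than the real definitions from sched.h:

#include <stdio.h>

/* Hypothetical values, for illustration only. */
#define XNINIRQ   0x01  /* ISR context active */
#define XNINSW    0x02  /* unlocked context switch in progress */
#define XNRESCHED 0x04  /* rescheduling needed */
#define XNINLOCK  0x08  /* scheduler lock held by current thread */

static int can_resched(unsigned long status, unsigned long lflags)
{
        /* True iff XNRESCHED is set and no blocking bit is: masking
         * then comparing to XNRESCHED tests both at once. */
        return ((status | lflags) &
                (XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) == XNRESCHED;
}

int main(void)
{
        printf("%d\n", can_resched(XNRESCHED, 0));       /* 1: go */
        printf("%d\n", can_resched(XNRESCHED, XNINIRQ)); /* 0: in ISR */
        printf("%d\n", can_resched(0, 0));               /* 0: nothing pending */
        return 0;
}
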
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index e4ef3bf..84881c6 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -257,7 +257,6 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
                if (!xnthread_test_state(curr, XNREADY)) {
                        xnsched_requeue(curr);
                        xnthread_set_state(curr, XNREADY);
-                       xnsched_set_self_resched(sched);
                }
        }
 
@@ -747,7 +746,7 @@ void __xnsched_run_handler(void) /* hw interrupts off. */
 int __xnsched_run(struct xnsched *sched)
 {
        struct xnthread *prev, *next, *curr;
-       int switched, need_resched, shadow;
+       int switched, shadow;
        spl_t s;
 
        if (xnarch_escalate())
@@ -761,24 +760,10 @@ int __xnsched_run(struct xnsched *sched)
        xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
 reschedule:
        switched = 0;
-#if !XENO_DEBUG(NUCLEUS)
-       need_resched = test_resched(sched);
-       if (!need_resched)
-               goto signal_unlock_and_exit;
-#endif /* !XENO_DEBUG(NUCLEUS) */
+       if (!test_resched(sched))
+               goto out;
+
        next = xnsched_pick_next(sched);
-#if XENO_DEBUG(NUCLEUS)
-       /*
-        * CAUTION: in debug mode, we unconditionally pick the next
-        * runnable thread in order to check for consistency with
-        * XNRESCHED (i.e. XNRESCHED shall be raised if the scheduler
-        * state has changed). Since xnsched_pick() may change the
-        * scheduler state - either directly, or indirectly via the
-        * policy plugin, we must collect need_resched _after_ the new
-        * current thread was picked.
-        */
-       need_resched = test_resched(sched);
-#endif
        if (next == curr) {
                if (unlikely(xnthread_test_state(next, XNROOT))) {
                        if (sched->lflags & XNHTICK)
@@ -786,11 +771,9 @@ reschedule:
                        if (sched->lflags & XNHDEFER)
                                xnclock_program_shot(&nkclock, sched);
                }
-               goto signal_unlock_and_exit;
+               goto out;
        }
 
-       XENO_BUGON(NUCLEUS, need_resched == 0);
-
        prev = curr;
 
        trace_mark(xn_nucleus, sched_switch,
@@ -845,8 +828,7 @@ reschedule:
        xnthread_switch_fpu(sched);
        xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
 
-signal_unlock_and_exit:
-
+out:
        if (switched &&
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule;
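With the debug branches gone, the rescheduling loop reduces to the
shape below (condensed from the hunks above; locking, tracing and the
context switch itself are elided, so this is a sketch rather than
compilable code):

reschedule:
        switched = 0;
        if (!test_resched(sched))
                goto out;       /* XNRESCHED alone gates the pick */

        next = xnsched_pick_next(sched);
        if (next == curr)
                goto out;       /* nothing better to run */

        /* ...switch context to next, set switched = 1... */
out:
        if (switched &&
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule; /* state changed during unlocked switch */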


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git