Module: xenomai-3
Branch: master
Commit: 79a94a46f0c8f280a052f244ddb064b61a1f95ce
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=79a94a46f0c8f280a052f244ddb064b61a1f95ce

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Jul  2 09:18:41 2015 +0200

cobalt/sched: get rid of XNINLOCK scheduler flag

XNINLOCK and the per-thread scheduler lock nesting count are
redundant. Drop XNINLOCK, which conveys less information.

---

 include/cobalt/kernel/sched.h |   42 ++++++++++++++++++++++++-----------------
 kernel/cobalt/clock.c         |    2 +-
 kernel/cobalt/sched.c         |   11 +++--------
 kernel/cobalt/thread.c        |   11 +++++------
 4 files changed, 34 insertions(+), 32 deletions(-)
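For readers who do not want to walk the hunks below, here is a minimal, self-contained userspace model of the idea: once each thread carries a scheduler-lock nesting count, that counter alone says whether rescheduling is locked out, so the separate XNINLOCK bit in lflags adds nothing and the rescheduling entry point can simply test curr->lock_count first. Flag names mirror the patch; the bit values, structures and main() driver are illustrative only, not the actual Cobalt kernel code.

    #include <stdio.h>

    /*
     * Toy model of the change: the scheduler-lock state is derived solely
     * from the per-thread nesting count, so no XNINLOCK flag is needed in
     * the scheduler's status/lflags words. Bit values below are arbitrary
     * placeholders; the real ones live in include/cobalt/kernel/sched.h.
     */
    #define XNINIRQ    (1 << 0)    /* in IRQ handling context */
    #define XNINSW     (1 << 1)    /* unlocked context switch in progress */
    #define XNRESCHED  (1 << 2)    /* rescheduling requested */

    struct thread {
            int lock_count;        /* scheduler lock nesting depth */
    };

    struct sched {
            unsigned long status;
            unsigned long lflags;
            struct thread *curr;
    };

    static int do_reschedule(struct sched *sched)
    {
            /* Stand-in for the real ___xnsched_run() context switch. */
            printf("rescheduling\n");
            sched->status &= ~XNRESCHED;
            return 1;
    }

    static int sched_run(struct sched *sched)
    {
            /* New fast path: the nesting count replaces the XNINLOCK bit. */
            if (sched->curr->lock_count > 0)
                    return 0;

            /* Unchanged status test, minus XNINLOCK (cf. __xnsched_run()). */
            if (((sched->status | sched->lflags) &
                 (XNINIRQ | XNINSW | XNRESCHED)) != XNRESCHED)
                    return 0;

            return do_reschedule(sched);
    }

    static void sched_lock(struct sched *sched)
    {
            /* Before the patch this also set XNINLOCK at nesting level 1. */
            sched->curr->lock_count++;
    }

    static void sched_unlock(struct sched *sched)
    {
            /* Dropping the last nesting level re-enables rescheduling. */
            if (--sched->curr->lock_count == 0)
                    sched_run(sched);
    }

    int main(void)
    {
            struct thread t = { .lock_count = 0 };
            struct sched s = { .status = XNRESCHED, .lflags = 0, .curr = &t };

            sched_lock(&s);
            printf("while locked: switched=%d\n", sched_run(&s)); /* prints 0 */
            sched_unlock(&s);                                     /* reschedules here */
            return 0;
    }

The same split shows up in the patch itself: xnsched_run() now performs the lock_count test, the new inline __xnsched_run() keeps the status-word test, and the renamed ___xnsched_run() does the actual switching.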

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 9cc64e5..1c11756 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -45,7 +45,6 @@
 #define XNHTICK                0x00008000      /* Host tick pending  */
 #define XNINIRQ                0x00004000      /* In IRQ handling context */
 #define XNHDEFER       0x00002000      /* Host tick deferred */
-#define XNINLOCK       0x00001000      /* Scheduler locked */
 
 struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
@@ -255,19 +254,18 @@ static inline int xnsched_supported_cpu(int cpu)
        for_each_online_cpu(cpu)                \
                if (xnsched_supported_cpu(cpu)) \
 
-int __xnsched_run(struct xnsched *sched);
+int ___xnsched_run(struct xnsched *sched);
 
 void __xnsched_run_handler(void);
 
-static inline int xnsched_run(void)
+static inline int __xnsched_run(struct xnsched *sched)
 {
-       struct xnsched *sched;
        /*
-        * NOTE: Since __xnsched_run() won't run if an escalation to
-        * primary domain is needed, we won't use critical scheduler
-        * information before we actually run in primary mode;
-        * therefore we can first test the scheduler status then
-        * escalate.
+        * NOTE: Since ___xnsched_run() won't run immediately if an
+        * escalation to primary domain is needed, we won't use
+        * critical scheduler information before we actually run in
+        * primary mode; therefore we can first test the scheduler
+        * status then escalate.
         *
         * Running in the primary domain means that no Linux-triggered
         * CPU migration may occur from that point either. Finally,
@@ -280,18 +278,28 @@ static inline int xnsched_run(void)
         * in secondary mode; in which case we will escalate to the
         * primary domain, then unwind the current call frame without
         * running the rescheduling procedure in
-        * __xnsched_run(). Therefore, the scheduler slot
+        * ___xnsched_run(). Therefore, the scheduler slot
         * (i.e. "sched") will be either valid, or unused.
         */
-       sched = xnsched_current();
-       smp_rmb();
+       if (((sched->status|sched->lflags) &
+            (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+               return 0;
+
+       return ___xnsched_run(sched);
+}
+
+static inline int xnsched_run(void)
+{
+       struct xnsched *sched = xnsched_current();
        /*
-        * No immediate rescheduling is possible if an ISR context is
-        * active, the current thread holds the scheduler lock, or if
-        * we are caught in the middle of an unlocked context switch.
+        * No rescheduling is possible, either if:
+        *
+        * - the current thread holds the scheduler lock
+        * - an ISR context is active
+        * - we are caught in the middle of an unlocked context switch.
         */
-       if (((sched->status|sched->lflags) &
-            (XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
+       smp_rmb();
+       if (unlikely(sched->curr->lock_count > 0))
                return 0;
 
        return __xnsched_run(sched);
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index dfaa79e..e75d296 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -157,7 +157,7 @@ void xnclock_core_local_shot(struct xnsched *sched)
         * resumes.
         *
         * The host tick deferral is cleared whenever Xenomai is about
-        * to yield control to the host kernel (see __xnsched_run()),
+        * to yield control to the host kernel (see ___xnsched_run()),
         * or a timer with an earlier timeout date is scheduled,
         * whichever comes first.
         */
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 01ada7c..56014e0 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -338,8 +338,7 @@ void xnsched_lock(void)
                XENO_BUG_ON(COBALT, xnsched_current()->curr != curr);
        }
 
-       if (curr->lock_count++ == 0)
-               curr->sched->lflags |= XNINLOCK;
+       curr->lock_count++;
 }
 EXPORT_SYMBOL_GPL(xnsched_lock);
 
@@ -357,7 +356,6 @@ void xnsched_unlock(void)
        
        if (--curr->lock_count == 0) {
                xnthread_clear_localinfo(curr, XNLBALERT);
-               curr->sched->lflags &= ~XNINLOCK;
                xnsched_run();
        }
 }
@@ -800,7 +798,7 @@ void __xnsched_run_handler(void) /* hw interrupts off. */
        xnsched_run();
 }
 
-int __xnsched_run(struct xnsched *sched)
+int ___xnsched_run(struct xnsched *sched)
 {
        struct xnthread *prev, *next, *curr;
        int switched, shadow;
@@ -886,9 +884,6 @@ out:
            xnsched_maybe_resched_after_unlocked_switch(sched))
                goto reschedule;
 
-       if (curr->lock_count > 0)
-               sched->lflags |= XNINLOCK;
-
        xnlock_put_irqrestore(&nklock, s);
 
        return switched;
@@ -909,7 +904,7 @@ shadow_epilogue:
 
        return 1;
 }
-EXPORT_SYMBOL_GPL(__xnsched_run);
+EXPORT_SYMBOL_GPL(___xnsched_run);
 
 #ifdef CONFIG_XENO_OPT_VFILE
 
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 409ac16..fa2fffd 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -915,27 +915,26 @@ void xnthread_suspend(struct xnthread *thread, int mask,
         * opportunity for interrupt delivery right before switching
         * context, which shortens the uninterruptible code path.
         *
-        * We have to shut irqs off before xnsched_run() though: if an
-        * interrupt could preempt us in __xnsched_run() right after
-        * the call to xnarch_escalate() but before we grab the
+        * We have to shut irqs off before __xnsched_run() though: if
+        * an interrupt could preempt us in ___xnsched_run() right
+        * after the call to xnarch_escalate() but before we grab the
         * nklock, we would enter the critical section in
         * xnsched_run() while running in secondary mode, which would
         * defeat the purpose of xnarch_escalate().
         */
        if (likely(thread == sched->curr)) {
                xnsched_set_resched(sched);
-               sched->lflags &= ~XNINLOCK;
                if (unlikely(mask & XNRELAX)) {
                        xnlock_clear_irqon(&nklock);
                        splmax();
-                       xnsched_run();
+                       __xnsched_run(sched);
                        return;
                }
                /*
                 * If the thread is runnning on another CPU,
                 * xnsched_run will trigger the IPI as required.
                 */
-               xnsched_run();
+               __xnsched_run(sched);
                goto out;
        }
 


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://xenomai.org/mailman/listinfo/xenomai-git
