Module: xenomai-3
Branch: master
Commit: 725774014dea790c36005531f6061e8e10fb5155
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=725774014dea790c36005531f6061e8e10fb5155

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Jul  3 19:08:27 2015 +0200

cobalt/sched: make scheduler locking lockless

Rework the implementation so that we don't have to hold the big
nucleus lock for updating the scheduler lock nesting count.
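
In essence, the nesting count becomes per-thread state that only its
owner ever touches, so updating it needs no global serialization; it
is enough that the caller cannot migrate CPUs in the meantime. A
minimal sketch of the idea, using simplified stand-in types (the
demo_* names are hypothetical; the real code is in
kernel/cobalt/sched.c below):

/* Sketch only: demo_sched/demo_thread stand in for the real
 * struct xnsched/struct xnthread. */
struct demo_sched {
	unsigned long lflags;		/* scheduler-local flag bits */
};

struct demo_thread {
	int lock_count;			/* sched-lock nesting depth */
	struct demo_sched *sched;	/* scheduler this thread runs on */
};

#define XNINLOCK 0x1			/* "scheduler locked" flag */

/*
 * Only the current thread reads or writes its own lock_count, and
 * lflags belongs to the CPU the thread runs on, so neither update
 * needs the big nucleus lock.
 */
static void demo_sched_lock(struct demo_thread *curr)
{
	if (curr->lock_count++ == 0)
		curr->sched->lflags |= XNINLOCK;
}

static void demo_sched_unlock(struct demo_thread *curr)
{
	if (--curr->lock_count == 0)
		curr->sched->lflags &= ~XNINLOCK;	/* may reschedule */
}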

---

 include/cobalt/kernel/assert.h      |    1 +
 include/cobalt/kernel/rtdm/driver.h |   16 +++++++--------
 include/cobalt/kernel/sched.h       |   32 ++----------------------------
 kernel/cobalt/sched.c               |   37 ++++++++++++++++++++++++++---------
 kernel/cobalt/thread.c              |    2 +-
 5 files changed, 40 insertions(+), 48 deletions(-)

diff --git a/include/cobalt/kernel/assert.h b/include/cobalt/kernel/assert.h
index 82d01a5..ab52ff1 100644
--- a/include/cobalt/kernel/assert.h
+++ b/include/cobalt/kernel/assert.h
@@ -47,6 +47,7 @@
 #define interrupt_only()       XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p())
 #define realtime_cpu_only()    XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(ipipe_processor_id()))
 #define thread_only()          XENO_BUG_ON(CONTEXT, xnsched_interrupt_p())
+#define irqoff_only()          XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
 #if XENO_DEBUG(LOCKING)
 #define atomic_only()          XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0)
 #define preemptible_only()     XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled())
diff --git a/include/cobalt/kernel/rtdm/driver.h b/include/cobalt/kernel/rtdm/driver.h
index c14198b..4853ba9 100644
--- a/include/cobalt/kernel/rtdm/driver.h
+++ b/include/cobalt/kernel/rtdm/driver.h
@@ -436,7 +436,7 @@ void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
 #define cobalt_atomic_enter(context)                   \
        do {                                            \
                xnlock_get_irqsave(&nklock, (context)); \
-               __xnsched_lock();                       \
+               xnsched_lock();                         \
        } while (0)
 
 /**
@@ -453,7 +453,7 @@ void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
  */
 #define cobalt_atomic_leave(context)                           \
        do {                                                    \
-               __xnsched_unlock();                             \
+               xnsched_unlock();                               \
                xnlock_put_irqrestore(&nklock, (context));      \
        } while (0)
 
@@ -502,9 +502,9 @@ rtdm_execute_atomically(void) { }
                                                        \
        rtdm_execute_atomically();                      \
        xnlock_get_irqsave(&nklock, __rtdm_s);          \
-       __xnsched_lock();                               \
+       xnsched_lock();                                 \
        code_block;                                     \
-       __xnsched_unlock();                             \
+       xnsched_unlock();                               \
        xnlock_put_irqrestore(&nklock, __rtdm_s);       \
 }
 #endif
@@ -553,7 +553,7 @@ static inline void rtdm_lock_get(rtdm_lock_t *lock)
 {
        XENO_BUG_ON(COBALT, !spltest());
        spin_lock(lock);
-       __xnsched_lock();
+       xnsched_lock();
 }
 
 /**
@@ -566,7 +566,7 @@ static inline void rtdm_lock_get(rtdm_lock_t *lock)
 static inline void rtdm_lock_put(rtdm_lock_t *lock)
 {
        spin_unlock(lock);
-       __xnsched_unlock();
+       xnsched_unlock();
 }
 
 /**
@@ -584,7 +584,7 @@ static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock)
 
        context = ipipe_test_and_stall_head();
        spin_lock(lock);
-       __xnsched_lock();
+       xnsched_lock();
 
        return context;
 }
@@ -603,7 +603,7 @@ static inline
 void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context)
 {
        spin_unlock(lock);
-       __xnsched_unlock();
+       xnsched_unlock();
        ipipe_restore_head(context);
 }
 
diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 774f441..9cc64e5 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -297,37 +297,9 @@ static inline int xnsched_run(void)
        return __xnsched_run(sched);
 }
 
-void ___xnsched_lock(struct xnsched *sched);
+void xnsched_lock(void);
 
-void ___xnsched_unlock(struct xnsched *sched);
-
-static inline void __xnsched_lock(void)
-{
-       ___xnsched_lock(xnsched_current());
-}
-
-static inline void __xnsched_unlock(void)
-{
-       ___xnsched_unlock(xnsched_current());
-}
-
-static inline void xnsched_lock(void)
-{
-       spl_t s;
-
-       xnlock_get_irqsave(&nklock, s);
-       __xnsched_lock();
-       xnlock_put_irqrestore(&nklock, s);
-}
-
-static inline void xnsched_unlock(void)
-{
-       spl_t s;
-
-       xnlock_get_irqsave(&nklock, s);
-       __xnsched_unlock();
-       xnlock_put_irqrestore(&nklock, s);
-}
+void xnsched_unlock(void);
 
 static inline int xnsched_interrupt_p(void)
 {
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 240b5da..01ada7c 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -320,29 +320,48 @@ struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 
 #endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
 
-void ___xnsched_lock(struct xnsched *sched)
+void xnsched_lock(void)
 {
-       struct xnthread *curr = sched->curr;
+       struct xnthread *curr = xnthread_current();
+
+       /*
+        * CAUTION: The fast xnthread_current() accessor carries the
+        * relevant lock nesting count only if current runs in primary
+        * mode. Otherwise, if the caller is unknown or relaxed
+        * Xenomai-wise, then we fall back to the root thread on the
+        * current scheduler, which must be done with IRQs off.
+        * Either way, we don't need to grab the super lock.
+        */
+       if (unlikely(curr == NULL || xnthread_test_state(curr, XNRELAX))) {
+               irqoff_only();
+               curr = &xnsched_current()->rootcb;
+               XENO_BUG_ON(COBALT, xnsched_current()->curr != curr);
+       }
 
        if (curr->lock_count++ == 0)
-               sched->lflags |= XNINLOCK;
+               curr->sched->lflags |= XNINLOCK;
 }
-EXPORT_SYMBOL_GPL(___xnsched_lock);
+EXPORT_SYMBOL_GPL(xnsched_lock);
 
-void ___xnsched_unlock(struct xnsched *sched)
+void xnsched_unlock(void)
 {
-       struct xnthread *curr = sched->curr;
+       struct xnthread *curr = xnthread_current();
+
+       if (unlikely(curr == NULL || xnthread_test_state(curr, XNRELAX))) {
+               irqoff_only();
+               curr = &xnsched_current()->rootcb;
+       }
 
        if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
                return;
-
+       
        if (--curr->lock_count == 0) {
                xnthread_clear_localinfo(curr, XNLBALERT);
-               sched->lflags &= ~XNINLOCK;
+               curr->sched->lflags &= ~XNINLOCK;
                xnsched_run();
        }
 }
-EXPORT_SYMBOL_GPL(___xnsched_unlock);
+EXPORT_SYMBOL_GPL(xnsched_unlock);
 
 /* Must be called with nklock locked, interrupts off. */
 void xnsched_putback(struct xnthread *thread)
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 6badab2..409ac16 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -756,7 +756,7 @@ int xnthread_set_mode(int clrmask, int setmask)
 
        if (setmask & XNLOCK) {
                if (lock_count == 0)
-                       __xnsched_lock();
+                       xnsched_lock();
        } else if (clrmask & XNLOCK) {
                if (lock_count > 0) {
                        curr->lock_count = 0;
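
At the API level nothing changes for driver writers: an RTDM critical
section is entered and left exactly as before, except that the
scheduler lock nesting count is now adjusted without round-tripping
through nklock. A hypothetical usage sketch (demo_lock, demo_update
and shared_counter are made-up names; assumes <rtdm/driver.h>):

static rtdm_lock_t demo_lock;	/* set up with rtdm_lock_init() */
static int shared_counter;

static void demo_update(int val)
{
	rtdm_lockctx_t ctx;

	/* Enters the sched lock too: bumps the caller's lock_count. */
	rtdm_lock_get_irqsave(&demo_lock, ctx);
	shared_counter = val;
	/* Drops lock_count; triggers xnsched_run() when it hits zero. */
	rtdm_lock_put_irqrestore(&demo_lock, ctx);
}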


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://xenomai.org/mailman/listinfo/xenomai-git
