[Xenomai-git] Philippe Gerum : cobalt/sched: get rid of XNINLOCK scheduler flag

2015-07-27 Thread git repository hosting
Module: xenomai-3
Branch: master
Commit: 79a94a46f0c8f280a052f244ddb064b61a1f95ce
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=79a94a46f0c8f280a052f244ddb064b61a1f95ce

Author: Philippe Gerum 
Date:   Thu Jul  2 09:18:41 2015 +0200

cobalt/sched: get rid of XNINLOCK scheduler flag

XNINLOCK and the per-thread scheduler lock nesting count are
redundant. Drop XNINLOCK which conveys less information.

---

 include/cobalt/kernel/sched.h |   42 -
 kernel/cobalt/clock.c |2 +-
 kernel/cobalt/sched.c |   11 +++
 kernel/cobalt/thread.c|   11 +--
 4 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 9cc64e5..1c11756 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -45,7 +45,6 @@
 #define XNHTICK    0x8000  /* Host tick pending  */
 #define XNINIRQ    0x4000  /* In IRQ handling context */
 #define XNHDEFER   0x2000  /* Host tick deferred */
-#define XNINLOCK   0x1000  /* Scheduler locked */
 
 struct xnsched_rt {
xnsched_queue_t runnable;   /*!< Runnable thread queue. */
@@ -255,19 +254,18 @@ static inline int xnsched_supported_cpu(int cpu)
for_each_online_cpu(cpu)\
if (xnsched_supported_cpu(cpu)) \
 
-int __xnsched_run(struct xnsched *sched);
+int ___xnsched_run(struct xnsched *sched);
 
 void __xnsched_run_handler(void);
 
-static inline int xnsched_run(void)
+static inline int __xnsched_run(struct xnsched *sched)
 {
-   struct xnsched *sched;
/*
-* NOTE: Since __xnsched_run() won't run if an escalation to
-* primary domain is needed, we won't use critical scheduler
-* information before we actually run in primary mode;
-* therefore we can first test the scheduler status then
-* escalate.
+* NOTE: Since ___xnsched_run() won't run immediately if an
+* escalation to primary domain is needed, we won't use
+* critical scheduler information before we actually run in
+* primary mode; therefore we can first test the scheduler
+* status then escalate.
 *
 * Running in the primary domain means that no Linux-triggered
 * CPU migration may occur from that point either. Finally,
@@ -280,18 +278,28 @@ static inline int xnsched_run(void)
 * in secondary mode; in which case we will escalate to the
 * primary domain, then unwind the current call frame without
 * running the rescheduling procedure in
-* __xnsched_run(). Therefore, the scheduler slot
+* ___xnsched_run(). Therefore, the scheduler slot
 * (i.e. "sched") will be either valid, or unused.
 */
-   sched = xnsched_current();
-   smp_rmb();
+   if (((sched->status|sched->lflags) &
+(XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+   return 0;
+
+   return ___xnsched_run(sched);
+}
+
+static inline int xnsched_run(void)
+{
+   struct xnsched *sched = xnsched_current();
/*
-* No immediate rescheduling is possible if an ISR context is
-* active, the current thread holds the scheduler lock, or if
-* we are caught in the middle of an unlocked context switch.
+* No rescheduling is possible, either if:
+*
+* - the current thread holds the scheduler lock
+* - an ISR context is active
+* - we are caught in the middle of an unlocked context switch.
 */
-   if (((sched->status|sched->lflags) &
-(XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
+   smp_rmb();
+   if (unlikely(sched->curr->lock_count > 0))
return 0;
 
return __xnsched_run(sched);
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index dfaa79e..e75d296 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -157,7 +157,7 @@ void xnclock_core_local_shot(struct xnsched *sched)
 * resumes.
 *
 * The host tick deferral is cleared whenever Xenomai is about
-* to yield control to the host kernel (see __xnsched_run()),
+* to yield control to the host kernel (see ___xnsched_run()),
 * or a timer with an earlier timeout date is scheduled,
 * whichever comes first.
 */
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 01ada7c..56014e0 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -338,8 +338,7 @@ void xnsched_lock(void)
XENO_BUG_ON(COBALT, xnsched_current()->curr != curr);
}
 
-   if (curr->lock_count++ == 0)
-   curr->sched->lflags |= XNINLOCK;
+   curr->lock_count++;
 }
 EXPORT_SYMBOL_GPL(xnsched_lock);
 
@@ -357,7 +356,6 @@ void xnsched_unlock(void)

if (--curr->lock_count == 0) {
		xnthread_clear_info(curr, XNLBALERT);
[message truncated in archive]

[Xenomai-git] Philippe Gerum : cobalt/sched: get rid of XNINLOCK scheduler flag

2015-07-06 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 79a94a46f0c8f280a052f244ddb064b61a1f95ce
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=79a94a46f0c8f280a052f244ddb064b61a1f95ce

Author: Philippe Gerum 
Date:   Thu Jul  2 09:18:41 2015 +0200

cobalt/sched: get rid of XNINLOCK scheduler flag

XNINLOCK and the per-thread scheduler lock nesting count are
redundant. Drop XNINLOCK which conveys less information.

---

 include/cobalt/kernel/sched.h |   42 -
 kernel/cobalt/clock.c |2 +-
 kernel/cobalt/sched.c |   11 +++
 kernel/cobalt/thread.c|   11 +--
 4 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 9cc64e5..1c11756 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -45,7 +45,6 @@
 #define XNHTICK    0x8000  /* Host tick pending  */
 #define XNINIRQ    0x4000  /* In IRQ handling context */
 #define XNHDEFER   0x2000  /* Host tick deferred */
-#define XNINLOCK   0x1000  /* Scheduler locked */
 
 struct xnsched_rt {
xnsched_queue_t runnable;   /*!< Runnable thread queue. */
@@ -255,19 +254,18 @@ static inline int xnsched_supported_cpu(int cpu)
for_each_online_cpu(cpu)\
if (xnsched_supported_cpu(cpu)) \
 
-int __xnsched_run(struct xnsched *sched);
+int ___xnsched_run(struct xnsched *sched);
 
 void __xnsched_run_handler(void);
 
-static inline int xnsched_run(void)
+static inline int __xnsched_run(struct xnsched *sched)
 {
-   struct xnsched *sched;
/*
-* NOTE: Since __xnsched_run() won't run if an escalation to
-* primary domain is needed, we won't use critical scheduler
-* information before we actually run in primary mode;
-* therefore we can first test the scheduler status then
-* escalate.
+* NOTE: Since ___xnsched_run() won't run immediately if an
+* escalation to primary domain is needed, we won't use
+* critical scheduler information before we actually run in
+* primary mode; therefore we can first test the scheduler
+* status then escalate.
 *
 * Running in the primary domain means that no Linux-triggered
 * CPU migration may occur from that point either. Finally,
@@ -280,18 +278,28 @@ static inline int xnsched_run(void)
 * in secondary mode; in which case we will escalate to the
 * primary domain, then unwind the current call frame without
 * running the rescheduling procedure in
-* __xnsched_run(). Therefore, the scheduler slot
+* ___xnsched_run(). Therefore, the scheduler slot
 * (i.e. "sched") will be either valid, or unused.
 */
-   sched = xnsched_current();
-   smp_rmb();
+   if (((sched->status|sched->lflags) &
+(XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+   return 0;
+
+   return ___xnsched_run(sched);
+}
+
+static inline int xnsched_run(void)
+{
+   struct xnsched *sched = xnsched_current();
/*
-* No immediate rescheduling is possible if an ISR context is
-* active, the current thread holds the scheduler lock, or if
-* we are caught in the middle of an unlocked context switch.
+* No rescheduling is possible, either if:
+*
+* - the current thread holds the scheduler lock
+* - an ISR context is active
+* - we are caught in the middle of an unlocked context switch.
 */
-   if (((sched->status|sched->lflags) &
-(XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
+   smp_rmb();
+   if (unlikely(sched->curr->lock_count > 0))
return 0;
 
return __xnsched_run(sched);
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index dfaa79e..e75d296 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -157,7 +157,7 @@ void xnclock_core_local_shot(struct xnsched *sched)
 * resumes.
 *
 * The host tick deferral is cleared whenever Xenomai is about
-* to yield control to the host kernel (see __xnsched_run()),
+* to yield control to the host kernel (see ___xnsched_run()),
 * or a timer with an earlier timeout date is scheduled,
 * whichever comes first.
 */
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 01ada7c..56014e0 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -338,8 +338,7 @@ void xnsched_lock(void)
XENO_BUG_ON(COBALT, xnsched_current()->curr != curr);
}
 
-   if (curr->lock_count++ == 0)
-   curr->sched->lflags |= XNINLOCK;
+   curr->lock_count++;
 }
 EXPORT_SYMBOL_GPL(xnsched_lock);
 
@@ -357,7 +356,6 @@ void xnsched_unlock(void)

if (--curr->lock_count == 0) {
		xnthread_clear_info(curr, XNLBALERT);
[message truncated in archive]

[Xenomai-git] Philippe Gerum : cobalt/sched: get rid of XNINLOCK scheduler flag

2015-07-02 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: e517d01cb3e94107f9ae78e6785263df635e8b56
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=e517d01cb3e94107f9ae78e6785263df635e8b56

Author: Philippe Gerum 
Date:   Thu Jul  2 09:18:41 2015 +0200

cobalt/sched: get rid of XNINLOCK scheduler flag

XNINLOCK and the per-thread scheduler lock nesting count are
redundant. Drop XNINLOCK which conveys less information.

---

 include/cobalt/kernel/sched.h |9 ++---
 kernel/cobalt/sched.c |   14 --
 kernel/cobalt/thread.c|1 -
 3 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index ecd9605..ca0f732 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -45,7 +45,6 @@
 #define XNHTICK    0x8000  /* Host tick pending  */
 #define XNINIRQ    0x4000  /* In IRQ handling context */
 #define XNHDEFER   0x2000  /* Host tick deferred */
-#define XNINLOCK   0x1000  /* Scheduler locked */
 
 struct xnsched_rt {
xnsched_queue_t runnable;   /*!< Runnable thread queue. */
@@ -291,13 +290,17 @@ static inline int xnsched_run(void)
 * we are caught in the middle of an unlocked context switch.
 */
if (((sched->status|sched->lflags) &
-(XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
+(XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED ||
+   sched->curr->lock_count > 0)
return 0;
 
return __xnsched_run(sched);
 }
 
-void ___xnsched_lock(struct xnsched *sched);
+static inline void ___xnsched_lock(struct xnsched *sched)
+{
+   sched->curr->lock_count++;
+}
 
 void ___xnsched_unlock(struct xnsched *sched);
 
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index bda61a6..73965e7 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -320,15 +320,6 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
 
 #endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
 
-void ___xnsched_lock(struct xnsched *sched)
-{
-   struct xnthread *curr = sched->curr;
-
-   if (curr->lock_count++ == 0)
-   sched->lflags |= XNINLOCK;
-}
-EXPORT_SYMBOL_GPL(___xnsched_lock);
-
 void ___xnsched_unlock(struct xnsched *sched)
 {
struct xnthread *curr = sched->curr;
@@ -338,7 +329,6 @@ void ___xnsched_unlock(struct xnsched *sched)
 
if (--curr->lock_count == 0) {
xnthread_clear_info(curr, XNLBALERT);
-   sched->lflags &= ~XNINLOCK;
xnsched_run();
}
 }
@@ -350,7 +340,6 @@ void ___xnsched_unlock_fully(struct xnsched *sched)
 
curr->lock_count = 0;
xnthread_clear_info(curr, XNLBALERT);
-   sched->lflags &= ~XNINLOCK;
xnsched_run();
 }
 EXPORT_SYMBOL_GPL(___xnsched_unlock_fully);
@@ -878,9 +867,6 @@ out:
xnsched_maybe_resched_after_unlocked_switch(sched))
goto reschedule;
 
-   if (curr->lock_count > 0)
-   sched->lflags |= XNINLOCK;
-
xnlock_put_irqrestore(&nklock, s);
 
return switched;
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 07a39b6..541e8b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -920,7 +920,6 @@ void xnthread_suspend(struct xnthread *thread, int mask,
 */
if (likely(thread == sched->curr)) {
xnsched_set_resched(sched);
-   sched->lflags &= ~XNINLOCK;
if (unlikely(mask & XNRELAX)) {
xnlock_clear_irqon(&nklock);
splmax();


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://xenomai.org/mailman/listinfo/xenomai-git