[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-07-27 git repository hosting
Module: xenomai-3
Branch: next
Commit: 13b41f76d3126d082881f8ef091adfc5b3a04e5d
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=13b41f76d3126d082881f8ef091adfc5b3a04e5d

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |    8 ++++----
 kernel/cobalt/sched.c         |   16 ++++++++--------
 kernel/cobalt/thread.c        |    2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)
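
As an illustration of the indirection being dropped: in the tree both names are Kconfig symbols, with the Xenomai one simply tracking the I-pipe one on ARM. The standalone program below is illustration only, not Xenomai code; plain CPP defines stand in for the Kconfig symbols just to make the alias relationship visible and the post-patch style of testing the pipeline symbol directly at the use site.

/*
 * Illustration only -- not Xenomai code.  In the real tree both names are
 * Kconfig symbols; the defines below merely stand in for them so the
 * alias relationship can be shown in a compilable userland program.
 */
#include <stdio.h>

/* What the I-pipe patch provides on ARM: */
#define CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH 1

/* The old Xenomai-side alias, carrying no information of its own: */
#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
#define CONFIG_XENO_ARCH_UNLOCKED_SWITCH 1
#endif

int main(void)
{
	/* Post-patch style: test the pipeline symbol directly, so the
	 * dependency on I-pipe support is visible at the use site. */
#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
	puts("preemptible (unlocked) context switch support available");
#else
	puts("context switches run with the scheduler lock held");
#endif
	return 0;
}

Compiled and run, this prints the first message; removing the define of CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH selects the other branch, mirroring what happens on architectures without preemptible switch support.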

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index e071919..d6674a7 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -477,16 +477,16 @@ void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -692,11 +692,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -771,7 +771,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b3027d0..adb15e6 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-06-03 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 98645e1b912e77afbcfc73f866451e59131abd1a
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=98645e1b912e77afbcfc73f866451e59131abd1a

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b5817c5..e0483b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-05-21 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 27c321af1976a949bc334a49c76a1663741550ce
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=27c321af1976a949bc334a49c76a1663741550ce

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b5817c5..e0483b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-05-15 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: a3a5a9bdbf2bdef185fdd90b7be1e87fcad9bb26
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=a3a5a9bdbf2bdef185fdd90b7be1e87fcad9bb26

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b5817c5..e0483b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-05-14 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 90ad21aa908fa130cfcc72e8790f7f507730d5ce
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=90ad21aa908fa130cfcc72e8790f7f507730d5ce

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b5817c5..e0483b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-05-13 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 0af7dad30643c41d72cab5c2bbfb09f699853090
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=0af7dad30643c41d72cab5c2bbfb09f699853090

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b5817c5..e0483b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-04-17 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: ee791da368146f5626cafbecabba4638f477bdc7
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=ee791da368146f5626cafbecabba4638f477bdc7

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index b5817c5..e0483b1 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -408,7 +408,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-03-15 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 84d37ec23a4195ff0ab39329606f4baf2c28aa15
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=84d37ec23a4195ff0ab39329606f4baf2c28aa15

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index e0c488e..76dfbce 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -407,7 +407,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-03-13 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 435c844fec5c05c3c395d1a6a97c50363ce88cdf
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=435c844fec5c05c3c395d1a6a97c50363ce88cdf

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index e0c488e..76dfbce 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -407,7 +407,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-03-05 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: d15eed38294cd5d7a1fac7d2e20cfd31ed7567e6
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=d15eed38294cd5d7a1fac7d2e20cfd31ed7567e6

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index e0c488e..76dfbce 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -407,7 +407,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-02-15 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 3d54aabdb367bdf5c7053a31bcb189466d2c479d
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=3d54aabdb367bdf5c7053a31bcb189466d2c479d

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index e0c488e..76dfbce 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -407,7 +407,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant


___
Xenomai-git mailing list
Xenomai-git@xenomai.org
h

[Xenomai-git] Philippe Gerum : cobalt/sched: detect preemptible switch support in pipeline

2017-01-26 Thread git repository hosting
Module: xenomai-3
Branch: next
Commit: 35cc74edf3c729844e7efea1a78c12628f80a0d4
URL:
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=35cc74edf3c729844e7efea1a78c12628f80a0d4

Author: Philippe Gerum 
Date:   Sat Nov 14 18:07:15 2015 +0100

cobalt/sched: detect preemptible switch support in pipeline

CONFIG_XENO_ARCH_UNLOCKED_SWITCH is merely an alias for
CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH, which is only meaningful to the
ARM architecture, now that PowerPC dropped such support.

Use the pipeline symbol directly to make the dependency explicit.

---

 include/cobalt/kernel/sched.h |8 
 kernel/cobalt/sched.c |   16 
 kernel/cobalt/thread.c|2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index fe1df7d..d5d93c2 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,7 +93,7 @@ struct xnsched {
struct xntimer rrbtimer;
/*!< Root thread control block. */
struct xnthread rootcb;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_ARCH_FPU
@@ -329,7 +329,7 @@ static inline int xnsched_primary_p(void)
return !xnsched_unblockable_p();
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 
@@ -341,7 +341,7 @@ int xnsched_maybe_resched_after_unlocked_switch(struct 
xnsched *sched)
return sched->status & XNRESCHED;
 }
 
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 static inline struct xnsched *
 xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -358,7 +358,7 @@ xnsched_maybe_resched_after_unlocked_switch(struct xnsched 
*sched)
return 0;
 }
 
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 #ifdef CONFIG_XENO_OPT_WATCHDOG
 static inline void xnsched_reset_watchdog(struct xnsched *sched)
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 09960ac..b0d65d3 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -293,7 +293,7 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
 {
@@ -319,7 +319,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched)
return sched;
 }
 
-#endif /* CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
 
 void xnsched_lock(void)
 {
@@ -494,16 +494,16 @@ void xnsched_migrate(struct xnthread *thread, struct 
xnsched *sched)
xnsched_set_resched(thread->sched);
migrate_thread(thread, sched);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * Mark the thread in flight, xnsched_finish_unlocked_switch()
 * will put the thread on the remote runqueue.
 */
xnthread_set_state(thread, XNMIGRATE);
-#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#else
/* Move thread to the remote runnable queue. */
xnsched_putback(thread);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 }
 
 /*
@@ -709,11 +709,11 @@ struct xnthread *xnsched_rt_pick(struct xnsched *sched)
 static inline void switch_context(struct xnsched *sched,
  struct xnthread *prev, struct xnthread *next)
 {
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
sched->last = prev;
sched->status |= XNINSW;
xnlock_clear_irqon(&nklock);
-#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
+#endif
 
xnarch_switch_to(prev, next);
 }
@@ -788,7 +788,7 @@ static inline void enter_root(struct xnthread *root)
 {
struct xnarchtcb *rootcb __maybe_unused = xnthread_archtcb(root);
 
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
if (rootcb->core.mm == NULL)
set_ti_thread_flag(rootcb->core.tip, TIF_MMSWITCH_INT);
 #endif
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index e0c488e..76dfbce 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -407,7 +407,7 @@ EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
 static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
 {
int ret = 0;
-#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
/*
 * When deleting a thread in the course of a context switch or
 * in flight to another CPU with nklock unlocked on a distant

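
Relatedly, the xnsched_migrate() hunk earlier in the patch shows the two compile-time paths for moving a thread to another CPU: with preemptible switching the thread is only marked XNMIGRATE ("in flight") and xnsched_finish_unlocked_switch() puts it on the remote runqueue later; without it, xnsched_putback() queues the thread immediately. A minimal model of that branch, again with invented toy_* names and the config choice reduced to a runtime flag for the sake of the demo:

#include <stdbool.h>
#include <stdio.h>

#define TOY_XNMIGRATE 0x4UL

struct toy_thread {
	unsigned long state;
	bool on_runqueue;
};

static void toy_migrate(struct toy_thread *t, bool preemptible_switch)
{
	if (preemptible_switch) {
		/* Defer: finish_unlocked_switch() would enqueue it later. */
		t->state |= TOY_XNMIGRATE;
	} else {
		/* Immediate: the equivalent of xnsched_putback(). */
		t->on_runqueue = true;
	}
}

int main(void)
{
	struct toy_thread t = { 0, false };

	toy_migrate(&t, true);
	printf("in flight: %d, queued: %d\n",
	       (t.state & TOY_XNMIGRATE) != 0, t.on_runqueue);
	return 0;
}
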
