Am 03.11.2010 23:11, Jan Kiszka wrote:
> Am 03.11.2010 23:03, Jan Kiszka wrote:
>> But we do not always use atomic ops for manipulating status bits (but
>> we do in other cases where there is no need - different story). This may
>> fix the race:
> 
> Err, nonsense. As we manipulate xnsched::status also outside of nklock
> protection, we must _always_ use atomic ops.
> 
> This screams for a cleanup: local-only bits like XNHTICK or XNINIRQ
> should be pushed in a separate status word that can then be safely
> modified non-atomically.

Second try to fix and clean up the sched status bits. Anders, please
test.

Jan

diff --git a/include/nucleus/pod.h b/include/nucleus/pod.h
index 01ff0a7..5987a1f 100644
--- a/include/nucleus/pod.h
+++ b/include/nucleus/pod.h
@@ -277,12 +277,10 @@ static inline void xnpod_schedule(void)
         * context is active, or if we are caught in the middle of a
         * unlocked context switch.
         */
-#if XENO_DEBUG(NUCLEUS)
        if (testbits(sched->status, XNKCOUT|XNINIRQ|XNSWLOCK))
                return;
-#else /* !XENO_DEBUG(NUCLEUS) */
-       if (testbits(sched->status,
-                    XNKCOUT|XNINIRQ|XNSWLOCK|XNRESCHED) != XNRESCHED)
+#if !XENO_DEBUG(NUCLEUS)
+       if (!sched->resched)
                return;
 #endif /* !XENO_DEBUG(NUCLEUS) */
 
diff --git a/include/nucleus/sched.h b/include/nucleus/sched.h
index df56417..1850208 100644
--- a/include/nucleus/sched.h
+++ b/include/nucleus/sched.h
@@ -44,7 +44,6 @@
 #define XNINTCK                0x10000000      /* In master tick handler 
context */
 #define XNINIRQ                0x08000000      /* In IRQ handling context */
 #define XNSWLOCK       0x04000000      /* In context switch */
-#define XNRESCHED      0x02000000      /* Needs rescheduling */
 #define XNHDEFER       0x01000000      /* Host tick deferred */
 
 struct xnsched_rt {
@@ -63,7 +62,8 @@ typedef struct xnsched {
        xnflags_t status;               /*!< Scheduler specific status bitmask. 
*/
        int cpu;
        struct xnthread *curr;          /*!< Current thread. */
-       xnarch_cpumask_t resched;       /*!< Mask of CPUs needing rescheduling. 
*/
+       xnarch_cpumask_t remote_resched; /*!< Mask of CPUs needing 
rescheduling. */
+       int resched;                    /*!< Rescheduling needed. */
 
        struct xnsched_rt rt;           /*!< Context of built-in real-time 
class. */
 #ifdef CONFIG_XENO_OPT_SCHED_TP
@@ -164,30 +164,21 @@ struct xnsched_class {
 #define xnsched_cpu(__sched__) ({ (void)__sched__; 0; })
 #endif /* CONFIG_SMP */
 
-/* Test all resched flags from the given scheduler mask. */
-static inline int xnsched_resched_p(struct xnsched *sched)
-{
-       return testbits(sched->status, XNRESCHED);
-}
-
-static inline int xnsched_self_resched_p(struct xnsched *sched)
-{
-       return testbits(sched->status, XNRESCHED);
-}
-
 /* Set self resched flag for the given scheduler. */
 #define xnsched_set_self_resched(__sched__) do {               \
-  setbits((__sched__)->status, XNRESCHED);                     \
+       (__sched__)->resched = 1;                               \
 } while (0)
 
 /* Set specific resched flag into the local scheduler mask. */
 #define xnsched_set_resched(__sched__) do {                            \
-  xnsched_t *current_sched = xnpod_current_sched();                    \
-  setbits(current_sched->status, XNRESCHED);                           \
-  if (current_sched != (__sched__))    {                               \
-      xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched);  \
-      setbits((__sched__)->status, XNRESCHED);                         \
-  }                                                                    \
+       xnsched_t *current_sched = xnpod_current_sched();               \
+       current_sched->resched = 1;                                     \
+       if (current_sched != (__sched__)) {                             \
+               xnarch_cpu_set(xnsched_cpu(__sched__),                  \
+                              current_sched->remote_resched);          \
+               (__sched__)->resched = 1;                               \
+               xnarch_memory_barrier();                                \
+       }                                                               \
 } while (0)
 
 void xnsched_zombie_hooks(struct xnthread *thread);
@@ -209,7 +200,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct 
xnsched *sched);
 static inline
 int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
 {
-       return testbits(sched->status, XNRESCHED);
+       return sched->resched;
 }
 
 #else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
diff --git a/ksrc/nucleus/pod.c b/ksrc/nucleus/pod.c
index 9e135f3..f7f8b2c 100644
--- a/ksrc/nucleus/pod.c
+++ b/ksrc/nucleus/pod.c
@@ -284,7 +284,7 @@ void xnpod_schedule_handler(void) /* Called with hw 
interrupts off. */
        trace_xn_nucleus_sched_remote(sched);
 #if defined(CONFIG_SMP) && defined(CONFIG_XENO_OPT_PRIOCPL)
        if (testbits(sched->status, XNRPICK)) {
-               clrbits(sched->status, XNRPICK);
+               __clrbits(sched->status, XNRPICK);
                xnshadow_rpi_check();
        }
 #endif /* CONFIG_SMP && CONFIG_XENO_OPT_PRIOCPL */
@@ -2162,15 +2162,15 @@ static inline void xnpod_switch_to(xnsched_t *sched,
 
 static inline int __xnpod_test_resched(struct xnsched *sched)
 {
-       int resched = testbits(sched->status, XNRESCHED);
+       int resched = sched->resched;
 #ifdef CONFIG_SMP
        /* Send resched IPI to remote CPU(s). */
-       if (unlikely(xnsched_resched_p(sched))) {
-               xnarch_send_ipi(sched->resched);
-               xnarch_cpus_clear(sched->resched);
+       if (unlikely(resched)) {
+               xnarch_send_ipi(sched->remote_resched);
+               xnarch_cpus_clear(sched->remote_resched);
        }
 #endif
-       clrbits(sched->status, XNRESCHED);
+       sched->resched = 0;
        return resched;
 }
 
diff --git a/ksrc/nucleus/sched.c b/ksrc/nucleus/sched.c
index 04a344e..2effea8 100644
--- a/ksrc/nucleus/sched.c
+++ b/ksrc/nucleus/sched.c
@@ -152,7 +152,8 @@ void xnsched_init(struct xnsched *sched, int cpu)
        xntimer_set_name(&sched->htimer, htimer_name);
        xntimer_set_sched(&sched->htimer, sched);
        sched->zombie = NULL;
-       xnarch_cpus_clear(sched->resched);
+       xnarch_cpus_clear(sched->remote_resched);
+       sched->resched = 0;
 
        attr.flags = XNROOT | XNSTARTED | XNFPU;
        attr.name = root_name;
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index f6bea1a..a3e1372 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -2815,7 +2815,7 @@ static inline void do_setsched_event(struct task_struct 
*p, int priority)
        __xnpod_set_thread_schedparam(thread, &xnsched_class_rt, &param, 0);
        sched = xnpod_current_sched();
 
-       if (!xnsched_resched_p(sched))
+       if (!sched->resched)
                return;
 
        if (p == current &&
diff --git a/ksrc/nucleus/timer.c b/ksrc/nucleus/timer.c
index 1fe3331..1639f28 100644
--- a/ksrc/nucleus/timer.c
+++ b/ksrc/nucleus/timer.c
@@ -97,7 +97,7 @@ void xntimer_next_local_shot(xnsched_t *sched)
        __clrbits(sched->status, XNHDEFER);
        timer = aplink2timer(h);
        if (unlikely(timer == &sched->htimer)) {
-               if (xnsched_self_resched_p(sched) ||
+               if (sched->resched ||
                    !xnthread_test_state(sched->curr, XNROOT)) {
                        h = xntimerq_it_next(&sched->timerqueue, &it, h);
                        if (h) {

Attachment: signature.asc
Description: OpenPGP digital signature

_______________________________________________
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core

Reply via email to