Module: xenomai-2.5
Branch: master
Commit: 74d4cadd28981c2ac4c53529ec976872d8540108
URL:    http://git.xenomai.org/?p=xenomai-2.5.git;a=commit;h=74d4cadd28981c2ac4c53529ec976872d8540108

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Sat Nov  6 17:11:10 2010 +0100

sched: separate the scheduler status bits

The scheduler status bits are split into three groups:
- the scheduler status flags, which may be modified from a remote CPU under
  nklock, stay in sched->status;
- the scheduler local flags, which are only changed from the local CPU but
  may be modified without holding the nklock, move to sched->lflags;
- the scheduler XNRPICK bit, which may be changed from a remote CPU but is
  easier to modify under sched->rpilock than under nklock, moves to
  sched->rpistatus.

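Below is a minimal illustrative sketch of the resulting layout and of the
locking rule attached to each group. The struct and helper names are made up
for this example; only the flag values and the locks they refer to (nklock,
sched->rpilock) come from the patch itself.

/*
 * Illustrative sketch only -- not the patched Xenomai code.  The flag
 * values match the new definitions in include/nucleus/sched.h; the
 * struct and helper names are invented for this example.
 */
#define XNRESCHED  0x10000000  /* status: needs rescheduling */
#define XNINIRQ    0x00004000  /* lflags: in IRQ handling context */
#define XNRPICK    0x80000000  /* rpistatus: check RPI state */

struct sched_sketch {
        unsigned long status;     /* set/cleared from any CPU, under nklock */
        unsigned long lflags;     /* local CPU only, no nklock required */
        unsigned long rpistatus;  /* XNRPICK only, serialized by sched->rpilock */
};

/* Remote-visible bit: the caller must hold nklock. */
static inline void sketch_set_resched(struct sched_sketch *sched)
{
        sched->status |= XNRESCHED;
}

/* Local-only bit: flipped with hard interrupts off, no nklock taken. */
static inline void sketch_enter_irq(struct sched_sketch *sched)
{
        sched->lflags |= XNINIRQ;
}

/* RPI bit: cheaper to serialize with sched->rpilock than with nklock. */
static inline void sketch_mark_rpi_check(struct sched_sketch *sched)
{
        sched->rpistatus |= XNRPICK;  /* caller holds sched->rpilock */
}

/* Code testing bits from both groups now ORs the two words, e.g.: */
static inline int sketch_irq_or_resched_p(struct sched_sketch *sched)
{
        return ((sched->status | sched->lflags) & (XNRESCHED | XNINIRQ)) != 0;
}

Splitting the words this way lets the hot, local-only bits (host tick, IRQ
nesting) be flipped without taking nklock, while the remote-visible bits keep
their existing serialization.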
---

 include/nucleus/pod.h   |    4 ++--
 include/nucleus/sched.h |   29 ++++++++++++++++++-----------
 ksrc/nucleus/intr.c     |   22 +++++++++++-----------
 ksrc/nucleus/pod.c      |   14 +++++++-------
 ksrc/nucleus/sched.c    |    2 ++
 ksrc/nucleus/shadow.c   |   19 +++++++++++++------
 ksrc/nucleus/timer.c    |   10 +++++-----
 7 files changed, 58 insertions(+), 42 deletions(-)

diff --git a/include/nucleus/pod.h b/include/nucleus/pod.h
index bab9f07..8fc4019 100644
--- a/include/nucleus/pod.h
+++ b/include/nucleus/pod.h
@@ -277,10 +277,10 @@ static inline void xnpod_schedule(void)
         * unlocked context switch.
         */
 #if XENO_DEBUG(NUCLEUS)
-       if (testbits(sched->status, XNKCOUT|XNINIRQ|XNSWLOCK))
+       if (testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ|XNSWLOCK))
                return;
 #else /* !XENO_DEBUG(NUCLEUS) */
-       if (testbits(sched->status,
+       if (testbits(sched->status | sched->lflags,
                     XNKCOUT|XNINIRQ|XNSWLOCK|XNRESCHED) != XNRESCHED)
                return;
 #endif /* !XENO_DEBUG(NUCLEUS) */
diff --git a/include/nucleus/sched.h b/include/nucleus/sched.h
index 0e9c25e..49a69de 100644
--- a/include/nucleus/sched.h
+++ b/include/nucleus/sched.h
@@ -38,13 +38,17 @@
 
 /* Sched status flags */
 #define XNKCOUT                0x80000000      /* Sched callout context */
-#define XNHTICK                0x40000000      /* Host tick pending  */
-#define XNRPICK                0x20000000      /* Check RPI state */
-#define XNINTCK                0x10000000      /* In master tick handler context */
-#define XNINIRQ                0x08000000      /* In IRQ handling context */
-#define XNSWLOCK       0x04000000      /* In context switch */
-#define XNRESCHED      0x02000000      /* Needs rescheduling */
-#define XNHDEFER       0x01000000      /* Host tick deferred */
+#define XNINTCK                0x40000000      /* In master tick handler context */
+#define XNSWLOCK       0x20000000      /* In context switch */
+#define XNRESCHED      0x10000000      /* Needs rescheduling */
+
+/* Sched local flags */
+#define XNHTICK                0x00008000      /* Host tick pending  */
+#define XNINIRQ                0x00004000      /* In IRQ handling context */
+#define XNHDEFER       0x00002000      /* Host tick deferred */
+
+/* Sched RPI status flags */
+#define XNRPICK                0x80000000      /* Check RPI state */
 
 struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
@@ -60,6 +64,7 @@ struct xnsched_rt {
 typedef struct xnsched {
 
        xnflags_t status;               /*!< Scheduler specific status bitmask. */
+       xnflags_t lflags;               /*!< Scheduler specific local flags bitmask. */
        int cpu;
        struct xnthread *curr;          /*!< Current thread. */
        xnarch_cpumask_t resched;       /*!< Mask of CPUs needing rescheduling. */
@@ -98,6 +103,7 @@ typedef struct xnsched {
 
 #ifdef CONFIG_XENO_OPT_PRIOCPL
        DECLARE_XNLOCK(rpilock);        /*!< RPI lock */
+       xnflags_t rpistatus;
 #endif
 
 #ifdef CONFIG_XENO_OPT_PERVASIVE
@@ -176,16 +182,17 @@ static inline int xnsched_self_resched_p(struct xnsched *sched)
 
 /* Set self resched flag for the given scheduler. */
 #define xnsched_set_self_resched(__sched__) do {               \
-  setbits((__sched__)->status, XNRESCHED);                     \
+  __setbits((__sched__)->status, XNRESCHED);                   \
 } while (0)
 
 /* Set specific resched flag into the local scheduler mask. */
 #define xnsched_set_resched(__sched__) do {                            \
   xnsched_t *current_sched = xnpod_current_sched();                    \
-  setbits(current_sched->status, XNRESCHED);                           \
-  if (current_sched != (__sched__))    {                               \
+  __setbits(current_sched->status, XNRESCHED);                         \
+  if (current_sched != (__sched__)                                     \
+      && !testbits((__sched__)->status, XNRESCHED)) {                  \
       xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched);  \
-      setbits((__sched__)->status, XNRESCHED);                         \
+      __setbits((__sched__)->status, XNRESCHED);                       \
   }                                                                    \
 } while (0)
 
diff --git a/ksrc/nucleus/intr.c b/ksrc/nucleus/intr.c
index 0d6f64b..247905c 100644
--- a/ksrc/nucleus/intr.c
+++ b/ksrc/nucleus/intr.c
@@ -85,7 +85,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie);
 
 void xnintr_host_tick(struct xnsched *sched) /* Interrupts off. */
 {
-       __clrbits(sched->status, XNHTICK);
+       __clrbits(sched->lflags, XNHTICK);
        xnarch_relay_tick();
 }
 
@@ -106,7 +106,7 @@ void xnintr_clock_handler(void)
        trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);
 
        ++sched->inesting;
-       __setbits(sched->status, XNINIRQ);
+       __setbits(sched->lflags, XNINIRQ);
 
        xnlock_get(&nklock);
        xntimer_tick_aperiodic();
@@ -117,7 +117,7 @@ void xnintr_clock_handler(void)
                &nkclock.stat[xnsched_cpu(sched)].account, start);
 
        if (--sched->inesting == 0) {
-               __clrbits(sched->status, XNINIRQ);
+               __clrbits(sched->lflags, XNINIRQ);
                xnpod_schedule();
        }
        /*
@@ -127,7 +127,7 @@ void xnintr_clock_handler(void)
         * we only need to propagate the host tick in case the
         * interrupt preempted the root thread.
         */
-       if (testbits(sched->status, XNHTICK) &&
+       if (testbits(sched->lflags, XNHTICK) &&
            xnthread_test_state(sched->curr, XNROOT))
                xnintr_host_tick(sched);
 
@@ -178,7 +178,7 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
        trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 
        ++sched->inesting;
-       __setbits(sched->status, XNINIRQ);
+       __setbits(sched->lflags, XNINIRQ);
 
        xnlock_get(&shirq->lock);
        intr = shirq->handlers;
@@ -220,7 +220,7 @@ static void xnintr_shirq_handler(unsigned irq, void *cookie)
                xnarch_end_irq(irq);
 
        if (--sched->inesting == 0) {
-               __clrbits(sched->status, XNINIRQ);
+               __clrbits(sched->lflags, XNINIRQ);
                xnpod_schedule();
        }
 
@@ -247,7 +247,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
        trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 
        ++sched->inesting;
-       __setbits(sched->status, XNINIRQ);
+       __setbits(sched->lflags, XNINIRQ);
 
        xnlock_get(&shirq->lock);
        intr = shirq->handlers;
@@ -291,7 +291,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
        if (unlikely(s == XN_ISR_NONE)) {
                if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
                        xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
-                                "line.\n", __FUNCTION__, irq);
+                                "line.\n", __FUNCTION__, irq);
                        s |= XN_ISR_NOENABLE;
                }
        } else
@@ -303,7 +303,7 @@ static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
                xnarch_end_irq(irq);
 
        if (--sched->inesting == 0) {
-               __clrbits(sched->status, XNINIRQ);
+               __clrbits(sched->lflags, XNINIRQ);
                xnpod_schedule();
        }
        trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
@@ -446,7 +446,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
        trace_mark(xn_nucleus, irq_enter, "irq %u", irq);
 
        ++sched->inesting;
-       __setbits(sched->status, XNINIRQ);
+       __setbits(sched->lflags, XNINIRQ);
 
        xnlock_get(&xnirqs[irq].lock);
 
@@ -493,7 +493,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
                xnarch_end_irq(irq);
 
        if (--sched->inesting == 0) {
-               __clrbits(sched->status, XNINIRQ);
+               __clrbits(sched->lflags, XNINIRQ);
                xnpod_schedule();
        }
 
diff --git a/ksrc/nucleus/pod.c b/ksrc/nucleus/pod.c
index f5326bd..a5993fa 100644
--- a/ksrc/nucleus/pod.c
+++ b/ksrc/nucleus/pod.c
@@ -279,12 +279,11 @@ void xnpod_schedule_handler(void) /* Called with hw interrupts off. */
        xnsched_t *sched;
 
        trace_mark(xn_nucleus, sched_remote, MARK_NOARGS);
+       xnarch_memory_barrier();
 #if defined(CONFIG_SMP) && defined(CONFIG_XENO_OPT_PRIOCPL)
        sched = xnpod_current_sched();
-       if (testbits(sched->status, XNRPICK)) {
-               clrbits(sched->status, XNRPICK);
+       if (testbits(sched->rpistatus, XNRPICK))
                xnshadow_rpi_check();
-       }
 #else
        (void)sched;
 #endif /* CONFIG_SMP && CONFIG_XENO_OPT_PRIOCPL */
@@ -2165,6 +2164,7 @@ static inline int __xnpod_test_resched(struct xnsched *sched)
 #ifdef CONFIG_SMP
        /* Send resched IPI to remote CPU(s). */
        if (unlikely(xnsched_resched_p(sched))) {
+               xnarch_memory_barrier();
                xnarch_send_ipi(sched->resched);
                xnarch_cpus_clear(sched->resched);
        }
@@ -2204,9 +2204,9 @@ reschedule:
        if (next == curr && !xnthread_test_state(curr, XNRESTART)) {
                /* Note: the root thread never restarts. */
                if (unlikely(xnthread_test_state(next, XNROOT))) {
-                       if (testbits(sched->status, XNHTICK))
+                       if (testbits(sched->lflags, XNHTICK))
                                xnintr_host_tick(sched);
-                       if (testbits(sched->status, XNHDEFER))
+                       if (testbits(sched->lflags, XNHDEFER))
                                xntimer_next_local_shot(sched);
                }
                goto signal_unlock_and_exit;
@@ -2241,9 +2241,9 @@ reschedule:
        if (xnthread_test_state(prev, XNROOT))
                xnarch_leave_root(xnthread_archtcb(prev));
        else if (xnthread_test_state(next, XNROOT)) {
-               if (testbits(sched->status, XNHTICK))
+               if (testbits(sched->lflags, XNHTICK))
                        xnintr_host_tick(sched);
-               if (testbits(sched->status, XNHDEFER))
+               if (testbits(sched->lflags, XNHDEFER))
                        xntimer_next_local_shot(sched);
                xnarch_enter_root(xnthread_archtcb(next));
        }
diff --git a/ksrc/nucleus/sched.c b/ksrc/nucleus/sched.c
index d3e0788..25ff553 100644
--- a/ksrc/nucleus/sched.c
+++ b/ksrc/nucleus/sched.c
@@ -137,10 +137,12 @@ void xnsched_init(struct xnsched *sched, int cpu)
        strcpy(root_name, "ROOT");
 #endif
        sched->status = 0;
+       sched->lflags = 0;
        sched->inesting = 0;
        sched->curr = &sched->rootcb;
 #ifdef CONFIG_XENO_OPT_PRIOCPL
        xnlock_init(&sched->rpilock);
+       sched->rpistatus = 0;
 #endif
        /*
         * No direct handler here since the host timer processing is
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 449a9a4..6298850 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -311,8 +311,6 @@ static void rpi_clear_remote(struct xnthread *thread)
        if (xnsched_peek_rpi(rpi) == NULL)
                rcpu = xnsched_cpu(rpi);
 
-       xnlock_put_irqrestore(&rpi->rpilock, s);
-
        /*
         * Ok, this one is not trivial. Unless a relaxed shadow has
         * forced its CPU affinity, it may migrate to another CPU as a
@@ -329,12 +327,20 @@ static void rpi_clear_remote(struct xnthread *thread)
         */
        if (rcpu != -1 && rcpu != rthal_processor_id()) {
                if (!testbits(rpi->status, XNRPICK)) {
-                       setbits(rpi->status, XNRPICK);
-                       xnarch_cpus_clear(cpumask);
-                       xnarch_cpu_set(rcpu, cpumask);
-                       xnarch_send_ipi(cpumask);
+                       __setbits(rpi->rpistatus, XNRPICK);
+                       xnlock_put_irqrestore(&rpi->rpilock, s);
+                       goto exit_send_ipi;
                }
        }
+
+       xnlock_put_irqrestore(&rpi->rpilock, s);
+
+       return;
+
+  exit_send_ipi:
+       xnarch_cpus_clear(cpumask);
+       xnarch_cpu_set(rcpu, cpumask);
+       xnarch_send_ipi(cpumask);
 }
 
 static void rpi_migrate(struct xnsched *sched, struct xnthread *thread)
@@ -493,6 +499,7 @@ void xnshadow_rpi_check(void)
        spl_t s;
 
        xnlock_get_irqsave(&sched->rpilock, s);
+       __clrbits(sched->rpistatus, XNRPICK);
        top = xnsched_peek_rpi(sched);
        xnlock_put_irqrestore(&sched->rpilock, s);
 
diff --git a/ksrc/nucleus/timer.c b/ksrc/nucleus/timer.c
index fc8b98e..1a97295 100644
--- a/ksrc/nucleus/timer.c
+++ b/ksrc/nucleus/timer.c
@@ -94,14 +94,14 @@ void xntimer_next_local_shot(xnsched_t *sched)
         * __xnpod_schedule()), or a timer with an earlier timeout
         * date is scheduled, whichever comes first.
         */
-       __clrbits(sched->status, XNHDEFER);
+       __clrbits(sched->lflags, XNHDEFER);
        timer = aplink2timer(h);
        if (unlikely(timer == &sched->htimer)) {
                if (xnsched_self_resched_p(sched) ||
                    !xnthread_test_state(sched->curr, XNROOT)) {
                        h = xntimerq_it_next(&sched->timerqueue, &it, h);
                        if (h) {
-                               __setbits(sched->status, XNHDEFER);
+                               __setbits(sched->lflags, XNHDEFER);
                                timer = aplink2timer(h);
                        }
                }
@@ -130,7 +130,7 @@ static inline int xntimer_heading_p(struct xntimer *timer)
        if (h == &timer->aplink)
                return 1;
 
-       if (testbits(sched->status, XNHDEFER)) {
+       if (testbits(sched->lflags, XNHDEFER)) {
                h = xntimerq_it_next(&sched->timerqueue, &it, h);
                if (h == &timer->aplink)
                        return 1;
@@ -410,8 +410,8 @@ void xntimer_tick_aperiodic(void)
                         * save some I-cache, which translates into
                         * precious microsecs on low-end hw.
                         */
-                       __setbits(sched->status, XNHTICK);
-                       __clrbits(sched->status, XNHDEFER);
+                       __setbits(sched->lflags, XNHTICK);
+                       __clrbits(sched->lflags, XNHDEFER);
                        if (!testbits(timer->status, XNTIMER_PERIODIC))
                                continue;
                }

