Module: xenomai-rpm
Branch: for-upstream
Commit: c46e5e64488d73f86611c14fe5b023f3d30659a0
URL:    
http://git.xenomai.org/?p=xenomai-rpm.git;a=commit;h=c46e5e64488d73f86611c14fe5b023f3d30659a0

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Nov  7 16:08:55 2010 +0100

hal/generic, nucleus/intr: synchronize before releasing interrupt

At this chance, the patch also fixes the following issues:

- do not alter the irq descriptor (e.g. cookie and stats) if the
attachment fails early.

- do not set irq affinity before the validity checks, and set it only
for the first handler introduced in the shared list.

---

 ksrc/arch/generic/hal.c |   10 ++-
 ksrc/nucleus/intr.c     |  144 ++++++++++++++++++++++++++++++++++-------------
 2 files changed, 111 insertions(+), 43 deletions(-)

diff --git a/ksrc/arch/generic/hal.c b/ksrc/arch/generic/hal.c
index 6583975..061336d 100644
--- a/ksrc/arch/generic/hal.c
+++ b/ksrc/arch/generic/hal.c
@@ -209,11 +209,13 @@ int rthal_irq_request(unsigned irq,
 
 int rthal_irq_release(unsigned irq)
 {
-    if (irq >= IPIPE_NR_IRQS)
-       return -EINVAL;
+       if (irq >= IPIPE_NR_IRQS)
+               return -EINVAL;
 
-    return rthal_virtualize_irq(&rthal_domain,
-                               irq, NULL, NULL, NULL, IPIPE_PASS_MASK);
+       rthal_synchronize_irq(irq);
+
+       return rthal_virtualize_irq(&rthal_domain,
+                                   irq, NULL, NULL, NULL, IPIPE_PASS_MASK);
 }
 
 /**
diff --git a/ksrc/nucleus/intr.c b/ksrc/nucleus/intr.c
index 0d6f64b..55ade2d 100644
--- a/ksrc/nucleus/intr.c
+++ b/ksrc/nucleus/intr.c
@@ -41,6 +41,8 @@
 
 DEFINE_PRIVATE_XNLOCK(intrlock);
 
+static unsigned long teardown[BITS_TO_LONGS(XNARCH_NR_IRQS)];
+
 #ifdef CONFIG_XENO_OPT_STATS
 xnintr_t nkclock;           /* Only for statistics */
 static int xnintr_count = 1; /* Number of attached xnintr objects + nkclock */
@@ -314,7 +316,7 @@ static inline int xnintr_irq_attach(xnintr_t *intr)
 {
        xnintr_irq_t *shirq = &xnirqs[intr->irq];
        xnintr_t *prev, **p = &shirq->handlers;
-       int err;
+       int ret;
 
        if ((prev = *p) != NULL) {
                /* Check on whether the shared mode is allowed. */
@@ -324,11 +326,15 @@ static inline int xnintr_irq_attach(xnintr_t *intr)
                        (intr->flags & XN_ISR_EDGE)))
                        return -EBUSY;
 
-               /* Get a position at the end of the list to insert the new element. */
-               while (prev) {
+               do {
+                       /*
+                        * Get a position at the end of the list to
+                        * insert the new element.
+                        */
                        p = &prev->next;
                        prev = *p;
-               }
+               } while (prev);
+               ret = 1;        /* More than one handler will be present. */
        } else {
                /* Initialize the corresponding interrupt channel */
                void (*handler) (unsigned, void *) = &xnintr_irq_handler;
@@ -342,26 +348,27 @@ static inline int xnintr_irq_attach(xnintr_t *intr)
                }
                shirq->unhandled = 0;
 
-               err = xnarch_hook_irq(intr->irq, handler,
+               ret = xnarch_hook_irq(intr->irq, handler,
                                      (rthal_irq_ackfn_t)intr->iack, intr);
-               if (err)
-                       return err;
+               if (ret)
+                       return ret;
        }
 
        intr->next = NULL;
 
-       /* Add the given interrupt object. No need to synchronise with the IRQ
-          handler, we are only extending the chain. */
+       /*
+        * Add the given interrupt object. No need to synchronise with
+        * the IRQ handler, we are only extending the chain.
+        */
        *p = intr;
 
-       return 0;
+       return ret;
 }
 
 static inline int xnintr_irq_detach(xnintr_t *intr)
 {
        xnintr_irq_t *shirq = &xnirqs[intr->irq];
        xnintr_t *e, **p = &shirq->handlers;
-       int err = 0;
 
        while ((e = *p) != NULL) {
                if (e == intr) {
@@ -369,21 +376,20 @@ static inline int xnintr_irq_detach(xnintr_t *intr)
                        xnlock_get(&shirq->lock);
                        *p = e->next;
                        xnlock_put(&shirq->lock);
-
                        xnintr_sync_stat_references(intr);
-
-                       /* Release the IRQ line if this was the last user */
-                       if (shirq->handlers == NULL)
-                               err = xnarch_release_irq(intr->irq);
-
-                       return err;
+                       return shirq->handlers == NULL;
                }
                p = &e->next;
        }
 
        xnlogerr("attempted to detach a non previously attached interrupt "
                 "object.\n");
-       return err;
+       return 0;
+}
+
+static inline int xnintr_irq_release(xnintr_t *intr)
+{
+       return xnarch_release_irq(intr->irq);
 }
 
 #else /* !CONFIG_XENO_OPT_SHIRQ */
@@ -416,14 +422,19 @@ static inline int xnintr_irq_attach(xnintr_t *intr)
 
 static inline int xnintr_irq_detach(xnintr_t *intr)
 {
+       xnintr_sync_stat_references(intr);
+
+       return 1;
+}
+
+static inline int xnintr_irq_release(xnintr_t *intr)
+{
        int irq = intr->irq, ret;
 
        xnlock_get(&xnirqs[irq].lock);
        ret = xnarch_release_irq(irq);
        xnlock_put(&xnirqs[irq].lock);
 
-       xnintr_sync_stat_references(intr);
-
        return ret;
 }
 
@@ -453,7 +464,7 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
 #ifdef CONFIG_SMP
        /*
         * In SMP case, we have to reload the cookie under the per-IRQ
-        * lock to avoid racing with xnintr_detach.  However, we
+        * lock to avoid racing with xnintr_irq_release.  However, we
         * assume that no CPU migration will occur while running the
         * interrupt service routine, so the scheduler pointer will
         * remain valid throughout this function.
@@ -464,7 +475,10 @@ static void xnintr_irq_handler(unsigned irq, void *cookie)
                goto unlock_and_exit;
        }
 #else
-       /* cookie always valid, attach/detach happens with IRQs disabled */
+       /*
+        * cookie always valid, xnintr_detach disables the line before
+        * releasing the interrupt.
+        */
        intr = cookie;
 #endif
        s = intr->isr(intr);
@@ -600,7 +614,8 @@ int __init xnintr_mount(void)
  * to enable IRQ-sharing of edge-triggered interrupts.
  *
  * @return 0 is returned on success. Otherwise, -EINVAL is returned if
- * @a irq is not a valid interrupt number.
+ * @a irq is not a valid interrupt number, or @a flags contains
+ * invalid bits.
  *
  * Environments:
  *
@@ -619,6 +634,9 @@ int xnintr_init(xnintr_t *intr,
        if (irq >= XNARCH_NR_IRQS)
                return -EINVAL;
 
+       if (flags & ~(XN_ISR_SHARED|XN_ISR_EDGE))
+               return -EINVAL;
+               
        intr->irq = irq;
        intr->isr = isr;
        intr->iack = iack;
@@ -714,27 +732,51 @@ int xnintr_attach(xnintr_t *intr, void *cookie)
        trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
                   intr->irq, intr->name);
 
-       intr->cookie = cookie;
-       memset(&intr->stat, 0, sizeof(intr->stat));
-
-#ifdef CONFIG_SMP
-       xnarch_set_irq_affinity(intr->irq, nkaffinity);
-#endif /* CONFIG_SMP */
-
+retry:
        xnlock_get_irqsave(&intrlock, s);
 
        if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
                ret = -EBUSY;
-               goto out;
+               goto fail;
+       }
+
+       /*
+        * If a teardown is in progress for this interrupt on a remote
+        * CPU running xnintr_detach, relax a bit then retry. Note
+        * that we can't have preempted any ongoing xnintr_detach on
+        * the local CPU, since teardown is done with interrupts off.
+        *
+        * CAUTION: when a teardown is detected, we must re-enable
+        * interrupts shortly to accept any critical IPI from the
+        * remote CPU in xnintr_detach.
+        */
+       if (test_bit(intr->irq, teardown)) {
+               xnlock_put_irqrestore(&intrlock, s);
+               goto retry;
        }
 
        ret = xnintr_irq_attach(intr);
-       if (ret)
-               goto out;
+       if (ret < 0)
+               goto fail;
+
+       intr->cookie = cookie;
+       memset(&intr->stat, 0, sizeof(intr->stat));
 
        __setbits(intr->flags, XN_ISR_ATTACHED);
        xnintr_stat_counter_inc();
-out:
+
+       xnlock_put_irqrestore(&intrlock, s);
+#ifdef CONFIG_SMP
+       /*
+        * If this is the first handler we installed for this IRQ, the
+        * line must still be disabled and we won't override any prior
+        * setting: we may set the affinity.
+        */
+       if (ret == 0)
+               xnarch_set_irq_affinity(intr->irq, nkaffinity);
+#endif /* CONFIG_SMP */
+       return 0;
+fail:
        xnlock_put_irqrestore(&intrlock, s);
 
        return ret;
@@ -760,8 +802,9 @@ EXPORT_SYMBOL_GPL(xnintr_attach);
  * the interrupt, or if the interrupt object was not attached. In both
  * cases, no action is performed.
  *
- * @note The caller <b>must not</b> hold nklock when invoking this service,
- * this would cause deadlocks.
+ * @note The caller <b>must not</b> hold nklock when invoking this
+ * service, this would cause deadlocks. Additionally, interrupts must
+ * be enabled on entry to this service.
  *
  * Environments:
  *
@@ -789,10 +832,33 @@ int xnintr_detach(xnintr_t *intr)
        __clrbits(intr->flags, XN_ISR_ATTACHED);
 
        ret = xnintr_irq_detach(intr);
-       if (ret)
+       xnintr_stat_counter_dec();
+       if (ret == 0)   /* still busy? */
                goto out;
 
-       xnintr_stat_counter_dec();
+       /*
+        * We have no more handlers on this interrupt line, so we may
+        * disable it. When paired with interrupt synchronization in
+        * xnarch_release_irq, this guarantees that no handler could
+        * be running concurrently on any remote CPU before we tell
+        * the pipeline to release the IRQ.
+        */
+       xnarch_disable_irq(intr->irq);
+       /*
+        * We use the teardown flag to prevent further attachment on
+        * the same IRQ line, after we dropped the lock to release the
+        * interrupt. We must drop the intrlock to prevent other CPUs
+        * from answering the critical IPI request sent when releasing
+        * the interrupt (we don't want any of them to spin on this
+        * lock with interrupts off waiting for us, while we wait for
+        * them to ack the IPI). However, we keep the interrupts off
+        * to prevent deadlocking with xnintr_attach on the local CPU.
+        */
+       __set_bit(intr->irq, teardown);
+       xnlock_put(&intrlock);
+       ret = xnintr_irq_release(intr);
+       xnlock_get(&intrlock);
+       __clear_bit(intr->irq, teardown);
  out:
        xnlock_put_irqrestore(&intrlock, s);
 


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to