Hi there,

the following patches illustrate _experimental_ implementation of nested irq disable calls.
This new feature would allow us to have scalar return values of ISR and avoid the need
for NOENABLE bit.

[ Ok, repeating myself one more time... we would have NONE, HANDLED, PROPAGATE and it would be possible
to call xnintr_disable_nosync()/even xnintr_disable() from the ISR to defer the IRQ line
enabling. ]

The pre-requirement : implement as much code as possible on the Xeno layer with zero or
minimal changes on the ipipe layer (at least, they should not be intrusive and difficult to maintain for
different archs).


2 main issues which are quite arguable when it comes to possible implementations :

1) we need to store the "depth" counter and IRQ_DISABLED bit somewhere (actually, this bit is not that necessary
as the non-zero "depth" counter indicates the same).

So where? There is a sole per-IRQ table that's available for all possible configs - ipipe_domain::irqs[NR_IRQS].

So the minor changes below don't make any structure bigger. Read the comment about the 3rd byte.


----------------------------- IPIPE changes -------------------------------------------

--- ipipe.h    2006-02-27 15:10:41.000000000 +0100
+++ ipipe-exp.h    2006-03-02 12:08:27.000000000 +0100
@@ -62,6 +62,15 @@
 #define IPIPE_SHARED_FLAG    6
 #define IPIPE_EXCLUSIVE_FLAG    31    /* ipipe_catch_event() is the reason why. */
 
+/*
+ * The 3rd byte of ipipe_domain::irqs[IRQ]::control is reserved for use
+ * by upper layers and must not be changed on the ipipe layer.
+ * 
+ * e.g. experimental support for nested irq disable calls in Xeno is based on it.
+ */
+#define IPIPE_RESERVED_AREA_SHIFT    16
+#define IPIPE_RESERVED_AREA_MASK    (0xff << IPIPE_RESERVED_AREA_SHIFT)
+
 #define IPIPE_HANDLE_MASK    (1 << IPIPE_HANDLE_FLAG)
 #define IPIPE_PASS_MASK        (1 << IPIPE_PASS_FLAG)
 #define IPIPE_ENABLE_MASK    (1 << IPIPE_ENABLE_FLAG)
@@ -190,6 +199,12 @@ struct ipipe_domain_attr {
 
 #define __ipipe_cpudata_irq_hits(ipd,cpuid,irq)    ((ipd)->cpudata[cpuid].irq_counters[irq].total_hits)
 
+#define __ipipe_get_reserved_area(ipd,irq) \
+    (((ipd)->irqs[irq].control & IPIPE_RESERVED_AREA_MASK) >> IPIPE_RESERVED_AREA_SHIFT)
+
+#define __ipipe_set_reserved_area(ipd,irq,value) \
+    ((ipd)->irqs[irq].control = ((ipd)->irqs[irq].control & ~IPIPE_RESERVED_AREA_MASK) | (((value) << IPIPE_RESERVED_AREA_SHIFT) & IPIPE_RESERVED_AREA_MASK))
+
 #define __ipipe_set_irq_bit(ipd,cpuid,irq) \
 do { \
     if (!test_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) { \


---------------------------------------------------------------------------------------

2) We need somehow to force the .end handler of the PIC (only the PPC arch makes use of it at the moment;
other archs use .enable instead, as .end for those is just .enable) to _not_ re-enable the IRQ line
when the ISR has disabled the IRQ explicitly.

So there are 2 possible ways (at least, I can see only 2 now) :

    2.1) make changes for all PIC handlers supported for a given arch if their .end handler is about re-enabling the IRQ line :
   
    BEFORE
   
    static void openpic_end_irq(unsigned int irq_nr)
    {
    if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
        && irq_desc[irq_nr].action)
        openpic_enable_irq(irq_nr);
    }

   
    AFTER
   
    static void openpic_end_irq(unsigned int irq_nr)
    {
    if (!ipipe_root_domain_p()
        && test_bit(IPIPE_DISABLED_FLAG,&ipipe_current_domain->irqs[irq_nr].control))
        return;
       
   
    if (!ipipe_root_domain_p() || !(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
        && irq_desc[irq_nr].action)
        openpic_enable_irq(irq_nr);
    }
   

    2.2) do some trick.
   
    That's how this patch does. But, well, I do _not_ like this way.
   
    Look at the xnarch_end_irq()'s implementation below.
   
---
p.s.

So I tend to think that 2.2 is, at the very least, ugly even if it seems to be safe at first glance.

2.1 would be better probably. But then the ipipe layer must know at least the DISABLED bit. What concerns me is that the logic is implemented mostly in Xeno but the bits of this interface (check for DISABLED bit in the .end handlers) is visible for ipipe. Bad localization, I mean.
Maybe something different from ipipe_set/get_reserved_area() should be introduced to make DISABLED bit and "depth" counter invisible for the Xeno. Smth like ipipe_irq_depth_inc/dec() and
ipipe_irq_set/clear_disabled() .. I'm not sure yet.

From another point of view, this new feature seems not to be too intrusive and not something really affecting the "fast path", so it could be used by default, I guess. Or maybe we don't need it at all?

Any comments please?

---
   
   
--------------------------------- XENO changes ------------------------------------------------

diff -urp xenomai/include/asm-generic/hal.h xenomai-nirq-2/include/asm-generic/hal.h
--- xenomai/include/asm-generic/hal.h    2006-02-27 14:42:59.000000000 +0100
+++ xenomai-nirq-2/include/asm-generic/hal.h    2006-03-02 12:12:55.000000000 +0100
@@ -106,6 +106,9 @@ typedef rwlock_t rthal_rwlock_t;
 
 #define rthal_cpudata_irq_hits(ipd,cpu,irq)    __ipipe_cpudata_irq_hits(ipd,cpu,irq)
 
+#define rthal_get_reserved_area(ipd,irq)    __ipipe_get_reserved_area(ipd,irq)
+#define rthal_set_reserved_area(ipd,irq,value)    __ipipe_set_reserved_area(ipd,irq,value)
+
 #define rthal_local_irq_disable()    ipipe_stall_pipeline_from(&rthal_domain)
 #define rthal_local_irq_enable()    ipipe_unstall_pipeline_from(&rthal_domain)
 #define rthal_local_irq_save(x)        ((x) = !!ipipe_test_and_stall_pipeline_from(&rthal_domain))
diff -urp xenomai/include/asm-generic/system.h xenomai-nirq-2/include/asm-generic/system.h
--- xenomai/include/asm-generic/system.h    2006-02-27 14:42:59.000000000 +0100
+++ xenomai-nirq-2/include/asm-generic/system.h    2006-03-03 11:28:09.000000000 +0100
@@ -490,6 +490,14 @@ static inline unsigned long long xnarch_
 
 #ifdef XENO_INTR_MODULE
 
+#ifdef CONFIG_SMP
+static xnlock_t xnarch_intr_lock = XNARCH_LOCK_UNLOCKED;
+#endif
+
+/* Operating the per-IRQ reserved area provided by the ipipe layer. */
+#define XNARCH_IRQ_DISABLED    0x80
+#define XNARCH_IRQ_DEPTH_MASK    0x7f
+
 static inline int xnarch_hook_irq (unsigned irq,
                    rthal_irq_handler_t handler,
                    rthal_irq_ackfn_t ackfn,
@@ -507,19 +515,81 @@ static inline int xnarch_release_irq (un
 static inline int xnarch_enable_irq (unsigned irq)
 
 {
-    return rthal_irq_enable(irq);
+    int ret = 0;
+    char area;
+    spl_t s;
+
+    xnlock_get_irqsave(&xnarch_intr_lock,s);
+
+    area = rthal_get_reserved_area(&rthal_domain,irq);
+    switch (area & XNARCH_IRQ_DEPTH_MASK)
+    {
+    case 0:
+        xnlogerr("xnintr_enable() : depth == 0. "
+             "Unbalanced enable/disable calls for IRQ%d!\n",irq);
+        break;
+    case 1:
+        area &= ~XNARCH_IRQ_DISABLED;
+        ret = rthal_irq_enable(irq);
+    default:
+        rthal_set_reserved_area(&rthal_domain,irq,--area);
+    }
+
+    xnlock_put_irqrestore(&xnarch_intr_lock,s);
+    return ret;
 }
 
 static inline int xnarch_disable_irq (unsigned irq)
 
 {
-    return rthal_irq_disable(irq);
+    char area, depth;
+    int ret = 0;
+    spl_t s;
+   
+    xnlock_get_irqsave(&xnarch_intr_lock,s);
+
+    area = rthal_get_reserved_area(&rthal_domain,irq);
+    depth = area & XNARCH_IRQ_DEPTH_MASK;
+
+    if (depth == XNARCH_IRQ_DEPTH_MASK)
+    {
+    xnlogerr("xnintr_disable_nosync() : depth == %d (MAX_ALLOWED). "
+         "Too many nested disable calls for IRQ%d.\n",depth,irq);
+    goto unlock_and_exit;
+    }
+
+    if (!depth)
+    {
+    area |= XNARCH_IRQ_DISABLED;
+    ret = rthal_irq_disable(irq);
+    }
+
+    rthal_set_reserved_area(&rthal_domain,irq,++area);
+
+unlock_and_exit:
+    xnlock_put_irqrestore(&xnarch_intr_lock,s);
+    return ret;
 }
 
 static inline int xnarch_end_irq (unsigned irq)
 
 {
-     return rthal_irq_end(irq);
+    int rt_off = rthal_get_reserved_area(&rthal_domain,irq) & XNARCH_IRQ_DISABLED,
+    linux_off = rthal_irq_descp(irq)->status & IRQ_DISABLED,
+    ret;
+   
+    if (rt_off)
+    rthal_irq_descp(irq)->status |= IRQ_DISABLED;
+
+    /* We must not re-enable the IRQ line here in case it has been explicitly
+       disabled by the ISR. To this end, we _temporarily_ set the IRQ_DISABLED bit
+       so that the .end handler of the PIC will not re-enable the IRQ line. */
+    ret = rthal_irq_end(irq);
+
+    if (!linux_off)
+    rthal_irq_descp(irq)->status &= ~IRQ_DISABLED;
+
+    return ret;
 }
                                                                                
 static inline void xnarch_chain_irq (unsigned irq)
diff -urp xenomai/include/nucleus/intr.h xenomai-nirq-2/include/nucleus/intr.h
--- xenomai/include/nucleus/intr.h    2006-02-27 14:42:59.000000000 +0100
+++ xenomai-nirq-2/include/nucleus/intr.h    2006-03-03 11:26:14.000000000 +0100
@@ -25,10 +25,7 @@
 /* Possible return values of ISR. */
 #define XN_ISR_NONE        0x1
 #define XN_ISR_HANDLED     0x2
-/* Additional bits. */
 #define XN_ISR_PROPAGATE 0x100
-#define XN_ISR_NOENABLE  0x200
-#define XN_ISR_BITMASK     ~0xff
 
 /* Creation flags. */
 #define XN_ISR_SHARED     0x1
diff -urp xenomai/ksrc/nucleus/intr.c xenomai-nirq-2/ksrc/nucleus/intr.c
--- xenomai/ksrc/nucleus/intr.c    2006-02-27 14:42:59.000000000 +0100
+++ xenomai-nirq-2/ksrc/nucleus/intr.c    2006-03-03 10:44:17.000000000 +0100
@@ -46,6 +46,11 @@ static void xnintr_irq_handler(unsigned
 /* Helper functions. */
 static int xnintr_shirq_attach(xnintr_t *intr, void *cookie);
 static int xnintr_shirq_detach(xnintr_t *intr);
+static void xnintr_synchronize(xnintr_t *intr);
+
+#else /* !CONFIG_XENO_OPT_SHIRQ_LEVEL && !CONFIG_XENO_OPT_SHIRQ_EDGE */
+
+#define xnintr_synchronize(irq)    xnarch_memory_barrier()
 
 #endif /* CONFIG_XENO_OPT_SHIRQ_LEVEL || CONFIG_XENO_OPT_SHIRQ_EDGE */
 
@@ -72,27 +77,18 @@ static int xnintr_shirq_detach(xnintr_t
  * return this value when it determines that the interrupt request has not been
  * issued by the dedicated hardware device.
  *
- * In addition, one of the following bits may be set by the ISR :
- *
- * NOTE: use these bits with care and only when you do understand their effect
- * on the system.
- * The ISR is not encouraged to use these bits in case it shares the IRQ line
- * with other ISRs in the real-time domain.
- *
  * - XN_ISR_PROPAGATE tells the nucleus to require the real-time control
  * layer to forward the IRQ. For instance, this would cause the Adeos
  * control layer to propagate the interrupt down the interrupt
  * pipeline to other Adeos domains, such as Linux. This is the regular
  * way to share interrupts between the nucleus and the host system.
  *
- * - XN_ISR_NOENABLE causes the nucleus to ask the real-time control
- * layer _not_ to re-enable the IRQ line (read the following section).
- * xnarch_end_irq() must be called to re-enable the IRQ line later.
- *
  * The nucleus re-enables the IRQ line by default. Over some real-time
  * control layers which mask and acknowledge IRQs, this operation is
  * necessary to revalidate the interrupt channel so that more interrupts
  * can be notified.
+ * The ISR may issue xnintr_disable_nosync() call in order to defer
+ * the IRQ line enabling until later.
  *
  * A count of interrupt receipts is tracked into the interrupt
  * descriptor, and reset to zero each time the interrupt object is
@@ -345,12 +341,22 @@ int xnintr_enable (xnintr_t *intr)
  * Rescheduling: never.
  */
 
-int xnintr_disable (xnintr_t *intr)
+int xnintr_disable_nosync (xnintr_t *intr)
 
 {
     return xnarch_disable_irq(intr->irq);
 }
 
+
+int xnintr_disable (xnintr_t *intr)
+
+{
+    int ret = xnintr_disable_nosync(intr);
+    xnintr_synchronize(intr);
+
+    return ret;
+}
+
 /*!
  * \fn xnarch_cpumask_t xnintr_affinity (xnintr_t *intr, xnarch_cpumask_t cpumask)
  * \brief Set interrupt's processor affinity.
@@ -405,9 +411,9 @@ static void xnintr_irq_handler (unsigned
     s = intr->isr(intr);
     ++intr->hits;
 
-    if (s & XN_ISR_PROPAGATE)
+    if (s == XN_ISR_PROPAGATE)
     xnarch_chain_irq(irq);
-    else if (!(s & XN_ISR_NOENABLE))
+    else
     xnarch_end_irq(irq);
 
     if (--sched->inesting == 0 && xnsched_resched_p())
@@ -487,7 +493,9 @@ static void xnintr_shirq_handler (unsign
 
     while (intr)
         {
-    s |= intr->isr(intr) & XN_ISR_BITMASK;
+    int temp = intr->isr(intr);
+    if (temp > s)
+        s = temp;
         ++intr->hits;
         intr = intr->next;
         }
@@ -495,9 +503,9 @@ static void xnintr_shirq_handler (unsign
 
     --sched->inesting;
 
-    if (s & XN_ISR_PROPAGATE)
+    if (s == XN_ISR_PROPAGATE)
     xnarch_chain_irq(irq);
-    else if (!(s & XN_ISR_NOENABLE))
+    else
     xnarch_end_irq(irq);
 
     if (sched->inesting == 0 && xnsched_resched_p())
@@ -535,19 +543,19 @@ static void xnintr_edge_shirq_handler (u
 
     while (intr != end)
     {
-    int ret = intr->isr(intr),
-        code = ret & ~XN_ISR_BITMASK,
-        bits = ret & XN_ISR_BITMASK;
+    int ret = intr->isr(intr);
 
-    if (code == XN_ISR_HANDLED)
+    if (ret == XN_ISR_HANDLED)
         {
         ++intr->hits;
         end = NULL;
-        s |= bits;       
         }
-    else if (code == XN_ISR_NONE && end == NULL)
+    else if (ret == XN_ISR_NONE && end == NULL)
         end = intr;
 
+    if (ret > s)
+        s = ret;
+
     if (counter++ > MAX_EDGEIRQ_COUNTER)
         break;
 
@@ -562,9 +570,9 @@ static void xnintr_edge_shirq_handler (u
     if (counter > MAX_EDGEIRQ_COUNTER)
     xnlogerr("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n", irq);
 
-    if (s & XN_ISR_PROPAGATE)
+    if (s == XN_ISR_PROPAGATE)
     xnarch_chain_irq(irq);
-    else if (!(s & XN_ISR_NOENABLE))
+    else
     xnarch_end_irq(irq);
 
     if (sched->inesting == 0 && xnsched_resched_p())
@@ -645,7 +653,7 @@ unlock_and_exit:
     return err;
 }
 
-int xnintr_shirq_detach (xnintr_t *intr)
+static int xnintr_shirq_detach (xnintr_t *intr)
 {
     xnintr_shirq_t *shirq = &xnshirqs[intr->irq];
     xnintr_t *e, **p = &shirq->handlers;
@@ -695,6 +703,11 @@ int xnintr_shirq_detach (xnintr_t *intr)
     return err;
 }
 
+static void xnintr_synchronize (xnintr_t *intr)
+{
+    xnintr_shirq_spin(&xnshirqs[intr->irq]);
+}
+
 int xnintr_mount(void)
 {
     int i;


-----------------------------------------------------------------------------------------------
   
   
   

--
Best regards,
Dmitry Adamushko
_______________________________________________
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core

Reply via email to