Hi folks,
there are some problems with delayed non-realtime interrupts in
rtlinux.
The problem is in the rtl_schedule() function
(schedulers/rtl_sched.c), which does not call the
rtl_process_pending() function when rtlinux goes into the idle state.
This delays the pending non-realtime interrupts until
rtl_soft_sti() (main/rtl_core.c) is called, which can take
a very long time (for example, more than 4 ms on an NSC Geode).
A second problem is that pending non-realtime interrupts are
processed in the order of their interrupt numbers. This is a bad
solution, because devices with little or no buffering will lose
data (I had this problem with the Linux serial driver).
Interrupts should be processed in the order they occur;
the hardware handles them in the same way. The exception —
when more than one interrupt is raised at once — was already
handled by the interrupt controller long ago.
Processing the non-realtime interrupts in the same order keeps
the response time and delay as short as possible.
Also, a nice side effect is that by keeping the order in a linked list,
the interrupt dispatcher function rtl_process_pending becomes an O(1)
function.
Please apply this patch if you like it ;-)
Greetings,
Stefani
Here is the patch against rtlinux 3.1:
-----------------------
--- rtlinux-3.1.old/schedulers/rtl_sched.c Thu Jul 26 21:32:41 2001
+++ rtlinux-3.1.new/schedulers/rtl_sched.c Fri Jan 25 12:33:58 2002
@@ -13,6 +13,12 @@
* Copyright (C) Finite State Machine Labs Inc., 1998,1999
* Released under the terms of the GNU General Public License Version 2
*
+ * Changlog:
+ * 25.Jan.2002 Stefani Seibold <[EMAIL PROTECTED]>
+ * process pending non realtime interrupts if the schedulers switch
+ * to idle mode. This make responce of non realtime interrupt much
+ * faster and prevent non realtime drivers losing data
+ * (ex. serial communication).
*/
#include <linux/module.h>
@@ -498,7 +504,12 @@
if (pthread_self()->pending & ~(1 << RTL_SIGNAL_READY))
do_signal(pthread_self());
rtl_trace2 (RTL_TRACE_SCHED_OUT, (long) pthread_self());
+
+ if ( rtl_rt_system_is_idle() )
+ rtl_process_pending();
+
rtl_restore_interrupts(interrupt_state);
+
return mask;
}
--- rtlinux-3.1.old/main/rtl_core.c Thu Jul 26 21:32:41 2001
+++ rtlinux-3.1.new/main/rtl_core.c Fri Jan 25 15:37:53 2002
@@ -7,6 +7,14 @@
* by the Open RTLinux Patent License which can be obtained from
* www.fsmlabs.com/PATENT or by sending email to
* [EMAIL PROTECTED]
+ *
+ * Changlog:
+ * 25.Jan.2002 Stefani Seibold <[EMAIL PROTECTED]>
+ * process pending non realtime interrupts in the same order they
+ * are raised.
+ * This prevent non realtime drivers losing data (ex. serial
+ * communication) and makes the non realtime interrupt dispatcher
+ * also to an O(1) function.
*/
#include <linux/kernel.h>
@@ -194,6 +202,7 @@
#define l_pend_since_sti 2
#define l_busy 3
#define l_psc_active 4
+#define l_in_process_pending 5
#define L_SET(f) set_bit(f,&rtl_local[cpu_id].flags)
#define L_CLEAR(f) clear_bit(f,&rtl_local[cpu_id].flags)
#define L_TEST(f) test_bit(f,&rtl_local[cpu_id].flags)
@@ -219,10 +228,10 @@
struct rtl_global{
spinlock_t hard_irq_controller_lock;
- unsigned long flags;
- unsigned long pending[IRQ_ARRAY_SIZE];
- unsigned long soft_enabled[IRQ_ARRAY_SIZE];
- unsigned long rtirq[IRQ_ARRAY_SIZE];
+ volatile unsigned long flags;
+ volatile unsigned long pending[IRQ_ARRAY_SIZE];
+ volatile unsigned long soft_enabled[IRQ_ARRAY_SIZE];
+ volatile unsigned long rtirq[IRQ_ARRAY_SIZE];
};
struct rtl_global rtl_global ={ SPIN_LOCK_UNLOCKED,0,IRQ_ZINIT,IRQ_NZINIT,IRQ_ZINIT} ;
@@ -231,6 +240,57 @@
unsigned int (*handler)(unsigned int irq, struct pt_regs *r);
}rtl_global_handlers[IRQ_MAX_COUNT];
+struct rtl_irq_list {
+ int prev;
+ int next;
+};
+
+static volatile struct rtl_irq_list rtl_global_irq_list[IRQ_MAX_COUNT];
+
+static volatile int rtl_global_irq_first;
+static volatile int rtl_global_irq_last;
+
+static void link_irq(int irq)
+{
+ rtl_global_irq_list[irq].prev=rtl_global_irq_last;
+ rtl_global_irq_list[irq].next=IRQ_NOT_VALID;
+
+ if (rtl_global_irq_last!=IRQ_NOT_VALID)
+ rtl_global_irq_list[rtl_global_irq_last].next=irq;
+ else
+ rtl_global_irq_first=irq;
+
+ rtl_global_irq_last=irq;
+}
+
+static void unlink_first_irq(void)
+{
+ int irq;
+
+ irq=rtl_global_irq_first;
+
+ rtl_global_irq_first=rtl_global_irq_list[irq].next;
+
+ if (rtl_global_irq_first==IRQ_NOT_VALID)
+ rtl_global_irq_last=IRQ_NOT_VALID;
+ else
+ rtl_global_irq_list[rtl_global_irq_first].prev=IRQ_NOT_VALID;
+}
+
+static void unlink_irq(int irq)
+{
+ if (rtl_global_irq_first==irq)
+ unlink_first_irq();
+ else {
+ if (rtl_global_irq_last==irq)
+ rtl_global_irq_last=rtl_global_irq_list[irq].prev;
+ else
+
+rtl_global_irq_list[rtl_global_irq_list[irq].next].prev=rtl_global_irq_list[irq].prev;
+
+
+rtl_global_irq_list[rtl_global_irq_list[irq].prev].next=rtl_global_irq_list[irq].next;
+ }
+}
+
#ifdef __LOCAL_IRQS__
void rtl_local_pend_vec(int vector,int cpu_id)
{
@@ -265,8 +325,6 @@
}
/* rtl_intercept intercepts global interrupts */
-#define RUN_LINUX_HANDLER(irq) (G_ISPEND(irq) && !L_TEST(l_busy)\
- && L_TEST(l_ienable) && G_ISENABLED(irq))
intercept_t rtl_intercept(MACHDEPREGS regs)
{
int irq;
@@ -284,13 +342,20 @@
dispatch_rtl_handler(irq,MACHDEPREGS_PTR(regs));
rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
} else {
- G_PEND(irq);
+ if (!G_ISPEND(irq)) {
+
+ G_PEND(irq);
+
+ if (G_ISENABLED(irq))
+ link_irq(irq);
+ }
+
G_SET(g_pend_since_sti);
}
- if(RUN_LINUX_HANDLER(irq))
- {
- /* unpend so dispatch doesn't dispatch 2 times*/
+ if ( (irq==rtl_global_irq_first) && G_ISPEND(irq) && !L_TEST(l_busy)
+&& L_TEST(l_ienable) && G_ISENABLED(irq) ) {
+ /* unpend so dispatch doesn't dispatch 2 times */
+ unlink_irq(irq);
G_UNPEND(irq);
rtl_soft_cli(); /* disable local soft interrupts */
G_DISABLE(irq); /* disable this irq */
@@ -299,11 +364,10 @@
#ifdef DEBUG_PENDING
global_pending = 0;
#endif /* DEBUG_PENDING */
- dispatch_linux_irq(MACHDEPREGS_PTR(regs),irq);
- rtl_trace2 (RTL_TRACE_INTERCEPT_EXIT, irq);
- RETURN_FROM_INTERRUPT_LINUX; /* goes via ret_from_intr */
+ dispatch_linux_irq(MACHDEPREGS_PTR(regs),irq);
+ rtl_trace2 (RTL_TRACE_INTERCEPT_EXIT, irq);
+ RETURN_FROM_INTERRUPT_LINUX; /* goes via ret_from_intr */
}
-
#ifdef DEBUG_PENDING
/*
* If a Linux interrupt has been pended, and we haven't
@@ -399,22 +463,21 @@
/* tools for soft_sti */
static inline unsigned int get_gpended_irq(void)
{
- unsigned int i, j;
rtl_irqstate_t flags;
- unsigned long irqs;
+ int irq;
rtl_spin_lock_irqsave(&rtl_global.hard_irq_controller_lock, flags);
- for (i=0; i < IRQ_ARRAY_SIZE; i++) {
- irqs = rtl_global.pending[i] & rtl_global.soft_enabled[i];
- if (!irqs)
- continue;
- j = ffz(~irqs);
- clear_bit(j, &rtl_global.pending[i]);
- rtl_spin_unlock_irqrestore(&rtl_global.hard_irq_controller_lock,
flags);
- return pi_toirq (j, i);
+
+ irq=rtl_global_irq_first;
+
+ if (irq!=IRQ_NOT_VALID) {
+ unlink_first_irq();
+ G_UNPEND(irq);
}
+
rtl_spin_unlock_irqrestore(&rtl_global.hard_irq_controller_lock, flags);
- return IRQ_NOT_VALID;
+
+ return irq;
}
void rtl_soft_cli(void)
@@ -437,11 +500,26 @@
{
int irq = 0;
int last_irq = 0;
+ unsigned long flags;
+
DeclareAndInit(cpu_id);
+ if (! L_TEST(l_ienable))
+ return;
+
+ rtl_no_interrupts(flags);
+
+ if (L_TEST_AND_SET(l_in_process_pending)) {
+ rtl_restore_interrupts(flags);
+
+ return;
+ }
+
rtl_soft_cli(); /*disable soft interrupts !*/
+
+ rtl_hard_sti();
+
do{
- irq = IRQ_NOT_VALID;
G_CLEAR(g_pend_since_sti);
L_CLEAR(l_pend_since_sti);
#ifdef __LOCAL_IRQS__
@@ -465,10 +543,11 @@
soft_dispatch_global(irq);
}
}
+ }
#ifdef __RTL_LOCALIRQS__
- }while(irq != IRQ_NOT_VALID || (!test_bit(cpu_id, &rtl_reserved_cpumask) &&
G_TEST(g_pend_since_sti)) || L_TEST(l_pend_since_sti));
+ while(irq != IRQ_NOT_VALID || (!test_bit(cpu_id, &rtl_reserved_cpumask) &&
+G_TEST(g_pend_since_sti)) || L_TEST(l_pend_since_sti));
#else
- }while(irq != IRQ_NOT_VALID || G_TEST(g_pend_since_sti) ||
L_TEST(l_pend_since_sti));
+ while(irq != IRQ_NOT_VALID || G_TEST(g_pend_since_sti) ||
+L_TEST(l_pend_since_sti));
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
@@ -476,19 +555,26 @@
if ( softirq_active(cpu_id) & softirq_mask(cpu_id) )
do_softirq();
#endif
+ rtl_hard_cli();
+
+ L_CLEAR(l_in_process_pending);
+
+ rtl_soft_sti_no_emulation();
+
+ rtl_restore_interrupts(flags);
}
void rtl_soft_sti(void)
{
DeclareAndInit(cpu_id);
/*debug_test_enabled("rtl_soft_sti");*/
+ rtl_soft_sti_no_emulation();
if ( L_TEST(l_pend_since_sti) || G_TEST(g_pend_since_sti)
#if LINUX_VERSION_CODE >= 0x020300
|| (softirq_active(cpu_id) & softirq_mask(cpu_id) )
#endif
)
rtl_process_pending();
- rtl_soft_sti_no_emulation();
}
void rtl_soft_save_flags(unsigned long *x)
@@ -534,7 +620,17 @@
void rtl_virt_disable(unsigned int irq)
{
- G_DISABLE(irq);
+ rtl_irqstate_t flags;
+ rtl_no_interrupts(flags);
+
+ if ( G_ISENABLED(irq)) {
+
+ if(G_ISPEND(irq))
+ unlink_irq(irq);
+
+ G_DISABLE(irq);
+ }
+ rtl_restore_interrupts(flags);
}
void rtl_virt_enable(unsigned int irq)
@@ -551,6 +647,7 @@
else
{
HardDeclareAndInit(cpu_id);
+ link_irq(irq);
rtl_restore_interrupts(flags);
if( L_TEST(l_ienable))
__sti(); /* emulate the bastard */
@@ -558,7 +655,19 @@
}
/* these are exported so that they can be called by rt drivers */
-void rtl_global_pend_irq(int ix) { G_PEND(ix); G_SET(g_pend_since_sti); }
+void rtl_global_pend_irq(int ix)
+{
+ rtl_irqstate_t flags;
+ rtl_no_interrupts(flags);
+
+ if (!G_ISPEND(ix)) {
+ G_PEND(ix);
+ if ( G_ISENABLED(ix))
+ link_irq(ix);
+ }
+ G_SET(g_pend_since_sti);
+ rtl_restore_interrupts(flags);
+}
int rtl_global_ispending_irq(int ix) { return G_ISPEND(ix); }
@@ -663,6 +772,9 @@
int init_module(void)
{
int ret;
+
+ rtl_global_irq_first=IRQ_NOT_VALID;
+ rtl_global_irq_last =IRQ_NOT_VALID;
if ( arch_takeover() )
{
-- [rtl] ---
To unsubscribe:
echo "unsubscribe rtl" | mail [EMAIL PROTECTED] OR
echo "unsubscribe rtl <Your_email>" | mail [EMAIL PROTECTED]
--
For more information on Real-Time Linux see:
http://www.rtlinux.org/