Module: xenomai-head
Branch: master
Commit: 09e79ca8ce48f5e7e01e2452f75e947f76739fe7
URL:    http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=09e79ca8ce48f5e7e01e2452f75e947f76739fe7

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sat Oct  9 12:04:59 2010 +0200

hal/generic, nucleus: remove PREEMPT_RT specifics

This patch cleans up the outdated PREEMPT_RT experimentation code from
the current dual kernel architecture. Combining dual kernel and native
preemption technologies in a single kernel does not make much sense
these days.

---

 include/asm-generic/bits/pod.h |    2 +-
 include/nucleus/sched.h        |    2 +-
 include/nucleus/types.h        |    5 --
 ksrc/arch/generic/hal.c        |   85 +---------------------------------------
 ksrc/nucleus/pipe.c            |   27 +++++-------
 ksrc/nucleus/registry.c        |   15 ++-----
 6 files changed, 19 insertions(+), 117 deletions(-)

diff --git a/include/asm-generic/bits/pod.h b/include/asm-generic/bits/pod.h
index 047278c..b90cb0a 100644
--- a/include/asm-generic/bits/pod.h
+++ b/include/asm-generic/bits/pod.h
@@ -174,7 +174,7 @@ static inline int xnarch_release_ipi (void)
                                IPIPE_PASS_MASK);
 }
 
-static struct linux_semaphore xnarch_finalize_sync;
+static struct semaphore xnarch_finalize_sync;
 
 static void xnarch_finalize_cpu(unsigned irq)
 {
diff --git a/include/nucleus/sched.h b/include/nucleus/sched.h
index 4461939..df56417 100644
--- a/include/nucleus/sched.h
+++ b/include/nucleus/sched.h
@@ -104,7 +104,7 @@ typedef struct xnsched {
 #ifdef CONFIG_XENO_OPT_PERVASIVE
        struct task_struct *gatekeeper;
        wait_queue_head_t gkwaitq;
-       struct linux_semaphore gksync;
+       struct semaphore gksync;
        struct xnthread *gktarget;
 #endif
 
diff --git a/include/nucleus/types.h b/include/nucleus/types.h
index f49131c..1c66465 100644
--- a/include/nucleus/types.h
+++ b/include/nucleus/types.h
@@ -22,11 +22,6 @@
 
 #ifdef __KERNEL__
 #include <linux/errno.h>
-#ifdef CONFIG_PREEMPT_RT
-#define linux_semaphore compat_semaphore
-#else /* CONFIG_PREEMPT_RT */
-#define linux_semaphore semaphore
-#endif /* !CONFIG_PREEMPT_RT */
 #else /* !__KERNEL__ */
 #include <stdio.h>
 #include <sys/types.h>
diff --git a/ksrc/arch/generic/hal.c b/ksrc/arch/generic/hal.c
index 53e0b95..b1335b0 100644
--- a/ksrc/arch/generic/hal.c
+++ b/ksrc/arch/generic/hal.c
@@ -414,55 +414,6 @@ static void rthal_apc_handler(unsigned virq, void *arg)
     rthal_spin_unlock(&rthal_apc_lock);
 }
 
-#ifdef CONFIG_PREEMPT_RT
-
-/* On PREEMPT_RT, we need to invoke the apc handlers over a process
-   context, so that the latter can access non-atomic kernel services
-   properly. So the Adeos virq is only used to kick a per-CPU apc
-   server process which in turns runs the apc dispatcher. A bit
-   twisted, but indeed consistent with the threaded IRQ model of
-   PREEMPT_RT. */
-
-#include <linux/kthread.h>
-
-static struct task_struct *rthal_apc_servers[RTHAL_NR_CPUS];
-
-static int rthal_apc_thread(void *data)
-{
-    unsigned cpu = (unsigned)(unsigned long)data;
-
-    set_cpus_allowed(current, cpumask_of_cpu(cpu));
-    sigfillset(&current->blocked);
-    current->flags |= PF_NOFREEZE;
-    /* Use highest priority here, since some apc handlers might
-       require to run as soon as possible after the request has been
-       pended. */
-    rthal_setsched_root(current, SCHED_FIFO, MAX_RT_PRIO - 1);
-
-    while (!kthread_should_stop()) {
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule();
-       rthal_apc_handler(0);
-    }
-
-    __set_current_state(TASK_RUNNING);
-
-    return 0;
-}
-
-void rthal_apc_kicker(unsigned virq, void *cookie)
-{
-    wake_up_process(rthal_apc_servers[smp_processor_id()]);
-}
-
-#define rthal_apc_trampoline rthal_apc_kicker
-
-#else /* !CONFIG_PREEMPT_RT */
-
-#define rthal_apc_trampoline rthal_apc_handler
-
-#endif /* CONFIG_PREEMPT_RT */
-
 /**
 * @fn int rthal_apc_alloc (const char *name,void (*handler)(void *cookie),void *cookie)
  *
@@ -477,8 +428,7 @@ void rthal_apc_kicker(unsigned virq, void *cookie)
  *
  * The HAL guarantees that any Linux kernel service which would be
  * callable from a regular Linux interrupt handler is also available
- * to APC handlers, including over PREEMPT_RT kernels exhibiting a
- * threaded IRQ model.
+ * to APC handlers.
  *
  * @param name is a symbolic name identifying the APC which will get
  * reported through the /proc/xenomai/apc interface. Passing NULL to
@@ -603,25 +553,12 @@ int rthal_init(void)
 
     err = rthal_virtualize_irq(rthal_current_domain,
                               rthal_apc_virq,
-                              &rthal_apc_trampoline,
+                              &rthal_apc_handler,
                               NULL, NULL, IPIPE_HANDLE_MASK);
     if (err) {
        printk(KERN_ERR "Xenomai: Failed to virtualize IRQ.\n");
        goto out_free_irq;
     }
-#ifdef CONFIG_PREEMPT_RT
-    {
-       int cpu;
-       for_each_online_cpu(cpu) {
-           rthal_apc_servers[cpu] =
-               kthread_create(&rthal_apc_thread, (void *)(unsigned long)cpu,
-                              "apc/%d", cpu);
-           if (!rthal_apc_servers[cpu])
-               goto out_kthread_stop;
-           wake_up_process(rthal_apc_servers[cpu]);
-       }
-    }
-#endif /* CONFIG_PREEMPT_RT */
 
     err = rthal_register_domain(&rthal_domain,
                                "Xenomai",
@@ -647,16 +584,6 @@ int rthal_init(void)
     return 0;
 
   fail:
-#ifdef CONFIG_PREEMPT_RT
-  out_kthread_stop:
-    {
-       int cpu;
-       for_each_online_cpu(cpu) {
-           if (rthal_apc_servers[cpu])
-               kthread_stop(rthal_apc_servers[cpu]);
-       }
-    }
-#endif /* CONFIG_PREEMPT_RT */
     rthal_virtualize_irq(rthal_current_domain, rthal_apc_virq, NULL, NULL, NULL,
                         0);
 
@@ -676,14 +603,6 @@ void rthal_exit(void)
        rthal_virtualize_irq(rthal_current_domain, rthal_apc_virq, NULL, NULL,
                             NULL, 0);
        rthal_free_virq(rthal_apc_virq);
-#ifdef CONFIG_PREEMPT_RT
-       {
-           int cpu;
-           for_each_online_cpu(cpu) {
-               kthread_stop(rthal_apc_servers[cpu]);
-           }
-       }
-#endif /* CONFIG_PREEMPT_RT */
     }
 
     if (rthal_init_done)
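
The deleted PREEMPT_RT path above was an instance of the kick-a-kthread-
from-IRQ pattern: the virq handler only woke a per-CPU server thread, which
then ran the APC dispatcher in process context. A minimal self-contained
sketch of that pattern with stock kernel APIs, where server, kick() and
do_deferred_work() are illustrative names rather than Xenomai's:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static struct task_struct *server;

    static void do_deferred_work(void)
    {
        /* work that needs process context (may sleep) */
    }

    static int server_thread(void *unused)
    {
        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();            /* sleep until kicked */
            do_deferred_work();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    /* runs in (virtual) IRQ context: defer the work to the thread */
    static void kick(void)
    {
        wake_up_process(server);
    }

    static int start_server(void)
    {
        server = kthread_run(server_thread, NULL, "apc-server");
        return IS_ERR(server) ? PTR_ERR(server) : 0;
    }

Setting TASK_INTERRUPTIBLE before schedule() closes the lost-wakeup window:
a kick() arriving in between simply puts the thread back to TASK_RUNNING,
so schedule() returns immediately instead of sleeping through the event.
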
diff --git a/ksrc/nucleus/pipe.c b/ksrc/nucleus/pipe.c
index 2b7d36b..721fd70 100644
--- a/ksrc/nucleus/pipe.c
+++ b/ksrc/nucleus/pipe.c
@@ -148,7 +148,6 @@ static void xnpipe_wakeup_proc(void *cookie)
        xnlock_get_irqsave(&nklock, s);
 
        nh = getheadq(&xnpipe_sleepq);
-
        while ((h = nh) != NULL) {
                nh = nextq(&xnpipe_sleepq, h);
                state = link2xnpipe(h, slink);
@@ -156,10 +155,10 @@ static void xnpipe_wakeup_proc(void *cookie)
                if (rbits) {
                        __clrbits(state->status, rbits);
                        /*
-                        * PREEMPT_RT kernels could schedule us out as
-                        * a result of waking up a waiter, so we need
-                        * the housekeeping and release the nklock
-                        * before calling wake_up_interruptible().
+                        * We could be switched out as a result of
+                        * waking up a waiter, so we need the
+                        * housekeeping and release the nklock before
+                        * calling wake_up_interruptible().
                         */
                        if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
                                if (waitqueue_active(&state->readq)) {
@@ -175,18 +174,14 @@ static void xnpipe_wakeup_proc(void *cookie)
                                        xnlock_get_irqsave(&nklock, s);
                                }
                        }
+#ifdef CONFIG_SMP
                        /*
-                        * On PREEMPT_RT kernels, __wake_up() might
-                        * sleep, so we need to refetch the sleep
-                        * queue head just to be safe; for the very
-                        * same reason, livelocking inside this loop
-                        * cannot happen. On regular kernel variants,
-                        * we just keep processing the entire loop in
-                        * a row.
+                        * A waiter may have entered/left the queue
+                        * from another CPU, so we need to refetch the
+                        * sleep queue head to be safe.
                         */
-#if defined(CONFIG_PREEMPT_RT) || defined (CONFIG_SMP)
                        nh = getheadq(&xnpipe_sleepq);
-#endif /* CONFIG_PREEMPT_RT || CONFIG_SMP */
+#endif /* CONFIG_SMP */
                }
        }
 
@@ -205,9 +200,9 @@ static void xnpipe_wakeup_proc(void *cookie)
                        xnlock_put_irqrestore(&nklock, s);
                        kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
                        xnlock_get_irqsave(&nklock, s);
-#if defined(CONFIG_PREEMPT_RT) || defined (CONFIG_SMP)
+#ifdef CONFIG_SMP
                        nh = getheadq(&xnpipe_asyncq);
-#endif /* CONFIG_PREEMPT_RT || CONFIG_SMP */
+#endif
                }
        }
 
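The refetch described in the comments above is the standard pattern for
walking a queue when the protecting lock must be dropped around each wakeup:
under CONFIG_SMP another CPU may enter or leave the queue during the
unlocked window, so the walk restarts from the head. A condensed sketch with
generic kernel primitives; struct sleeper, sleepq and the waitq field are
illustrative names, not the xnpipe types:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct sleeper {
        struct list_head link;
        wait_queue_head_t waitq;
        int ready;
    };

    static LIST_HEAD(sleepq);
    static DEFINE_SPINLOCK(lock);

    static void wakeup_all_ready(void)
    {
        struct sleeper *s, *next;
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        s = list_first_entry_or_null(&sleepq, struct sleeper, link);
        while (s) {
            next = list_is_last(&s->link, &sleepq) ? NULL :
                list_next_entry(s, link);
            if (s->ready) {
                s->ready = 0;
                /* waking may reschedule us: drop the lock first */
                spin_unlock_irqrestore(&lock, flags);
                wake_up_interruptible(&s->waitq);
                spin_lock_irqsave(&lock, flags);
    #ifdef CONFIG_SMP
                /* the queue may have changed while unlocked: refetch */
                next = list_first_entry_or_null(&sleepq,
                                struct sleeper, link);
    #endif
            }
            s = next;
        }
        spin_unlock_irqrestore(&lock, flags);
    }
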
diff --git a/ksrc/nucleus/registry.c b/ksrc/nucleus/registry.c
index 01d2f6a..19cd507 100644
--- a/ksrc/nucleus/registry.c
+++ b/ksrc/nucleus/registry.c
@@ -73,9 +73,7 @@ static void registry_proc_schedule(void *cookie);
 
 static xnqueue_t registry_obj_procq;   /* Objects waiting for /proc handling. */
 
-#ifndef CONFIG_PREEMPT_RT
 static DECLARE_WORK_NODATA(registry_proc_work, &registry_proc_callback);
-#endif /* !CONFIG_PREEMPT_RT */
 
 static int registry_proc_apc;
 
@@ -342,16 +340,11 @@ static DECLARE_WORK_FUNC(registry_proc_callback)
 
 static void registry_proc_schedule(void *cookie)
 {
-#ifdef CONFIG_PREEMPT_RT
-       /* On PREEMPT_RT, we are already running over a thread context, so
-          we don't need the workqueue indirection: let's invoke the
-          export handler directly. */
-       registry_proc_callback(cookie);
-#else /* CONFIG_PREEMPT_RT */
-       /* schedule_work() will check for us if the work has already been
-          scheduled, so just be lazy and submit blindly. */
+       /*
+        * schedule_work() will check for us if the work has already
+        * been scheduled, so just be lazy and submit blindly.
+        */
        schedule_work(&registry_proc_work);
-#endif /* CONFIG_PREEMPT_RT */
 }
 
 static int registry_export_vfsnap(struct xnobject *object,
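
The retained path relies on schedule_work() being idempotent: submitting an
already-pending work item is a cheap no-op. A minimal sketch against the
standard workqueue API, with export_work and export_handler as illustrative
stand-ins for the Xenomai names:

    #include <linux/workqueue.h>

    static void export_handler(struct work_struct *work)
    {
        /* drain whatever is queued for /proc export */
    }

    static DECLARE_WORK(export_work, export_handler);

    static void schedule_export(void)
    {
        /*
         * schedule_work() returns false and queues nothing if
         * export_work is already pending, so blind submission
         * from here is safe.
         */
        schedule_work(&export_work);
    }
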

