Module: xenomai-forge
Branch: master
Commit: 8918bfc4ee1cf8a976745dd02347ea24e9cf1c78
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=8918bfc4ee1cf8a976745dd02347ea24e9cf1c78

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Jan  4 11:26:36 2012 +0100

nucleus: use builtin shadow migration service

The latest pipeline core introduces __ipipe_migrate_head(), which
starts the migration of the current task to the head domain. To
complete the migration, the pipeline invokes ipipe_migration_hook(),
which the nucleus implements. This mechanism requires no third-party
helper to finish the migration, i.e. there is no gatekeeper thread
anymore.

This patch rebases xnshadow_harden() on this new feature, and keeps
the former gatekeeper-based implementation in the legacy support code
for older pipelines.

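For quick reference, here is a much-simplified sketch of the new call
sequence on recent pipelines, condensed from the shadow.c hunks below.
All identifiers come from the patch itself; locking (nklock), SMP
passive migration, FPU switching and the signal rechecks are
deliberately omitted, so this is illustrative only, not the committed
code:

/* Called by the pipeline core, hw IRQs off, once "current" has been
 * handed over to the head domain by __ipipe_migrate_head(). */
void ipipe_migration_hook(struct task_struct *p)
{
        struct xnthread *thread = xnshadow_thread(p);

        /* Resume the shadow mated with the migrating task... */
        xnpod_resume_thread(thread, XNRELAX);
        /* ...and let the Xenomai scheduler switch it in. */
        xnpod_schedule();
}

int xnshadow_harden(void)
{
        struct xnthread *thread = xnshadow_current();
        int ret;

        if (thread == NULL)
                return -EPERM;  /* not a shadow thread */

        /*
         * Ask the pipeline to migrate "current" to the head domain;
         * ipipe_migration_hook() completes the work, so no gatekeeper
         * kthread is involved anymore.
         */
        ret = __ipipe_migrate_head();
        if (ret)
                return ret;

        /* "current" now runs in the Xenomai (primary) domain. */
        return 0;
}

The full hunks below show the real implementation, including the
legacy gatekeeper-based path kept for older pipelines.
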
---

 include/asm-generic/bits/shadow.h    |  130 ++++++++++++++++--
 include/asm-generic/hal.h            |    3 -
 include/asm-generic/ipipe/wrappers.h |   13 +--
 include/cobalt/nucleus/sched.h       |    5 -
 kernel/cobalt/nucleus/pod.c          |    1 +
 kernel/cobalt/nucleus/shadow.c       |  252 +++++++++++++++++-----------------
 6 files changed, 242 insertions(+), 162 deletions(-)

diff --git a/include/asm-generic/bits/shadow.h b/include/asm-generic/bits/shadow.h
index 258a82e..22a06d8 100644
--- a/include/asm-generic/bits/shadow.h
+++ b/include/asm-generic/bits/shadow.h
@@ -26,6 +26,15 @@
 
 #ifdef CONFIG_XENO_LEGACY_IPIPE
 
+struct gatekeeper_data {
+       struct task_struct *task_hijacked;
+       struct task_struct *gatekeeper;
+       struct semaphore gksync;
+       struct xnthread *gktarget;
+};
+
+static DEFINE_PER_CPU(struct gatekeeper_data, shadow_migration);
+
 #define WORKBUF_SIZE 2048
 static DEFINE_PER_CPU_ALIGNED(unsigned char[WORKBUF_SIZE], work_buf);
 static DEFINE_PER_CPU(void *, work_tail);
@@ -78,8 +87,89 @@ out:
        ipipe_restore_head(flags);
 }
 
+static inline void __ipipe_reenter_root(void)
+{
+       struct task_struct *prev;
+       int policy, prio, cpu;
+
+       cpu = task_cpu(current);
+       policy = current->rt_priority ? SCHED_FIFO : SCHED_NORMAL;
+       prio = current->rt_priority;
+       prev = per_cpu(shadow_migration, cpu).task_hijacked;
+
+       ipipe_reenter_root(prev, policy, prio);
+}
+
+static int gatekeeper_thread(void *data)
+{
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+       struct xnthread *target;
+       struct task_struct *p;
+       struct xnsched *sched;
+       int cpu = (long)data;
+       cpumask_t cpumask;
+       spl_t s;
+
+       p = current;
+       sched = xnpod_sched_slot(cpu);
+       p->flags |= PF_NOFREEZE;
+       sigfillset(&p->blocked);
+       cpumask = cpumask_of_cpu(cpu);
+       set_cpus_allowed(p, cpumask);
+       sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       /* Sync with __xnshadow_init(). */
+       up(&per_cpu(shadow_migration, cpu).gksync);
+
+       for (;;) {
+               /* Make the request token available. */
+               up(&per_cpu(shadow_migration, cpu).gksync);
+               schedule();
+
+               if (kthread_should_stop())
+                       break;
+
+               /*
+                * Real-time shadow TCBs are always removed on behalf
+                * of the killed thread.
+                */
+               target = per_cpu(shadow_migration, cpu).gktarget;
+
+               /*
+                * In the very rare case where the requestor has been
+                * awaken by a signal before we have been able to
+                * process the pending request, just ignore the
+                * latter.
+                */
+               if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH) == TASK_INTERRUPTIBLE) {
+                       xnlock_get_irqsave(&nklock, s);
+#ifdef CONFIG_SMP
+                       /*
+                        * If the task changed its CPU while in
+                        * secondary mode, change the CPU of the
+                        * underlying Xenomai shadow too. We do not
+                        * migrate the thread timers here, it would
+                        * not work. For a "full" migration comprising
+                        * timers, using xnpod_migrate_thread is
+                        * required.
+                        */
+                       if (target->sched != sched)
+                               xnsched_migrate_passive(target, sched);
+#endif /* CONFIG_SMP */
+                       xnpod_resume_thread(target, XNRELAX);
+                       xnlock_put_irqrestore(&nklock, s);
+                       xnpod_schedule();
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+       }
+
+       return 0;
+}
+
 static inline void __xnshadow_init(void)
 {
+       struct gatekeeper_data *gd;
        int key, cpu;
 
        key = ipipe_alloc_ptdkey();
@@ -94,10 +184,37 @@ static inline void __xnshadow_init(void)
 
        ipipe_request_irq(ipipe_root_domain, lostage_virq,
                          do_lostage_work, NULL, NULL);
+
+       for_each_online_cpu(cpu) {
+               gd = &per_cpu(shadow_migration, cpu);
+               if (!xnarch_cpu_supported(cpu)) {
+                       gd->gatekeeper = NULL;
+                       continue;
+               }
+               sema_init(&gd->gksync, 0);
+               xnarch_memory_barrier();
+               gd->gatekeeper = kthread_create(gatekeeper_thread,
+                                               (void *)(long)cpu,
+                                               "gatekeeper/%d", cpu);
+               wake_up_process(gd->gatekeeper);
+               down(&gd->gksync);
+       }
 }
 
 static inline void __xnshadow_exit(void)
 {
+       struct gatekeeper_data *gd;
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               gd = &per_cpu(shadow_migration, cpu);
+               if (gd->gatekeeper) {
+                       down(&gd->gksync);
+                       gd->gktarget = NULL;
+                       kthread_stop(gd->gatekeeper);
+               }
+       }
+
        ipipe_free_irq(ipipe_root_domain, lostage_virq);
        ipipe_free_virq(lostage_virq);
        ipipe_free_ptdkey(0);
@@ -113,14 +230,6 @@ static inline void clear_ptd(void)
        current->ptd[0] = NULL;
 }
 
-static inline void hijack_current(void)
-{ 
-       int cpu = task_cpu(current);
-
-       rthal_archdata.task_hijacked[cpu] = current;
-       schedule();
-}
-
 #else /* !CONFIG_XENO_LEGACY_IPIPE */
 
 static inline void __xnshadow_init(void) { }
@@ -131,11 +240,6 @@ static inline void __xnshadow_exit(void) { }
 
 static inline void clear_ptd(void) { }
 
-static inline void hijack_current(void)
-{ 
-       schedule();
-}
-
 #endif /* !CONFIG_XENO_LEGACY_IPIPE */
 
 #endif /* !_XENO_ASM_GENERIC_BITS_SHADOW_H */
diff --git a/include/asm-generic/hal.h b/include/asm-generic/hal.h
index 4e7d6f6..98b0c70 100644
--- a/include/asm-generic/hal.h
+++ b/include/asm-generic/hal.h
@@ -60,9 +60,6 @@ struct rthal_archdata {
 #ifdef CONFIG_SMP
        cpumask_t supported_cpus;
 #endif
-#ifdef CONFIG_XENO_LEGACY_IPIPE
-       struct task_struct *task_hijacked[NR_CPUS];
-#endif
 };
 
 extern struct rthal_archdata rthal_archdata;
diff --git a/include/asm-generic/ipipe/wrappers.h b/include/asm-generic/ipipe/wrappers.h
index 2f93095..536f1c4 100644
--- a/include/asm-generic/ipipe/wrappers.h
+++ b/include/asm-generic/ipipe/wrappers.h
@@ -159,18 +159,7 @@ static inline int __ipipe_disable_ondemand_mappings(struct task_struct *p)
        return ipipe_disable_ondemand_mappings(p);
 }
 
-static inline void __ipipe_reenter_root(void)
-{
-       struct task_struct *prev;
-       int policy, prio, cpu;
-
-       cpu = task_cpu(current);
-       policy = current->rt_priority ? SCHED_FIFO : SCHED_NORMAL;
-       prio = current->rt_priority;
-       prev = rthal_archdata.task_hijacked[cpu];
-
-       ipipe_reenter_root(prev, policy, prio);
-}
+static inline void __ipipe_complete_domain_migration(void) { }
 
 static inline void ipipe_raise_mayday(struct task_struct *p)
 {
diff --git a/include/cobalt/nucleus/sched.h b/include/cobalt/nucleus/sched.h
index e02249b..24d6cd0 100644
--- a/include/cobalt/nucleus/sched.h
+++ b/include/cobalt/nucleus/sched.h
@@ -97,11 +97,6 @@ typedef struct xnsched {
        xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
        xnstat_exectime_t *current_account;     /*!< Currently active account */
 #endif
-
-       struct task_struct *gatekeeper;
-       struct semaphore gksync;
-       struct xnthread *gktarget;
-
 } xnsched_t;
 
 union xnsched_policy_param;
diff --git a/kernel/cobalt/nucleus/pod.c b/kernel/cobalt/nucleus/pod.c
index 2ee0bf8..495e4ae 100644
--- a/kernel/cobalt/nucleus/pod.c
+++ b/kernel/cobalt/nucleus/pod.c
@@ -2152,6 +2152,7 @@ reschedule:
        return;
 
       shadow_epilogue:
+       __ipipe_complete_domain_migration();
        /*
         * Shadow on entry and root without shadow extension on exit?
         * Mmmm... This must be the user-space mate of a deleted
diff --git a/kernel/cobalt/nucleus/shadow.c b/kernel/cobalt/nucleus/shadow.c
index a28ce30..0f4cd3f 100644
--- a/kernel/cobalt/nucleus/shadow.c
+++ b/kernel/cobalt/nucleus/shadow.c
@@ -346,81 +346,14 @@ static inline int normalize_priority(int prio)
        return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
 }
 
-static int gatekeeper_thread(void *data)
-{
-       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-       struct xnthread *target;
-       struct task_struct *p;
-       struct xnsched *sched;
-       int cpu = (long)data;
-       cpumask_t cpumask;
-       spl_t s;
-
-       p = current;
-       sched = xnpod_sched_slot(cpu);
-       p->flags |= PF_NOFREEZE;
-       sigfillset(&p->blocked);
-       cpumask = cpumask_of_cpu(cpu);
-       set_cpus_allowed(p, cpumask);
-       sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-
-       set_current_state(TASK_INTERRUPTIBLE);
-       up(&sched->gksync);     /* Sync with xnshadow_mount(). */
-
-       for (;;) {
-               up(&sched->gksync); /* Make the request token available. */
-               schedule();
-
-               if (kthread_should_stop())
-                       break;
-
-               /*
-                * Real-time shadow TCBs are always removed on behalf
-                * of the killed thread.
-                */
-               target = sched->gktarget;
-
-               /*
-                * In the very rare case where the requestor has been
-                * awaken by a signal before we have been able to
-                * process the pending request, just ignore the
-                * latter.
-                */
-               if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH) == TASK_INTERRUPTIBLE) {
-                       xnlock_get_irqsave(&nklock, s);
-#ifdef CONFIG_SMP
-                       /*
-                        * If the task changed its CPU while in
-                        * secondary mode, change the CPU of the
-                        * underlying Xenomai shadow too. We do not
-                        * migrate the thread timers here, it would
-                        * not work. For a "full" migration comprising
-                        * timers, using xnpod_migrate_thread is
-                        * required.
-                        */
-                       if (target->sched != sched)
-                               xnsched_migrate_passive(target, sched);
-#endif /* CONFIG_SMP */
-                       xnpod_resume_thread(target, XNRELAX);
-                       xnlock_put_irqrestore(&nklock, s);
-                       xnpod_schedule();
-               }
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-
-       return 0;
-}
-
 /*!
  * @internal
  * \fn int xnshadow_harden(void);
  * \brief Migrate a Linux task to the Xenomai domain.
  *
  * This service causes the transition of "current" from the Linux
- * domain to Xenomai. This is obtained by asking the gatekeeper to
- * resume the shadow mated with "current" then triggering the
- * rescheduling procedure in the Xenomai domain. The shadow will
- * resume in the Xenomai domain as returning from schedule().
+ * domain to Xenomai. The shadow will resume in the Xenomai domain as
+ * returning from schedule().
  *
  * Environments:
  *
@@ -431,29 +364,27 @@ static int gatekeeper_thread(void *data)
  * Rescheduling: always.
  */
 
+#ifdef CONFIG_XENO_LEGACY_IPIPE
+
 int xnshadow_harden(void)
 {
-       struct task_struct *this_task = current;
+       struct task_struct *p = current;
+       struct gatekeeper_data *gd;
        struct xnthread *thread;
        struct xnsched *sched;
-       int cpu, err;
+       int cpu;
 
 redo:
        thread = xnshadow_current();
        if (thread == NULL)
                return -EPERM;
 
-       cpu = task_cpu(this_task);
-       sched = xnpod_sched_slot(cpu);
+       cpu = task_cpu(p);
+       gd = &per_cpu(shadow_migration, cpu);
 
        /* Grab the request token. */
-       if (down_interruptible(&sched->gksync)) {
-               err = -ERESTARTSYS;
-               goto failed;
-       }
-
-       if (thread->u_mode)
-               *(thread->u_mode) = thread->state & ~XNRELAX;
+       if (down_interruptible(&gd->gksync))
+               return -ERESTARTSYS;
 
        preempt_disable();
 
@@ -463,12 +394,15 @@ redo:
         * don't mistakenly send the request to the wrong
         * gatekeeper.
         */
-       if (cpu != task_cpu(this_task)) {
+       if (cpu != task_cpu(p)) {
                preempt_enable();
-               up(&sched->gksync);
+               up(&gd->gksync);
                goto redo;
        }
 
+       if (thread->u_mode)
+               *(thread->u_mode) = thread->state & ~XNRELAX;
+
        /*
         * Set up the request to move "current" from the Linux domain
         * to the Xenomai domain. This will cause the shadow thread to
@@ -481,12 +415,13 @@ redo:
 
        trace_mark(xn_nucleus, shadow_gohard,
                   "thread %p thread_name %s comm %s",
-                  thread, xnthread_name(thread), this_task->comm);
+                  thread, xnthread_name(thread), p->comm);
 
-       sched->gktarget = thread;
+       gd->gktarget = thread;
+       gd->task_hijacked = p;
        set_current_state(TASK_INTERRUPTIBLE | TASK_ATOMICSWITCH);
-       wake_up_process(sched->gatekeeper);
-       hijack_current();
+       wake_up_process(gd->gatekeeper);
+       schedule();
 
        /*
         * Rare case: we might have received a signal before entering
@@ -496,8 +431,8 @@ redo:
         * fail; the caller will have to process this signal anyway.
         */
        if (ipipe_current_domain == ipipe_root_domain) {
-               if (XENO_DEBUG(NUCLEUS) && (!signal_pending(this_task)
-                   || this_task->state != TASK_RUNNING))
+               if (XENO_DEBUG(NUCLEUS) && (!signal_pending(p)
+                   || p->state != TASK_RUNNING))
                        xnpod_fatal
                            ("xnshadow_harden() failed for thread %s[%d]",
                             thread->name, xnthread_user_pid(thread));
@@ -508,17 +443,15 @@ redo:
                 * idea to resume it for the Xenomai domain if, later on, we
                 * may happen to reenter TASK_INTERRUPTIBLE state.
                 */
-               down(&sched->gksync);
-               up(&sched->gksync);
+               down(&gd->gksync);
+               up(&gd->gksync);
 
                return -ERESTARTSYS;
        }
 
        /* "current" is now running into the Xenomai domain. */
        sched = xnsched_finish_unlocked_switch(thread->sched);
-
        xnsched_finalize_zombie(sched);
-
 #ifdef CONFIG_XENO_HW_FPU
        xnpod_switch_fpu(sched);
 #endif /* CONFIG_XENO_HW_FPU */
@@ -538,7 +471,7 @@ redo:
         * entering TASK_ATOMICSWITCH and starting the migration in
         * the gatekeeker thread is just silently queued up to here.
         */
-       if (signal_pending(this_task)) {
+       if (signal_pending(p)) {
                xnshadow_relax(!xnthread_test_state(thread, XNDEBUG),
                               SIGDEBUG_MIGRATE_SIGNAL);
                return -ERESTARTSYS;
@@ -547,14 +480,104 @@ redo:
        xnsched_resched_after_unlocked_switch();
 
        return 0;
+}
+EXPORT_SYMBOL_GPL(xnshadow_harden);
+
+#else /* !CONFIG_XENO_LEGACY_IPIPE */
+
+void ipipe_migration_hook(struct task_struct *p) /* hw IRQs off */
+{
+       struct xnthread *thread = xnshadow_thread(p);
+       struct xnsched *sched;
+
+       xnlock_get(&nklock);
+
+#ifdef CONFIG_SMP
+       /*
+        * If the task moved to another CPU while in secondary mode,
+        * update the CPU of the underlying Xenomai shadow to reflect
+        * the new situation. We do not migrate the thread timers
+        * here, this would not work. For a "full" migration including
+        * timers, using xnpod_migrate_thread() is required.
+        */
+       sched = xnpod_sched_slot(task_cpu(p));
+       if (sched != thread->sched)
+               xnsched_migrate_passive(thread, sched);
+
+#else
+       (void)sched;
+#endif /* CONFIG_SMP */
+
+       xnpod_resume_thread(thread, XNRELAX);
+
+       xnlock_put(&nklock);
+
+       xnpod_schedule();
+}
+
+int xnshadow_harden(void)
+{
+       struct task_struct *p = current;
+       struct xnthread *thread;
+       struct xnsched *sched;
+       int ret;
+
+       thread = xnshadow_current();
+       if (thread == NULL)
+               return -EPERM;
+
+       if (signal_pending(p))
+               return -ERESTARTSYS;
+
+       trace_mark(xn_nucleus, shadow_gohard,
+                  "thread %p name %s comm %s",
+                  thread, xnthread_name(thread), p->comm);
 
-      failed:
        if (thread->u_mode)
-               *(thread->u_mode) = thread->state;
-       return err;
+               *(thread->u_mode) = thread->state & ~XNRELAX;
+
+       ret = __ipipe_migrate_head();
+       if (ret) {
+               *(thread->u_mode) = thread->state | XNRELAX;
+               return ret;
+       }
+
+       /* "current" is now running into the Xenomai domain. */
+       sched = xnsched_finish_unlocked_switch(thread->sched);
+       xnsched_finalize_zombie(sched);
+#ifdef CONFIG_XENO_HW_FPU
+       xnpod_switch_fpu(sched);
+#endif /* CONFIG_XENO_HW_FPU */
+
+       if (xnthread_signaled_p(thread))
+               xnpod_dispatch_signals();
+
+       xnlock_clear_irqon(&nklock);
+
+       trace_mark(xn_nucleus, shadow_hardened, "thread %p name %s",
+                  thread, xnthread_name(thread));
+
+       /*
+        * Recheck pending signals once again. As we block task
+        * wakeups during the migration and handle_sigwake_event()
+        * ignores signals until XNRELAX is cleared, any signal
+        * between entering TASK_HARDENING and starting the migration
+        * is just silently queued up to here.
+        */
+       if (signal_pending(p)) {
+               xnshadow_relax(!xnthread_test_state(thread, XNDEBUG),
+                              SIGDEBUG_MIGRATE_SIGNAL);
+               return -ERESTARTSYS;
+       }
+
+       xnsched_resched_after_unlocked_switch();
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(xnshadow_harden);
 
+#endif /* !CONFIG_XENO_LEGACY_IPIPE */
+
 /*!
  * @internal
  * \fn void xnshadow_relax(int notify, int reason);
@@ -584,7 +607,6 @@ EXPORT_SYMBOL_GPL(xnshadow_harden);
  * @note "current" is valid here since the shadow runs with the
  * properties of the Linux task.
  */
-
 void xnshadow_relax(int notify, int reason)
 {
        xnthread_t *thread = xnpod_current_thread();
@@ -2576,9 +2598,8 @@ void xnshadow_release_events(void)
 
 int xnshadow_mount(void)
 {
-       struct xnsched *sched;
-       unsigned i, size;
-       int cpu, ret;
+       unsigned int i, size;
+       int ret;
 
        __xnshadow_init();
 
@@ -2588,20 +2609,6 @@ int xnshadow_mount(void)
        if (ret)
                return ret;
 
-       for_each_online_cpu(cpu) {
-               if (!xnarch_cpu_supported(cpu))
-                       continue;
-
-               sched = &nkpod_struct.sched[cpu];
-               sema_init(&sched->gksync, 0);
-               xnarch_memory_barrier();
-               sched->gatekeeper =
-                   kthread_create(&gatekeeper_thread, (void *)(long)cpu,
-                                  "gatekeeper/%d", cpu);
-               wake_up_process(sched->gatekeeper);
-               down(&sched->gksync);
-       }
-
        /*
         * Setup the mayday page early, before userland can mess with
         * real-time ops.
@@ -2632,27 +2639,14 @@ int xnshadow_mount(void)
 
 void xnshadow_cleanup(void)
 {
-       struct xnsched *sched;
-       int cpu;
-
        if (nucleus_muxid >= 0) {
                xnshadow_unregister_interface(nucleus_muxid);
                nucleus_muxid = -1;
        }
 
-       if (ppd_hash)
+       if (ppd_hash) {
                kfree(ppd_hash);
-
-       ppd_hash = NULL;
-
-       for_each_online_cpu(cpu) {
-               if (!xnarch_cpu_supported(cpu))
-                       continue;
-
-               sched = &nkpod_struct.sched[cpu];
-               down(&sched->gksync);
-               sched->gktarget = NULL;
-               kthread_stop(sched->gatekeeper);
+               ppd_hash = NULL;
        }
 
        __xnshadow_exit();

