Module: xenomai-forge
Branch: master
Commit: 1a0e255016ab5af2ab3ce5101fb84aa80aa9f13a
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=1a0e255016ab5af2ab3ce5101fb84aa80aa9f13a

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Jul 31 11:59:30 2013 +0200

cobalt/sched: drop delayed thread cleanup via zombie state transition

Since Xenomai thread deletion now always involves a self-exit from
secondary mode, possibly after a pending cancellation request is
noticed, a thread can no longer self-delete from primary mode.

For this reason, delaying resource cleanup until the exiting thread
schedules out from xnpod_schedule() becomes pointless: the thread must
have scheduled out earlier, when leaving primary mode prior to running
do_exit() and the taskexit handler. At that point, the exiting thread
runs in plain Linux context and may release any Xenomai-specific
resource.

We drop the mechanism for delaying thread finalization upon
self-deletion, removing all zombie state hooks in the same move.
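
As a hypothetical illustration (not part of this patch), the exit path
this change relies on boils down to running, straight from the exiting
thread's Linux context, the finalization calls the removed zombie hooks
used to defer. The helper name below is made up for the sketch; the
calls it wraps are the ones visible in the deleted hunks:

	/* Illustrative only: runs from the taskexit path, i.e. plain
	 * Linux context, where releasing resources is safe. */
	static void finalize_exiting_thread(struct xnthread *thread)
	{
		xnsched_forget(thread);   /* drop from its scheduling class */
		xnshadow_unmap(thread);   /* sever the Linux shadow mapping */
		xnthread_cleanup(thread); /* release the TCB resources now */
	}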

---

 include/cobalt/kernel/sched.h |   68 +++++++++++++++++++++-------------------
 kernel/cobalt/lock.c          |    2 +-
 kernel/cobalt/pod.c           |   26 ++++-----------
 kernel/cobalt/sched.c         |   44 +--------------------------
 kernel/cobalt/shadow.c        |    1 -
 5 files changed, 45 insertions(+), 96 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index be3f2c5..402c385 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -55,51 +55,65 @@ struct xnsched_rt {
  * \brief Scheduling information structure.
  */
 
-typedef struct xnsched {
-
-       unsigned long status;           /*!< Scheduler specific status bitmask. */
-       unsigned long lflags;           /*!< Scheduler specific local flags bitmask. */
-       struct xnthread *curr;          /*!< Current thread. */
+struct xnsched {
+       /*!< Scheduler specific status bitmask. */
+       unsigned long status;
+       /*!< Scheduler specific local flags bitmask. */
+       unsigned long lflags;
+       /*!< Current thread. */
+       struct xnthread *curr;
 #ifdef CONFIG_SMP
+       /*!< Owner CPU id. */
        int cpu;
-       cpumask_t resched;              /*!< Mask of CPUs needing rescheduling. */
+       /*!< Mask of CPUs needing rescheduling. */
+       cpumask_t resched;
 #endif
-       struct xnsched_rt rt;           /*!< Context of built-in real-time class. */
+       /*!< Context of built-in real-time class. */
+       struct xnsched_rt rt;
 #ifdef CONFIG_XENO_OPT_SCHED_WEAK
-       struct xnsched_weak weak;       /*!< Context of weak scheduling class. */
+       /*!< Context of weak scheduling class. */
+       struct xnsched_weak weak;
 #endif
 #ifdef CONFIG_XENO_OPT_SCHED_TP
-       struct xnsched_tp tp;           /*!< Context of TP class. */
+       /*!< Context of TP class. */
+       struct xnsched_tp tp;
 #endif
 #ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
-       struct xnsched_sporadic pss;    /*!< Context of sporadic scheduling class. */
+       /*!< Context of sporadic scheduling class. */
+       struct xnsched_sporadic pss;
 #endif
-       volatile unsigned inesting;     /*!< Interrupt nesting level. */
-       struct xntimer htimer;          /*!< Host timer. */
-       struct xnthread *zombie;
-       struct xnthread rootcb;         /*!< Root thread control block. */
+       /*!< Interrupt nesting level. */
+       volatile unsigned inesting;
+       /*!< Host timer. */
+       struct xntimer htimer;
+       /*!< Root thread control block. */
+       struct xnthread rootcb;
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        struct xnthread *last;
 #endif
 #ifdef CONFIG_XENO_HW_FPU
-       struct xnthread *fpuholder;     /*!< Thread owning the current FPU context. */
+       /*!< Thread owning the current FPU context. */
+       struct xnthread *fpuholder;
 #endif
 #ifdef CONFIG_XENO_OPT_WATCHDOG
-       struct xntimer wdtimer;         /*!< Watchdog timer object. */
-       int wdcount;                    /*!< Watchdog tick count. */
+       /*!< Watchdog timer object. */
+       struct xntimer wdtimer;
+       /*!< Watchdog tick count. */
+       int wdcount;
 #endif
 #ifdef CONFIG_XENO_OPT_STATS
-       xnticks_t last_account_switch;  /*!< Last account switch date (ticks). */
-       xnstat_exectime_t *current_account;     /*!< Currently active account */
+       /*!< Last account switch date (ticks). */
+       xnticks_t last_account_switch;
+       /*!< Currently active account */
+       xnstat_exectime_t *current_account;
 #endif
-} xnsched_t;
+};
 
 DECLARE_PER_CPU(struct xnsched, nksched);
 
 union xnsched_policy_param;
 
 struct xnsched_class {
-
        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
@@ -156,7 +170,7 @@ static inline int xnsched_resched_p(struct xnsched *sched)
 /* Set resched flag for the given scheduler. */
 #ifdef CONFIG_SMP
 #define xnsched_set_resched(__sched__) do {                            \
-  xnsched_t *current_sched = xnpod_current_sched();                    \
+  struct xnsched *current_sched = xnpod_current_sched();               \
   if (current_sched == (__sched__))                                    \
       current_sched->status |= XNRESCHED;                              \
   else if (!xnsched_resched_p(__sched__)) {                            \
@@ -169,16 +183,6 @@ static inline int xnsched_resched_p(struct xnsched *sched)
 #define xnsched_set_resched    xnsched_set_self_resched
 #endif /* !CONFIG_SMP */
 
-void xnsched_zombie_hooks(struct xnthread *thread);
-
-void __xnsched_finalize_zombie(struct xnsched *sched);
-
-static inline void xnsched_finalize_zombie(struct xnsched *sched)
-{
-       if (sched->zombie)
-               __xnsched_finalize_zombie(sched);
-}
-
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
diff --git a/kernel/cobalt/lock.c b/kernel/cobalt/lock.c
index bc9077e..71e8860 100644
--- a/kernel/cobalt/lock.c
+++ b/kernel/cobalt/lock.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(__xnlock_spin);
 
 #if XENO_DEBUG(XNLOCK)
 DEFINE_PER_CPU(struct xnlockinfo, xnlock_stats);
-EXPORT_PER_CPU_SYMBOL(xnlock_stats);
+EXPORT_PER_CPU_SYMBOL_GPL(xnlock_stats);
 #endif
 
 /*@}*/
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 0b35cb9..89ba956 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -111,7 +111,7 @@ static inline void __xnpod_switch_fpu(struct xnsched *sched)
 /* xnpod_switch_fpu() -- Switches to the current thread's FPU context,
    saving the previous one as needed. */
 
-void xnpod_switch_fpu(xnsched_t *sched)
+void xnpod_switch_fpu(struct xnsched *sched)
 {
        __xnpod_switch_fpu(sched);
 }
@@ -1511,7 +1511,7 @@ int xnpod_migrate_thread(int cpu)
 }
 EXPORT_SYMBOL_GPL(xnpod_migrate_thread);
 
-static inline void xnpod_switch_to(xnsched_t *sched,
+static inline void xnpod_switch_to(struct xnsched *sched,
                                   xnthread_t *prev, xnthread_t *next)
 {
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
@@ -1637,8 +1637,8 @@ static inline void leave_root(struct xnthread *root)
 
 void __xnpod_schedule(struct xnsched *sched)
 {
-       int zombie, switched, need_resched, shadow;
        struct xnthread *prev, *next, *curr;
+       int switched, need_resched, shadow;
        spl_t s;
 
        if (xnarch_escalate())
@@ -1657,8 +1657,6 @@ reschedule:
        if (!need_resched)
                goto signal_unlock_and_exit;
 #endif /* !XENO_DEBUG(NUCLEUS) */
-       zombie = xnthread_test_state(curr, XNZOMBIE);
-
        next = xnsched_pick_next(sched);
        if (next == curr) {
                if (unlikely(xnthread_test_state(next, XNROOT))) {
@@ -1685,9 +1683,6 @@ reschedule:
                xnfreesync();
        }
 
-       if (zombie)
-               xnsched_zombie_hooks(prev);
-
        sched->curr = next;
        shadow = 1;
 
@@ -1728,15 +1723,8 @@ reschedule:
         * because of relaxed/hardened transitions.
         */
        curr = sched->curr;
-       xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
-
-       if (zombie)
-               xnpod_fatal("zombie thread %s (%p) would not die...",
-                           prev->name, prev);
-
-       xnsched_finalize_zombie(sched);
-
        __xnpod_switch_fpu(sched);
+       xntrace_pid(xnthread_host_pid(curr), xnthread_current_priority(curr));
 
 signal_unlock_and_exit:
 
@@ -1777,7 +1765,7 @@ shadow_epilogue:
 }
 EXPORT_SYMBOL_GPL(__xnpod_schedule);
 
-void ___xnpod_lock_sched(xnsched_t *sched)
+void ___xnpod_lock_sched(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr;
 
@@ -1788,7 +1776,7 @@ void ___xnpod_lock_sched(xnsched_t *sched)
 }
 EXPORT_SYMBOL_GPL(___xnpod_lock_sched);
 
-void ___xnpod_unlock_sched(xnsched_t *sched)
+void ___xnpod_unlock_sched(struct xnsched *sched)
 {
        struct xnthread *curr = sched->curr;
        XENO_ASSERT(NUCLEUS, xnthread_lock_count(curr) > 0,
@@ -1913,7 +1901,7 @@ EXPORT_SYMBOL_GPL(xnpod_handle_exception);
 int xnpod_enable_timesource(void)
 {
        int err, htickval, cpu;
-       xnsched_t *sched;
+       struct xnsched *sched;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index 342fd00..b0f643a 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -31,7 +31,7 @@
 #include <asm/xenomai/thread.h>
 
 DEFINE_PER_CPU(struct xnsched, nksched);
-EXPORT_PER_CPU_SYMBOL(nksched);
+EXPORT_PER_CPU_SYMBOL_GPL(nksched);
 
 static struct xnsched_class *xnsched_class_highest;
 
@@ -151,7 +151,6 @@ void xnsched_init(struct xnsched *sched, int cpu)
        xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
        xntimer_set_name(&sched->htimer, htimer_name);
        xntimer_set_sched(&sched->htimer, sched);
-       sched->zombie = NULL;
 
        attr.flags = XNROOT | XNFPU;
        attr.name = root_name;
@@ -241,33 +240,6 @@ struct xnthread *xnsched_pick_next(struct xnsched *sched)
 #endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
 }
 
-/* Must be called with nklock locked, interrupts off. */
-void xnsched_zombie_hooks(struct xnthread *thread)
-{
-       XENO_BUGON(NUCLEUS, thread->sched->zombie != NULL);
-
-       thread->sched->zombie = thread;
-
-       trace_mark(xn_nucleus, sched_finalize,
-                  "thread_out %p thread_out_name %s",
-                  thread, xnthread_name(thread));
-
-       xnsched_forget(thread);
-       xnshadow_unmap(thread);
-}
-
-void __xnsched_finalize_zombie(struct xnsched *sched)
-{
-       struct xnthread *thread = sched->zombie;
-
-       xnthread_cleanup(thread);
-
-       if (xnthread_test_state(sched->curr, XNROOT))
-               xnfreesync();
-
-       sched->zombie = NULL;
-}
-
 #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
 
 struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
@@ -291,20 +263,6 @@ struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
                xnthread_clear_state(last, XNMIGRATE);
        }
 
-       if (xnthread_test_state(last, XNZOMBIE)) {
-               /*
-                * There are two cases where sched->last has the zombie
-                * bit:
-                * - either it had it before the context switch, the
-                * cleanup has be done and sched->zombie is last;
-                * - or it has been killed while the nklocked was unlocked
-                * during the context switch, in which case we must run the
-                * cleanup code, and we do it now.
-                */
-               if (sched->zombie != last)
-                       xnsched_zombie_hooks(last);
-       }
-
        return sched;
 }
 
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 9efd929..06512e6 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -448,7 +448,6 @@ int xnshadow_harden(void)
 
        /* "current" is now running into the Xenomai domain. */
        sched = xnsched_finish_unlocked_switch(thread->sched);
-       xnsched_finalize_zombie(sched);
        xnpod_switch_fpu(sched);
 
        xnlock_clear_irqon(&nklock);

