Module: xenomai-forge
Branch: next
Commit: e2fb315b4bb77307fd90b9ff48d1b813195ec577
URL: http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=e2fb315b4bb77307fd90b9ff48d1b813195ec577

Author: Philippe Gerum <r...@xenomai.org>
Date:   Tue Dec 17 19:07:57 2013 +0100

cobalt/sched: introduce sched_kick() handler for policy modules

Scheduling policies may prevent threads from running based on some
policy-specific state, by not returning them from the sched_pick()
handler, without reflecting that state in any block bit. Those
threads remain in the ready state from the scheduler core's point of
view, although they won't be elected to run.

This behavior is typical of policies enforcing a runtime budget, for
instance.
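
For illustration only (this sketch is not part of the commit, and the
example_* names are hypothetical), such a policy's ->sched_pick()
handler might simply never return a thread whose budget is exhausted,
because that thread was unlinked from the policy's private runnable
queue beforehand:

static struct xnthread *example_budget_pick(struct xnsched *sched)
{
	/*
	 * example_budget_dequeue() is a hypothetical helper
	 * returning the head of the policy's private runnable
	 * queue. A budget-exhausted thread is not linked to that
	 * queue, so it is never returned here, although the core
	 * still sees it as XNREADY.
	 */
	return example_budget_dequeue(sched);
}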

However, we also have to honor requests to move a thread out of
primary mode (i.e. shadow kicking). Therefore, we need a way to tell
the policy module to release such a thread temporarily, until it
eventually relaxes (e.g. sigwake -> kick -> relax).

To this end, a sched_kick() handler can be defined by policy modules
to deal with this specific case. It is called for kicked threads
bearing the XNREADY bit, once all block bits have been lifted.
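
Along the same lines, a hedged sketch of what such a policy's
->sched_kick() handler could do (again using hypothetical example_*
helpers): link the kicked thread back to the runnable queue so that
->sched_pick() may elect it until it relaxes. The handler would then
be wired through the .sched_kick member of the policy's struct
xnsched_class, as the diff below does for the built-in classes.

static void example_budget_kick(struct xnthread *thread)
{
	/*
	 * The core wants this kicked, XNREADY thread to run despite
	 * its exhausted budget, so that it can reach the relax path:
	 * re-queue it temporarily. example_budget_queued() and
	 * example_budget_enqueue() are hypothetical helpers.
	 */
	if (!example_budget_queued(thread))
		example_budget_enqueue(thread->sched, thread);
}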

---

 include/cobalt/kernel/sched.h  |   19 +++++++++++++++++++
 kernel/cobalt/sched-idle.c     |    1 +
 kernel/cobalt/sched-rt.c       |    1 +
 kernel/cobalt/sched-sporadic.c |    1 +
 kernel/cobalt/sched-tp.c       |    6 ++++--
 kernel/cobalt/sched-weak.c     |    1 +
 kernel/cobalt/shadow.c         |   34 +++++++++++++++++++++-------------
 7 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index addfd0c..d32a404 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -147,6 +147,7 @@ struct xnsched_class {
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
+       void (*sched_kick)(struct xnthread *thread);
 #ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                                struct xnvfile_directory *vfroot);
@@ -581,6 +582,18 @@ static inline void xnsched_forget(struct xnthread *thread)
                sched_class->sched_forget(thread);
 }
 
+static inline void xnsched_kick(struct xnthread *thread)
+{
+       struct xnsched_class *sched_class = thread->base_class;
+
+       xnthread_set_info(thread, XNKICKED);
+
+       if (sched_class->sched_kick)
+               sched_class->sched_kick(thread);
+
+       xnsched_set_resched(thread->sched);
+}
+
 #else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
 
 /*
@@ -655,6 +668,12 @@ static inline void xnsched_forget(struct xnthread *thread)
        __xnsched_rt_forget(thread);
 }
 
+static inline void xnsched_kick(struct xnthread *thread)
+{
+       xnthread_set_info(thread, XNKICKED);
+       xnsched_set_resched(thread->sched);
+}
+
 #endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
 
 /*@}*/
diff --git a/kernel/cobalt/sched-idle.c b/kernel/cobalt/sched-idle.c
index 51ce963..5127798 100644
--- a/kernel/cobalt/sched-idle.c
+++ b/kernel/cobalt/sched-idle.c
@@ -49,6 +49,7 @@ struct xnsched_class xnsched_class_idle = {
        .sched_tick             =       NULL,
        .sched_rotate           =       NULL,
        .sched_forget           =       NULL,
+       .sched_kick             =       NULL,
        .sched_declare          =       NULL,
        .sched_pick             =       xnsched_idle_pick,
        .sched_setparam         =       xnsched_idle_setparam,
diff --git a/kernel/cobalt/sched-rt.c b/kernel/cobalt/sched-rt.c
index c92b0a0..c5001fc 100644
--- a/kernel/cobalt/sched-rt.c
+++ b/kernel/cobalt/sched-rt.c
@@ -244,6 +244,7 @@ struct xnsched_class xnsched_class_rt = {
        .sched_tick             =       xnsched_rt_tick,
        .sched_rotate           =       xnsched_rt_rotate,
        .sched_forget           =       NULL,
+       .sched_kick             =       NULL,
        .sched_declare          =       NULL,
        .sched_setparam         =       xnsched_rt_setparam,
        .sched_trackprio        =       xnsched_rt_trackprio,
diff --git a/kernel/cobalt/sched-sporadic.c b/kernel/cobalt/sched-sporadic.c
index 44438ab..d3dae39 100644
--- a/kernel/cobalt/sched-sporadic.c
+++ b/kernel/cobalt/sched-sporadic.c
@@ -536,6 +536,7 @@ struct xnsched_class xnsched_class_sporadic = {
        .sched_trackprio        =       xnsched_sporadic_trackprio,
        .sched_declare          =       xnsched_sporadic_declare,
        .sched_forget           =       xnsched_sporadic_forget,
+       .sched_kick             =       NULL,
 #ifdef CONFIG_XENO_OPT_VFILE
        .sched_init_vfile       =       xnsched_sporadic_init_vfile,
        .sched_cleanup_vfile    =       xnsched_sporadic_cleanup_vfile,
diff --git a/kernel/cobalt/sched-tp.c b/kernel/cobalt/sched-tp.c
index 03c6d67..bf30c7a 100644
--- a/kernel/cobalt/sched-tp.c
+++ b/kernel/cobalt/sched-tp.c
@@ -88,8 +88,9 @@ static void xnsched_tp_init(struct xnsched *sched)
        int n;
 
        /*
-        * Build the runqueues. Thread priorities for the TP policy
-        * are valid RT priorities. TP is actually a subset of RT.
+        * Build the runqueues.
+        * CAUTION: we may inherit RT priority during PIP boost, so we
+        * need as many levels as SCHED_RT defines.
         */
        for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
                xnsched_initq(&tp->partitions[n].runnable,
@@ -401,6 +402,7 @@ struct xnsched_class xnsched_class_tp = {
        .sched_trackprio        =       xnsched_tp_trackprio,
        .sched_declare          =       xnsched_tp_declare,
        .sched_forget           =       xnsched_tp_forget,
+       .sched_kick             =       NULL,
 #ifdef CONFIG_XENO_OPT_VFILE
        .sched_init_vfile       =       xnsched_tp_init_vfile,
        .sched_cleanup_vfile    =       xnsched_tp_cleanup_vfile,
diff --git a/kernel/cobalt/sched-weak.c b/kernel/cobalt/sched-weak.c
index 72b22a3..e55a25b 100644
--- a/kernel/cobalt/sched-weak.c
+++ b/kernel/cobalt/sched-weak.c
@@ -188,6 +188,7 @@ struct xnsched_class xnsched_class_weak = {
        .sched_tick             =       NULL,
        .sched_rotate           =       NULL,
        .sched_forget           =       NULL,
+       .sched_kick             =       NULL,
        .sched_declare          =       NULL,
        .sched_setparam         =       xnsched_weak_setparam,
        .sched_trackprio        =       xnsched_weak_trackprio,
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 148bdc6..b0e302e 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -687,19 +687,6 @@ static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
        if (xnthread_test_info(thread, XNKICKED))
                return 1;
 
-       /*
-        * Tricky case: a ready thread does not actually run, but
-        * nevertheless waits for the CPU in primary mode, so we have
-        * to make sure that it will be notified of the pending break
-        * condition as soon as it enters xnthread_suspend() from a
-        * blocking Xenomai syscall.
-        */
-       if (xnthread_test_state(thread, XNREADY)) {
-               xnthread_set_info(thread, XNKICKED);
-               xnsched_set_resched(thread->sched);
-               return 0;
-       }
-
        if (xnthread_unblock(thread)) {
                xnthread_set_info(thread, XNKICKED);
                ret = 1;
@@ -734,6 +721,27 @@ static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
                xnthread_set_info(thread, XNKICKED);
        }
 
+       /*
+        * Tricky cases:
+        *
+        * - a thread which was ready on entry wasn't actually
+        * running, but nevertheless waits for the CPU in primary
+        * mode, so we have to make sure that it will be notified of
+        * the pending break condition as soon as it enters
+        * xnthread_suspend() from a blocking Xenomai syscall.
+        *
+        * - a ready/readied thread on exit may be prevented from
+        * running by the scheduling policy module it belongs
+        * to. Typically, policies enforcing a runtime budget do not
+        * block threads with no budget, but rather keep them out of
+        * their runnable queue, so that ->sched_pick() won't elect
+        * them. We tell the policy handler about the fact that we do
+        * want such thread to run until it relaxes, whatever this
+        * means internally for the implementation.
+        */
+       if (xnthread_test_state(thread, XNREADY))
+               xnsched_kick(thread);
+
        return ret;
 }
 

