Module: xenomai-jki
Branch: for-forge
Commit: bf1b75b6d9633622393526cf1ae4853416a8cdd9
URL:    http://git.xenomai.org/?p=xenomai-jki.git;a=commit;h=bf1b75b6d9633622393526cf1ae4853416a8cdd9

Author: Jan Kiszka <jan.kis...@siemens.com>
Date:   Tue Mar  8 18:22:51 2016 +0100

cobalt/kernel: Introduce lazy Linux scheduling parameter synchronization

We currently require userspace to trigger the synchronization of a
real-time thread's scheduling parameters with Linux by calling
corresponding libc functions. Those usually trigger a migration to
secondary mode. This can be undesirable, specifically for workloads
that make heavy use of manual priority tunings from within real-time
threads.
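
For illustration only (not part of the patch), a minimal userspace
sketch of the pattern this addresses, assuming a Cobalt/POSIX build;
the worker function, priorities and thread setup are made-up values:

    #include <pthread.h>
    #include <sched.h>

    /* Hypothetical real-time worker: retunes its own priority. */
    static void *rt_worker(void *arg)
    {
            struct sched_param sp = { .sched_priority = 80 };

            /* ... time-critical work at the initial priority ... */

            /*
             * Without kernel-side synchronization, this call also has
             * to update the Linux view of the thread, which usually
             * relaxes the caller to secondary mode.
             */
            pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);

            /* ... continue time-critical work at the new priority ... */
            return NULL;
    }

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 70 };
            pthread_attr_t attr;
            pthread_t tid;

            pthread_attr_init(&attr);
            pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
            pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
            pthread_attr_setschedparam(&attr, &sp);

            pthread_create(&tid, &attr, rt_worker, NULL);
            pthread_join(tid, NULL);
            return 0;
    }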

To avoid the mode switch, this patch makes the kernel initiate the
parameter synchronization. As we don't want to do this while the caller
of setschedparam & Co. is in real-time mode, the actual call to Linux'
sched_setscheduler is delegated to helper kernel tasks, one per CPU.
When a thread receives a scheduling parameter update, it is queued
to a per-helper list. The helper task is triggered from primary mode,
but it will only run once we next switch back to Linux on that CPU. It
then dequeues all entries from its work list and applies the scheduling
parameters on the Linux side as well, so the threads' Linux mates run
with the right tunings.
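
As a rough way to observe the effect, a hedged sketch (again not part
of the patch, meant to be called from a non-time-critical context) that
dumps the Linux scheduler's view of the caller; assuming
sched_getscheduler()/sched_getparam() are served by the regular Linux
syscalls, their output should match the Cobalt-side settings once the
helper has run:

    #include <sched.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical check: dump the Linux-side policy/priority of the caller. */
    static void dump_linux_schedparams(void)
    {
            pid_t tid = syscall(SYS_gettid);
            struct sched_param sp;
            int policy;

            policy = sched_getscheduler(tid);
            sched_getparam(tid, &sp);

            printf("tid %d: linux policy=%d prio=%d\n",
                   (int)tid, policy, sp.sched_priority);
    }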

In order to avoid most cases where the Linux parameters could be applied
too late, the helper tasks run at the highest priority. One theoretical
case remains: if a thread drops from that very same highest priority
level, the helper may not be executed first, so the Linux priority
drop may only be applied once the target thread is preempted or
suspended. In practice, however, the helper task is woken up first and
only afterwards the Linux mate, so the Linux scheduler is expected to
run the helper first.

Signed-off-by: Jan Kiszka <jan.kis...@siemens.com>

---

 include/cobalt/kernel/sched.h  |    6 +++
 include/cobalt/kernel/thread.h |    4 ++
 kernel/cobalt/init.c           |    9 ++++
 kernel/cobalt/sched.c          |   96 ++++++++++++++++++++++++++++++++++++++++
 kernel/cobalt/thread.c         |   41 +++++++++++++++++
 5 files changed, 156 insertions(+)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index d5d93c2..a2f7b8c 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -93,6 +93,10 @@ struct xnsched {
        struct xntimer rrbtimer;
        /*!< Root thread control block. */
        struct xnthread rootcb;
+       /*!< Worker kthread to apply Linux scheduling parameters. */
+       struct task_struct *setsched_worker;
+       /*!< List of pending Linux scheduling parameter updates. */
+       struct list_head setsched_list;
 #ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
        struct xnthread *last;
 #endif
@@ -380,6 +384,8 @@ void xnsched_cleanup_proc(void);
 
 void xnsched_register_classes(void);
 
+int xnsched_init_setsched(struct xnsched *sched);
+
 void xnsched_init(struct xnsched *sched, int cpu);
 
 void xnsched_destroy(struct xnsched *sched);
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index b45de8e..4ffc632 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -188,6 +188,8 @@ struct xnthread {
        const char *exe_path;   /* Executable path */
        u32 proghash;           /* Hash value for exe_path */
 #endif
+
+       struct list_head setsched_link;
 };
 
 static inline int xnthread_get_state(const struct xnthread *thread)
@@ -541,6 +543,8 @@ static inline void xnthread_migrate_passive(struct xnthread *thread,
 
 #endif
 
+void __xnthread_post_setsched_root(struct xnthread *thread);
+
 int __xnthread_set_schedparam(struct xnthread *thread,
                              struct xnsched_class *sched_class,
                              const union xnsched_policy_param *sched_param);
diff --git a/kernel/cobalt/init.c b/kernel/cobalt/init.c
index 5a6fdbf..dc484a4 100644
--- a/kernel/cobalt/init.c
+++ b/kernel/cobalt/init.c
@@ -309,6 +309,15 @@ static __init int sys_init(void)
 
        xnregistry_init();
 
+       for_each_online_cpu(cpu) {
+               sched = &per_cpu(nksched, cpu);
+               ret = xnsched_init_setsched(sched);
+               if (ret) {
+                       sys_shutdown();
+                       return ret;
+               }
+       }
+
        /*
         * If starting in stopped mode, do all initializations, but do
         * not enable the core timer.
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index d124f79..f13fd83 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -16,6 +16,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  * 02111-1307, USA.
  */
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/wait.h>
@@ -143,6 +144,84 @@ static void watchdog_handler(struct xntimer *timer)
 
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
 
+static int do_setsched_work(void *data)
+{
+       struct xnsched *sched = data;
+       struct sched_param param;
+       struct xnthread *thread;
+       struct task_struct *p;
+       int policy, prio, ret;
+       spl_t s;
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               xnlock_get_irqsave(&nklock, s);
+
+               while (!list_empty(&sched->setsched_list)) {
+                       thread = list_first_entry(&sched->setsched_list,
+                                                 struct xnthread,
+                                                 setsched_link);
+
+                       if (thread->sched_class == &xnsched_class_rt) {
+                               policy = xnthread_test_state(thread, XNRRB) ?
+                                       SCHED_RR : SCHED_FIFO;
+                               prio = normalize_priority(thread->bprio);
+                       } else if (thread->sched_class == &xnsched_class_weak) {
+                               policy = SCHED_NORMAL;
+                               prio = 0;
+                       } else {
+                               policy = SCHED_FIFO;
+                               prio = normalize_priority(thread->bprio);
+                       }
+
+                       list_del_init(&thread->setsched_link);
+
+                       p = xnthread_host_task(thread);
+                       get_task_struct(p);
+
+                       xnlock_put_irqrestore(&nklock, s);
+
+                       param.sched_priority = prio;
+                       ret = sched_setscheduler(p, policy, &param);
+                       if (ret)
+                               printk(XENO_WARNING
+                                      "failed to adjust scheduling parameters "
+                                      "for task %u\n", p->pid);
+
+                       put_task_struct(p);
+
+                       xnlock_get_irqsave(&nklock, s);
+               }
+
+               xnlock_put_irqrestore(&nklock, s);
+
+               schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
+int xnsched_init_setsched(struct xnsched *sched)
+{
+       struct sched_param kthread_param = {.sched_priority = 99};
+       struct task_struct *kthread;
+
+       kthread = kthread_create_on_cpu(do_setsched_work, sched,
+                                       xnsched_cpu(sched), "xnsetsched/%u");
+       if (IS_ERR(kthread))
+               return PTR_ERR(kthread);
+
+       sched_setscheduler_nocheck(kthread, SCHED_FIFO, &kthread_param);
+       sched->setsched_worker = kthread;
+       INIT_LIST_HEAD(&sched->setsched_list);
+
+       kthread_unpark(kthread);
+
+       return 0;
+}
+
 static void roundrobin_handler(struct xntimer *timer)
 {
        struct xnsched *sched = container_of(timer, struct xnsched, rrbtimer);
@@ -188,6 +267,8 @@ void xnsched_init(struct xnsched *sched, int cpu)
        __xnthread_init(&sched->rootcb, &attr,
                        sched, &xnsched_class_idle, &param);
 
+       sched->setsched_worker = NULL;
+
        /*
         * No direct handler here since the host timer processing is
         * postponed to xnintr_irq_handler(), as part of the interrupt
@@ -228,6 +309,8 @@ void xnsched_destroy(struct xnsched *sched)
 #ifdef CONFIG_XENO_OPT_WATCHDOG
        xntimer_destroy(&sched->wdtimer);
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
+       if (sched->setsched_worker)
+               kthread_stop(sched->setsched_worker);
 }
 
 static inline void set_thread_running(struct xnsched *sched,
@@ -483,6 +566,19 @@ static void migrate_thread(struct xnthread *thread, struct xnsched *sched)
         * result of calling the per-class migration hook.
         */
        thread->sched = sched;
+
+       /*
+        * Also migrate the thread's setsched work and inform
+        * the responsible worker. This avoids delays in applying
+        * the Linux scheduling parameters when the thread relaxes
+        * on the new CPU.
+        */
+       if (!list_empty(&thread->setsched_link)) {
+               list_del_init(&thread->setsched_link);
+               list_add_tail(&thread->setsched_link,
+                             &thread->sched->setsched_list);
+               __xnthread_post_setsched_root(thread);
+       }
 }
 
 /*
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 7983165..cd3832d 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -200,6 +200,7 @@ int __xnthread_init(struct xnthread *thread,
        thread->entry = NULL;
        thread->cookie = NULL;
        init_completion(&thread->exited);
+       INIT_LIST_HEAD(&thread->setsched_link);
 
        gravity = flags & XNUSER ? XNTIMER_UGRAVITY : XNTIMER_KGRAVITY;
        if (flags & XNROOT)
@@ -1915,6 +1916,40 @@ int xnthread_set_schedparam(struct xnthread *thread,
 }
 EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
 
+struct lostage_setsched {
+       struct ipipe_work_header work; /* Must be first. */
+       struct task_struct *worker;
+};
+
+static void lostage_task_setsched(struct ipipe_work_header *work)
+{
+       struct lostage_setsched *rq;
+       struct task_struct *p;
+
+       rq = container_of(work, struct lostage_setsched, work);
+       p = rq->worker;
+
+       trace_cobalt_lostage_wakeup(p);
+
+       wake_up_process(p);
+}
+
+void __xnthread_post_setsched_root(struct xnthread *thread)
+{
+       struct lostage_setsched setschedwork = {
+               .work = {
+                       .size = sizeof(setschedwork),
+                       .handler = lostage_task_setsched,
+               },
+               .worker = thread->sched->setsched_worker,
+       };
+
+       trace_cobalt_lostage_request("setsched", xnthread_host_task(thread));
+
+       ipipe_post_work_root(&setschedwork, work);
+}
+
+
 int __xnthread_set_schedparam(struct xnthread *thread,
                              struct xnsched_class *sched_class,
                              const union xnsched_policy_param *sched_param)
@@ -1967,6 +2002,12 @@ int __xnthread_set_schedparam(struct xnthread *thread,
            thread->lock_count == 0)
                xnsched_putback(thread);
 
+       if (list_empty(&thread->setsched_link)) {
+               list_add_tail(&thread->setsched_link,
+                             &thread->sched->setsched_list);
+               __xnthread_post_setsched_root(thread);
+       }
+
        return ret;
 }
 


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
https://xenomai.org/mailman/listinfo/xenomai-git
