Module: xenomai-forge
Branch: master
Commit: 9b6513daf5fae678be8fb347c8c25a490e7210d9
URL: http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=9b6513daf5fae678be8fb347c8c25a490e7210d9

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Dec 20 09:36:45 2013 +0100

cobalt/thread: allow changing thread priority from secondary mode

The former restriction limiting calls to xnthread_set_schedparam()
to primary mode has been lifted.

In addition, we introduce the inner __xnthread_set_schedparam()
routine for callers already running in atomic context.
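
For illustration, here is a minimal sketch of the intended calling
convention. The callers set_prio() and set_prio_atomic() are
hypothetical, not part of this patch; everything else (the two
routines, the rt class, the nklock) appears in the diff below:

    /*
     * Regular context: the wrapper acquires the nklock itself,
     * and may now be invoked from secondary mode as well.
     */
    static int set_prio(struct xnthread *thread,
                        const union xnsched_policy_param *p)
    {
            return xnthread_set_schedparam(thread,
                                           &xnsched_class_rt, p);
    }

    /*
     * Atomic context: the caller already holds the nklock with
     * IRQs off, so it must use the inner routine instead.
     */
    static int set_prio_atomic(struct xnthread *thread,
                               const union xnsched_policy_param *p)
    {       /* nklock locked, irqs off */
            return __xnthread_set_schedparam(thread,
                                             &xnsched_class_rt, p);
    }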

---

 include/cobalt/kernel/thread.h |    4 +++
 kernel/cobalt/sched-sporadic.c |    4 +--
 kernel/cobalt/shadow.c         |    2 +-
 kernel/cobalt/thread.c         |   69 +++++++++++++++++++++-------------------
 4 files changed, 43 insertions(+), 36 deletions(-)

diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index aa2fd46..4223ddc 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -437,6 +437,10 @@ static inline void xnthread_migrate_passive(struct xnthread *thread,
 
 #endif
 
+int __xnthread_set_schedparam(struct xnthread *thread,
+                             struct xnsched_class *sched_class,
+                             const union xnsched_policy_param *sched_param);
+
 int xnthread_set_schedparam(struct xnthread *thread,
                            struct xnsched_class *sched_class,
                            const union xnsched_policy_param *sched_param);
diff --git a/kernel/cobalt/sched-sporadic.c b/kernel/cobalt/sched-sporadic.c
index 44438ab..8f9d3b1 100644
--- a/kernel/cobalt/sched-sporadic.c
+++ b/kernel/cobalt/sched-sporadic.c
@@ -86,7 +86,7 @@ static void sporadic_drop_handler(struct xntimer *timer)
                        p.pss.init_budget = 0;
                        p.pss.current_prio = pss->param.low_prio;
                        /* Move sporadic thread to the background. */
-                       xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+                       __xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
                }
        }
 }
@@ -161,7 +161,7 @@ retry:
                p.pss.init_budget = 0;
                p.pss.current_prio = pss->param.normal_prio;
                /* Move sporadic thread to the foreground. */
-               xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+               __xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
        }
 
        /*
diff --git a/kernel/cobalt/shadow.c b/kernel/cobalt/shadow.c
index 148bdc6..6105622 100644
--- a/kernel/cobalt/shadow.c
+++ b/kernel/cobalt/shadow.c
@@ -818,7 +818,7 @@ void __xnshadow_demote(struct xnthread *thread) /* nklock locked, irqs off */
        param.rt.prio = 0;
        sched_class = &xnsched_class_rt;
 #endif
-       xnthread_set_schedparam(thread, sched_class, &param);
+       __xnthread_set_schedparam(thread, sched_class, &param);
 }
 
 void xnshadow_demote(struct xnthread *thread)
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 00d6c41..d05482f 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -1780,11 +1780,7 @@ void xnthread_migrate_passive(struct xnthread *thread, struct xnsched *sched)
  * or ready thread moves it to the end of the runnable queue, thus
  * causing a manual round-robin.
  *
- * Environments:
- *
- * This service can be called from:
- *
- * - Primary mode only.
+ * Calling context: any.
  *
  * Rescheduling: never.
  *
@@ -1797,18 +1793,43 @@ int xnthread_set_schedparam(struct xnthread *thread,
                            struct xnsched_class *sched_class,
                            const union xnsched_policy_param *sched_param)
 {
-       int old_wprio, new_wprio, ret;
        spl_t s;
-
-       primary_mode_only();
+       int ret;
 
        xnlock_get_irqsave(&nklock, s);
+       ret = __xnthread_set_schedparam(thread, sched_class, sched_param);
+       xnlock_put_irqrestore(&nklock, s);
 
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+                             struct xnsched_class *sched_class,
+                             const union xnsched_policy_param *sched_param)
+{
+       int old_wprio, new_wprio, ret;
+
+       /*
+        * NOTE: we do not prevent the caller from altering the
+        * scheduling parameters of a thread that currently undergoes
+        * a PIP boost.
+        *
+        * Rationale: Calling xnthread_set_schedparam() carelessly
+        * with no consideration for resource management is a bug in
+        * essence, and xnthread_set_schedparam() does not have to
+        * paper over it, especially at the cost of more complexity
+        * when dealing with multiple scheduling classes.
+        *
+        * In short, callers have to make sure that lowering a thread
+        * priority is safe with respect to what their application
+        * currently does.
+        */
        old_wprio = thread->wprio;
 
        ret = xnsched_set_policy(thread, sched_class, sched_param);
        if (ret)
-               goto unlock_and_exit;
+               return ret;
 
        new_wprio = thread->wprio;
 
@@ -1816,29 +1837,16 @@ int xnthread_set_schedparam(struct xnthread *thread,
                   "thread %p thread_name %s class %s prio %d",
                   thread, xnthread_name(thread),
                   thread->sched_class->name, thread->cprio);
+
        /*
-        * NOTE: The behaviour changed compared to v2.4.x: we do not
-        * prevent the caller from altering the scheduling parameters
-        * of a thread that currently undergoes a PIP boost
-        * anymore. Rationale: Calling xnthread_set_schedparam()
-        * carelessly with no consideration for resource management is
-        * a bug in essence, and xnthread_set_schedparam() does not
-        * have to paper over it, especially at the cost of more
-        * complexity when dealing with multiple scheduling classes.
-        * In short, callers have to make sure that lowering a thread
-        * priority is safe with respect to what their application
-        * currently does.
+        * Update the pending order of the thread inside its wait
+        * queue, unless this behaviour has been explicitly disabled
+        * for the pended synchronization object, or the requested
+        * (weighted) priority has not changed, thus preventing
+        * spurious round-robin effects.
         */
        if (old_wprio != new_wprio && thread->wchan != NULL &&
            (thread->wchan->status & XNSYNCH_DREORD) == 0)
-               /*
-                * Update the pending order of the thread inside its
-                * wait queue, unless this behaviour has been
-                * explicitly disabled for the pended synchronization
-                * object, or the requested (weighted) priority has
-                * not changed, thus preventing spurious round-robin
-                * effects.
-                */
                xnsynch_requeue_sleeper(thread);
        /*
         * We don't need/want to move the thread at the end of its
@@ -1852,13 +1860,8 @@ int xnthread_set_schedparam(struct xnthread *thread,
        if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY|XNLOCK))
                xnsched_putback(thread);
 
-unlock_and_exit:
-
-       xnlock_put_irqrestore(&nklock, s);
-
        return ret;
 }
-EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
 
 void __xnthread_test_cancel(struct xnthread *curr)
 {

