To move these calls from dl_overflow() to deadline.c, we must change
the meaning of the third parameter of prio_changed_dl(). Instead of
passing the "old priority" (which, for SCHED_DEADLINE, is always equal
to the current one), we pass the old utilization (dl_bw). An
alternative approach would be to change the prototype of the
"prio_changed" method of the scheduling class.
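
For illustration only, here is a minimal stand-alone C sketch (not
kernel code; all names such as demo_task and demo_class_* are made up)
of the two options above: reusing the existing third parameter to carry
the old bandwidth for deadline tasks, versus widening the prototype so
the old priority and the old bandwidth are passed separately.

#include <stdio.h>

struct demo_task {
	int prio;              /* stand-in for p->prio */
	unsigned long dl_bw;   /* stand-in for p->dl.dl_bw */
};

/* Option 1 (what this patch does): keep the int parameter and let it
 * carry the old bandwidth when the task is SCHED_DEADLINE. */
struct demo_class_v1 {
	void (*prio_changed)(struct demo_task *p, int old);
};

/* Option 2 (the alternative mentioned above): change the prototype so
 * both the old priority and the old bandwidth are explicit. */
struct demo_class_v2 {
	void (*prio_changed)(struct demo_task *p, int oldprio,
			     unsigned long oldbw);
};

static void prio_changed_v1(struct demo_task *p, int old)
{
	/* The callee must know that "old" is a bandwidth here, not a prio. */
	printf("v1: old bw %d -> new bw %lu\n", old, p->dl_bw);
}

static void prio_changed_v2(struct demo_task *p, int oldprio,
			    unsigned long oldbw)
{
	printf("v2: oldprio %d, old bw %lu -> new bw %lu\n",
	       oldprio, oldbw, p->dl_bw);
}

int main(void)
{
	struct demo_task t = { .prio = -1, .dl_bw = 500000 };
	struct demo_class_v1 c1 = { .prio_changed = prio_changed_v1 };
	struct demo_class_v2 c2 = { .prio_changed = prio_changed_v2 };

	c1.prio_changed(&t, 400000);     /* bandwidth overloaded into "old" */
	c2.prio_changed(&t, -1, 400000); /* both values passed explicitly */
	return 0;
}

The patch below takes the first route, presumably to avoid touching
every scheduling class, at the cost of overloading the meaning of the
argument for SCHED_DEADLINE.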
---
 kernel/sched/core.c     | 10 ++++++----
 kernel/sched/deadline.c | 10 +++++++---
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a4f08d1..5dc12db 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2449,9 +2449,7 @@ static int dl_overflow(struct task_struct *p, int policy,
        } else if (dl_policy(policy) && task_has_dl_policy(p) &&
                   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
                __dl_clear(dl_b, p->dl.dl_bw);
-               __dl_sub_ac(task_rq(p), p->dl.dl_bw);
                __dl_add(dl_b, new_bw);
-               __dl_add_ac(task_rq(p), new_bw);
                err = 0;
        } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
                __dl_clear(dl_b, p->dl.dl_bw);
@@ -3522,6 +3520,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                } else
                        p->dl.dl_boosted = 0;
                p->sched_class = &dl_sched_class;
+               oldprio = 0;
        } else if (rt_prio(prio)) {
                if (dl_prio(oldprio))
                        p->dl.dl_boosted = 0;
@@ -3891,7 +3890,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                      MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, queued, running;
+       int retval, oldprio, oldbw, oldpolicy = -1, queued, running;
        int new_effective_prio, policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
@@ -4069,6 +4068,7 @@ change:
 
        p->sched_reset_on_fork = reset_on_fork;
        oldprio = p->prio;
+       oldbw = p->dl.dl_bw;
 
        if (pi) {
                /*
@@ -4081,6 +4081,8 @@ change:
                new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
                if (new_effective_prio == oldprio) {
                        __setscheduler_params(p, attr);
+                       if (p->sched_class == &dl_sched_class)
+                               p->sched_class->prio_changed(rq, p, oldbw);
                        task_rq_unlock(rq, p, &flags);
                        return 0;
                }
@@ -4110,7 +4112,7 @@ change:
                enqueue_task(rq, p, enqueue_flags);
        }
 
-       check_class_changed(rq, p, prev_class, oldprio);
+       check_class_changed(rq, p, prev_class, ((prev_class == &dl_sched_class) && (p->sched_class == &dl_sched_class)) ? oldbw : oldprio);
        preempt_disable(); /* avoid rq from going away on us */
        task_rq_unlock(rq, p, &flags);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4cc713a..959e7b7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1757,8 +1757,13 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
  * a push or pull operation might be needed.
  */
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
-                           int oldprio)
+                           int oldbw)
 {
+       if (oldbw) {
+               __dl_sub_ac(rq, oldbw);
+               __dl_add_ac(rq, p->dl.dl_bw);
+       }
+
        if (task_on_rq_queued(p) || rq->curr == p) {
 #ifdef CONFIG_SMP
                /*
@@ -1785,8 +1790,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                 */
                resched_curr(rq);
 #endif /* CONFIG_SMP */
-       } else
-               switched_to_dl(rq, p);
+       }
 }
 
 const struct sched_class dl_sched_class = {
-- 
2.5.0
