The dl_new flag of struct sched_dl_entity only marks entities whose
scheduling parameters have not been set up yet; a check in
switched_to_dl() for a deadline already in the past can be used
instead, so the flag can be removed.
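
As a sketch of the idea (not part of the patch; the helper name is
made up for illustration), the check that replaces the flag boils
down to comparing the absolute deadline against the current time:

	/*
	 * If the absolute deadline is not in the future, the entity
	 * either was never initialised or its old parameters went
	 * stale, and it must be set up as new. A still-valid deadline
	 * is kept as is, so a task cannot gain extra runtime by
	 * bouncing in and out of SCHED_DEADLINE.
	 */
	static inline bool dl_entity_needs_setup(struct sched_dl_entity *dl_se,
						 struct rq *rq)
	{
		return dl_se->deadline <= rq_clock(rq);
	}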
---
 kernel/sched/core.c     |  1 -
 kernel/sched/deadline.c | 31 ++++++++-----------------------
 2 files changed, 8 insertions(+), 24 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5dc12db..4246b1b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2183,7 +2183,6 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_bw = 0;
 
        dl_se->dl_throttled = 0;
-       dl_se->dl_new = 1;
        dl_se->dl_yielded = 0;
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 959e7b7..12cb934 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -355,8 +355,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
-       WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
-
        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
@@ -364,7 +362,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
         */
        dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
        dl_se->runtime = pi_se->dl_runtime;
-       dl_se->dl_new = 0;
 }
 
 /*
@@ -503,15 +500,6 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
-       /*
-        * The arrival of a new instance needs special treatment, i.e.,
-        * the actual scheduling parameters have to be "renewed".
-        */
-       if (dl_se->dl_new) {
-               setup_new_dl_entity(dl_se, pi_se);
-               return;
-       }
-
        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
@@ -608,16 +596,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        }
 
        /*
-        * This is possible if switched_from_dl() raced against a running
-        * callback that took the above !dl_task() path and we've since then
-        * switched back into SCHED_DEADLINE.
-        *
-        * There's nothing to do except drop our task reference.
-        */
-       if (dl_se->dl_new)
-               goto unlock;
-
-       /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
         */
@@ -920,7 +898,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
+       if (flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
        else if (flags & ENQUEUE_REPLENISH)
                replenish_dl_entity(dl_se, pi_se);
@@ -1738,6 +1716,13 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
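+       /*
+        * A deadline not in the future means the entity was never set
+        * up, or its old parameters went stale: initialise it as new.
+        */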
+       if (p->dl.deadline <= rq_clock(rq))
+               setup_new_dl_entity(&p->dl, &p->dl);
+
        __dl_add_ac(rq, p->dl.dl_bw);
        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-- 
2.5.0
