From: Byungchul Park <[email protected]>

This patch removes a weird coupling between se->avg.last_update_time and
the condition checking for migration, and introduces a new migration flag.
Now, the scheduler can use the flag instead of se->avg.last_update_time to
check whether a migration has already happened or not.

Signed-off-by: Byungchul Park <[email protected]>
---
 include/linux/sched.h |    3 +++
 kernel/sched/core.c   |    1 +
 kernel/sched/fair.c   |   22 ++++++++++++----------
 kernel/sched/sched.h  |    1 +
 4 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 699228b..a104c72 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1379,6 +1379,9 @@ struct task_struct {
 #endif
        int on_rq;
 
+       /* For indicating if a migration has happened. */
+       int migrated;
+
        int prio, static_prio, normal_prio;
        unsigned int rt_priority;
        const struct sched_class *sched_class;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a91df61..57f4300 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2068,6 +2068,7 @@ void __dl_clear_params(struct task_struct *p)
 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
        p->on_rq                        = 0;
+       p->migrated                     = 0;
 
        p->se.on_rq                     = 0;
        p->se.exec_start                = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 077076f..0f76903 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2771,14 +2771,15 @@ static void detach_entity_load_avg(struct cfs_rq 
*cfs_rq, struct sched_entity *s
 
 /* Add the load generated by se into cfs_rq's load average */
 static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int 
flags)
 {
        struct sched_avg *sa = &se->avg;
        u64 now = cfs_rq_clock_task(cfs_rq);
-       int migrated, decayed;
+       int decayed;
+       int migrated = flags & ENQUEUE_MIGRATED;
+       int created = !sa->last_update_time;
 
-       migrated = !sa->last_update_time;
-       if (!migrated) {
+       if (!migrated && !created) {
                __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
                        se->on_rq * scale_load_down(se->load.weight),
                        cfs_rq->curr == se, NULL);
@@ -2789,10 +2790,10 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct 
sched_entity *se)
        cfs_rq->runnable_load_avg += sa->load_avg;
        cfs_rq->runnable_load_sum += sa->load_sum;
 
-       if (migrated)
+       if (migrated || created)
                attach_entity_load_avg(cfs_rq, se);
 
-       if (decayed || migrated)
+       if (decayed || migrated || created)
                update_tg_load_avg(cfs_rq, 0);
 }
 
@@ -2868,7 +2869,7 @@ static int idle_balance(struct rq *this_rq);
 
 static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
 static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int 
flags) {}
 static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
@@ -3008,7 +3009,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity 
*se, int flags)
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
-       enqueue_entity_load_avg(cfs_rq, se);
+       enqueue_entity_load_avg(cfs_rq, se, flags);
        account_entity_enqueue(cfs_rq, se);
        update_cfs_shares(cfs_rq);
 
@@ -4136,6 +4137,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, 
int flags)
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
 
+       flags = flags | (xchg(&p->migrated, 0) ? ENQUEUE_MIGRATED : 0);
        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
@@ -5021,7 +5023,7 @@ static void migrate_task_rq_fair(struct task_struct *p, 
int next_cpu)
        remove_entity_load_avg(&p->se);
 
        /* Tell new CPU we are migrated */
-       p->se.avg.last_update_time = 0;
+       p->migrated = 1;
 
        /* We have migrated, no longer consider this task hot */
        p->se.exec_start = 0;
@@ -8082,7 +8084,7 @@ static void task_move_group_fair(struct task_struct *p)
        set_task_rq(p, task_cpu(p));
 
 #ifdef CONFIG_SMP
-       /* Tell se's cfs_rq has been changed -- migrated */
+       /* Tell se's cfs_rq has been changed */
        p->se.avg.last_update_time = 0;
 #endif
        attach_task_cfs_rq(p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index af6f252..66d0552 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1158,6 +1158,7 @@ static const u32 prio_to_wmult[40] = {
 #define ENQUEUE_WAKING         0
 #endif
 #define ENQUEUE_REPLENISH      8
+#define ENQUEUE_MIGRATED       16
 
 #define DEQUEUE_SLEEP          1
 
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to