From: Dietmar Eggemann <[email protected]>

The unweighted blocked load on a run queue is maintained alongside the
existing (weighted) blocked load.

This patch is the unweighted counterpart of commit 9ee474f55664 ("sched:
Maintain the load contribution of blocked entities").

Note: the unweighted blocked load is not yet used for energy-aware
scheduling.
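
The bookkeeping for the unweighted sum mirrors the weighted one; the
sketch below only summarizes the three touch points of the hunks that
follow and does not add anything beyond them:

    /* dequeue to sleep: park the entity's contribution as blocked load */
    cfs_rq->blocked_load_avg    += se->avg.load_avg_contrib;
    cfs_rq->uw_blocked_load_avg += se->avg.uw_load_avg_contrib;

    /* periodic aging: both sums decay by the same number of periods */
    cfs_rq->blocked_load_avg    = decay_load(cfs_rq->blocked_load_avg, decays);
    cfs_rq->uw_blocked_load_avg = decay_load(cfs_rq->uw_blocked_load_avg, decays);

    /* wakeup (or switch away from fair): drop the parked contribution */
    subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
                                  se->avg.uw_load_avg_contrib);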

Signed-off-by: Dietmar Eggemann <[email protected]>
---
 kernel/sched/debug.c |    2 ++
 kernel/sched/fair.c  |   22 +++++++++++++++++-----
 kernel/sched/sched.h |    2 +-
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 78d4151..ffa56a8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -220,6 +220,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        cfs_rq->uw_runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
+       SEQ_printf(m, "  .%-30s: %ld\n", "uw_blocked_load_avg",
+                       cfs_rq->uw_blocked_load_avg);
 #ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
                        cfs_rq->tg_load_contrib);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ee47b3..c6207f7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2481,12 +2481,18 @@ static long __update_entity_load_avg_contrib(struct sched_entity *se,
 }
 
 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
-                                                long load_contrib)
+                                                long load_contrib,
+                                                long uw_load_contrib)
 {
        if (likely(load_contrib < cfs_rq->blocked_load_avg))
                cfs_rq->blocked_load_avg -= load_contrib;
        else
                cfs_rq->blocked_load_avg = 0;
+
+       if (likely(uw_load_contrib < cfs_rq->uw_blocked_load_avg))
+               cfs_rq->uw_blocked_load_avg -= uw_load_contrib;
+       else
+               cfs_rq->uw_blocked_load_avg = 0;
 }
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
@@ -2521,7 +2527,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
                cfs_rq->uw_runnable_load_avg += uw_contrib_delta;
        }
        else
-               subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+               subtract_blocked_load_contrib(cfs_rq, -contrib_delta,
+                                             -uw_contrib_delta);
 }
 
 /*
@@ -2540,12 +2547,14 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
        if (atomic_long_read(&cfs_rq->removed_load)) {
                unsigned long removed_load;
                removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
-               subtract_blocked_load_contrib(cfs_rq, removed_load);
+               subtract_blocked_load_contrib(cfs_rq, removed_load, 0);
        }
 
        if (decays) {
                cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
                                                      decays);
+               cfs_rq->uw_blocked_load_avg =
+                               decay_load(cfs_rq->uw_blocked_load_avg, decays);
                atomic64_add(decays, &cfs_rq->decay_counter);
                cfs_rq->last_decay = now;
        }
@@ -2591,7 +2600,8 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 
        /* migrated tasks did not contribute to our blocked load */
        if (wakeup) {
-               subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+               subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
+                                             se->avg.uw_load_avg_contrib);
                update_entity_load_avg(se, 0);
        }
 
@@ -2620,6 +2630,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 
        if (sleep) {
                cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+               cfs_rq->uw_blocked_load_avg += se->avg.uw_load_avg_contrib;
                se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
        } /* migrations, e.g. sleep=0 leave decay_count == 0 */
 }
@@ -7481,7 +7492,8 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
        */
        if (se->avg.decay_count) {
                __synchronize_entity_decay(se);
-               subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+               subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
+                                             se->avg.uw_load_avg_contrib);
        }
 #endif
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 46cb8bd..3f1eeb3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -337,7 +337,7 @@ struct cfs_rq {
         * the FAIR_GROUP_SCHED case).
         */
        unsigned long runnable_load_avg, blocked_load_avg;
-       unsigned long uw_runnable_load_avg;
+       unsigned long uw_runnable_load_avg, uw_blocked_load_avg;
        atomic64_t decay_counter;
        u64 last_decay;
        atomic_long_t removed_load;
-- 
1.7.9.5

