Currently, runnable_load_avg, which represents the portion of load_avg
coming only from currently runnable tasks, is tracked by cfs_rq but
not by sched_entity.  We want to propagate the runnable_load_avg of a
nested cfs_rq without affecting load_avg propagation.  To implement an
equivalent propagation channel, sched_entity needs to track
runnable_load_avg too.

This patch moves cfs_rq->runnable_load_{sum|avg} into struct
sched_avg, which already tracks load_avg and is shared by both cfs_rq
and sched_entity.
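
For reference, the resulting layout (a sketch matching the
include/linux/sched.h hunk below; the runnable_* fields are the ones
added here and are, for now, only maintained for cfs_rqs):

	struct sched_avg {
		u64		last_update_time;
		u64		load_sum;
		u64		runnable_load_sum;
		u32		util_sum;
		u32		period_contrib;
		unsigned long	load_avg;
		unsigned long	runnable_load_avg;
		unsigned long	util_avg;
	};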

This patch only changes where runnable_load_{sum|avg} are located and
doesn't cause any actual behavior changes.  The fields are still only
used for cfs_rqs.

Signed-off-by: Tejun Heo <[email protected]>
Cc: Vincent Guittot <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Paul Turner <[email protected]>
Cc: Chris Mason <[email protected]>
---
 include/linux/sched.h |    2 +
 kernel/sched/debug.c  |    2 -
 kernel/sched/fair.c   |   63 +++++++++++++++++++++++++-------------------------
 kernel/sched/sched.h  |    2 -
 4 files changed, 35 insertions(+), 34 deletions(-)
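
For reference, a sketch of the __update_load_avg() calling convention
after this change, as used by the call sites in the diff below (the
final bool replaces the old cfs_rq pointer and selects whether the
runnable channel is maintained):

	/* cfs_rq: maintain the load and runnable sums together */
	__update_load_avg(now, cpu_of(rq_of(cfs_rq)), &cfs_rq->avg,
			  scale_load_down(cfs_rq->load.weight),
			  cfs_rq->curr != NULL, true);

	/* sched_entity: the runnable channel is unused for now */
	__update_load_avg(now, cpu, &se->avg,
			  se->on_rq * scale_load_down(se->load.weight),
			  cfs_rq->curr == se, false);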

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -314,9 +314,11 @@ struct load_weight {
 struct sched_avg {
        u64                             last_update_time;
        u64                             load_sum;
+       u64                             runnable_load_sum;
        u32                             util_sum;
        u32                             period_contrib;
        unsigned long                   load_avg;
+       unsigned long                   runnable_load_avg;
        unsigned long                   util_avg;
 };
 
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -516,7 +516,7 @@ void print_cfs_rq(struct seq_file *m, in
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
-                       cfs_rq->runnable_load_avg);
+                       cfs_rq->avg.runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2656,10 +2656,10 @@ calc_cfs_shares(struct cfs_rq *cfs_rq, s
        case shares_runnable:
                /*
                 * Instead of the correct cfs_rq->avg.load_avg we use
-                * cfs_rq->runnable_load_avg, which does not include the
-                * blocked load.
+                * cfs_rq->avg.runnable_load_avg, which does not include
+                * the blocked load.
                 */
-               load = cfs_rq->runnable_load_avg;
+               load = cfs_rq->avg.runnable_load_avg;
                break;
 
        case shares_avg:
@@ -2877,7 +2877,7 @@ static u32 __compute_runnable_contrib(u6
  */
 static __always_inline int
 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
-                 unsigned long weight, int running, struct cfs_rq *cfs_rq)
+                 unsigned long weight, int running, bool update_runnable)
 {
        u64 delta, scaled_delta, periods;
        u32 contrib;
@@ -2923,10 +2923,8 @@ __update_load_avg(u64 now, int cpu, stru
                scaled_delta_w = cap_scale(delta_w, scale_freq);
                if (weight) {
                        sa->load_sum += weight * scaled_delta_w;
-                       if (cfs_rq) {
-                               cfs_rq->runnable_load_sum +=
-                                               weight * scaled_delta_w;
-                       }
+                       if (update_runnable)
+                               sa->runnable_load_sum += weight * scaled_delta_w;
                }
                if (running)
                        sa->util_sum += scaled_delta_w * scale_cpu;
@@ -2938,9 +2936,9 @@ __update_load_avg(u64 now, int cpu, stru
                delta %= 1024;
 
                sa->load_sum = decay_load(sa->load_sum, periods + 1);
-               if (cfs_rq) {
-                       cfs_rq->runnable_load_sum =
-                               decay_load(cfs_rq->runnable_load_sum, periods + 1);
+               if (update_runnable) {
+                       sa->runnable_load_sum =
+                               decay_load(sa->runnable_load_sum, periods + 1);
                }
                sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
 
@@ -2949,8 +2947,8 @@ __update_load_avg(u64 now, int cpu, stru
                contrib = cap_scale(contrib, scale_freq);
                if (weight) {
                        sa->load_sum += weight * contrib;
-                       if (cfs_rq)
-                               cfs_rq->runnable_load_sum += weight * contrib;
+                       if (update_runnable)
+                               sa->runnable_load_sum += weight * contrib;
                }
                if (running)
                        sa->util_sum += contrib * scale_cpu;
@@ -2960,8 +2958,8 @@ __update_load_avg(u64 now, int cpu, stru
        scaled_delta = cap_scale(delta, scale_freq);
        if (weight) {
                sa->load_sum += weight * scaled_delta;
-               if (cfs_rq)
-                       cfs_rq->runnable_load_sum += weight * scaled_delta;
+               if (update_runnable)
+                       sa->runnable_load_sum += weight * scaled_delta;
        }
        if (running)
                sa->util_sum += scaled_delta * scale_cpu;
@@ -2970,9 +2968,9 @@ __update_load_avg(u64 now, int cpu, stru
 
        if (decayed) {
                sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
-               if (cfs_rq) {
-                       cfs_rq->runnable_load_avg =
-                               div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
+               if (update_runnable) {
+                       sa->runnable_load_avg =
+                               div_u64(sa->runnable_load_sum, LOAD_AVG_MAX);
                }
                sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
        }
@@ -3075,7 +3073,7 @@ void set_task_rq_fair(struct sched_entit
                n_last_update_time = next->avg.last_update_time;
 #endif
                __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
-                                 &se->avg, 0, 0, NULL);
+                                 &se->avg, 0, 0, false);
                se->avg.last_update_time = n_last_update_time;
        }
 }
@@ -3129,8 +3127,9 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq
         */
        if (se->on_rq) {
                /* Update parent cfs_rq runnable_load_avg */
-               add_positive(&cfs_rq->runnable_load_avg, delta);
-               cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
+               add_positive(&cfs_rq->avg.runnable_load_avg, delta);
+               cfs_rq->avg.runnable_load_sum =
+                       cfs_rq->avg.runnable_load_avg * LOAD_AVG_MAX;
        }
 }
 
@@ -3264,7 +3263,7 @@ update_cfs_rq_load_avg(u64 now, struct c
        }
 
        decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-               scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+               scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, true);
 
 #ifndef CONFIG_64BIT
        smp_wmb();
@@ -3299,7 +3298,7 @@ static inline void update_load_avg(struc
        if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
                __update_load_avg(now, cpu, &se->avg,
                          se->on_rq * scale_load_down(se->load.weight),
-                         cfs_rq->curr == se, NULL);
+                         cfs_rq->curr == se, false);
        }
 
        decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
@@ -3355,8 +3354,8 @@ enqueue_entity_load_avg(struct cfs_rq *c
 {
        struct sched_avg *sa = &se->avg;
 
-       cfs_rq->runnable_load_avg += sa->load_avg;
-       cfs_rq->runnable_load_sum += sa->load_sum;
+       cfs_rq->avg.runnable_load_avg += sa->load_avg;
+       cfs_rq->avg.runnable_load_sum += sa->load_sum;
 
        if (!sa->last_update_time) {
                attach_entity_load_avg(cfs_rq, se);
@@ -3368,10 +3367,12 @@ enqueue_entity_load_avg(struct cfs_rq *c
 static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       cfs_rq->runnable_load_avg =
-               max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
-       cfs_rq->runnable_load_sum =
-               max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+       struct sched_avg *sa = &se->avg;
+
+       cfs_rq->avg.runnable_load_avg =
+               max_t(long, cfs_rq->avg.runnable_load_avg - sa->load_avg, 0);
+       cfs_rq->avg.runnable_load_sum =
+               max_t(s64,  cfs_rq->avg.runnable_load_sum - sa->load_sum, 0);
 }
 
 #ifndef CONFIG_64BIT
@@ -3405,7 +3406,7 @@ void sync_entity_load_avg(struct sched_e
        u64 last_update_time;
 
        last_update_time = cfs_rq_last_update_time(cfs_rq);
-       __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+       __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, false);
 }
 
 /*
@@ -3433,7 +3434,7 @@ void remove_entity_load_avg(struct sched
 
 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
 {
-       return cfs_rq->runnable_load_avg;
+       return cfs_rq->avg.runnable_load_avg;
 }
 
 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -422,8 +422,6 @@ struct cfs_rq {
         * CFS load tracking
         */
        struct sched_avg avg;
-       u64 runnable_load_sum;
-       unsigned long runnable_load_avg;
 #ifdef CONFIG_FAIR_GROUP_SCHED
        unsigned long tg_load_avg_contrib;
        unsigned long propagate_avg;
