Linus,

please pull the latest sched/urgent branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched-urgent-2020-05-24

up to:  39f23ce07b93: sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list

A set of fixes for the scheduler:

 - Fix the handling of throttled parents in enqueue_task_fair() completely. The
   recent fix overlooked a corner case where the first iteration terminates
   due to an entity being on the runqueue, which leaves the list management
   incomplete and later triggers the assertion which checks for completeness.

 - Fix a similar problem in unthrottle_cfs_rq().

 - Show the correct uclamp values in procfs, which previously printed the
   effective value twice instead of the requested and effective values.

Thanks,

        tglx

------------------>
Pavankumar Kondeti (1):
      sched/debug: Fix requested task uclamp values shown in procfs

Phil Auld (1):
      sched/fair: Fix enqueue_task_fair() warning some more

Vincent Guittot (1):
      sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list


 kernel/sched/debug.c |  4 ++--
 kernel/sched/fair.c  | 49 +++++++++++++++++++++++++++++++++++++------------
 2 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a562df57a86e..239970b991c0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -948,8 +948,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
        P(se.avg.util_est.enqueued);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
-       __PS("uclamp.min", p->uclamp[UCLAMP_MIN].value);
-       __PS("uclamp.max", p->uclamp[UCLAMP_MAX].value);
+       __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+       __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
        __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
        __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 02f323b85b6d..538ba5d94e99 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4774,7 +4774,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        struct rq *rq = rq_of(cfs_rq);
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
-       int enqueue = 1;
        long task_delta, idle_task_delta;
 
        se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4798,26 +4797,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        idle_task_delta = cfs_rq->idle_h_nr_running;
        for_each_sched_entity(se) {
                if (se->on_rq)
-                       enqueue = 0;
+                       break;
+               cfs_rq = cfs_rq_of(se);
+               enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
+               cfs_rq->h_nr_running += task_delta;
+               cfs_rq->idle_h_nr_running += idle_task_delta;
+
+               /* end evaluation on encountering a throttled cfs_rq */
+               if (cfs_rq_throttled(cfs_rq))
+                       goto unthrottle_throttle;
+       }
 
+       for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               if (enqueue) {
-                       enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
-               } else {
-                       update_load_avg(cfs_rq, se, 0);
-                       se_update_runnable(se);
-               }
+
+               update_load_avg(cfs_rq, se, UPDATE_TG);
+               se_update_runnable(se);
 
                cfs_rq->h_nr_running += task_delta;
                cfs_rq->idle_h_nr_running += idle_task_delta;
 
+
+               /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
-                       break;
+                       goto unthrottle_throttle;
+
+               /*
+                * One parent has been throttled and cfs_rq removed from the
+                * list. Add it back to not break the leaf list.
+                */
+               if (throttled_hierarchy(cfs_rq))
+                       list_add_leaf_cfs_rq(cfs_rq);
        }
 
-       if (!se)
-               add_nr_running(rq, task_delta);
+       /* At this point se is NULL and we are at root level*/
+       add_nr_running(rq, task_delta);
 
+unthrottle_throttle:
        /*
         * The cfs_rq_throttled() breaks in the above iteration can result in
         * incomplete leaf list maintenance, resulting in triggering the
@@ -4826,7 +4843,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
 
-               list_add_leaf_cfs_rq(cfs_rq);
+               if (list_add_leaf_cfs_rq(cfs_rq))
+                       break;
        }
 
        assert_list_leaf_cfs_rq(rq);
@@ -5479,6 +5497,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
+
+               /*
+                * One parent has been throttled and cfs_rq removed from the
+                * list. Add it back to not break the leaf list.
+                */
+               if (throttled_hierarchy(cfs_rq))
+                       list_add_leaf_cfs_rq(cfs_rq);
        }
 
 enqueue_throttle:

Reply via email to