Start tracking how many tasks with the SCHED_IDLE policy are present in
each cfs_rq. The new idle_h_nr_running counter mirrors h_nr_running: it
is updated when a SCHED_IDLE task is enqueued or dequeued, and the
accumulated count is walked up and down the hierarchy on throttle and
unthrottle. This will be used by later commits.

Signed-off-by: Viresh Kumar <[email protected]>
---
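Note (illustration only, not part of the commit): one plausible consumer
of the new counter is a check for a cfs_rq whose runnable tasks are all
SCHED_IDLE, e.g. when deciding where to place a non-idle task. A sketch
of such a helper follows; the name cfs_rq_is_idle_only() is made up here
and does not exist in the tree:

	/*
	 * Hypothetical helper: true when the cfs_rq has runnable tasks
	 * and every one of them (across the hierarchy) is SCHED_IDLE.
	 */
	static inline bool cfs_rq_is_idle_only(struct cfs_rq *cfs_rq)
	{
		return cfs_rq->h_nr_running &&
		       cfs_rq->h_nr_running == cfs_rq->idle_h_nr_running;
	}
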
 kernel/sched/fair.c  | 14 ++++++++++++--
 kernel/sched/sched.h |  2 ++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e30dea59d215..ad0b09ddddc0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4453,7 +4453,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        struct rq *rq = rq_of(cfs_rq);
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
-       long task_delta, dequeue = 1;
+       long task_delta, idle_task_delta, dequeue = 1;
        bool empty;
 
        se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
@@ -4464,6 +4464,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        rcu_read_unlock();
 
        task_delta = cfs_rq->h_nr_running;
+       idle_task_delta = cfs_rq->idle_h_nr_running;
        for_each_sched_entity(se) {
                struct cfs_rq *qcfs_rq = cfs_rq_of(se);
                /* throttled entity or throttle-on-deactivate */
@@ -4473,6 +4474,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
                if (dequeue)
                        dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
                qcfs_rq->h_nr_running -= task_delta;
+               qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
                if (qcfs_rq->load.weight)
                        dequeue = 0;
@@ -4512,7 +4514,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
        int enqueue = 1;
-       long task_delta;
+       long task_delta, idle_task_delta;
 
        se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -4532,6 +4534,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                return;
 
        task_delta = cfs_rq->h_nr_running;
+       idle_task_delta = cfs_rq->idle_h_nr_running;
        for_each_sched_entity(se) {
                if (se->on_rq)
                        enqueue = 0;
@@ -4540,6 +4543,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                if (enqueue)
                        enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
                cfs_rq->h_nr_running += task_delta;
+               cfs_rq->idle_h_nr_running += idle_task_delta;
 
                if (cfs_rq_throttled(cfs_rq))
                        break;
@@ -5092,6 +5096,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
+       int idle_h_nr_running = task_has_idle_policy(p);
 
        /*
         * The code below (indirectly) updates schedutil which looks at
@@ -5124,6 +5129,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                if (cfs_rq_throttled(cfs_rq))
                        break;
                cfs_rq->h_nr_running++;
+               cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
                flags = ENQUEUE_WAKEUP;
        }
@@ -5131,6 +5137,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                cfs_rq->h_nr_running++;
+               cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
                if (cfs_rq_throttled(cfs_rq))
                        break;
@@ -5157,6 +5164,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
        int task_sleep = flags & DEQUEUE_SLEEP;
+       int idle_h_nr_running = task_has_idle_policy(p);
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
@@ -5171,6 +5179,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                if (cfs_rq_throttled(cfs_rq))
                        break;
                cfs_rq->h_nr_running--;
+               cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
@@ -5190,6 +5199,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                cfs_rq->h_nr_running--;
+               cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
                if (cfs_rq_throttled(cfs_rq))
                        break;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e052a50fcd..86a388c506ac 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -488,6 +488,8 @@ struct cfs_rq {
        unsigned long           runnable_weight;
        unsigned int            nr_running;
        unsigned int            h_nr_running;
+       /* h_nr_running for SCHED_IDLE tasks */
+       unsigned int            idle_h_nr_running;
 
        u64                     exec_clock;
        u64                     min_vruntime;
-- 
2.19.1.568.g152ad8e3369a
