An overloaded CPU has more than 1 runnable CFS task.  When a CFS task
is enqueued on a CPU and rq->cfs.h_nr_running transitions from 1 to 2,
set the CPU in the cfs_overload_cpus bitmap.  When a CFS task is
dequeued and h_nr_running transitions from 2 to 1, clear the CPU in
cfs_overload_cpus.
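
The same threshold applies when a cfs_rq is throttled or unthrottled,
where h_nr_running can change by more than 1 at a time.  The checks run
only when se is NULL after the enqueue/dequeue loops, i.e. when the
change has propagated to the root cfs_rq and rq->cfs.h_nr_running was
actually updated.  As an illustrative sketch (no such helper is added
by this patch; each site open-codes the simplified form), every update
below is an instance of:

	/* prev_nr is rq->cfs.h_nr_running sampled before the change */
	if (prev_nr < 2 && prev_nr + delta >= 2)
		overload_set(rq);
	else if (prev_nr >= 2 && prev_nr + delta < 2)
		overload_clear(rq);

with delta = +1/-1 for enqueue/dequeue, where the conditions reduce to
prev_nr == 1 and prev_nr == 2, and delta = +task_delta/-task_delta for
unthrottle/throttle.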

Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
---
 kernel/sched/fair.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 46 insertions(+), 4 deletions(-)
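
[ Reviewer note, kept below the --- so it stays out of the commit log:
overload_set()/overload_clear() rely on state introduced earlier in
this series.  The following is a sketch of the assumed declarations,
for reading convenience only; the authoritative definitions are in the
earlier sparsemask and sched.h patches:

	/*
	 * Per-LLC set of CPUs with more than one runnable CFS task,
	 * shared by all CPUs in the LLC.  Accessed under RCU, presumably
	 * because the backing storage can be freed when the sched
	 * domains are rebuilt.
	 */
	struct rq {
		...
		struct sparsemask	*cfs_overload_cpus;
	};

	/* linux/sparsemask.h: set/clear element 'elem' in 'mask' */
	void sparsemask_set_elem(int elem, struct sparsemask *mask);
	void sparsemask_clear_elem(int elem, struct sparsemask *mask);
]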

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7fc4a37..c623338 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -23,6 +23,7 @@
 #include "sched.h"
 
 #include <trace/events/sched.h>
+#include <linux/sparsemask.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -3723,6 +3724,28 @@ static inline bool within_margin(int value, int margin)
        WRITE_ONCE(p->se.avg.util_est, ue);
 }
 
+static void overload_clear(struct rq *rq)
+{
+       struct sparsemask *overload_cpus;
+
+       rcu_read_lock();
+       overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+       if (overload_cpus)
+               sparsemask_clear_elem(rq->cpu, overload_cpus);
+       rcu_read_unlock();
+}
+
+static void overload_set(struct rq *rq)
+{
+       struct sparsemask *overload_cpus;
+
+       rcu_read_lock();
+       overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+       if (overload_cpus)
+               sparsemask_set_elem(rq->cpu, overload_cpus);
+       rcu_read_unlock();
+}
+
 #else /* CONFIG_SMP */
 
 #define UPDATE_TG      0x0
@@ -3746,6 +3769,9 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
        return 0;
 }
 
+static inline void overload_clear(struct rq *rq) {}
+static inline void overload_set(struct rq *rq) {}
+
 static inline void
 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 
@@ -4439,6 +4465,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 {
        struct rq *rq = rq_of(cfs_rq);
+       unsigned int prev_nr = rq->cfs.h_nr_running;
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
        long task_delta, dequeue = 1;
@@ -4466,8 +4493,11 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
                        dequeue = 0;
        }
 
-       if (!se)
+       if (!se) {
                sub_nr_running(rq, task_delta);
+               if (prev_nr >= 2 && prev_nr - task_delta < 2)
+                       overload_clear(rq);
+       }
 
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
@@ -4493,6 +4523,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 {
        struct rq *rq = rq_of(cfs_rq);
+       unsigned int prev_nr = rq->cfs.h_nr_running;
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
        int enqueue = 1;
@@ -4529,8 +4560,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                        break;
        }
 
-       if (!se)
+       if (!se) {
                add_nr_running(rq, task_delta);
+               if (prev_nr < 2 && prev_nr + task_delta >= 2)
+                       overload_set(rq);
+       }
 
        /* Determine whether we need to wake up potentially idle CPU: */
        if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -5064,6 +5098,7 @@ static inline void hrtick_update(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
+       unsigned int prev_nr = rq->cfs.h_nr_running;
 
        /*
         * The code below (indirectly) updates schedutil which looks at
@@ -5111,8 +5146,11 @@ static inline void hrtick_update(struct rq *rq)
                update_cfs_group(se);
        }
 
-       if (!se)
+       if (!se) {
                add_nr_running(rq, 1);
+               if (prev_nr == 1)
+                       overload_set(rq);
+       }
 
        hrtick_update(rq);
 }
@@ -5129,6 +5167,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
        int task_sleep = flags & DEQUEUE_SLEEP;
+       unsigned int prev_nr = rq->cfs.h_nr_running;
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
@@ -5170,8 +5209,11 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                update_cfs_group(se);
        }
 
-       if (!se)
+       if (!se) {
                sub_nr_running(rq, 1);
+               if (prev_nr == 2)
+                       overload_clear(rq);
+       }
 
        util_est_dequeue(&rq->cfs, p, task_sleep);
        hrtick_update(rq);
-- 
1.8.3.1
