The commit is pushed to "branch-rh7-3.10.0-229.7.2-ovz" and will appear at 
https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.6.5
------>
commit 315dcacbe9e305082a46c9d2ea585d83576efae9
Author: Kirill Tkhai <ktk...@odin.com>
Date:   Tue Sep 1 17:08:03 2015 +0400

    ve/sched: Fix double put_prev_task_fair() because of trigger_cpulimit_balance()
    
    The scheduler code is written under the assumption that the rq->curr task
    cannot already have been put. For example, in sched_move_task() we check for
    
        running = task_current(rq, tsk);
    
    and call put_prev_task() if "running" is true.
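    
    For reference, the relevant part of sched_move_task() looks roughly like
    this (a simplified sketch, not verbatim from the rh7 tree; field and
    helper names are approximate):
    
        rq = task_rq_lock(tsk, &flags);
    
        running = task_current(rq, tsk);
        on_rq = tsk->on_rq;
    
        if (on_rq)
                dequeue_task(rq, tsk, 0);
        if (unlikely(running))
                tsk->sched_class->put_prev_task(rq, tsk);
    
        /* ...move the task to the new group, then requeue / restore curr... */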
    
    When we unlock rq->lock in trigger_cpulimit_balance(), the task has already
    been put, so a concurrent cpu_cgroup_attach_task()->sched_move_task() puts
    it one more time.
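    
    Schematically, the race window looks like this (the CPU split is only
    illustrative):
    
        CPU0: __schedule()                 CPU1: cpu_cgroup_attach_task()
        put_prev_task_fair(rq, prev)
          put_prev_entity(...)             /* prev is put here */
          trigger_cpulimit_balance(prev)
            raw_spin_unlock(&rq->lock)
                                           sched_move_task(prev)
                                             running = task_current(rq, prev)
                                             /* true: rq->curr is still prev */
                                             put_prev_task(rq, prev)
                                             /* second put */
            raw_spin_lock(&rq->lock)
    
    The fix moves the balance trigger from put_prev_task_fair() to a new
    pre_schedule_fair() hook: pre_schedule() runs in __schedule() before
    put_prev_task(), so rq->lock is dropped there while the task has not yet
    been put.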
    
    https://jira.sw.ru/browse/PSBM-35082
    
    Signed-off-by: Kirill Tkhai <ktk...@odin.com>
---
 kernel/sched/fair.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 167d0f6..3092f76 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5068,18 +5068,16 @@ static inline void trigger_cpulimit_balance(struct task_struct *p)
        int this_cpu, cpu, target_cpu = -1;
        struct sched_domain *sd;
 
-       if (!p->se.on_rq)
-               return;
-
        this_rq = rq_of(cfs_rq_of(&p->se));
        this_cpu = cpu_of(this_rq);
 
+       if (!p->se.on_rq || this_rq->active_balance)
+               return;
+
        cfs_rq = top_cfs_rq_of(&p->se);
        if (check_cpulimit_spread(cfs_rq, this_cpu) >= 0)
                return;
 
-       raw_spin_unlock(&this_rq->lock);
-
        rcu_read_lock();
        for_each_domain(this_cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
@@ -5096,17 +5094,14 @@ static inline void trigger_cpulimit_balance(struct task_struct *p)
 unlock:
        rcu_read_unlock();
 
-       raw_spin_lock(&this_rq->lock);
        if (target_cpu >= 0) {
-               if (!this_rq->active_balance) {
-                       this_rq->active_balance = 1;
-                       this_rq->push_cpu = target_cpu;
-                       raw_spin_unlock(&this_rq->lock);
-                       stop_one_cpu_nowait(this_cpu,
-                                           cpulimit_balance_cpu_stop, this_rq,
-                                           &this_rq->active_balance_work);
-                       raw_spin_lock(&this_rq->lock);
-               }
+               this_rq->active_balance = 1;
+               this_rq->push_cpu = target_cpu;
+               raw_spin_unlock(&this_rq->lock);
+               stop_one_cpu_nowait(this_rq->cpu,
+                                   cpulimit_balance_cpu_stop, this_rq,
+                                   &this_rq->active_balance_work);
+               raw_spin_lock(&this_rq->lock);
        }
 }
 #else
@@ -5127,8 +5122,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
-
-       trigger_cpulimit_balance(prev);
 }
 
 /*
@@ -5787,7 +5780,8 @@ static int cpulimit_balance_cpu_stop(void *data)
 
        raw_spin_lock_irq(&rq->lock);
 
-       if (unlikely(cpu != smp_processor_id() || !rq->active_balance))
+       if (unlikely(cpu != smp_processor_id() || !rq->active_balance ||
+                    !cpu_online(target_cpu)))
                goto out_unlock;
 
        if (unlikely(!rq->nr_running))
@@ -7269,6 +7263,11 @@ out_unlock:
        return 0;
 }
 
+static void pre_schedule_fair(struct rq *rq, struct task_struct *prev)
+{
+       trigger_cpulimit_balance(prev);
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
@@ -8171,6 +8170,7 @@ const struct sched_class fair_sched_class = {
        .rq_offline             = rq_offline_fair,
 
        .task_waking            = task_waking_fair,
+       .pre_schedule           = pre_schedule_fair,
 #endif
 
        .set_curr_task          = set_curr_task_fair,