* Daniel Bristot de Oliveira <bris...@redhat.com> wrote:

>  void migrate_disable(void)
>  {
>       struct task_struct *p = current;
> +     struct rq *rq;
> +     struct rq_flags rf;
> +
>  
>       if (in_atomic() || irqs_disabled()) {
>  #ifdef CONFIG_SCHED_DEBUG
> @@ -7593,10 +7596,21 @@ void migrate_disable(void)
>       preempt_disable();
>       preempt_lazy_disable();
>       pin_current_cpu();
> -     p->migrate_disable = 1;
>  
> -     p->cpus_ptr = cpumask_of(smp_processor_id());
> +     rq = task_rq_lock(p, &rf);
> +     if (unlikely((p->sched_class == &rt_sched_class ||
> +                   p->sched_class == &dl_sched_class) &&
> +                   p->nr_cpus_allowed > 1)) {
> +             if (p->sched_class == &rt_sched_class)
> +                     task_rq(p)->rt.rt_nr_migratory--;
> +             else
> +                     task_rq(p)->dl.dl_nr_migratory--;
> +     }
>       p->nr_cpus_allowed = 1;
> +     task_rq_unlock(rq, p, &rf);
> +     p->cpus_ptr = cpumask_of(smp_processor_id());
> +     p->migrate_disable = 1;
> +
>  
>       preempt_enable();
>  }
> @@ -7605,6 +7619,9 @@ EXPORT_SYMBOL(migrate_disable);
>  void migrate_enable(void)
>  {
>       struct task_struct *p = current;
> +     struct rq *rq;
> +     struct rq_flags rf;
> +
>  
>       if (in_atomic() || irqs_disabled()) {
>  #ifdef CONFIG_SCHED_DEBUG
> @@ -7628,17 +7645,24 @@ void migrate_enable(void)
>  
>       preempt_disable();
>  
> -     p->cpus_ptr = &p->cpus_mask;
> -     p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
>       p->migrate_disable = 0;
> +     p->cpus_ptr = &p->cpus_mask;
>  
> -     if (p->migrate_disable_update) {
> -             struct rq *rq;
> -             struct rq_flags rf;
> +     rq = task_rq_lock(p, &rf);
> +     p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
> +     if (unlikely((p->sched_class == &rt_sched_class ||
> +                   p->sched_class == &dl_sched_class) &&
> +                   p->nr_cpus_allowed > 1)) {
> +             if (p->sched_class == &rt_sched_class)
> +                     task_rq(p)->rt.rt_nr_migratory++;
> +             else
> +                     task_rq(p)->dl.dl_nr_migratory++;
> +     }
> +     task_rq_unlock(rq, p, &rf);

The fix looks good to me, but AFAICS the repeated pattern introduced here
could be factored out into a helper function instead, right?
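
Something like this, perhaps (a quick sketch only: update_nr_migratory()
is a made-up name, and the body is lifted verbatim from the two hunks
above):

/*
 * Sketch of the suggested helper. The caller must hold the task's
 * runqueue lock, i.e. be inside task_rq_lock()/task_rq_unlock(),
 * as both call sites in the patch already are.
 */
static inline void update_nr_migratory(struct task_struct *p, long delta)
{
	if (unlikely((p->sched_class == &rt_sched_class ||
		      p->sched_class == &dl_sched_class) &&
		      p->nr_cpus_allowed > 1)) {
		/* RT and DL keep separate per-rq migratory task counts */
		if (p->sched_class == &rt_sched_class)
			task_rq(p)->rt.rt_nr_migratory += delta;
		else
			task_rq(p)->dl.dl_nr_migratory += delta;
	}
}

migrate_disable() would then call update_nr_migratory(p, -1) and
migrate_enable() would call update_nr_migratory(p, 1), both under the
lock.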

Thanks,

        Ingo
