Re: [RFC][PATCH 3/4] sched: Change sched_class::set_cpus_allowed calling context

2015-05-18 Thread Peter Zijlstra
On Mon, May 18, 2015 at 03:37:43PM +0800, pang.xun...@zte.com.cn wrote:
 Hi Peter,
 
 With this modification, I think the pushing action in my previous patch 
 "Check to push the task away after its affinity was changed" will not
 be able to be implemented inside sched_class::set_cpus_allowed().

Ah, right, I knew there was a patch I needed to look at.

I'll try and not forget, but there's a few regression reports I need to
look at first.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC][PATCH 3/4] sched: Change sched_class::set_cpus_allowed calling context

2015-05-18 Thread Peter Zijlstra
On Mon, May 18, 2015 at 10:32:16AM +0200, Peter Zijlstra wrote:
 On Mon, May 18, 2015 at 03:37:43PM +0800, pang.xun...@zte.com.cn wrote:
  Hi Peter,
  
  With this modification, I think the pushing action in my previous patch 
  "Check to push the task away after its affinity was changed" will not
  be able to be implemented inside sched_class::set_cpus_allowed().
 
 Ah, right, I knew there was a patch I needed to look at.

So basically you want to do:

+check_push:
+   if (weight > 1 &&
+       !task_running(rq, p) &&
+       !test_tsk_need_resched(rq->curr) &&
+       !cpumask_subset(new_mask, &p->cpus_allowed)) {
+       /* Update new affinity and try to push. */
+       cpumask_copy(&p->cpus_allowed, new_mask);
+       p->nr_cpus_allowed = weight;
+       push_rt_tasks(rq);
+       return true;
+   }

in set_cpus_allowed_rt(), which would not work because of us calling
put_prev_task(), which does enqueue_pushable_task() and would allow
pick_next_pushable_task() to select the current task, which would then
BUG_ON().

Note however that you already test for !task_running(), which precludes
that entire argument, because if @p is not running, we did not call
put_prev_task() etc..

So I think the above would still work; albeit it needs a comment on why
etc..
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC][PATCH 3/4] sched: Change sched_class::set_cpus_allowed calling context

2015-05-18 Thread Juri Lelli
Hi Peter,

On 05/18/2015 09:32 AM, Peter Zijlstra wrote:
 On Mon, May 18, 2015 at 03:37:43PM +0800, pang.xun...@zte.com.cn wrote:
 Hi Peter,

 With this modification, I think the pushing action in my previous patch 
 "Check to push the task away after its affinity was changed" will not
 be able to be implemented inside sched_class::set_cpus_allowed().
 
 Ah, right, I knew there was a patch I needed to look at.
 
 I'll try and not forget, but there's a few regression reports I need to
 look at first.
 

Apart from this (and the fact that I still have to look at Xunlei's patches too)
the changes seem ok with DL. Didn't test them yet though. I'll do it soon.

Best,

- Juri

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[RFC][PATCH 3/4] sched: Change sched_class::set_cpus_allowed calling context

2015-05-15 Thread Peter Zijlstra
Change the calling context of sched_class::set_cpus_allowed() such
that we can assume the task is inactive.

This allows us to easily make changes that affect accounting done by
enqueue/dequeue. This does in fact completely remove
set_cpus_allowed_rt and greatly reduces set_cpus_allowed_dl.


Signed-off-by: Peter Zijlstra (Intel) pet...@infradead.org
---
 kernel/sched/core.c |   23 +++
 kernel/sched/deadline.c |   39 ++-
 kernel/sched/rt.c   |   45 +
 3 files changed, 26 insertions(+), 81 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4803,8 +4803,31 @@ void set_cpus_allowed_common(struct task
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
+   struct rq *rq = task_rq(p);
+   bool queued, running;
+
    lockdep_assert_held(&p->pi_lock);
+
+   queued = task_on_rq_queued(p);
+   running = task_current(rq, p);
+
+   if (queued) {
+       /*
+        * Because __kthread_bind() calls this on blocked tasks without
+        * holding rq->lock.
+        */
+       lockdep_assert_held(&rq->lock);
+       dequeue_task(rq, p, 0);
+   }
+   if (running)
+       put_prev_task(rq, p);
+
    p->sched_class->set_cpus_allowed(p, new_mask);
+
+   if (running)
+       p->sched_class->set_curr_task(rq);
+   if (queued)
+       enqueue_task(rq, p, 0);
 }
 
 /*
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1569,9 +1569,8 @@ static void task_woken_dl(struct rq *rq,
 static void set_cpus_allowed_dl(struct task_struct *p,
const struct cpumask *new_mask)
 {
-   struct rq *rq;
struct root_domain *src_rd;
-   int weight;
+   struct rq *rq;
 
BUG_ON(!dl_task(p));
 
@@ -1597,41 +1596,7 @@ static void set_cpus_allowed_dl(struct t
    raw_spin_unlock(&src_dl_b->lock);
}
 
-   weight = cpumask_weight(new_mask);
-
-   /*
-    * Only update if the process changes its state from whether it
-    * can migrate or not.
-    */
-   if ((p->nr_cpus_allowed > 1) == (weight > 1))
-       goto done;
-
-   /*
-    * Update only if the task is actually running (i.e.,
-    * it is on the rq AND it is not throttled).
-    */
-   if (!on_dl_rq(&p->dl))
-       goto done;
-
-   /*
-    * The process used to be able to migrate OR it can now migrate
-    */
-   if (weight <= 1) {
-       if (!task_current(rq, p))
-           dequeue_pushable_dl_task(rq, p);
-       BUG_ON(!rq->dl.dl_nr_migratory);
-       rq->dl.dl_nr_migratory--;
-   } else {
-       if (!task_current(rq, p))
-           enqueue_pushable_dl_task(rq, p);
-       rq->dl.dl_nr_migratory++;
-   }
-
-   update_dl_migration(&rq->dl);
-
-done:
-   cpumask_copy(&p->cpus_allowed, new_mask);
-   p->nr_cpus_allowed = weight;
+   set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq-lock is held */
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2057,49 +2057,6 @@ static void task_woken_rt(struct rq *rq,
push_rt_tasks(rq);
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p,
-   const struct cpumask *new_mask)
-{
-   struct rq *rq;
-   int weight;
-
-   BUG_ON(!rt_task(p));
-
-   weight = cpumask_weight(new_mask);
-
-   /*
-    * Only update if the process changes its state from whether it
-    * can migrate or not.
-    */
-   if ((p->nr_cpus_allowed > 1) == (weight > 1))
-       goto done;
-
-   if (!task_on_rq_queued(p))
-       goto done;
-
-   rq = task_rq(p);
-
-   /*
-    * The process used to be able to migrate OR it can now migrate
-    */
-   if (weight <= 1) {
-       if (!task_current(rq, p))
-           dequeue_pushable_task(rq, p);
-       BUG_ON(!rq->rt.rt_nr_migratory);
-       rq->rt.rt_nr_migratory--;
-   } else {
-       if (!task_current(rq, p))
-           enqueue_pushable_task(rq, p);
-       rq->rt.rt_nr_migratory++;
-   }
-
-   update_rt_migration(&rq->rt);
-
-done:
-   cpumask_copy(&p->cpus_allowed, new_mask);
-   p->nr_cpus_allowed = weight;
-}
-
-
 /* Assumes rq-lock is held */
 static void rq_online_rt(struct rq *rq)
 {
@@ -2313,7 +2270,7 @@ const struct sched_class rt_sched_class
 #ifdef CONFIG_SMP
.select_task_rq = select_task_rq_rt,
 
-   .set_cpus_allowed   = set_cpus_allowed_rt,
+   .set_cpus_allowed   = set_cpus_allowed_common,
.rq_online  = rq_online_rt,
.rq_offline = rq_offline_rt,
.post_schedule  = post_schedule_rt,


--
To unsubscribe from this list: send the