Re: [PATCH v2 09/11] powerpc/smp: Optimize update_mask_by_l2

2020-10-07 Thread Qian Cai
On Wed, 2020-10-07 at 19:47 +0530, Srikar Dronamraju wrote:
> Can you confirm if CONFIG_CPUMASK_OFFSTACK is enabled in your config?

Yes, https://gitlab.com/cailca/linux-mm/-/blob/master/powerpc.config

We test with it almost daily on linux-next.



Re: [PATCH v2 09/11] powerpc/smp: Optimize update_mask_by_l2

2020-10-07 Thread Srikar Dronamraju
* Qian Cai  [2020-10-07 09:05:42]:

Hi Qian,

Thanks for testing and reporting the failure.

> On Mon, 2020-09-21 at 15:26 +0530, Srikar Dronamraju wrote:
> > All threads of an SMT4 core are either part of this CPU's l2-cache
> > mask or not related to this CPU's l2-cache mask. Use this relation to
> > reduce the number of iterations needed to find all the CPUs that share
> > the same l2-cache.
> > 
> > Use a temporary mask to iterate through the CPUs that may share the
> > l2_cache mask. Also, instead of setting one CPU at a time into
> > cpu_l2_cache_mask, copy the SMT4/sub mask in one shot.
> > 
> ...
> >  static bool update_mask_by_l2(int cpu)
> >  {
> > +   struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
> > struct device_node *l2_cache, *np;
> > +   cpumask_var_t mask;
> > int i;
> >  
> > l2_cache = cpu_to_l2cache(cpu);
> > @@ -1240,22 +1264,37 @@ static bool update_mask_by_l2(int cpu)
> > return false;
> > }
> >  
> > -   cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
> > -   for_each_cpu_and(i, cpu_online_mask, cpu_cpu_mask(cpu)) {
> > +   alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu));
> 
> Shouldn't this be GFP_ATOMIC? Otherwise, during CPU hotplug, we get the splat below.

Can you confirm if CONFIG_CPUMASK_OFFSTACK is enabled in your config?
Because if !CONFIG_CPUMASK_OFFSTACK, then alloc_cpumask_var_node would do
nothing but return true.
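
For reference, here is a simplified sketch (trimmed, not verbatim) of the
relevant definitions in include/linux/cpumask.h:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* The mask is a pointer; alloc_cpumask_var_node() really allocates
 * memory, and with GFP_KERNEL that allocation may sleep. */
typedef struct cpumask *cpumask_var_t;
#else
/* The mask is plain on-stack storage; "allocating" it is a no-op. */
typedef struct cpumask cpumask_var_t[1];

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}
#endif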

Regarding CONFIG_CPUMASK_OFFSTACK, I am not sure how much powerpc has
been tested with that config enabled.

Please refer to
http://lore.kernel.org/lkml/87o8nv51bg@mpe.ellerman.id.au/t/#u
We also have an issue open to track enabling/testing/verifying that
CONFIG_CPUMASK_OFFSTACK works:
https://github.com/linuxppc/issues/issues/321
I also don't see any powerpc Kconfig enabling it.

I do agree with your suggestion that we should substitute GFP_ATOMIC
for GFP_KERNEL.
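
Something like this minimal sketch (untested; assuming the allocation
stays in update_mask_by_l2()):

	/* start_secondary() -> add_cpu_to_masks() runs with IRQs disabled,
	 * so this allocation must not sleep. */
	alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));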

> 
> (irqs were disabled in do_idle())
> 
> [  335.420001][T0] BUG: sleeping function called from invalid context at 
> mm/slab.h:494
> [  335.420003][T0] in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 
> 0, name: swapper/88
> [  335.420005][T0] no locks held by swapper/88/0.
> [  335.420007][T0] irq event stamp: 18074448
> [  335.420015][T0] hardirqs last  enabled at (18074447): 
> [] tick_nohz_idle_enter+0x9c/0x110
> [  335.420019][T0] hardirqs last disabled at (18074448): 
> [] do_idle+0x138/0x3b0
> do_idle at kernel/sched/idle.c:253 (discriminator 1)
> [  335.420023][T0] softirqs last  enabled at (18074440): 
> [] irq_enter_rcu+0x94/0xa0
> [  335.420026][T0] softirqs last disabled at (18074439): 
> [] irq_enter_rcu+0x70/0xa0
> [  335.420030][T0] CPU: 88 PID: 0 Comm: swapper/88 Tainted: GW
>  5.9.0-rc8-next-20201007 #1
> [  335.420032][T0] Call Trace:
> [  335.420037][T0] [c0002a4bfcf0] [c0649e98] 
> dump_stack+0xec/0x144 (unreliable)
> [  335.420043][T0] [c0002a4bfd30] [c00f6c34] 
> ___might_sleep+0x2f4/0x310
> [  335.420048][T0] [c0002a4bfdb0] [c0354f94] 
> slab_pre_alloc_hook.constprop.82+0x124/0x190
> [  335.420051][T0] [c0002a4bfe00] [c035e9e8] 
> __kmalloc_node+0x88/0x3a0
> slab_alloc_node at mm/slub.c:2817
> (inlined by) __kmalloc_node at mm/slub.c:4013
> [  335.420054][T0] [c0002a4bfe80] [c06494d8] 
> alloc_cpumask_var_node+0x38/0x80
> kmalloc_node at include/linux/slab.h:577
> (inlined by) alloc_cpumask_var_node at lib/cpumask.c:116
> [  335.420060][T0] [c0002a4bfef0] [c003eedc] 
> start_secondary+0x27c/0x800
> update_mask_by_l2 at arch/powerpc/kernel/smp.c:1267
> (inlined by) add_cpu_to_masks at arch/powerpc/kernel/smp.c:1387
> (inlined by) start_secondary at arch/powerpc/kernel/smp.c:1420
> [  335.420063][T0] [c0002a4bff90] [c000c468] 
> start_secondary_resume+0x10/0x14
> 
> > +   cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu));
> > +
> > +   if (has_big_cores)
> > +   submask_fn = cpu_smallcore_mask;
> > +
> > +   /* Update l2-cache mask with all the CPUs that are part of submask */
> > +   or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
> > +
> > +   /* Skip all CPUs already part of current CPU l2-cache mask */
> > +   cpumask_andnot(mask, mask, cpu_l2_cache_mask(cpu));
> > +
> > +   for_each_cpu(i, mask) {
> > /*
> >  * when updating the marks the current CPU has not been marked
> >  * online, but we need to update the cache masks
> >  */
> > np = cpu_to_l2cache(i);
> > -   if (!np)
> > -   continue;
> >  
> > -   if (np == l2_cache)
> > -   set_cpus_related(cpu, i, cpu_l2_cache_mask);
> > +   /* Skip all CPUs already part of current CPU l2-cache */
> > +   if (np == l2_cache) {
> > +   or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
> > +   cpumask_andnot(mask, mask, submask_fn(i));

Re: [PATCH v2 09/11] powerpc/smp: Optimize update_mask_by_l2

2020-10-07 Thread Qian Cai
On Mon, 2020-09-21 at 15:26 +0530, Srikar Dronamraju wrote:
> All threads of an SMT4 core are either part of this CPU's l2-cache
> mask or not related to this CPU's l2-cache mask. Use this relation to
> reduce the number of iterations needed to find all the CPUs that share
> the same l2-cache.
> 
> Use a temporary mask to iterate through the CPUs that may share the
> l2_cache mask. Also, instead of setting one CPU at a time into
> cpu_l2_cache_mask, copy the SMT4/sub mask in one shot.
> 
...
>  static bool update_mask_by_l2(int cpu)
>  {
> + struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
>   struct device_node *l2_cache, *np;
> + cpumask_var_t mask;
>   int i;
>  
>   l2_cache = cpu_to_l2cache(cpu);
> @@ -1240,22 +1264,37 @@ static bool update_mask_by_l2(int cpu)
>   return false;
>   }
>  
> - cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
> - for_each_cpu_and(i, cpu_online_mask, cpu_cpu_mask(cpu)) {
> + alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu));

Shouldn't this be GFP_ATOMIC? Otherwise, during CPU hotplug, we get the splat below.

(irqs were disabled in do_idle())

[  335.420001][T0] BUG: sleeping function called from invalid context at 
mm/slab.h:494
[  335.420003][T0] in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 
0, name: swapper/88
[  335.420005][T0] no locks held by swapper/88/0.
[  335.420007][T0] irq event stamp: 18074448
[  335.420015][T0] hardirqs last  enabled at (18074447): 
[] tick_nohz_idle_enter+0x9c/0x110
[  335.420019][T0] hardirqs last disabled at (18074448): 
[] do_idle+0x138/0x3b0
do_idle at kernel/sched/idle.c:253 (discriminator 1)
[  335.420023][T0] softirqs last  enabled at (18074440): 
[] irq_enter_rcu+0x94/0xa0
[  335.420026][T0] softirqs last disabled at (18074439): 
[] irq_enter_rcu+0x70/0xa0
[  335.420030][T0] CPU: 88 PID: 0 Comm: swapper/88 Tainted: GW  
   5.9.0-rc8-next-20201007 #1
[  335.420032][T0] Call Trace:
[  335.420037][T0] [c0002a4bfcf0] [c0649e98] 
dump_stack+0xec/0x144 (unreliable)
[  335.420043][T0] [c0002a4bfd30] [c00f6c34] 
___might_sleep+0x2f4/0x310
[  335.420048][T0] [c0002a4bfdb0] [c0354f94] 
slab_pre_alloc_hook.constprop.82+0x124/0x190
[  335.420051][T0] [c0002a4bfe00] [c035e9e8] 
__kmalloc_node+0x88/0x3a0
slab_alloc_node at mm/slub.c:2817
(inlined by) __kmalloc_node at mm/slub.c:4013
[  335.420054][T0] [c0002a4bfe80] [c06494d8] 
alloc_cpumask_var_node+0x38/0x80
kmalloc_node at include/linux/slab.h:577
(inlined by) alloc_cpumask_var_node at lib/cpumask.c:116
[  335.420060][T0] [c0002a4bfef0] [c003eedc] 
start_secondary+0x27c/0x800
update_mask_by_l2 at arch/powerpc/kernel/smp.c:1267
(inlined by) add_cpu_to_masks at arch/powerpc/kernel/smp.c:1387
(inlined by) start_secondary at arch/powerpc/kernel/smp.c:1420
[  335.420063][T0] [c0002a4bff90] [c000c468] 
start_secondary_resume+0x10/0x14

> + cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu));
> +
> + if (has_big_cores)
> + submask_fn = cpu_smallcore_mask;
> +
> + /* Update l2-cache mask with all the CPUs that are part of submask */
> + or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
> +
> + /* Skip all CPUs already part of current CPU l2-cache mask */
> + cpumask_andnot(mask, mask, cpu_l2_cache_mask(cpu));
> +
> + for_each_cpu(i, mask) {
>   /*
>* when updating the marks the current CPU has not been marked
>* online, but we need to update the cache masks
>*/
>   np = cpu_to_l2cache(i);
> - if (!np)
> - continue;
>  
> - if (np == l2_cache)
> - set_cpus_related(cpu, i, cpu_l2_cache_mask);
> + /* Skip all CPUs already part of current CPU l2-cache */
> + if (np == l2_cache) {
> + or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
> + cpumask_andnot(mask, mask, submask_fn(i));
> + } else {
> + cpumask_andnot(mask, mask, cpu_l2_cache_mask(i));
> + }
>  
>   of_node_put(np);
>   }
>   of_node_put(l2_cache);
> + free_cpumask_var(mask);
>  
>   return true;
>  }



[PATCH v2 09/11] powerpc/smp: Optimize update_mask_by_l2

2020-09-21 Thread Srikar Dronamraju
All threads of an SMT4 core are either part of this CPU's l2-cache
mask or not related to this CPU's l2-cache mask. Use this relation to
reduce the number of iterations needed to find all the CPUs that share
the same l2-cache.

Use a temporary mask to iterate through the CPUs that may share the
l2_cache mask. Also, instead of setting one CPU at a time into
cpu_l2_cache_mask, copy the SMT4/sub mask in one shot.

Cc: linuxppc-dev 
Cc: LKML 
Cc: Michael Ellerman 
Cc: Nicholas Piggin 
Cc: Anton Blanchard 
Cc: Oliver O'Halloran 
Cc: Nathan Lynch 
Cc: Michael Neuling 
Cc: Gautham R Shenoy 
Cc: Satheesh Rajendran 
Cc: Ingo Molnar 
Cc: Peter Zijlstra 
Cc: Valentin Schneider 
Signed-off-by: Srikar Dronamraju 
---
 arch/powerpc/kernel/smp.c | 51 ++-
 1 file changed, 45 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6f866e6b12f8..17e90c2414af 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -670,6 +670,28 @@ static void set_cpus_unrelated(int i, int j,
 }
 #endif
 
+/*
+ * Extends set_cpus_related. Instead of setting one CPU at a time in
+ * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
+ */
+static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
+   struct cpumask *(*dstmask)(int))
+{
+   struct cpumask *mask;
+   int k;
+
+   mask = srcmask(j);
+   for_each_cpu(k, srcmask(i))
+   cpumask_or(dstmask(k), dstmask(k), mask);
+
+   if (i == j)
+   return;
+
+   mask = srcmask(i);
+   for_each_cpu(k, srcmask(j))
+   cpumask_or(dstmask(k), dstmask(k), mask);
+}
+
 /*
  * parse_thread_groups: Parses the "ibm,thread-groups" device tree
  *  property for the CPU device node @dn and stores
@@ -1220,7 +1242,9 @@ static struct device_node *cpu_to_l2cache(int cpu)
 
 static bool update_mask_by_l2(int cpu)
 {
+   struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
struct device_node *l2_cache, *np;
+   cpumask_var_t mask;
int i;
 
l2_cache = cpu_to_l2cache(cpu);
@@ -1240,22 +1264,37 @@ static bool update_mask_by_l2(int cpu)
return false;
}
 
-   cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
-   for_each_cpu_and(i, cpu_online_mask, cpu_cpu_mask(cpu)) {
+   alloc_cpumask_var_node(&mask, GFP_KERNEL, cpu_to_node(cpu));
+   cpumask_and(mask, cpu_online_mask, cpu_cpu_mask(cpu));
+
+   if (has_big_cores)
+   submask_fn = cpu_smallcore_mask;
+
+   /* Update l2-cache mask with all the CPUs that are part of submask */
+   or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
+
+   /* Skip all CPUs already part of current CPU l2-cache mask */
+   cpumask_andnot(mask, mask, cpu_l2_cache_mask(cpu));
+
+   for_each_cpu(i, mask) {
/*
 * when updating the marks the current CPU has not been marked
 * online, but we need to update the cache masks
 */
np = cpu_to_l2cache(i);
-   if (!np)
-   continue;
 
-   if (np == l2_cache)
-   set_cpus_related(cpu, i, cpu_l2_cache_mask);
+   /* Skip all CPUs already part of current CPU l2-cache */
+   if (np == l2_cache) {
+   or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
+   cpumask_andnot(mask, mask, submask_fn(i));
+   } else {
+   cpumask_andnot(mask, mask, cpu_l2_cache_mask(i));
+   }
 
of_node_put(np);
}
of_node_put(l2_cache);
+   free_cpumask_var(mask);
 
return true;
 }
-- 
2.17.1
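
For readers skimming the thread, the gain from the new loop can be
illustrated with a toy userspace model (hypothetical code, not the kernel
implementation: uint64_t bitmasks stand in for struct cpumask, with an
invented topology of SMT4 cores and two 32-CPU l2-cache domains):

#include <stdint.h>
#include <stdio.h>

#define NCPUS 64

static uint64_t sibling_mask[NCPUS];	/* SMT4 siblings of each CPU */
static uint64_t l2_mask[NCPUS];		/* l2-cache mask being built */
static int l2_id[NCPUS];		/* l2 domain of each CPU     */

/* Model of or_cpumasks_related(): OR j's sibling group into the l2 mask
 * of every CPU in i's sibling group, and vice versa. */
static void or_masks_related(int i, int j)
{
	int k;

	for (k = 0; k < NCPUS; k++) {
		if (sibling_mask[i] & (1ULL << k))
			l2_mask[k] |= sibling_mask[j];
		if (sibling_mask[j] & (1ULL << k))
			l2_mask[k] |= sibling_mask[i];
	}
}

int main(void)
{
	int cpu = 0, i, iters = 0;
	uint64_t scan;

	for (i = 0; i < NCPUS; i++) {
		sibling_mask[i] = 0xfULL << (i & ~3);	/* SMT4 cores  */
		l2_id[i] = i / 32;			/* two domains */
	}
	/* Pretend CPUs 4..63 were onlined earlier with full l2 masks. */
	for (i = 4; i < NCPUS; i++)
		l2_mask[i] = l2_id[i] ? 0xffffffffULL << 32 : 0xffffffffULL;

	or_masks_related(cpu, cpu);	/* seed with own SMT4 siblings  */
	scan = ~l2_mask[cpu];		/* skip CPUs already covered    */

	for (i = 0; i < NCPUS; i++) {
		if (!(scan & (1ULL << i)))
			continue;
		iters++;
		if (l2_id[i] == l2_id[cpu]) {
			or_masks_related(cpu, i);  /* same l2: take core  */
			scan &= ~sibling_mask[i];  /* core fully handled  */
		} else {
			scan &= ~l2_mask[i];	   /* drop foreign domain */
		}
	}

	/* The old loop visited every online CPU in the node. */
	printf("CPUs visited: %d (old loop: %d)\n", iters, NCPUS);
	return 0;
}

On this invented topology the new walk visits 8 CPUs (one per same-l2 SMT4
core, plus one to discard the entire foreign l2 domain) instead of all 64,
which is the reduction in iterations the commit message describes.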