Re: [PATCH 2/7] genirq/affinity: assign vectors to all present CPUs

2017-05-23 Thread Christoph Hellwig
On Sun, May 21, 2017 at 08:31:47PM +0200, Thomas Gleixner wrote:
> On Fri, 19 May 2017, Christoph Hellwig wrote:
> > -   /* Stabilize the cpumasks */
> > -   get_online_cpus();
> 
> How is that protected against physical CPU hotplug? Physical CPU hotplug
> manipulates the present mask.

It does indeed seem to.  Documentation/core-api/cpu_hotplug.rst claims
there are no locking rules for manipulations of cpu_present_mask,
maybe it needs an update to mention get/put_online_cpus()?

Or maybe I should just switch to possible_cpu mask here like a lot of
code seems to do to avoid the hot plug issues, but that might be a bit
of a waste.


Re: [PATCH 2/7] genirq/affinity: assign vectors to all present CPUs

2017-05-21 Thread Thomas Gleixner
On Fri, 19 May 2017, Christoph Hellwig wrote:
> - /* Stabilize the cpumasks */
> - get_online_cpus();

How is that protected against physical CPU hotplug? Physical CPU hotplug
manipulates the present mask.

> - nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
> + nodes = get_nodes_in_cpumask(cpu_present_mask, &nodemsk);
> +static int __init irq_build_cpumap(void)
> +{
> + int node, cpu;
> +
> + for (node = 0; node < nr_node_ids; node++) {
> + if (!zalloc_cpumask_var(&node_to_present_cpumask[node],
> + GFP_KERNEL))
> + panic("can't allocate early memory\n");
> + }
>  
> - return min(cpus, vecs) + resv;
> + for_each_present_cpu(cpu) {
> + node = cpu_to_node(cpu);
> + cpumask_set_cpu(cpu, node_to_present_cpumask[node]);
> + }

This mask needs updating on physical hotplug as well.

Thanks,

tglx


[PATCH 2/7] genirq/affinity: assign vectors to all present CPUs

2017-05-19 Thread Christoph Hellwig
Currently we only assign spread vectors to online CPUs, which ties the
IRQ mapping to the currently online devices and doesn't deal nicely with
the fact that CPUs could come and go rapidly due to e.g. power management.

Instead assign vectors to all present CPUs to avoid this churn.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/irq/affinity.c | 43 ++++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e2d356dd7581..414b0be64bfc 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -4,6 +4,8 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 
+static cpumask_var_t node_to_present_cpumask[MAX_NUMNODES] __read_mostly;
+
 static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
int cpus_per_vec)
 {
@@ -40,8 +42,8 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
int n, nodes = 0;
 
/* Calculate the number of nodes in the supplied affinity mask */
-   for_each_online_node(n) {
-   if (cpumask_intersects(mask, cpumask_of_node(n))) {
+   for_each_node(n) {
+   if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
node_set(n, *nodemsk);
nodes++;
}
@@ -77,9 +79,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
cpumask_copy(masks + curvec, irq_default_affinity);
 
-   /* Stabilize the cpumasks */
-   get_online_cpus();
-   nodes = get_nodes_in_cpumask(cpu_online_mask, );
+   nodes = get_nodes_in_cpumask(cpu_present_mask, );
 
/*
 * If the number of nodes in the mask is greater than or equal the
@@ -87,7 +87,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 */
if (affv <= nodes) {
for_each_node_mask(n, nodemsk) {
-   cpumask_copy(masks + curvec, cpumask_of_node(n));
+   cpumask_copy(masks + curvec,
+node_to_present_cpumask[n]);
if (++curvec == last_affv)
break;
}
@@ -101,7 +102,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
/* Get the cpus on this node which are in the mask */
-   cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
+   cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
 
/* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk);
@@ -128,8 +129,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
}
 
 done:
-   put_online_cpus();
-
/* Fill out vectors at the end that don't need affinity */
for (; curvec < nvecs; curvec++)
cpumask_copy(masks + curvec, irq_default_affinity);
@@ -147,12 +146,26 @@ int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
int resv = affd->pre_vectors + affd->post_vectors;
int vecs = maxvec - resv;
-   int cpus;
 
-   /* Stabilize the cpumasks */
-   get_online_cpus();
-   cpus = cpumask_weight(cpu_online_mask);
-   put_online_cpus();
+   return min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+}
+
+static int __init irq_build_cpumap(void)
+{
+   int node, cpu;
+
+   for (node = 0; node < nr_node_ids; node++) {
+   if (!zalloc_cpumask_var(&node_to_present_cpumask[node],
+   GFP_KERNEL))
+   panic("can't allocate early memory\n");
+   }
 
-   return min(cpus, vecs) + resv;
+   for_each_present_cpu(cpu) {
+   node = cpu_to_node(cpu);
+   cpumask_set_cpu(cpu, node_to_present_cpumask[node]);
+   }
+
+   return 0;
 }
+
+subsys_initcall(irq_build_cpumap);
-- 
2.11.0