Linus,

please pull the latest irq-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-urgent-for-linus

The irq department provides:

    - Two fixes for the CPU affinity spread infrastructure to prevent
      unbalanced spreading in corner cases, which leads to horrible
      performance because interrupts end up aggregated rather than
      spread out

    - Add a missing spinlock initializer in the imx-gpcv2 init code

Thanks,

        tglx

------------------>
Keith Busch (2):
      irq/affinity: Fix CPU spread for unbalanced nodes
      irq/affinity: Fix extra vecs calculation

Tyler Baker (1):
      irqchip/irq-imx-gpcv2: Fix spinlock initialization


 drivers/irqchip/irq-imx-gpcv2.c |  2 ++
 kernel/irq/affinity.c           | 20 +++++++++++---------
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       raw_spin_lock_init(&cd->rlock);
+
        cd->gpc_base = of_iomap(node, 0);
        if (!cd->gpc_base) {
                pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4544b115f5eb..d052947fe785 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-       int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+       int n, nodes, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                goto done;
        }
 
-       /* Spread the vectors per node */
-       vecs_per_node = affv / nodes;
-       /* Account for rounding errors */
-       extra_vecs = affv - (nodes * vecs_per_node);
-
        for_each_node_mask(n, nodemsk) {
-               int ncpus, v, vecs_to_assign = vecs_per_node;
+               int ncpus, v, vecs_to_assign, vecs_per_node;
+
+               /* Spread the vectors per node */
+               vecs_per_node = (affv - curvec) / nodes;
 
                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);
+               vecs_to_assign = min(vecs_per_node, ncpus);
+
+               /* Account for rounding errors */
+               extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
 
                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                        /* Account for extra vectors to compensate rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
-                               if (!--extra_vecs)
-                                       vecs_per_node++;
+                               --extra_vecs;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }
 
                if (curvec >= last_affv)
                        break;
+               --nodes;
        }
 
 done:
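
For illustration only (not part of the pull request): a minimal standalone
sketch of the reworked spread arithmetic, assuming a hypothetical unbalanced
topology with 2 online CPUs on node 0 and 6 on node 1, spreading 6 vectors
and ignoring pre/post vectors. The per-node share is recomputed from the
still unassigned vectors on each iteration, so node 1 picks up the vectors
node 0 could not use, and the per-node remainder is distributed one extra
CPU at a time instead of stalling the spread.

	#include <stdio.h>

	static int min(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		/* Hypothetical topology: node 0 has 2 online CPUs, node 1 has 6 */
		int ncpus_per_node[] = { 2, 6 };
		int affv = 6;			/* vectors to spread */
		int nodes = 2, curvec = 0;

		for (int n = 0; n < 2; n++) {
			int ncpus = ncpus_per_node[n];
			/* Share of the still unassigned vectors for this node */
			int vecs_per_node = (affv - curvec) / nodes;
			int vecs_to_assign = min(vecs_per_node, ncpus);
			/* CPUs left over after an even split get one extra slot each */
			int extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

			for (int v = 0; curvec < affv && v < vecs_to_assign; curvec++, v++) {
				int cpus_per_vec = ncpus / vecs_to_assign;

				if (extra_vecs) {
					cpus_per_vec++;
					--extra_vecs;
				}
				printf("node %d: vector %d -> %d cpus\n",
				       n, curvec, cpus_per_vec);
			}
			--nodes;
		}
		return 0;
	}

With these numbers the sketch assigns 1 CPU each to the two vectors on
node 0, then 2, 2, 1, 1 CPUs to the four vectors on node 1, i.e. all 6
vectors and all 8 CPUs are used rather than vectors piling up on one node.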
