From: David Woodhouse <[email protected]>

Add a "max_affinity" argument to irq_domain_alloc_descs(): this is the
maximum possible set of CPUs which can be used. Use it
to calculate the default affinity requested from __irq_alloc_descs()
by first attempting to find the intersection with irq_default_affinity,
or falling back to using just the max_affinity if the intersection
would be empty.

Signed-off-by: David Woodhouse <[email protected]>
---
 include/linux/irqdomain.h |  3 ++-
 kernel/irq/ipi.c          |  2 +-
 kernel/irq/irqdomain.c    | 45 +++++++++++++++++++++++++++++++++------
 3 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 44445d9de881..6b5576da77f0 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -278,7 +278,8 @@ extern void irq_set_default_host(struct irq_domain *host);
 extern struct irq_domain *irq_get_default_host(void);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
                                  irq_hw_number_t hwirq, int node,
-                                 const struct irq_affinity_desc *affinity);
+                                 const struct irq_affinity_desc *affinity,
+                                 const struct cpumask *max_affinity);
 
 static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
 {
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 43e3d1be622c..13f56210eca9 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -75,7 +75,7 @@ int irq_reserve_ipi(struct irq_domain *domain,
                }
        }
 
-       virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
+       virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL, dest);
        if (virq <= 0) {
                pr_warn("Can't reserve IPI, failed to alloc descs\n");
                return -ENOMEM;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index c93e00ca11d8..ffd41f21afca 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -660,7 +660,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
        }
 
        /* Allocate a virtual interrupt number */
-       virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
+       virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+                                     NULL, NULL);
        if (virq <= 0) {
                pr_debug("-> virq allocation failed\n");
                return 0;
@@ -1011,25 +1012,57 @@ int irq_domain_translate_twocell(struct irq_domain *d,
 EXPORT_SYMBOL_GPL(irq_domain_translate_twocell);
 
 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
-                          int node, const struct irq_affinity_desc *affinity)
+                          int node, const struct irq_affinity_desc *affinity,
+                          const struct cpumask *max_affinity)
 {
+       cpumask_var_t default_affinity;
        unsigned int hint;
+       int i;
+
+       /* Check requested per-IRQ affinities are in the possible range */
+       if (affinity && max_affinity) {
+               for (i = 0; i < cnt; i++)
+                       if (!cpumask_subset(&affinity[i].mask, max_affinity))
+                               return -EINVAL;
+       }
+
+       /*
+        * Generate default affinity. Either the possible subset of
+        * irq_default_affinity if such a subset is non-empty, or fall
+        * back to the provided max_affinity if there is no intersection.
+        * And just a copy of irq_default_affinity in the
+        * !CONFIG_CPUMASK_OFFSTACK case.
+        */
+       memset(&default_affinity, 0, sizeof(default_affinity));
+       if ((max_affinity &&
+            !cpumask_subset(irq_default_affinity, max_affinity))) {
+               if (!alloc_cpumask_var(&default_affinity, GFP_KERNEL))
+                       return -ENOMEM;
+               cpumask_and(default_affinity, max_affinity,
+                           irq_default_affinity);
+               if (cpumask_empty(default_affinity))
+                       cpumask_copy(default_affinity, max_affinity);
+       } else if (cpumask_available(default_affinity))
+               cpumask_copy(default_affinity, irq_default_affinity);
 
        if (virq >= 0) {
                virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
-                                        affinity, NULL);
+                                        affinity, default_affinity);
        } else {
                hint = hwirq % nr_irqs;
                if (hint == 0)
                        hint++;
                virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
-                                        affinity, NULL);
+                                        affinity, default_affinity);
                if (virq <= 0 && hint > 1) {
                        virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
-                                                affinity, NULL);
+                                                affinity, default_affinity);
                }
        }
 
+       if (cpumask_available(default_affinity))
+               free_cpumask_var(default_affinity);
+
        return virq;
 }
 
@@ -1342,7 +1375,7 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
                virq = irq_base;
        } else {
                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
-                                             affinity);
+                                             affinity, NULL);
                if (virq < 0) {
                        pr_debug("cannot allocate IRQ(base %d, count %d)\n",
                                 irq_base, nr_irqs);
-- 
2.26.2

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to