Now that the PCI/MSI core code does early checking for multi-MSI support,
X86_IRQ_ALLOC_CONTIGUOUS_VECTORS is no longer required.

Remove the flag and rely on MSI_FLAG_MULTI_PCI_MSI.
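
For context, here is a minimal sketch (illustrative only, not the actual core
code; the helper name is made up) of the kind of early capability check the
changelog refers to: the MSI core can reject a multi-MSI allocation up front
when the irqdomain does not advertise MSI_FLAG_MULTI_PCI_MSI, which is what
makes the arch-level contiguous-vector fallback unnecessary.

    #include <linux/msi.h>

    /*
     * Illustrative sketch: msi_domain_can_alloc_multi() is a made-up name.
     * struct msi_domain_info and MSI_FLAG_MULTI_PCI_MSI are the real core
     * data structure and flag this patch relies on.
     */
    static bool msi_domain_can_alloc_multi(struct msi_domain_info *info,
                                           int nvec)
    {
            /* A single vector never needs multi-MSI support. */
            if (nvec <= 1)
                    return true;

            /* Multi-MSI must be explicitly advertised by the domain. */
            return !!(info->flags & MSI_FLAG_MULTI_PCI_MSI);
    }

With a check of this kind done generically in the core, the per-architecture
and per-driver clearing of X86_IRQ_ALLOC_CONTIGUOUS_VECTORS below becomes dead
code and can be dropped.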

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
 arch/x86/include/asm/irqdomain.h    |    4 +---
 arch/x86/kernel/apic/msi.c          |    6 ++----
 arch/x86/kernel/apic/vector.c       |    4 ----
 drivers/iommu/amd/iommu.c           |    7 -------
 drivers/iommu/intel/irq_remapping.c |    7 -------
 drivers/pci/controller/pci-hyperv.c |   15 +--------------
 6 files changed, 4 insertions(+), 39 deletions(-)

--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -7,9 +7,7 @@
 
 #ifdef CONFIG_X86_LOCAL_APIC
 enum {
-       /* Allocate contiguous CPU vectors */
-       X86_IRQ_ALLOC_CONTIGUOUS_VECTORS                = 0x1,
-       X86_IRQ_ALLOC_LEGACY                            = 0x2,
+       X86_IRQ_ALLOC_LEGACY                            = 0x1,
 };
 
 extern int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec);
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -161,12 +161,10 @@ int pci_msi_prepare(struct irq_domain *d
                    msi_alloc_info_t *arg)
 {
        init_irq_alloc_info(arg, NULL);
-       if (to_pci_dev(dev)->msix_enabled) {
+       if (to_pci_dev(dev)->msix_enabled)
                arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
-       } else {
+       else
                arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
-               arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
-       }
 
        return 0;
 }
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -539,10 +539,6 @@ static int x86_vector_alloc_irqs(struct
        if (disable_apic)
                return -ENXIO;
 
-       /* Currently vector allocator can't guarantee contiguous allocations */
-       if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
-               return -ENOSYS;
-
        /*
         * Catch any attempt to touch the cascade interrupt on a PIC
         * equipped system.
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3297,13 +3297,6 @@ static int irq_remapping_alloc(struct ir
        if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
                return -EINVAL;
 
-       /*
-        * With IRQ remapping enabled, don't need contiguous CPU vectors
-        * to support multiple MSI interrupts.
-        */
-       if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
-               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
-
        sbdf = get_devid(info);
        if (sbdf < 0)
                return -EINVAL;
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1337,13 +1337,6 @@ static int intel_irq_remapping_alloc(str
        if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
                return -EINVAL;
 
-       /*
-        * With IRQ remapping enabled, don't need contiguous CPU vectors
-        * to support multiple MSI interrupts.
-        */
-       if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
-               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
-
        ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
        if (ret < 0)
                return ret;
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -611,20 +611,7 @@ static unsigned int hv_msi_get_int_vecto
        return cfg->vector;
 }
 
-static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
-                         int nvec, msi_alloc_info_t *info)
-{
-       int ret = pci_msi_prepare(domain, dev, nvec, info);
-
-       /*
-        * By using the interrupt remapper in the hypervisor IOMMU, contiguous
-        * CPU vectors is not needed for multi-MSI
-        */
-       if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
-               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
-
-       return ret;
-}
+#define hv_msi_prepare         pci_msi_prepare
 
 /**
  * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
