From: Ahmed S. Darwish <da...@linutronix.de>

To disentangle the maze in msi.c, all exported device-driver MSI APIs are
now to be grouped in one file, api.c.

Move pci_alloc_irq_vectors_affinity() and let its kernel-doc reference
pci_alloc_irq_vectors() documentation added in parent commit.

Signed-off-by: Ahmed S. Darwish <da...@linutronix.de>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>

---
 drivers/pci/msi/api.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++-
 drivers/pci/msi/msi.c | 65 +----------------------------------------------------
 2 files changed, 59 insertions(+), 65 deletions(-)
---
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index 1714905943fb..8546749afa6e 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -123,3 +123,62 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
                                              flags, NULL);
 }
 EXPORT_SYMBOL(pci_alloc_irq_vectors);
+
+/**
+ * pci_alloc_irq_vectors_affinity() - Allocate multiple device interrupt
+ *                                    vectors with affinity requirements
+ * @dev:      the PCI device to operate on
+ * @min_vecs: minimum required number of vectors (must be >= 1)
+ * @max_vecs: maximum desired number of vectors
+ * @flags:    allocation flags, as in pci_alloc_irq_vectors()
+ * @affd:     affinity requirements (can be %NULL).
+ *
+ * Same as pci_alloc_irq_vectors(), but with the extra @affd parameter.
+ * Check that function docs, and &struct irq_affinity, for more details.
+ */
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+                                  unsigned int max_vecs, unsigned int flags,
+                                  struct irq_affinity *affd)
+{
+       struct irq_affinity msi_default_affd = {0};
+       int nvecs = -ENOSPC;
+
+       if (flags & PCI_IRQ_AFFINITY) {
+               if (!affd)
+                       affd = &msi_default_affd;
+       } else {
+               if (WARN_ON(affd))
+                       affd = NULL;
+       }
+
+       if (flags & PCI_IRQ_MSIX) {
+               nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+                                               affd, flags);
+               if (nvecs > 0)
+                       return nvecs;
+       }
+
+       if (flags & PCI_IRQ_MSI) {
+               nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
+               if (nvecs > 0)
+                       return nvecs;
+       }
+
+       /* use legacy IRQ if allowed */
+       if (flags & PCI_IRQ_LEGACY) {
+               if (min_vecs == 1 && dev->irq) {
+                       /*
+                        * Invoke the affinity spreading logic to ensure that
+                        * the device driver can adjust queue configuration
+                        * for the single interrupt case.
+                        */
+                       if (affd)
+                               irq_create_affinity_masks(1, affd);
+                       pci_intx(dev, 1);
+                       return 1;
+               }
+       }
+
+       return nvecs;
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 6700ef1c734e..a028774f438d 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -887,71 +887,6 @@ int __pci_enable_msix_range(struct pci_dev *dev,
 }
 
 /**
- * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
- * @dev:               PCI device to operate on
- * @min_vecs:          minimum number of vectors required (must be >= 1)
- * @max_vecs:          maximum (desired) number of vectors
- * @flags:             flags or quirks for the allocation
- * @affd:              optional description of the affinity requirements
- *
- * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
- * vectors if available, and fall back to a single legacy vector
- * if neither is available.  Return the number of vectors allocated,
- * (which might be smaller than @max_vecs) if successful, or a negative
- * error code on error. If less than @min_vecs interrupt vectors are
- * available for @dev the function will fail with -ENOSPC.
- *
- * To get the Linux IRQ number used for a vector that can be passed to
- * request_irq() use the pci_irq_vector() helper.
- */
-int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
-                                  unsigned int max_vecs, unsigned int flags,
-                                  struct irq_affinity *affd)
-{
-       struct irq_affinity msi_default_affd = {0};
-       int nvecs = -ENOSPC;
-
-       if (flags & PCI_IRQ_AFFINITY) {
-               if (!affd)
-                       affd = &msi_default_affd;
-       } else {
-               if (WARN_ON(affd))
-                       affd = NULL;
-       }
-
-       if (flags & PCI_IRQ_MSIX) {
-               nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
-                                               affd, flags);
-               if (nvecs > 0)
-                       return nvecs;
-       }
-
-       if (flags & PCI_IRQ_MSI) {
-               nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
-               if (nvecs > 0)
-                       return nvecs;
-       }
-
-       /* use legacy IRQ if allowed */
-       if (flags & PCI_IRQ_LEGACY) {
-               if (min_vecs == 1 && dev->irq) {
-                       /*
-                        * Invoke the affinity spreading logic to ensure that
-                        * the device driver can adjust queue configuration
-                        * for the single interrupt case.
-                        */
-                       if (affd)
-                               irq_create_affinity_masks(1, affd);
-                       pci_intx(dev, 1);
-                       return 1;
-               }
-       }
-
-       return nvecs;
-}
-EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
-
-/**
  * pci_free_irq_vectors - free previously allocated IRQs for a device
  * @dev:               PCI device to operate on
  *

Reply via email to