To allow Ethernet EQs to use dedicated or shared MSI-X vectors and RDMA
EQs to share the same MSI-X, introduce a GIC (GDMA IRQ Context) with
reference counting. This allows the driver to create an interrupt context
on an assigned or unassigned MSI-X vector and share it across multiple
EQ consumers.

Signed-off-by: Long Li <[email protected]>
---
 .../net/ethernet/microsoft/mana/gdma_main.c   | 158 ++++++++++++++++++
 include/net/mana/gdma.h                       |  10 ++
 2 files changed, 168 insertions(+)

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 62e3a2eb68e0..e9b839259c01 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1558,6 +1558,163 @@ static irqreturn_t mana_gd_intr(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+/*
+ * Drop a reference on the GIC (GDMA IRQ Context) bound to MSI vector @msi.
+ *
+ * @use_msi_bitmap must match the value passed to the corresponding
+ * mana_gd_get_gic() call: when true, a bitmap reference is also dropped and
+ * the MSI slot is returned to the bitmap once its last bitmap user is gone.
+ *
+ * When the overall refcount reaches zero the IRQ is released (and the
+ * dynamically allocated MSI-X vector freed, if supported) and the context
+ * is destroyed.
+ */
+void mana_gd_put_gic(struct gdma_context *gc, bool use_msi_bitmap, int msi)
+{
+       struct pci_dev *dev = to_pci_dev(gc->dev);
+       struct msi_map irq_map;
+       struct gdma_irq_context *gic;
+       int irq;
+
+       mutex_lock(&gc->gic_mutex);
+
+       gic = xa_load(&gc->irq_contexts, msi);
+       if (WARN_ON(!gic)) {
+               mutex_unlock(&gc->gic_mutex);
+               return;
+       }
+
+       /* Guard against an unbalanced put with use_msi_bitmap == true */
+       if (use_msi_bitmap && !WARN_ON(gic->bitmap_refs == 0)) {
+               if (--gic->bitmap_refs == 0)
+                       clear_bit(msi, gc->msi_bitmap);
+       }
+
+       if (!refcount_dec_and_test(&gic->refcount))
+               goto out;
+
+       /* Last user: tear down the interrupt and free the context */
+       irq = pci_irq_vector(dev, msi);
+
+       irq_update_affinity_hint(irq, NULL);
+       free_irq(irq, gic);
+
+       if (pci_msix_can_alloc_dyn(dev)) {
+               irq_map.virq = irq;
+               irq_map.index = msi;
+               pci_msix_free_irq(dev, irq_map);
+       }
+
+       xa_erase(&gc->irq_contexts, msi);
+       kfree(gic);
+
+out:
+       mutex_unlock(&gc->gic_mutex);
+}
+EXPORT_SYMBOL_NS(mana_gd_put_gic, "NET_MANA");
+
+/*
+ * Get a GIC (GDMA IRQ Context) on an MSI vector.
+ *
+ * An MSI can be shared between different EQs; this function supports
+ * setting up a separate MSI using a free slot from the bitmap, or directly
+ * using the MSI index supplied by the caller.
+ *
+ * @use_msi_bitmap:
+ * True if the MSI is assigned by this function from available slots in the
+ * bitmap; the chosen slot is returned in *msi_requested.
+ * False if the MSI index is passed in via *msi_requested.
+ *
+ * Returns the (possibly shared) context on success, or NULL on failure.
+ * Callers release it with mana_gd_put_gic() using the same @use_msi_bitmap.
+ */
+struct gdma_irq_context *mana_gd_get_gic(struct gdma_context *gc,
+                                        bool use_msi_bitmap,
+                                        int *msi_requested)
+{
+       struct gdma_irq_context *gic;
+       struct pci_dev *dev = to_pci_dev(gc->dev);
+       struct msi_map irq_map = { };
+       int irq;
+       int msi;
+       int err;
+
+       mutex_lock(&gc->gic_mutex);
+
+       if (use_msi_bitmap) {
+               /* Pick the first MSI slot with no bitmap user */
+               msi = find_first_zero_bit(gc->msi_bitmap, gc->num_msix_usable);
+               if (msi >= gc->num_msix_usable) {
+                       dev_err(gc->dev, "No free MSI vectors available\n");
+                       gic = NULL;
+                       goto out;
+               }
+               *msi_requested = msi;
+       } else {
+               msi = *msi_requested;
+       }
+
+       /* Share an existing context on this vector if there is one */
+       gic = xa_load(&gc->irq_contexts, msi);
+       if (gic) {
+               refcount_inc(&gic->refcount);
+               if (use_msi_bitmap) {
+                       gic->bitmap_refs++;
+                       set_bit(msi, gc->msi_bitmap);
+               }
+               goto out;
+       }
+
+       irq = pci_irq_vector(dev, msi);
+       if (irq < 0) {
+               /* No vector at this index yet; try dynamic MSI-X allocation */
+               irq_map = pci_msix_alloc_irq_at(dev, msi, NULL);
+               if (!irq_map.virq) {
+                       /* On failure, msi_map.index carries the error code */
+                       err = irq_map.index;
+                       dev_err(gc->dev,
+                               "Failed to alloc irq_map msi %d err %d\n",
+                               msi, err);
+                       gic = NULL;
+                       goto out;
+               }
+               irq = irq_map.virq;
+               msi = irq_map.index;
+       }
+
+       gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+       if (!gic) {
+               if (irq_map.virq)
+                       pci_msix_free_irq(dev, irq_map);
+               goto out;
+       }
+
+       gic->handler = mana_gd_process_eq_events;
+       gic->msi = msi;
+       gic->irq = irq;
+       INIT_LIST_HEAD(&gic->eq_list);
+       spin_lock_init(&gic->lock);
+
+       /* MSI 0 is reserved for the HWC; name it accordingly */
+       if (!gic->msi)
+               snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+                        pci_name(dev));
+       else
+               snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_msi%d@pci:%s",
+                        gic->msi, pci_name(dev));
+
+       err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+       if (err) {
+               dev_err(gc->dev, "Failed to request irq %d %s\n",
+                       irq, gic->name);
+               kfree(gic);
+               gic = NULL;
+               if (irq_map.virq)
+                       pci_msix_free_irq(dev, irq_map);
+               goto out;
+       }
+
+       refcount_set(&gic->refcount, 1);
+       gic->bitmap_refs = use_msi_bitmap ? 1 : 0;
+
+       err = xa_err(xa_store(&gc->irq_contexts, msi, gic, GFP_KERNEL));
+       if (err) {
+               dev_err(gc->dev, "Failed to store irq context for msi %d: %d\n",
+                       msi, err);
+               free_irq(irq, gic);
+               kfree(gic);
+               gic = NULL;
+               if (irq_map.virq)
+                       pci_msix_free_irq(dev, irq_map);
+               goto out;
+       }
+
+       if (use_msi_bitmap)
+               set_bit(msi, gc->msi_bitmap);
+
+out:
+       mutex_unlock(&gc->gic_mutex);
+       return gic;
+}
+EXPORT_SYMBOL_NS(mana_gd_get_gic, "NET_MANA");
+
 int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
 {
        r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
@@ -2040,6 +2197,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto release_region;
 
        mutex_init(&gc->eq_test_event_mutex);
+       mutex_init(&gc->gic_mutex);
        pci_set_drvdata(pdev, gc);
        gc->bar0_pa = pci_resource_start(pdev, 0);
 
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 477b751f124e..be6bdd169b3d 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -382,6 +382,10 @@ struct gdma_irq_context {
        spinlock_t lock;
        struct list_head eq_list;
        char name[MANA_IRQ_NAME_SZ];
+       unsigned int msi;
+       unsigned int irq;
+       refcount_t refcount;
+       unsigned int bitmap_refs;
 };
 
 enum gdma_context_flags {
@@ -441,6 +445,9 @@ struct gdma_context {
 
        unsigned long           flags;
 
+       /* Protect access to GIC context */
+       struct mutex            gic_mutex;
+
        /* Indicate if this device is sharing MSI for EQs on MANA */
        bool msi_sharing;
 
@@ -1007,6 +1014,9 @@ int mana_gd_resume(struct pci_dev *pdev);
 
 bool mana_need_log(struct gdma_context *gc, int err);
 
+struct gdma_irq_context *mana_gd_get_gic(struct gdma_context *gc, bool use_msi_bitmap,
+                                        int *msi_requested);
+void mana_gd_put_gic(struct gdma_context *gc, bool use_msi_bitmap, int msi);
 int mana_gd_query_device_cfg(struct gdma_context *gc, u32 proto_major_ver,
                             u32 proto_minor_ver, u32 proto_micro_ver,
                             u16 *max_num_vports, u8 *bm_hostmode);
-- 
2.43.0


Reply via email to