Replace the GDMA global interrupt setup code with the new GIC allocation and release helpers, mana_gd_get_gic() and mana_gd_put_gic(), which centralize interrupt-context management (allocation, request_irq, xarray bookkeeping, and teardown) instead of open-coding it at each call site.
Signed-off-by: Long Li <[email protected]> --- .../net/ethernet/microsoft/mana/gdma_main.c | 83 +++---------------- 1 file changed, 10 insertions(+), 73 deletions(-) diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index e9b839259c01..f3dbc4881be4 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1830,30 +1830,13 @@ static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) * further used in irq_setup() */ for (i = 1; i <= nvec; i++) { - gic = kzalloc(sizeof(*gic), GFP_KERNEL); + gic = mana_gd_get_gic(gc, false, &i); if (!gic) { err = -ENOMEM; goto free_irq; } - gic->handler = mana_gd_process_eq_events; - INIT_LIST_HEAD(&gic->eq_list); - spin_lock_init(&gic->lock); - - snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", - i - 1, pci_name(pdev)); - - /* one pci vector is already allocated for HWC */ - irqs[i - 1] = pci_irq_vector(pdev, i); - if (irqs[i - 1] < 0) { - err = irqs[i - 1]; - goto free_current_gic; - } - - err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_current_gic; - xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); + irqs[i - 1] = gic->irq; } /* @@ -1875,19 +1858,11 @@ static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec) kfree(irqs); return 0; -free_current_gic: - kfree(gic); free_irq: for (i -= 1; i > 0; i--) { irq = pci_irq_vector(pdev, i); - gic = xa_load(&gc->irq_contexts, i); - if (WARN_ON(!gic)) - continue; - irq_update_affinity_hint(irq, NULL); - free_irq(irq, gic); - xa_erase(&gc->irq_contexts, i); - kfree(gic); + mana_gd_put_gic(gc, false, i); } kfree(irqs); return err; @@ -1908,34 +1883,13 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec) start_irqs = irqs; for (i = 0; i < nvec; i++) { - gic = kzalloc(sizeof(*gic), GFP_KERNEL); + gic = mana_gd_get_gic(gc, false, &i); if (!gic) { err = -ENOMEM; goto free_irq; } - gic->handler = 
mana_gd_process_eq_events; - INIT_LIST_HEAD(&gic->eq_list); - spin_lock_init(&gic->lock); - - if (!i) - snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s", - pci_name(pdev)); - else - snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", - i - 1, pci_name(pdev)); - - irqs[i] = pci_irq_vector(pdev, i); - if (irqs[i] < 0) { - err = irqs[i]; - goto free_current_gic; - } - - err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_current_gic; - - xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL); + irqs[i] = gic->irq; } /* If number of IRQ is one extra than number of online CPUs, @@ -1964,19 +1918,11 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec) kfree(start_irqs); return 0; -free_current_gic: - kfree(gic); free_irq: for (i -= 1; i >= 0; i--) { irq = pci_irq_vector(pdev, i); - gic = xa_load(&gc->irq_contexts, i); - if (WARN_ON(!gic)) - continue; - irq_update_affinity_hint(irq, NULL); - free_irq(irq, gic); - xa_erase(&gc->irq_contexts, i); - kfree(gic); + mana_gd_put_gic(gc, false, i); } kfree(start_irqs); @@ -2051,26 +1997,17 @@ static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev) static void mana_gd_remove_irqs(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); - struct gdma_irq_context *gic; int irq, i; if (gc->max_num_msix < 1) return; - for (i = 0; i < gc->max_num_msix; i++) { - irq = pci_irq_vector(pdev, i); - if (irq < 0) - continue; - - gic = xa_load(&gc->irq_contexts, i); - if (WARN_ON(!gic)) - continue; - + for (i = 0; i < (gc->msi_sharing ? gc->max_num_msix : 1); i++) { /* Need to clear the hint before free_irq */ + irq = pci_irq_vector(pdev, i); irq_update_affinity_hint(irq, NULL); - free_irq(irq, gic); - xa_erase(&gc->irq_contexts, i); - kfree(gic); + + mana_gd_put_gic(gc, false, i); } pci_free_irq_vectors(pdev); -- 2.43.0

