Use the GIC (GDMA interrupt context) functions to allocate interrupt contexts for RDMA EQs. These interrupt contexts may be shared with Ethernet EQs when MSI-X vectors are limited.
The driver now supports allocating a dedicated MSI-X vector for each EQ. Indicate this capability through a driver capability bit. Signed-off-by: Long Li <[email protected]> --- drivers/infiniband/hw/mana/main.c | 33 ++++++++++++++++++++++++++----- include/net/mana/gdma.h | 5 ++++- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index cfa954460585..029609fb91c5 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -787,6 +787,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) { struct gdma_context *gc = mdev_to_gc(mdev); struct gdma_queue_spec spec = {}; + struct gdma_irq_context *gic; int err, i; spec.type = GDMA_EQ; @@ -797,9 +798,15 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; spec.eq.msix_index = 0; + gic = mana_gd_get_gic(gc, false, &spec.eq.msix_index); + if (!gic) + return -ENOMEM; + err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq); - if (err) + if (err) { + mana_gd_put_gic(gc, false, 0); return err; + } mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *), GFP_KERNEL); @@ -810,31 +817,47 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) spec.eq.callback = NULL; for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) { spec.eq.msix_index = (i + 1) % gc->num_msix_usable; + + gic = mana_gd_get_gic(gc, false, &spec.eq.msix_index); + if (!gic) { + err = -ENOMEM; + goto destroy_eqs; + } + err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]); - if (err) + if (err) { + mana_gd_put_gic(gc, false, spec.eq.msix_index); goto destroy_eqs; + } } return 0; destroy_eqs: - while (i-- > 0) + while (i-- > 0) { mana_gd_destroy_queue(gc, mdev->eqs[i]); + mana_gd_put_gic(gc, false, (i + 1) % gc->num_msix_usable); + } kfree(mdev->eqs); destroy_fatal_eq: mana_gd_destroy_queue(gc, mdev->fatal_err_eq); + mana_gd_put_gic(gc, false, 0); return err; } void 
mana_ib_destroy_eqs(struct mana_ib_dev *mdev) { struct gdma_context *gc = mdev_to_gc(mdev); - int i; + int i, msi; mana_gd_destroy_queue(gc, mdev->fatal_err_eq); + mana_gd_put_gic(gc, false, 0); - for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) + for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) { mana_gd_destroy_queue(gc, mdev->eqs[i]); + msi = (i + 1) % gc->num_msix_usable; + mana_gd_put_gic(gc, false, msi); + } kfree(mdev->eqs); } diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 4eb94d1df439..f0d5c873f856 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -610,6 +610,8 @@ enum { /* Driver supports dynamic MSI-X vector allocation */ #define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13) +/* Driver supports separate EQ/MSIs for each vPort */ +#define GDMA_DRV_CAP_FLAG_1_EQ_MSI_UNSHARE_MULTI_VPORT BIT(19) /* Driver can self reset on EQE notification */ #define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14) @@ -644,7 +646,8 @@ enum { GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \ GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \ GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \ - GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY) + GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \ + GDMA_DRV_CAP_FLAG_1_EQ_MSI_UNSHARE_MULTI_VPORT) #define GDMA_DRV_CAP_FLAGS2 0 -- 2.43.0

