When net_shaper_ops are enabled for MANA, netdev_ops_lock becomes
active. The netvsc driver sets up the MANA VF via the following call
chain:
netvsc_vf_setup()
   dev_change_flags()
      ...
         __dev_open() OR __dev_close()

dev_change_flags() holds the netdev mutex via netdev_lock_ops. During
this process, mana_create_txq() and mana_create_rxq() invoke
netif_napi_add_tx(), netif_napi_add_weight(), and napi_enable(), all of
which attempt to acquire the same lock, leading to a potential deadlock.

Similarly, mana_destroy_txq() and mana_destroy_rxq() call napi_disable()
and netif_napi_del(), which also contend for the same lock.

Switch to the _locked variants of these APIs to avoid deadlocks when the
netdev_ops_lock is held.

Fixes: d4c22ec680c8 ("net: hold netdev instance lock during ndo_open/ndo_stop")
Signed-off-by: Erni Sri Satya Vennela <er...@linux.microsoft.com>
Reviewed-by: Haiyang Zhang <haiya...@microsoft.com>
Reviewed-by: Shradha Gupta <shradhagu...@linux.microsoft.com>
---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 39 ++++++++++++++-----
 1 file changed, 30 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ccd2885c939e..3c879d8a39e3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1911,8 +1911,13 @@ static void mana_destroy_txq(struct mana_port_context *apc)
 		napi = &apc->tx_qp[i].tx_cq.napi;
 		if (apc->tx_qp[i].txq.napi_initialized) {
 			napi_synchronize(napi);
-			napi_disable(napi);
-			netif_napi_del(napi);
+			if (netdev_need_ops_lock(napi->dev)) {
+				napi_disable_locked(napi);
+				netif_napi_del_locked(napi);
+			} else {
+				napi_disable(napi);
+				netif_napi_del(napi);
+			}
 			apc->tx_qp[i].txq.napi_initialized = false;
 		}
 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2064,8 +2069,14 @@ static int mana_create_txq(struct mana_port_context *apc,
 
 		mana_create_txq_debugfs(apc, i);
 
-		netif_napi_add_tx(net, &cq->napi, mana_poll);
-		napi_enable(&cq->napi);
+		if (netdev_need_ops_lock(net)) {
+			set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+			netif_napi_add_locked(net, &cq->napi, mana_poll);
+			napi_enable_locked(&cq->napi);
+		} else {
+			netif_napi_add_tx(net, &cq->napi, mana_poll);
+			napi_enable(&cq->napi);
+		}
 		txq->napi_initialized = true;
 
 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2101,9 +2112,13 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 
 	if (napi_initialized) {
 		napi_synchronize(napi);
-		napi_disable(napi);
-
-		netif_napi_del(napi);
+		if (netdev_need_ops_lock(napi->dev)) {
+			napi_disable_locked(napi);
+			netif_napi_del_locked(napi);
+		} else {
+			napi_disable(napi);
+			netif_napi_del(napi);
+		}
 	}
 
 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2354,14 +2369,20 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 
 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 
-	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+	if (netdev_need_ops_lock(ndev))
+		netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
+	else
+		netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
 
 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
 				 cq->napi.napi_id));
 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
 					   rxq->page_pool));
 
-	napi_enable(&cq->napi);
+	if (netdev_need_ops_lock(ndev))
+		napi_enable_locked(&cq->napi);
+	else
+		napi_enable(&cq->napi);
 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
 
 out:
-- 
2.34.1
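
P.S. for reviewers: a minimal standalone sketch (not part of the patch) of
the locking pattern the change relies on. mana_example_napi_enable() is a
hypothetical helper used only for illustration; it assumes a kernel with
the netdev instance lock, where netdev_need_ops_lock() (net/netdev_lock.h)
and the *_locked NAPI helpers are available.

#include <linux/netdevice.h>
#include <net/netdev_lock.h>

/* Hypothetical helper, illustration only: enable a NAPI instance
 * regardless of whether the caller already holds the netdev
 * instance lock.
 */
static void mana_example_napi_enable(struct net_device *ndev,
				     struct napi_struct *napi)
{
	if (netdev_need_ops_lock(ndev)) {
		/* The caller (e.g. __dev_open() reached via
		 * dev_change_flags()) already holds the instance lock, so
		 * plain napi_enable() would try to take it again and
		 * deadlock; use the _locked variant instead.
		 */
		napi_enable_locked(napi);
	} else {
		/* Lock is not held here; the plain variant takes and
		 * releases it internally.
		 */
		napi_enable(napi);
	}
}

The same check drives the disable/delete paths, which is why the patch
mirrors the if/else in mana_destroy_txq() and mana_destroy_rxq().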