On Tue, Dec 23, 2025 at 10:25:32PM +0700, Bui Quang Minh wrote:
> Calling napi_disable() on an already disabled napi can cause the
> deadlock.

a deadlock?
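
If I follow, this is not a classic AB-BA deadlock but napi_disable()
spinning forever on NAPI_STATE_SCHED, which a disabled napi keeps set.
A minimal userspace model of that handshake, just to spell it out (the
SCHED define and the model_napi_* names are my stand-ins, not driver or
core code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SCHED 0x1UL

static atomic_ulong state = SCHED;	/* napi starts disabled: SCHED held */

static void model_napi_enable(void)
{
	atomic_fetch_and(&state, ~SCHED);	/* release SCHED */
}

static bool model_napi_disable(unsigned long max_spins)
{
	/* the test_and_set_bit() style acquire loop, bounded here so the
	 * demo terminates instead of hanging like the real thing
	 */
	while (atomic_fetch_or(&state, SCHED) & SCHED)
		if (!max_spins--)
			return false;	/* would spin forever */
	return true;
}

int main(void)
{
	model_napi_enable();
	printf("first  disable: %s\n",
	       model_napi_disable(1000) ? "ok" : "stuck");
	/* SCHED stays set while disabled, so a second disable -- what
	 * refill_work does against a never-enabled napi -- never succeeds
	 */
	printf("second disable: %s\n",
	       model_napi_disable(1000) ? "ok" : "stuck");
	return 0;
}

So "hang" or "stall" might be the more precise word for the changelog.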

> Because the delayed refill work will call napi_disable(), we
> must ensure that refill work is only enabled and scheduled after we have
> enabled the rx queue's NAPI.

a bugfix so needs a Fixes tag.

> 
> Signed-off-by: Bui Quang Minh <[email protected]>
> ---
>  drivers/net/virtio_net.c | 31 ++++++++++++++++++++++++-------
>  1 file changed, 24 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 63126e490bda..8016d2b378cf 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -3208,16 +3208,31 @@ static int virtnet_open(struct net_device *dev)
>  	int i, err;
> 
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
> +		bool schedule_refill = false;
> +
> +		/* - We must call try_fill_recv before enabling napi of the same
> +		 *   receive queue so that it doesn't race with the call in
> +		 *   virtnet_receive.
> +		 * - We must enable and schedule delayed refill work only when
> +		 *   we have enabled all the receive queue's napi. Otherwise, in
> +		 *   refill_work, we have a deadlock when calling napi_disable on
> +		 *   an already disabled napi.
> +		 */
>  		if (i < vi->curr_queue_pairs) {
> -			enable_delayed_refill(&vi->rq[i]);
>  			/* Make sure we have some buffers: if oom use wq. */
>  			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> -				schedule_delayed_work(&vi->rq[i].refill, 0);
> +				schedule_refill = true;
>  		}
> 
>  		err = virtnet_enable_queue_pair(vi, i);
>  		if (err < 0)
>  			goto err_enable_qp;
> +
> +		if (i < vi->curr_queue_pairs) {
> +			enable_delayed_refill(&vi->rq[i]);
> +			if (schedule_refill)
> +				schedule_delayed_work(&vi->rq[i].refill, 0);
> +		}
>  	}
> 
>  	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
> @@ -3456,11 +3471,16 @@ static void __virtnet_rx_resume(struct virtnet_info *vi,
>  	bool running = netif_running(vi->dev);
>  	bool schedule_refill = false;
> 
> +	/* See the comment in virtnet_open for the ordering rule
> +	 * of try_fill_recv, receive queue napi_enable and delayed
> +	 * refill enable/schedule.
> +	 */
>  	if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
>  		schedule_refill = true;
>  	if (running)
>  		virtnet_napi_enable(rq);
> 
> +	enable_delayed_refill(rq);
>  	if (schedule_refill)
>  		schedule_delayed_work(&rq->refill, 0);
>  }
> @@ -3470,18 +3490,15 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
>  	int i;
> 
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
> -		if (i < vi->curr_queue_pairs) {
> -			enable_delayed_refill(&vi->rq[i]);
> +		if (i < vi->curr_queue_pairs)
>  			__virtnet_rx_resume(vi, &vi->rq[i], true);
> -		} else {
> +		else
>  			__virtnet_rx_resume(vi, &vi->rq[i], false);
> -		}
>  	}
>  }
> 
>  static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
>  {
> -	enable_delayed_refill(rq);
>  	__virtnet_rx_resume(vi, rq, true);
>  }
> 
> -- 
> 2.43.0
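
To spell out the interleaving this closes -- my reconstruction of the bad
schedule in the old virtnet_open(), not a captured trace:

    virtnet_open(), queue i                  delayed refill work for rq[i]
    -----------------------                  -----------------------------
    enable_delayed_refill(&vi->rq[i])
    try_fill_recv() fails (oom)
    schedule_delayed_work(&vi->rq[i].refill, 0)
                                             refill_work()
                                               napi_disable(&vi->rq[i].napi)
                                                 this napi was never enabled,
                                                 so SCHED is still set and the
                                                 disable spins forever
    virtnet_enable_queue_pair(vi, i)           (worker never gets past disable)

With the patch, enable_delayed_refill()/schedule_delayed_work() run only
after virtnet_enable_queue_pair() has enabled that queue's napi, and
__virtnet_rx_resume() follows the same rule. Looks right to me once the
Fixes tag is added.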

