When XDP_USE_NEED_WAKEUP is used and the fill ring is empty so no buffer
can be allocated on the RX side, allow the RX NAPI to be descheduled.
This avoids wasting CPU cycles on polling. Userspace is notified via the
need_wakeup flag and must make a wakeup call after refilling the ring.

Reviewed-by: Jason Xing <[email protected]>
Signed-off-by: Bui Quang Minh <[email protected]>
---
Changes in v3:
- Update the comment of try_fill_recv
- Link to v2: 
https://lore.kernel.org/netdev/[email protected]/
Changes in v2:
- Fix the flag check in virtnet_xsk_wakeup
- Link to v1: 
https://lore.kernel.org/netdev/[email protected]/
---
 drivers/net/virtio_net.c | 39 ++++++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index db88dcaefb20..4f4f4046a760 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1454,8 +1454,19 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info 
*vi, struct receive_queue
        xsk_buffs = rq->xsk_buffs;
 
        num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
-       if (!num)
+       if (!num) {
+               if (xsk_uses_need_wakeup(pool)) {
+                       xsk_set_rx_need_wakeup(pool);
+                       /* Return 0 instead of -ENOMEM so that NAPI is
+                        * descheduled.
+                        */
+                       return 0;
+               }
+
                return -ENOMEM;
+       } else {
+               xsk_clear_rx_need_wakeup(pool);
+       }
 
        len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
 
@@ -1588,20 +1599,19 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, 
struct xsk_buff_pool *pool,
        return sent;
 }
 
-static void xsk_wakeup(struct send_queue *sq)
+static void xsk_wakeup(struct napi_struct *napi, struct virtqueue *vq)
 {
-       if (napi_if_scheduled_mark_missed(&sq->napi))
+       if (napi_if_scheduled_mark_missed(napi))
                return;
 
        local_bh_disable();
-       virtqueue_napi_schedule(&sq->napi, sq->vq);
+       virtqueue_napi_schedule(napi, vq);
        local_bh_enable();
 }
 
 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
 {
        struct virtnet_info *vi = netdev_priv(dev);
-       struct send_queue *sq;
 
        if (!netif_running(dev))
                return -ENETDOWN;
@@ -1609,9 +1619,18 @@ static int virtnet_xsk_wakeup(struct net_device *dev, 
u32 qid, u32 flag)
        if (qid >= vi->curr_queue_pairs)
                return -EINVAL;
 
-       sq = &vi->sq[qid];
+       if (flag & XDP_WAKEUP_TX) {
+               struct send_queue *sq = &vi->sq[qid];
+
+               xsk_wakeup(&sq->napi, sq->vq);
+       }
+
+       if (flag & XDP_WAKEUP_RX) {
+               struct receive_queue *rq = &vi->rq[qid];
+
+               xsk_wakeup(&rq->napi, rq->vq);
+       }
 
-       xsk_wakeup(sq);
        return 0;
 }
 
@@ -1623,7 +1642,7 @@ static void virtnet_xsk_completed(struct send_queue *sq, 
int num)
         * wakeup the tx napi to consume the xsk tx queue, because the tx
         * interrupt may not be triggered.
         */
-       xsk_wakeup(sq);
+       xsk_wakeup(&sq->napi, sq->vq);
 }
 
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
@@ -2816,7 +2835,9 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 }
 
 /*
- * Returns false if we couldn't fill entirely (OOM).
+ * Returns false if we couldn't fill entirely (OOM) and need to retry.
+ * In XSK mode, this is the case when no receive buffer is allocated and
+ * xsk_uses_need_wakeup is not set.
  *
  * Normally run in the receive path, but can also be run from ndo_open
  * before we're receiving packets, or from refill_work which is
-- 
2.43.0


Reply via email to