Introduce virtnet_xsk_pool_enable() for xsk setup: it binds the xsk
buffer pool to the receive queue, DMA-maps a shared tx header, and
publishes the pool on the send queue.

Signed-off-by: Xuan Zhuo <[email protected]>
---
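
Note (not part of this patch): a minimal sketch of how this helper is
expected to be wired up, assuming a later patch hooks it into the
driver's ndo_bpf callback. virtnet_xsk_pool_setup() and
virtnet_xsk_pool_disable() are placeholder names here:

	/* Sketch only: the core passes XDP_SETUP_XSK_POOL through
	 * ndo_bpf; xdp->xsk.pool != NULL requests enable, NULL requests
	 * disable for xdp->xsk.queue_id.
	 */
	static int virtnet_xsk_pool_setup(struct net_device *dev,
					  struct netdev_bpf *xdp)
	{
		if (xdp->xsk.pool)
			return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
						       xdp->xsk.queue_id);
		else
			return virtnet_xsk_pool_disable(dev,
							xdp->xsk.queue_id);
	}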
 drivers/net/virtio/xsk.c | 59 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
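
For context, the enable path is reached when userspace binds an AF_XDP
socket with zerocopy to one of the device's queues. A trimmed userspace
illustration (UMEM registration and ring setup omitted), also not part
of the patch:

	#include <linux/if_xdp.h>
	#include <sys/socket.h>

	/* Sketch only: a real program must register a UMEM and set up
	 * the fill/completion rings before bind() can succeed.
	 */
	int xsk_bind_zc(unsigned int ifindex, unsigned int queue_id)
	{
		struct sockaddr_xdp sxdp = {
			.sxdp_family = AF_XDP,
			.sxdp_flags = XDP_ZEROCOPY,
			.sxdp_ifindex = ifindex,
			.sxdp_queue_id = queue_id,
		};
		int fd = socket(AF_XDP, SOCK_RAW, 0);

		if (fd < 0)
			return -1;
		return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	}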

diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
index e01ff2abea11..a7e8005233d2 100644
--- a/drivers/net/virtio/xsk.c
+++ b/drivers/net/virtio/xsk.c
@@ -5,6 +5,8 @@
 
 #include "virtio_net.h"
 
+static struct virtio_net_hdr_mrg_rxbuf xsk_hdr;
+
 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
                                     struct xsk_buff_pool *pool, struct net_device *dev)
 {
@@ -54,3 +56,60 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 
        return err;
 }
+
+static int virtnet_xsk_pool_enable(struct net_device *dev,
+                                  struct xsk_buff_pool *pool,
+                                  u16 qid)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct receive_queue *rq;
+       struct send_queue *sq;
+       int err;
+
+       if (qid >= vi->curr_queue_pairs)
+               return -EINVAL;
+
+       sq = &vi->sq[qid];
+       rq = &vi->rq[qid];
+
+       /* xsk zerocopy depends on tx NAPI.
+        *
+        * All xsk packets are consumed and sent out of the xsk tx
+        * queue by the tx NAPI callback.
+        */
+       if (!sq->napi.weight)
+               return -EPERM;
+
+       /* In big_packets mode, XDP cannot work, so there is no need to
+        * initialize xsk for the rq.
+        */
+       if (vi->big_packets && !vi->mergeable_rx_bufs)
+               return -ENOENT;
+
+       sq->xsk.hdr_dma_address = virtio_dma_map(&vi->vdev->dev, &xsk_hdr,
+                                                vi->hdr_len, DMA_TO_DEVICE);
+       if (virtio_dma_mapping_error(&vi->vdev->dev, sq->xsk.hdr_dma_address))
+               return -ENOMEM;
+
+       err = xsk_pool_dma_map(pool, &vi->vdev->dev, 0);
+       if (err)
+               goto err_xsk_map;
+
+       err = virtnet_rq_bind_xsk_pool(vi, rq, pool, dev);
+       if (err)
+               goto err_rxq;
+
+       /* This path is already protected by rtnl_lock, so
+        * rcu_assign_pointer is safe.
+        */
+       rcu_assign_pointer(sq->xsk.pool, pool);
+
+       return 0;
+
+err_rxq:
+       xsk_pool_dma_unmap(pool, 0);
+err_xsk_map:
+       virtio_dma_unmap(&vi->vdev->dev, sq->xsk.hdr_dma_address, vi->hdr_len,
+                        DMA_TO_DEVICE);
+       return err;
+}
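
Note (below the diff, not part of the patch): the -EPERM return means
xsk zerocopy requires tx NAPI. On virtio_net, tx NAPI is controlled by
the napi_tx module parameter, so a configuration along the lines of
"modprobe virtio_net napi_tx=1" is assumed on setups where it is
disabled.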
-- 
2.32.0.3.g01195cf9f
