XDP xmit uses the virtio DMA API for DMA operations, so the virtio
core no longer manages the DMA address.

To record the DMA address, allocate space in the xdp_frame headroom to
store the DMA address.

Introduce virtnet_return_xdp_frame() to release the xdp frame and
complete the DMA unmap operation.

Signed-off-by: Xuan Zhuo <[email protected]>
---
 drivers/net/virtio_net.c | 42 +++++++++++++++++++++++++++++++++-------
 1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a801ea40908f..0efbf7992a95 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -321,6 +321,20 @@ static struct page *get_a_page(struct receive_queue *rq, 
gfp_t gfp_mask)
        return p;
 }
 
+static void virtnet_return_xdp_frame(struct send_queue *sq,
+                                    struct xdp_frame *frame)
+{
+       struct virtnet_info *vi = sq->vq->vdev->priv;
+       dma_addr_t *p_addr, addr;
+
+       p_addr = frame->data - sizeof(*p_addr);
+       addr = *p_addr;
+
+       virtio_dma_unmap(&vi->vdev->dev, addr, frame->len, DMA_TO_DEVICE);
+
+       xdp_return_frame(frame);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
 {
@@ -504,9 +518,11 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct xdp_frame *xdpf)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr;
+       struct device *dev = &vi->vdev->dev;
+       dma_addr_t addr, *p_addr;
        int err;
 
-       if (unlikely(xdpf->headroom < vi->hdr_len))
+       if (unlikely(xdpf->headroom < vi->hdr_len + sizeof(addr)))
                return -EOVERFLOW;
 
        /* Make room for virtqueue hdr (also change xdpf->headroom?) */
@@ -516,10 +532,21 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
        memset(hdr, 0, vi->hdr_len);
        xdpf->len   += vi->hdr_len;
 
-       sg_init_one(sq->sg, xdpf->data, xdpf->len);
+       p_addr = xdpf->data - sizeof(addr);
+
+       addr = virtio_dma_map(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+
+       if (virtio_dma_mapping_error(dev, addr))
+               return -ENOMEM;
+
+       *p_addr = addr;
+
+       sg_init_table(sq->sg, 1);
+       sq->sg->dma_address = addr;
+       sq->sg->length = xdpf->len;
 
-       err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
-                                  GFP_ATOMIC);
+       err = virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 1,
+                                            xdp_to_ptr(xdpf), GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* Caller handle free/refcnt */
 
@@ -600,7 +627,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
                        struct xdp_frame *frame = ptr_to_xdp(ptr);
 
                        bytes += frame->len;
-                       xdp_return_frame(frame);
+                       virtnet_return_xdp_frame(sq, frame);
                } else {
                        struct sk_buff *skb = ptr;
 
@@ -1486,7 +1513,7 @@ static void free_old_xmit_skbs(struct send_queue *sq, 
bool in_napi)
                        struct xdp_frame *frame = ptr_to_xdp(ptr);
 
                        bytes += frame->len;
-                       xdp_return_frame(frame);
+                       virtnet_return_xdp_frame(sq, frame);
                }
                packets++;
        }
@@ -2815,7 +2842,8 @@ static void free_unused_bufs(struct virtnet_info *vi)
                        if (!is_xdp_frame(buf))
                                dev_kfree_skb(buf);
                        else
-                               xdp_return_frame(ptr_to_xdp(buf));
+                               virtnet_return_xdp_frame(vi->sq + i,
+                                                        ptr_to_xdp(buf));
                }
        }
 
-- 
2.31.0

_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to