__free_old_xmit
is_xdp_raw_buffer_queue

These two APIs are needed by the xsk part, so move them to the header
and add the "virtnet_" prefix.

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
---
 drivers/net/virtio/main.c       | 94 +++------------------------------
 drivers/net/virtio/virtio_net.h | 80 ++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+), 87 deletions(-)
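
Note for reviewers (kept below the "---" cut line, so git am drops it):
a minimal sketch of how the xsk code is expected to consume the two
helpers exported here. virtnet_xsk_clean_tx() is an invented name for
illustration; only virtnet_free_old_xmit() and
virtnet_is_xdp_raw_buffer_queue() come from this patch.

#include "virtio_net.h"	/* the header touched below */

/* Hypothetical xsk TX completion helper, error handling elided. */
static void virtnet_xsk_clean_tx(struct virtnet_info *vi,
				 struct virtnet_sq *sq)
{
	u64 bytes = 0, packets = 0;

	/* Reclaim completed skb/xdp_frame buffers; the helper untags
	 * VIRTIO_XDP_FLAG and undoes the DMA mapping when sq->do_dma
	 * is set.
	 */
	virtnet_free_old_xmit(sq, true, &bytes, &packets);

	/* Raw-buffer (XDP) queues are not driven by a qdisc, so only
	 * wake the stack txq for queues that still carry skbs.
	 */
	if (!virtnet_is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		netif_tx_wake_queue(netdev_get_tx_queue(vi->dev,
							sq - vi->sq));
}

The sketch assumes it runs in TX NAPI context, hence in_napi=true; a
caller outside NAPI (e.g. an ndo_xsk_wakeup handler) would pass false,
as the virtnet_xdp_xmit() hunk below does.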

diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
index 02e054fd217c..6c608b3ce27d 100644
--- a/drivers/net/virtio/main.c
+++ b/drivers/net/virtio/main.c
@@ -45,9 +45,6 @@ module_param(napi_tx, bool, 0644);
 #define VIRTIO_XDP_TX          BIT(0)
 #define VIRTIO_XDP_REDIR       BIT(1)
 
-#define VIRTIO_XDP_FLAG        BIT(0)
-#define VIRTIO_XMIT_DATA_MASK (VIRTIO_XDP_FLAG)
-
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 static const unsigned long guest_offloads[] = {
@@ -150,78 +147,11 @@ struct virtio_net_common_hdr {
        };
 };
 
-static bool is_xdp_frame(void *ptr)
-{
-       return (unsigned long)ptr & VIRTIO_XDP_FLAG;
-}
-
 static void *xdp_to_ptr(struct xdp_frame *ptr)
 {
        return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
 }
 
-static struct xdp_frame *ptr_to_xdp(void *ptr)
-{
-       return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
-}
-
-static inline void *virtnet_sq_unmap(struct virtnet_sq *sq, void *data)
-{
-       struct virtnet_sq_dma *next, *head;
-
-       head = (void *)((unsigned long)data & ~VIRTIO_XMIT_DATA_MASK);
-
-       data = head->data;
-
-       while (head) {
-               virtqueue_dma_unmap_single_attrs(sq->vq, head->addr, head->len,
-                                                DMA_TO_DEVICE, 0);
-
-               next = head->next;
-
-               head->next = sq->dmainfo.free;
-               sq->dmainfo.free = head;
-
-               head = next;
-       }
-
-       return data;
-}
-
-static void __free_old_xmit(struct virtnet_sq *sq, bool in_napi,
-                           u64 *bytes, u64 *packets)
-{
-       unsigned int len;
-       void *ptr;
-
-       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-               if (!is_xdp_frame(ptr)) {
-                       struct sk_buff *skb;
-
-                       if (sq->do_dma)
-                               ptr = virtnet_sq_unmap(sq, ptr);
-
-                       skb = ptr;
-
-                       pr_debug("Sent skb %p\n", skb);
-
-                       *bytes += skb->len;
-                       napi_consume_skb(skb, in_napi);
-               } else {
-                       struct xdp_frame *frame;
-
-                       if (sq->do_dma)
-                               ptr = virtnet_sq_unmap(sq, ptr);
-
-                       frame = ptr_to_xdp(ptr);
-
-                       *bytes += xdp_get_frame_len(frame);
-                       xdp_return_frame(frame);
-               }
-               (*packets)++;
-       }
-}
-
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -700,7 +630,7 @@ static void free_old_xmit(struct virtnet_sq *sq, bool in_napi)
 {
        u64 bytes = 0, packets = 0;
 
-       __free_old_xmit(sq, in_napi, &bytes, &packets);
+       virtnet_free_old_xmit(sq, in_napi, &bytes, &packets);
 
        /* Avoid overhead when no packets have been processed
         * happens when called speculatively from start_xmit.
@@ -714,16 +644,6 @@ static void free_old_xmit(struct virtnet_sq *sq, bool in_napi)
        u64_stats_update_end(&sq->stats.syncp);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-               return false;
-       else if (q < vi->curr_queue_pairs)
-               return true;
-       else
-               return false;
-}
-
 static void check_sq_full_and_disable(struct virtnet_info *vi,
                                      struct net_device *dev,
                                      struct virtnet_sq *sq)
@@ -872,7 +792,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
 
        /* Free up any pending old buffers before queueing new ones. */
-       __free_old_xmit(sq, false, &bytes, &packets);
+       virtnet_free_old_xmit(sq, false, &bytes, &packets);
 
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
@@ -883,7 +803,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
        ret = nxmit;
 
-       if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+       if (!virtnet_is_xdp_raw_buffer_queue(vi, sq - vi->sq))
                check_sq_full_and_disable(vi, dev, sq);
 
        if (flags & XDP_XMIT_FLUSH) {
@@ -2033,7 +1953,7 @@ static void virtnet_poll_cleantx(struct virtnet_rq *rq)
        struct virtnet_sq *sq = &vi->sq[index];
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-       if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
+       if (!sq->napi.weight || virtnet_is_xdp_raw_buffer_queue(vi, index))
                return;
 
        if (__netif_tx_trylock(txq)) {
@@ -2157,7 +2077,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
        int opaque;
        bool done;
 
-       if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+       if (unlikely(virtnet_is_xdp_raw_buffer_queue(vi, index))) {
                /* We don't need to enable cb for XDP */
                napi_complete_done(napi, 0);
                return 0;
@@ -3997,7 +3917,7 @@ void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
 
        sq = &vi->sq[i];
 
-       if (!is_xdp_frame(buf)) {
+       if (!virtnet_is_xdp_frame(buf)) {
                if (sq->do_dma)
                        buf = virtnet_sq_unmap(sq, buf);
 
@@ -4006,7 +3926,7 @@ void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
                if (sq->do_dma)
                        buf = virtnet_sq_unmap(sq, buf);
 
-               xdp_return_frame(ptr_to_xdp(buf));
+               xdp_return_frame(virtnet_ptr_to_xdp(buf));
        }
 }
 
diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
index 98ba23cfdb20..442af4673bf8 100644
--- a/drivers/net/virtio/virtio_net.h
+++ b/drivers/net/virtio/virtio_net.h
@@ -8,6 +8,9 @@
 #include <linux/virtio_net.h>
 #include <net/xdp_sock_drv.h>
 
+#define VIRTIO_XDP_FLAG        BIT(0)
+#define VIRTIO_XMIT_DATA_MASK (VIRTIO_XDP_FLAG)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -221,6 +224,83 @@ struct virtnet_info {
        struct failover *failover;
 };
 
+static inline bool virtnet_is_xdp_frame(void *ptr)
+{
+       return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static inline struct xdp_frame *virtnet_ptr_to_xdp(void *ptr)
+{
+       return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
+static inline void *virtnet_sq_unmap(struct virtnet_sq *sq, void *data)
+{
+       struct virtnet_sq_dma *next, *head;
+
+       head = (void *)((unsigned long)data & ~VIRTIO_XMIT_DATA_MASK);
+
+       data = head->data;
+
+       while (head) {
+               virtqueue_dma_unmap_single_attrs(sq->vq, head->addr, head->len,
+                                                DMA_TO_DEVICE, 0);
+
+               next = head->next;
+
+               head->next = sq->dmainfo.free;
+               sq->dmainfo.free = head;
+
+               head = next;
+       }
+
+       return data;
+}
+
+static inline void virtnet_free_old_xmit(struct virtnet_sq *sq, bool in_napi,
+                                        u64 *bytes, u64 *packets)
+{
+       unsigned int len;
+       void *ptr;
+
+       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+               if (!virtnet_is_xdp_frame(ptr)) {
+                       struct sk_buff *skb;
+
+                       if (sq->do_dma)
+                               ptr = virtnet_sq_unmap(sq, ptr);
+
+                       skb = ptr;
+
+                       pr_debug("Sent skb %p\n", skb);
+
+                       *bytes += skb->len;
+                       napi_consume_skb(skb, in_napi);
+               } else {
+                       struct xdp_frame *frame;
+
+                       if (sq->do_dma)
+                               ptr = virtnet_sq_unmap(sq, ptr);
+
+                       frame = virtnet_ptr_to_xdp(ptr);
+
+                       *bytes += xdp_get_frame_len(frame);
+                       xdp_return_frame(frame);
+               }
+               (*packets)++;
+       }
+}
+
+static inline bool virtnet_is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+               return false;
+       else if (q < vi->curr_queue_pairs)
+               return true;
+       else
+               return false;
+}
+
 void virtnet_rx_pause(struct virtnet_info *vi, struct virtnet_rq *rq);
 void virtnet_rx_resume(struct virtnet_info *vi, struct virtnet_rq *rq);
 void virtnet_tx_pause(struct virtnet_info *vi, struct virtnet_sq *sq);
-- 
2.32.0.3.g01195cf9f
