Now, the pages of big mode are chained through the page's private field.
But a subsequent patch aims to make big mode support premapped mode,
which requires additional space to store the DMA address.

Within the sub-struct that contains 'private', there is no suitable
field left for storing the DMA address:

                struct {        /* Page cache and anonymous pages */
                        /**
                         * @lru: Pageout list, eg. active_list protected by
                         * lruvec->lru_lock.  Sometimes used as a generic list
                         * by the page owner.
                         */
                        union {
                                struct list_head lru;

                                /* Or, for the Unevictable "LRU list" slot */
                                struct {
                                        /* Always even, to negate PageTail */
                                        void *__filler;
                                        /* Count page's or folio's mlocks */
                                        unsigned int mlock_count;
                                };

                                /* Or, free page */
                                struct list_head buddy_list;
                                struct list_head pcp_list;
                        };
                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
                        struct address_space *mapping;
                        union {
                                pgoff_t index;          /* Our offset within mapping. */
                                unsigned long share;    /* share count for fsdax */
                        };
                        /**
                         * @private: Mapping-private opaque data.
                         * Usually used for buffer_heads if PagePrivate.
                         * Used for swp_entry_t if PageSwapCache.
                         * Indicates order in the buddy system if PageBuddy.
                         */
                        unsigned long private;
                };
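
For illustration, a minimal sketch of the current scheme (chain_len() is
a hypothetical helper, not driver code): once 'private' doubles as the
next pointer, this sub-struct has no slot left for a DMA address.

                static unsigned int chain_len(struct page *head)
                {
                        struct page *p;
                        unsigned int n = 0;

                        /* 'private' serves as the next pointer today. */
                        for (p = head; p; p = (struct page *)p->private)
                                n++;
                        return n;
                }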

But within the page_pool sub-struct, there is a field called dma_addr
that is appropriate for storing the DMA address. And that sub-struct is
used by the netstack, which works to our advantage:

                struct {        /* page_pool used by netstack */
                        /**
                         * @pp_magic: magic value to avoid recycling non
                         * page_pool allocated pages.
                         */
                        unsigned long pp_magic;
                        struct page_pool *pp;
                        unsigned long _pp_mapping_pad;
                        unsigned long dma_addr;
                        atomic_long_t pp_ref_count;
                };
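
Both fields live in the same sub-struct, so a chained page can hold its
next pointer in pp while dma_addr stays free for the mapping. A minimal
sketch of the intended use, assuming the page_chain_next() helper added
below; the dma_addr store illustrates the follow-up premapped patch and
is not part of this one:

                struct page *p;
                unsigned long dma = 0;  /* placeholder mapping address */

                for (p = first; p; p = page_chain_next(p)) {
                        /* Hypothetical follow-up: store the premapped
                         * address without clobbering the chain pointer.
                         */
                        p->dma_addr = dma;
                }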

Since the fields we use must come from the same sub-struct (the
sub-structs of struct page overlay each other in a union), this patch
replaces "private" with "pp" for chaining pages.
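
For reference, the two helpers introduced below act as an untyped singly
linked list over page->pp ('head' and 'next' are placeholder pages):

                page_chain_add(head, next);     /* head->pp = (void *)next */
                p = page_chain_next(head);      /* p == next */
                page_chain_add(head, NULL);     /* terminate the chain */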

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
Acked-by: Jason Wang <jasow...@redhat.com>
---
 drivers/net/virtio_net.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c22d1118a133..2c7a67ad4789 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,6 +48,14 @@ module_param(napi_tx, bool, 0644);
 
 #define VIRTIO_XDP_FLAG        BIT(0)
 
+/* In big mode, we use a page chain to manage multiple pages submitted to the
+ * ring. These pages are connected using page.pp. The following two macros are
+ * used to obtain the next page in a page chain and set the next page in the
+ * page chain.
+ */
+#define page_chain_next(p)     ((struct page *)((p)->pp))
+#define page_chain_add(p, n)   ((p)->pp = (void *)(n))
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -191,7 +199,7 @@ struct receive_queue {
 
        struct virtnet_interrupt_coalesce intr_coal;
 
-       /* Chain pages by the private ptr. */
+       /* Chain pages by the page's pp struct. */
        struct page *pages;
 
        /* Average packet length for mergeable receive buffers. */
@@ -432,16 +440,16 @@ skb_vnet_common_hdr(struct sk_buff *skb)
 }
 
 /*
- * private is used to chain pages for big packets, put the whole
- * most recent used list in the beginning for reuse
+ * put the whole most recent used list in the beginning for reuse
  */
 static void give_pages(struct receive_queue *rq, struct page *page)
 {
        struct page *end;
 
        /* Find end of list, sew whole thing into vi->rq.pages. */
-       for (end = page; end->private; end = (struct page *)end->private);
-       end->private = (unsigned long)rq->pages;
+       for (end = page; page_chain_next(end); end = page_chain_next(end));
+
+       page_chain_add(end, rq->pages);
        rq->pages = page;
 }
 
@@ -450,9 +458,9 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
        struct page *p = rq->pages;
 
        if (p) {
-               rq->pages = (struct page *)p->private;
-               /* clear private here, it is used to chain pages */
-               p->private = 0;
+               rq->pages = page_chain_next(p);
+               /* clear chain here, it is used to chain pages */
+               page_chain_add(p, NULL);
        } else
                p = alloc_page(gfp_mask);
        return p;
@@ -609,7 +617,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                if (unlikely(!skb))
                        return NULL;
 
-               page = (struct page *)page->private;
+               page = page_chain_next(page);
                if (page)
                        give_pages(rq, page);
                goto ok;
@@ -657,7 +665,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
-               page = (struct page *)page->private;
+               page = page_chain_next(page);
                offset = 0;
        }
 
@@ -1909,7 +1917,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
                sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
 
                /* chain new page in list head to match sg */
-               first->private = (unsigned long)list;
+               page_chain_add(first, list);
                list = first;
        }
 
@@ -1929,7 +1937,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
        sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
 
        /* chain first in list head */
-       first->private = (unsigned long)list;
+       page_chain_add(first, list);
        err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
                                  first, gfp);
        if (err < 0)
-- 
2.32.0.3.g01195cf9f
