New allocator type MEM_TYPE_PAGE_POOL for page_pool usage.

The registered allocator page_pool pointer is not available directly
from xdp_rxq_info, but it could be (if needed).  For now, the driver
should keep track of the page_pool pointer separately, and use it
for RX-ring page allocation.

As suggested by Saeed, to maintain a symmetric API it is the driver's
responsibility to allocate/create and free/destroy the page_pool.
Thus, after the driver has called xdp_rxq_info_unreg(), it is the
driver's responsibility to free the page_pool, but with an RCU free
call.  This is done easily via the page_pool helper
page_pool_destroy_rcu() (which avoids touching any driver code during
the RCU callback, which could run after the driver has been unloaded).

Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
---
 include/net/xdp.h |    3 +++
 net/core/xdp.c    |   23 ++++++++++++++++++++---
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/include/net/xdp.h b/include/net/xdp.h
index 859aa9b737fe..98b55eaf8fd7 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -36,6 +36,7 @@
 enum mem_type {
        MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
        MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
+       MEM_TYPE_PAGE_POOL,
        MEM_TYPE_MAX,
 };
 
@@ -44,6 +45,8 @@ struct xdp_mem_info {
        u32 id;
 };
 
+struct page_pool;
+
 struct xdp_rxq_info {
        struct net_device *dev;
        u32 queue_index;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 06a5b39491ad..fe8e87abc266 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
+#include <net/page_pool.h>
 
 #include <net/xdp.h>
 
@@ -27,7 +28,10 @@ static struct rhashtable *mem_id_ht;
 
 struct xdp_mem_allocator {
        struct xdp_mem_info mem;
-       void *allocator;
+       union {
+               void *allocator;
+               struct page_pool *page_pool;
+       };
        struct rhash_head node;
        struct rcu_head rcu;
 };
@@ -74,7 +78,9 @@ void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
        /* Allow this ID to be reused */
        ida_simple_remove(&mem_id_pool, xa->mem.id);
 
-       /* TODO: Depending on allocator type/pointer free resources */
+       /* Notice, driver is expected to free the *allocator,
+        * e.g. page_pool, and MUST also use RCU free.
+        */
 
        /* Poison memory */
        xa->mem.id = 0xFFFF;
@@ -290,11 +296,22 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
 void xdp_return_frame(void *data, struct xdp_mem_info *mem)
 {
-       struct xdp_mem_allocator *xa;
+       struct xdp_mem_allocator *xa = NULL;
 
        rcu_read_lock();
        if (mem->id)
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+
+       if (mem->type == MEM_TYPE_PAGE_POOL) {
+               struct page *page = virt_to_head_page(data);
+
+               if (xa)
+                       page_pool_put_page(xa->page_pool, page);
+               else
+                       put_page(page);
+               /* Must leave the RCU read-side section before returning */
+               rcu_read_unlock();
+               return;
+       }
        rcu_read_unlock();
 
        if (mem->type == MEM_TYPE_PAGE_SHARED) {
@@ -306,6 +322,7 @@ void xdp_return_frame(void *data, struct xdp_mem_info *mem)
                struct page *page = virt_to_page(data); /* Assumes order0 page*/
 
                put_page(page);
+               return;
        }
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);

Reply via email to