Instead of using a constant buffer length, allow configuring the size
for each queue separately. There is no way to change the length yet; a
later patch will pass it in from memory providers.
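
For illustration, here is a minimal sketch of how the per-queue size is
meant to drive the page pool setup (it mirrors the
bnxt_alloc_rx_page_pool() hunk below; bnxt_example_pool_params() is a
hypothetical helper, not something this patch adds):

	/* Sketch only: a queue whose rx_page_size exceeds PAGE_SIZE makes
	 * the data pool hand out higher-order netmems, and higher-order
	 * pages in turn force a separate order-0 pool for skb heads.
	 */
	static void bnxt_example_pool_params(struct bnxt_rx_ring_info *rxr,
					     struct page_pool_params *pp)
	{
		pp->order = get_order(rxr->rx_page_size);
		pp->max_len = PAGE_SIZE << pp->order;
		rxr->need_head_pool |= !!pp->order;
	}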

Suggested-by: Jakub Kicinski <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 56 +++++++++++--------
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |  1 +
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c |  6 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h |  2 +-
 4 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d17d0ea89c36..f4c2ec243e9a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -905,7 +905,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 
 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
 {
-       return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+       return rxr->need_head_pool || rxr->rx_page_size < PAGE_SIZE;
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -915,9 +915,9 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 {
        struct page *page;
 
-       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+       if (rxr->rx_page_size < PAGE_SIZE) {
                page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
-                                               BNXT_RX_PAGE_SIZE);
+                                               rxr->rx_page_size);
        } else {
                page = page_pool_dev_alloc_pages(rxr->page_pool);
                *offset = 0;
@@ -936,8 +936,9 @@ static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
 {
        netmem_ref netmem;
 
-       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-               netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+       if (rxr->rx_page_size < PAGE_SIZE) {
+               netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+                                                    rxr->rx_page_size, gfp);
        } else {
                netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
                *offset = 0;
@@ -1155,9 +1156,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
-       dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+       dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
                                bp->rx_dir);
-       skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+       skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
@@ -1189,7 +1190,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
-       dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+       dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
                                bp->rx_dir);
 
        if (unlikely(!payload))
@@ -1203,7 +1204,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
        skb_mark_for_recycle(skb);
        off = (void *)data_ptr - page_address(page);
-       skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
+       skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);
 
@@ -1288,7 +1289,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
                if (skb) {
                        skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
                                               cons_rx_buf->offset,
-                                              frag_len, BNXT_RX_PAGE_SIZE);
+                                              frag_len, rxr->rx_page_size);
                } else {
                        skb_frag_t *frag = &shinfo->frags[i];
 
@@ -1313,7 +1314,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
                        if (skb) {
                                skb->len -= frag_len;
                                skb->data_len -= frag_len;
-                               skb->truesize -= BNXT_RX_PAGE_SIZE;
+                               skb->truesize -= rxr->rx_page_size;
                        }
 
                        --shinfo->nr_frags;
@@ -1328,7 +1329,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
                }
 
                page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
-                                                 BNXT_RX_PAGE_SIZE);
+                                                 rxr->rx_page_size);
 
                total_frag_len += frag_len;
                prod = NEXT_RX_AGG(prod);
@@ -2281,8 +2282,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                        if (!skb)
                                goto oom_next_rx;
                } else {
-                       skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
-                                                rxr->page_pool, &xdp);
+                       skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
                        if (!skb) {
                                /* we should be able to free the old skb here */
                                bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3828,11 +3828,13 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
        pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
        if (BNXT_RX_PAGE_MODE(bp))
                pp.pool_size += bp->rx_ring_size / rx_size_fac;
+
+       pp.order = get_order(rxr->rx_page_size);
        pp.nid = numa_node;
        pp.netdev = bp->dev;
        pp.dev = &bp->pdev->dev;
        pp.dma_dir = bp->rx_dir;
-       pp.max_len = PAGE_SIZE;
+       pp.max_len = PAGE_SIZE << pp.order;
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
                   PP_FLAG_ALLOW_UNREADABLE_NETMEM;
        pp.queue_idx = rxr->bnapi->index;
@@ -3843,7 +3845,10 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
        rxr->page_pool = pool;
 
        rxr->need_head_pool = page_pool_is_unreadable(pool);
+       rxr->need_head_pool |= !!pp.order;
        if (bnxt_separate_head_pool(rxr)) {
+               pp.order = 0;
+               pp.max_len = PAGE_SIZE;
                pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
                pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                pool = page_pool_create(&pp);
@@ -4319,6 +4324,8 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
                if (!rxr)
                        goto skip_rx;
 
+               rxr->rx_page_size = BNXT_RX_PAGE_SIZE;
+
                ring = &rxr->rx_ring_struct;
                rmem = &ring->ring_mem;
                rmem->nr_pages = bp->rx_nr_pages;
@@ -4478,7 +4485,7 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
        ring = &rxr->rx_agg_ring_struct;
        ring->fw_ring_id = INVALID_HW_RING_ID;
        if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
-               type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+               type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
                        RX_BD_TYPE_RX_AGG_BD;
 
                /* On P7, setting EOP will cause the chip to disable
@@ -7056,6 +7063,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
 
 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
                                       struct hwrm_ring_alloc_input *req,
+                                      struct bnxt_rx_ring_info *rxr,
                                       struct bnxt_ring_struct *ring)
 {
        struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
@@ -7065,7 +7073,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
        if (ring_type == HWRM_RING_ALLOC_AGG) {
                req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
                req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
-               req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+               req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
                enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
        } else {
                req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
@@ -7079,6 +7087,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
 }
 
 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+                                   struct bnxt_rx_ring_info *rxr,
                                    struct bnxt_ring_struct *ring,
                                    u32 ring_type, u32 map_index)
 {
@@ -7135,7 +7144,8 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
                              cpu_to_le32(bp->rx_ring_mask + 1) :
                              cpu_to_le32(bp->rx_agg_ring_mask + 1);
                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-                       bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
+                       bnxt_set_rx_ring_params_p5(bp, ring_type, req,
+                                                  rxr, ring);
                break;
        case HWRM_RING_ALLOC_CMPL:
                req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7283,7 +7293,7 @@ static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
        u32 map_idx = bnapi->index;
        int rc;
 
-       rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+       rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
        if (rc)
                return rc;
 
@@ -7303,7 +7313,7 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
        int rc;
 
        map_idx = grp_idx + bp->rx_nr_rings;
-       rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+       rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
        if (rc)
                return rc;
 
@@ -7327,7 +7337,7 @@ static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
 
        ring = &cpr->cp_ring_struct;
        ring->handle = BNXT_SET_NQ_HDL(cpr);
-       rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+       rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
        if (rc)
                return rc;
        bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
@@ -7342,7 +7352,7 @@ static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
        const u32 type = HWRM_RING_ALLOC_TX;
        int rc;
 
-       rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+       rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
        if (rc)
                return rc;
        bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
@@ -7368,7 +7378,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 
                vector = bp->irq_tbl[map_idx].vector;
                disable_irq_nosync(vector);
-               rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+               rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
                if (rc) {
                        enable_irq(vector);
                        goto err_out;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f5f07a7e6b29..4c880a9fba92 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1107,6 +1107,7 @@ struct bnxt_rx_ring_info {
 
        unsigned long           *rx_agg_bmap;
        u16                     rx_agg_bmap_size;
+       u16                     rx_page_size;
        bool                    need_head_pool;
 
        dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 3e77a96e5a3e..619235b151a4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -183,7 +183,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                        u16 cons, u8 *data_ptr, unsigned int len,
                        struct xdp_buff *xdp)
 {
-       u32 buflen = BNXT_RX_PAGE_SIZE;
+       u32 buflen = rxr->rx_page_size;
        struct bnxt_sw_rx_bd *rx_buf;
        struct pci_dev *pdev;
        dma_addr_t mapping;
@@ -461,7 +461,7 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 
 struct sk_buff *
 bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
-                  struct page_pool *pool, struct xdp_buff *xdp)
+                  struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp)
 {
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 
@@ -469,7 +469,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
                return NULL;
 
        xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
-                                 BNXT_RX_PAGE_SIZE * num_frags,
+                                 rxr->rx_page_size * num_frags,
                                  xdp_buff_get_skb_flags(xdp));
        return skb;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 220285e190fc..8933a0dec09a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -32,6 +32,6 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
                              struct xdp_buff *xdp);
 struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
-                                  u8 num_frags, struct page_pool *pool,
+                                  u8 num_frags, struct bnxt_rx_ring_info *rxr,
                                   struct xdp_buff *xdp);
 #endif
-- 
2.52.0

