On Tue, Feb 24, 2026 at 05:45:50PM +0000, Simon Horman wrote: > On Mon, Feb 23, 2026 at 10:52:10AM +0100, Larysa Zaremba wrote: > > Use page_pool buffers by the means of libeth in the Rx queues, this > > significantly reduces code complexity of the driver itself. > > > > Suggested-by: Alexander Lobakin <[email protected]> > > Reviewed-by: Alexander Lobakin <[email protected]> > > Reviewed-by: Aleksandr Loktionov <[email protected]> > > Signed-off-by: Larysa Zaremba <[email protected]> > > ... > > > @@ -3257,12 +3133,26 @@ static int ixgbevf_setup_all_tx_resources(struct > > ixgbevf_adapter *adapter) > > int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, > > struct ixgbevf_ring *rx_ring) > > { > > - int size; > > + struct libeth_fq fq = { > > + .count = rx_ring->count, > > + .nid = NUMA_NO_NODE, > > + .type = LIBETH_FQE_MTU, > > + .xdp = !!rx_ring->xdp_prog, > > + .idx = rx_ring->queue_index, > > + .buf_len = IXGBEVF_RX_PAGE_LEN(rx_ring->xdp_prog ? > > + LIBETH_XDP_HEADROOM : > > + LIBETH_SKB_HEADROOM), > > + }; > > + int ret; > > > > - size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; > > - rx_ring->rx_buffer_info = vmalloc(size); > > - if (!rx_ring->rx_buffer_info) > > - goto err; > > + ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi); > > + if (ret) > > + return ret; > > + > > + rx_ring->pp = fq.pp; > > + rx_ring->rx_fqes = fq.fqes; > > + rx_ring->truesize = fq.truesize; > > + rx_ring->rx_buf_len = fq.buf_len; > > > > u64_stats_init(&rx_ring->syncp); > > > > @@ -3270,25 +3160,29 @@ int ixgbevf_setup_rx_resources(struct > > ixgbevf_adapter *adapter, > > rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); > > rx_ring->size = ALIGN(rx_ring->size, 4096); > > > > - rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, > > + rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, > > &rx_ring->dma, GFP_KERNEL); > > > > if (!rx_ring->desc) > > Hi Larysa, > > Prior to this patch, if this error condition was 
met, > then the function would return -ENOMEM. > This does not seem intentional. > > Flagged by Smatch.
No, it is not intentional, though the problem is fixed in a later patch. Will correct this one in v2. > > > goto err; > > > > /* XDP RX-queue info */ > > - if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, > > - rx_ring->queue_index, 0) < 0) > > + ret = __xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, > > + rx_ring->queue_index, 0, rx_ring->truesize); > > + if (ret) > > goto err; > > > > + xdp_rxq_info_attach_page_pool(&rx_ring->xdp_rxq, fq.pp); > > + > > rx_ring->xdp_prog = adapter->xdp_prog; > > > > return 0; > > err: > > - vfree(rx_ring->rx_buffer_info); > > - rx_ring->rx_buffer_info = NULL; > > + libeth_rx_fq_destroy(&fq); > > + rx_ring->rx_fqes = NULL; > > + rx_ring->pp = NULL; > > dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor > > ring\n"); > > - return -ENOMEM; > > + return ret; > > } > > > > /** > > ...
