On Tue, Mar 10, 2026 at 09:09:59AM -0700, Stephen Hemminger wrote: > Add RTE_ETH_RX_OFFLOAD_SCATTER to the advertised receive offload > capabilities. Validate in rx_queue_setup that the mbuf pool data > room is large enough when scatter is not enabled, following the > same pattern as the virtio driver. > > Gate the multi-segment receive path on the scatter offload flag > and drop oversized packets when scatter is disabled. > Reject scatter with infinite_rx mode since the ring-based replay > path does not support multi-segment mbufs. > > Signed-off-by: Stephen Hemminger <[email protected]> > --- > drivers/net/pcap/pcap_ethdev.c | 47 +++++++++++++++++++++++++++++++--- > 1 file changed, 44 insertions(+), 3 deletions(-) > > diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c > index 8a2b5c1b4b..d8a924b0cd 100644 > --- a/drivers/net/pcap/pcap_ethdev.c > +++ b/drivers/net/pcap/pcap_ethdev.c > @@ -79,6 +79,7 @@ struct pcap_rx_queue { > uint16_t port_id; > uint16_t queue_id; > bool vlan_strip; > + bool rx_scatter; > bool timestamp_offloading; > struct rte_mempool *mb_pool; > struct queue_stat rx_stat; > @@ -112,6 +113,7 @@ struct pmd_internals { > bool phy_mac; > bool infinite_rx; > bool vlan_strip; > + bool rx_scatter; > bool timestamp_offloading; > }; > > @@ -342,14 +344,19 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, > uint16_t nb_pkts) > /* pcap packet will fit in the mbuf, can copy it */ > rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, len); > mbuf->data_len = len; > - } else { > - /* Try read jumbo frame into multi mbufs. */ > + } else if (pcap_q->rx_scatter) { > + /* Scatter into multi-segment mbufs. */ > if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool, > mbuf, packet, len) == > -1)) { > pcap_q->rx_stat.err_pkts++; > rte_pktmbuf_free(mbuf); > break; > } > + } else { > + /* Packet too large and scatter not enabled, drop it. 
*/ > + pcap_q->rx_stat.err_pkts++; > + rte_pktmbuf_free(mbuf); > + continue; > } > > mbuf->pkt_len = len; > @@ -907,6 +914,7 @@ eth_dev_configure(struct rte_eth_dev *dev) > const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode; > > internals->vlan_strip = !!(rxmode->offloads & > RTE_ETH_RX_OFFLOAD_VLAN_STRIP); > + internals->rx_scatter = !!(rxmode->offloads & > RTE_ETH_RX_OFFLOAD_SCATTER); > internals->timestamp_offloading = !!(rxmode->offloads & > RTE_ETH_RX_OFFLOAD_TIMESTAMP); > return 0; > } > @@ -927,7 +935,8 @@ eth_dev_info(struct rte_eth_dev *dev, > dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | > RTE_ETH_TX_OFFLOAD_VLAN_INSERT; > dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP | > - RTE_ETH_RX_OFFLOAD_TIMESTAMP; > + RTE_ETH_RX_OFFLOAD_TIMESTAMP | > + RTE_ETH_RX_OFFLOAD_SCATTER; >
You should check for the infinite_rx mode here and only advertise the RTE_ETH_RX_OFFLOAD_SCATTER capability if it's not set. With that fixed: Acked-by: Bruce Richardson <[email protected]>

