Enable jumbo frame reception with default mbuf data room size by
chaining multiple mbufs when packet exceeds single mbuf tailroom.
Scatter Rx is only enabled when RTE_ETH_RX_OFFLOAD_SCATTER is
requested. Packets are dropped if they exceed single mbuf size
and scatter is not enabled, or if mbuf allocation fails during
chaining. Error counter rx_dropped_pkts tracks all drops.
This allows receiving 9KB jumbo frames using standard 2KB mbufs,
chaining ~5 segments per jumbo packet.
---
doc/guides/rel_notes/release_26_03.rst | 5 ++
drivers/net/af_packet/rte_eth_af_packet.c | 76 +++++++++++++++++++----
2 files changed, 68 insertions(+), 13 deletions(-)
diff --git a/doc/guides/rel_notes/release_26_03.rst b/doc/guides/rel_notes/release_26_03.rst
index fdc880687b..ec627e76c9 100644
--- a/doc/guides/rel_notes/release_26_03.rst
+++ b/doc/guides/rel_notes/release_26_03.rst
@@ -65,6 +65,11 @@ New Features
* Added support for V4000 Krackan2e.
+* **Updated AF_PACKET ethernet driver.**
+
+ * Added support for multi-segment mbuf reception to handle jumbo frames
+ with standard mbuf sizes when scatter Rx offload is enabled.
+
* **Updated CESNET nfb ethernet driver.**
* The timestamp value has been updated to make it usable.
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index e132dc387b..5d43306c09 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -56,6 +56,7 @@ struct __rte_cache_aligned pkt_rx_queue {
uint16_t in_port;
uint8_t vlan_strip;
uint8_t timestamp_offloading;
+ uint8_t scatter_enabled;
volatile unsigned long rx_pkts;
volatile unsigned long rx_bytes;
@@ -125,12 +126,14 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	unsigned i;
 	struct tpacket2_hdr *ppd;
-	struct rte_mbuf *mbuf;
+	struct rte_mbuf *mbuf, *seg, *prev;
 	uint8_t *pbuf;
 	struct pkt_rx_queue *pkt_q = queue;
 	uint16_t num_rx = 0;
 	unsigned long num_rx_bytes = 0;
 	unsigned int framecount, framenum;
+	uint32_t pkt_len, remaining;
+	uint16_t data_len;
 
 	if (unlikely(nb_pkts == 0))
 		return 0;
@@ -154,19 +157,63 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 
-		/* drop packets that won't fit in the mbuf */
-		if (ppd->tp_snaplen > rte_pktmbuf_tailroom(mbuf)) {
+		pkt_len = ppd->tp_snaplen;
+		pbuf = (uint8_t *) ppd + ppd->tp_mac;
+
+		/* drop packets that won't fit in one mbuf if scatter is off */
+		if (!pkt_q->scatter_enabled &&
+		    pkt_len > rte_pktmbuf_tailroom(mbuf)) {
 			rte_pktmbuf_free(mbuf);
 			ppd->tp_status = TP_STATUS_KERNEL;
 			if (++framenum >= framecount)
 				framenum = 0;
+			pkt_q->rx_dropped_pkts++;
 			continue;
 		}
 
-		/* packet will fit in the mbuf, go ahead and receive it */
-		rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
-		pbuf = (uint8_t *) ppd + ppd->tp_mac;
-		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
+		/* copy as much of the frame as fits in the first segment */
+		data_len = RTE_MIN(pkt_len, rte_pktmbuf_tailroom(mbuf));
+		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, data_len);
+		rte_pktmbuf_data_len(mbuf) = data_len;
+		mbuf->nb_segs = 1;
+		remaining = pkt_len - data_len;
+		pbuf += data_len;
+		prev = mbuf;
+
+		/* chain additional segments until the whole frame is copied */
+		while (remaining > 0) {
+			seg = rte_pktmbuf_alloc(pkt_q->mb_pool);
+			if (unlikely(seg == NULL))
+				break;
+
+			/*
+			 * Strip the headroom so chained segments use the
+			 * whole buffer for packet data.
+			 */
+			seg->data_off = 0;
+
+			data_len = RTE_MIN(remaining, rte_pktmbuf_tailroom(seg));
+			memcpy(rte_pktmbuf_mtod(seg, void *), pbuf, data_len);
+			rte_pktmbuf_data_len(seg) = data_len;
+			pbuf += data_len;
+			remaining -= data_len;
+
+			prev->next = seg;
+			prev = seg;
+			mbuf->nb_segs++;
+		}
+
+		/* mbuf exhaustion mid-chain: drop the whole packet */
+		if (unlikely(remaining > 0)) {
+			rte_pktmbuf_free(mbuf);
+			ppd->tp_status = TP_STATUS_KERNEL;
+			if (++framenum >= framecount)
+				framenum = 0;
+			pkt_q->rx_dropped_pkts++;
+			continue;
+		}
+
+		rte_pktmbuf_pkt_len(mbuf) = pkt_len;
 
 		/* check for vlan info */
 		if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
@@ -196,7 +243,7 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		mbuf->port = pkt_q->in_port;
 
 		/* account for the receive frame */
-		bufs[i] = mbuf;
+		bufs[num_rx] = mbuf;
 		num_rx++;
 		num_rx_bytes += mbuf->pkt_len;
 	}
@@ -412,7 +460,8 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
-		RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+		RTE_ETH_RX_OFFLOAD_SCATTER;
 
 	return 0;
 }
@@ -599,6 +648,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 	pkt_q->in_port = dev->data->port_id;
 	pkt_q->vlan_strip = internals->vlan_strip;
 	pkt_q->timestamp_offloading = internals->timestamp_offloading;
+	pkt_q->scatter_enabled = !!(dev->data->dev_conf.rxmode.offloads &
+				    RTE_ETH_RX_OFFLOAD_SCATTER);
 
 	return 0;
 }
--
2.43.7