To handle possible multi-segment mbufs, the driver would allocate a worst-case 64 KB buffer on the stack. Since each Tx queue is single threaded, it is better to allocate the buffer from hugepage memory with rte_malloc() when the queue is set up.
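For illustration, the per-queue pattern is roughly the following. This is a minimal sketch, not the driver code itself: the struct and function names are invented stand-ins, while rte_malloc_socket(), rte_free() and RTE_CACHE_LINE_SIZE are the real DPDK APIs the patch uses.

#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_malloc.h>

/* Invented stand-in for the driver's struct pcap_tx_queue. */
struct tx_queue_sketch {
	uint8_t *bounce_buf;	/* scratch copy area for multi-segment mbufs */
};

/* One buffer per Tx queue, allocated once at queue setup. The queue is
 * single threaded, so the buffer needs no locking, and allocating it on
 * the queue's NUMA socket keeps the copy path local. */
static int
bounce_buf_setup(struct tx_queue_sketch *q, size_t snaplen,
		unsigned int socket_id)
{
	q->bounce_buf = rte_malloc_socket(NULL, snaplen,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (q->bounce_buf == NULL)
		return -ENOMEM;
	return 0;
}

/* Matching release at queue teardown. */
static void
bounce_buf_release(struct tx_queue_sketch *q)
{
	rte_free(q->bounce_buf);
	q->bounce_buf = NULL;
}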
Signed-off-by: Stephen Hemminger <[email protected]>
---
 drivers/net/pcap/pcap_ethdev.c | 41 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 15 deletions(-)

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index 61ba50e356..1a186142d3 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -12,6 +12,7 @@
 #include <errno.h>
 #include <sys/time.h>
 #include <sys/types.h>
+#include <unistd.h>
 
 #include <pcap.h>
 #include <rte_cycles.h>
@@ -91,6 +92,9 @@ struct pcap_tx_queue {
 	struct queue_stat tx_stat;
 	char name[PATH_MAX];
 	char type[ETH_PCAP_ARG_MAXLEN];
+
+	/* Temp buffer used for non-linear packets */
+	uint8_t *bounce_buf;
 };
 
 struct pmd_internals {
@@ -392,11 +396,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
 	pcap_dumper_t *dumper;
-	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
+	unsigned char *temp_data;
 	size_t len, caplen;
 
 	pp = rte_eth_devices[dumper_q->port_id].process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
+	temp_data = dumper_q->bounce_buf;
 
 	if (dumper == NULL || nb_pkts == 0)
 		return 0;
@@ -406,10 +411,6 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = caplen = rte_pktmbuf_pkt_len(mbuf);
-		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
-				len > sizeof(temp_data))) {
-			caplen = sizeof(temp_data);
-		}
 
 		calculate_timestamp(&header.ts);
 		header.len = len;
@@ -419,7 +420,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		 * a pointer to temp_data after copying into it.
 		 */
 		pcap_dump((u_char *)dumper, &header,
-				rte_pktmbuf_read(mbuf, 0, caplen, temp_data));
+			rte_pktmbuf_read(mbuf, 0, caplen, temp_data));
 
 		num_tx++;
 		tx_bytes += caplen;
@@ -474,11 +475,12 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
-	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
+	unsigned char *temp_data;
 	size_t len;
 
 	pp = rte_eth_devices[tx_queue->port_id].process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
+	temp_data = tx_queue->bounce_buf;
 
 	if (unlikely(nb_pkts == 0 || pcap == NULL))
 		return 0;
@@ -486,13 +488,6 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = rte_pktmbuf_pkt_len(mbuf);
-		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
-				len > sizeof(temp_data))) {
-			PMD_LOG(ERR,
-				"Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).",
-				len, sizeof(temp_data));
-			continue;
-		}
 
 		/* rte_pktmbuf_read() returns a pointer to the data directly
 		 * in the mbuf (when the mbuf is contiguous) or, otherwise,
@@ -962,7 +957,7 @@ static int
 eth_tx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t tx_queue_id,
 		uint16_t nb_tx_desc __rte_unused,
-		unsigned int socket_id __rte_unused,
+		unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct pmd_internals *internals = dev->data->dev_private;
@@ -970,11 +965,26 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
 
 	pcap_q->port_id = dev->data->port_id;
 	pcap_q->queue_id = tx_queue_id;
+	pcap_q->bounce_buf = rte_malloc_socket(NULL, RTE_ETH_PCAP_SNAPSHOT_LEN,
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (pcap_q->bounce_buf == NULL)
+		return -ENOMEM;
+
 	dev->data->tx_queues[tx_queue_id] = pcap_q;
 
 	return 0;
 }
 
+static void
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct pmd_internals *internals = dev->data->dev_private;
+	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
+
+	rte_free(pcap_q->bounce_buf);
+	pcap_q->bounce_buf = NULL;
+}
+
 static int
 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
@@ -1015,6 +1025,7 @@ static const struct eth_dev_ops ops = {
 	.dev_infos_get = eth_dev_info,
 	.rx_queue_setup = eth_rx_queue_setup,
 	.tx_queue_setup = eth_tx_queue_setup,
+	.tx_queue_release = eth_tx_queue_release,
 	.rx_queue_start = eth_rx_queue_start,
 	.tx_queue_start = eth_tx_queue_start,
 	.rx_queue_stop = eth_rx_queue_stop,
-- 
2.51.0

