The command "tso set <tso_segsz> <port_id>" is used to enable UFO; see
commit ce8e6e742807 ("app/testpmd: support UFO in checksum engine").
The above patch sets RTE_MBUF_F_TX_UDP_SEG to enable UFO whenever
tso_segsz is set. Then tx_prepare() may call rte_net_intel_cksum_prepare()
to compute the pseudo-header checksum (because some PMDs may support TSO).
As a result, if the peer sends UDP packets, all packets are received with
UDP checksum errors on PMDs that only support TSO.
So enabling UFO must also depend on whether the driver has the
RTE_ETH_TX_OFFLOAD_UDP_TSO capability. Similarly, TSO needs the same check.
In addition, this patch also reworks cmd_tso_set_parsed() so that it
handles both TSO and UFO consistently.
Fixes: ce8e6e742807 ("app/testpmd: support UFO in checksum engine")
Signed-off-by: Huisong Li <lihuis...@huawei.com>
---
v2: add handle for tunnel TSO offload in process_inner_cksums
---
app/test-pmd/cmdline.c | 47 +++++++++++++++++++++--------------------
app/test-pmd/csumonly.c | 11 ++++++++--
2 files changed, 33 insertions(+), 25 deletions(-)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0d0723f659..8be593d405 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -4906,6 +4906,7 @@ cmd_tso_set_parsed(void *parsed_result,
{
struct cmd_tso_set_result *res = parsed_result;
struct rte_eth_dev_info dev_info;
+ uint64_t offloads;
int ret;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
@@ -4922,37 +4923,37 @@ cmd_tso_set_parsed(void *parsed_result,
if (ret != 0)
return;
- if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
- fprintf(stderr, "Error: TSO is not supported by port %d\n",
- res->port_id);
- return;
+ if (ports[res->port_id].tso_segsz != 0) {
+ if ((dev_info.tx_offload_capa & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TSO)) == 0) {
+ fprintf(stderr, "Error: both TSO and UFO are not supported by port %d\n",
+ res->port_id);
+ return;
+ }
+ /* display warnings if configuration is not supported by the NIC */
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0)
+ fprintf(stderr, "Warning: port %d doesn't support TSO\n",
+ res->port_id);
+ if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TSO) == 0)
+ fprintf(stderr, "Warning: port %d doesn't support UFO\n",
+ res->port_id);
}
if (ports[res->port_id].tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
- ~RTE_ETH_TX_OFFLOAD_TCP_TSO;
- printf("TSO for non-tunneled packets is disabled\n");
+ ~(RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO);
+ printf("TSO and UFO for non-tunneled packets is disabled\n");
} else {
- ports[res->port_id].dev_conf.txmode.offloads |=
- RTE_ETH_TX_OFFLOAD_TCP_TSO;
- printf("TSO segment size for non-tunneled packets is %d\n",
+ offloads = (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) ?
+ RTE_ETH_TX_OFFLOAD_TCP_TSO : 0;
+ offloads |= (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TSO) ?
+ RTE_ETH_TX_OFFLOAD_UDP_TSO : 0;
+ ports[res->port_id].dev_conf.txmode.offloads |= offloads;
+ printf("segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
- cmd_config_queue_tx_offloads(&ports[res->port_id]);
-
- /* display warnings if configuration is not supported by the NIC */
- ret = eth_dev_info_get_print_err(res->port_id, &dev_info);
- if (ret != 0)
- return;
-
- if ((ports[res->port_id].tso_segsz != 0) &&
- (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
- fprintf(stderr,
- "Warning: TSO enabled but not supported by port %d\n",
- res->port_id);
- }
+ cmd_config_queue_tx_offloads(&ports[res->port_id]);
cmd_reconfig_device_queue(res->port_id, 1, 1);
}
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index c103e54111..21210aff43 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -466,6 +466,12 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
uint64_t ol_flags = 0;
uint32_t max_pkt_len, tso_segsz = 0;
uint16_t l4_off;
+ uint64_t all_tunnel_tso = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
+ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO;
/* ensure packet is large enough to require tso */
if (!info->is_tunnel) {
@@ -505,7 +511,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
- if (tso_segsz)
- if (tso_segsz)
+ if (tso_segsz && (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO))
ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
else if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
@@ -528,7 +534,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
#endif
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
- if (tso_segsz)
+ if (tso_segsz &&
+ (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO | all_tunnel_tso)))
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;