Since the i40e and ice drivers have the same checksum offload logic, merge their functions into one. Future rework should enable this common function to be used by more drivers as well.
Signed-off-by: Bruce Richardson <[email protected]> --- drivers/net/intel/common/tx_scalar.h | 58 +++++++++++++++++++++++++++ drivers/net/intel/i40e/i40e_rxtx.c | 52 +----------------------- drivers/net/intel/i40e/i40e_rxtx.h | 1 + drivers/net/intel/ice/ice_rxtx.c | 60 +--------------------------- drivers/net/intel/ice/ice_rxtx.h | 1 + 5 files changed, 62 insertions(+), 110 deletions(-) diff --git a/drivers/net/intel/common/tx_scalar.h b/drivers/net/intel/common/tx_scalar.h index 573f5136a9..cf0dcb4b2c 100644 --- a/drivers/net/intel/common/tx_scalar.h +++ b/drivers/net/intel/common/tx_scalar.h @@ -59,6 +59,64 @@ ci_tx_xmit_cleanup(struct ci_tx_queue *txq) return 0; } +/* Common checksum enable function for Intel drivers (ice, i40e, etc.) */ +static inline void +ci_txd_enable_checksum(uint64_t ol_flags, + uint32_t *td_cmd, + uint32_t *td_offset, + union ci_tx_offload tx_offload) +{ + /* Enable L3 checksum offloads */ + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { + *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM; + *td_offset |= (tx_offload.l3_len >> 2) << + CI_TX_DESC_LEN_IPLEN_S; + } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { + *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4; + *td_offset |= (tx_offload.l3_len >> 2) << + CI_TX_DESC_LEN_IPLEN_S; + } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { + *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV6; + *td_offset |= (tx_offload.l3_len >> 2) << + CI_TX_DESC_LEN_IPLEN_S; + } + + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { + *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (tx_offload.l4_len >> 2) << + CI_TX_DESC_LEN_L4_LEN_S; + return; + } + + if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) { + *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (tx_offload.l4_len >> 2) << + CI_TX_DESC_LEN_L4_LEN_S; + return; + } + + /* Enable L4 checksum offloads */ + switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { + case RTE_MBUF_F_TX_TCP_CKSUM: + *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << + CI_TX_DESC_LEN_L4_LEN_S; + break; + case 
RTE_MBUF_F_TX_SCTP_CKSUM: + *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << + CI_TX_DESC_LEN_L4_LEN_S; + break; + case RTE_MBUF_F_TX_UDP_CKSUM: + *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << + CI_TX_DESC_LEN_L4_LEN_S; + break; + default: + break; + } +} + static inline uint16_t ci_div_roundup16(uint16_t x, uint16_t y) { diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c index 12a21407c5..c318b4c84e 100644 --- a/drivers/net/intel/i40e/i40e_rxtx.c +++ b/drivers/net/intel/i40e/i40e_rxtx.c @@ -310,56 +310,6 @@ i40e_parse_tunneling_params(uint64_t ol_flags, *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; } -static inline void -i40e_txd_enable_checksum(uint64_t ol_flags, - uint32_t *td_cmd, - uint32_t *td_offset, - union ci_tx_offload tx_offload) -{ - /* Enable L3 checksum offloads */ - if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { - *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM; - *td_offset |= (tx_offload.l3_len >> 2) - << CI_TX_DESC_LEN_IPLEN_S; - } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { - *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4; - *td_offset |= (tx_offload.l3_len >> 2) - << CI_TX_DESC_LEN_IPLEN_S; - } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { - *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV6; - *td_offset |= (tx_offload.l3_len >> 2) - << CI_TX_DESC_LEN_IPLEN_S; - } - - if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (tx_offload.l4_len >> 2) - << CI_TX_DESC_LEN_L4_LEN_S; - return; - } - - /* Enable L4 checksum offloads */ - switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { - case RTE_MBUF_F_TX_TCP_CKSUM: - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - break; - case RTE_MBUF_F_TX_SCTP_CKSUM: - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - break; - case 
RTE_MBUF_F_TX_UDP_CKSUM: - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - break; - default: - break; - } -} - /* Construct the tx flags */ static inline uint64_t i40e_build_ctob(uint32_t td_cmd, @@ -1167,7 +1117,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Enable checksum offloading */ if (ol_flags & CI_TX_CKSUM_OFFLOAD_MASK) - i40e_txd_enable_checksum(ol_flags, &td_cmd, + ci_txd_enable_checksum(ol_flags, &td_cmd, &td_offset, tx_offload); if (nb_ctx) { diff --git a/drivers/net/intel/i40e/i40e_rxtx.h b/drivers/net/intel/i40e/i40e_rxtx.h index 307ffa3049..db8525d52d 100644 --- a/drivers/net/intel/i40e/i40e_rxtx.h +++ b/drivers/net/intel/i40e/i40e_rxtx.h @@ -100,6 +100,7 @@ enum i40e_header_split_mode { RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ RTE_ETH_TX_OFFLOAD_TCP_TSO | \ + RTE_ETH_TX_OFFLOAD_UDP_TSO | \ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \ diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c index 63bce7bd9e..4792aa9a8b 100644 --- a/drivers/net/intel/ice/ice_rxtx.c +++ b/drivers/net/intel/ice/ice_rxtx.c @@ -2954,64 +2954,6 @@ ice_parse_tunneling_params(uint64_t ol_flags, *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; } -static inline void -ice_txd_enable_checksum(uint64_t ol_flags, - uint32_t *td_cmd, - uint32_t *td_offset, - union ci_tx_offload tx_offload) -{ - - /* Enable L3 checksum offloads */ - if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { - *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM; - *td_offset |= (tx_offload.l3_len >> 2) << - CI_TX_DESC_LEN_IPLEN_S; - } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { - *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4; - *td_offset |= (tx_offload.l3_len >> 2) << - CI_TX_DESC_LEN_IPLEN_S; - } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { - *td_cmd |= CI_TX_DESC_CMD_IIPT_IPV6; - *td_offset |= (tx_offload.l3_len >> 2) 
<< - CI_TX_DESC_LEN_IPLEN_S; - } - - if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (tx_offload.l4_len >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - return; - } - - if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) { - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (tx_offload.l4_len >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - return; - } - - /* Enable L4 checksum offloads */ - switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { - case RTE_MBUF_F_TX_TCP_CKSUM: - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - break; - case RTE_MBUF_F_TX_SCTP_CKSUM: - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - break; - case RTE_MBUF_F_TX_UDP_CKSUM: - *td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << - CI_TX_DESC_LEN_L4_LEN_S; - break; - default: - break; - } -} - /* Construct the tx flags */ static inline uint64_t ice_build_ctob(uint32_t td_cmd, @@ -3209,7 +3151,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Enable checksum offloading */ if (ol_flags & CI_TX_CKSUM_OFFLOAD_MASK) - ice_txd_enable_checksum(ol_flags, &td_cmd, + ci_txd_enable_checksum(ol_flags, &td_cmd, &td_offset, tx_offload); if (nb_ctx) { diff --git a/drivers/net/intel/ice/ice_rxtx.h b/drivers/net/intel/ice/ice_rxtx.h index cd5fa93d1c..7d6480b410 100644 --- a/drivers/net/intel/ice/ice_rxtx.h +++ b/drivers/net/intel/ice/ice_rxtx.h @@ -112,6 +112,7 @@ #define ICE_TX_SCALAR_OFFLOADS ( \ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ RTE_ETH_TX_OFFLOAD_TCP_TSO | \ + RTE_ETH_TX_OFFLOAD_UDP_TSO | \ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | \ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \ -- 2.51.0

