On 1/16/2020 2:47 PM, Ferruh Yigit wrote: > On 1/13/2020 9:57 AM, Chenxu Di wrote: >> Add support to the ixgbe driver for the API rte_eth_tx_done_cleanup >> to force free consumed buffers on Tx ring. >> >> Signed-off-by: Chenxu Di <chenxux...@intel.com> >> --- >> drivers/net/ixgbe/ixgbe_ethdev.c | 2 + >> drivers/net/ixgbe/ixgbe_rxtx.c | 109 +++++++++++++++++++++++++++++++ >> drivers/net/ixgbe/ixgbe_rxtx.h | 2 +- >> 3 files changed, 112 insertions(+), 1 deletion(-) >> >> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c >> b/drivers/net/ixgbe/ixgbe_ethdev.c >> index 2c6fd0f13..75bdd391a 100644 >> --- a/drivers/net/ixgbe/ixgbe_ethdev.c >> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c >> @@ -601,6 +601,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { >> .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, >> .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, >> .tm_ops_get = ixgbe_tm_ops_get, >> + .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, >> }; >> >> /* >> @@ -649,6 +650,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { >> .reta_query = ixgbe_dev_rss_reta_query, >> .rss_hash_update = ixgbe_dev_rss_hash_update, >> .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, >> + .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, >> }; >> >> /* store statistics names and its offset in stats structure */ >> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c >> index fa572d184..a2e85ed5b 100644 >> --- a/drivers/net/ixgbe/ixgbe_rxtx.c >> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c >> @@ -2306,6 +2306,115 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue >> *txq) >> } >> } >> >> +static int >> +ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt) >> +{ >> + struct ixgbe_tx_entry *swr_ring = txq->sw_ring; >> + uint16_t i, tx_last, tx_id; >> + uint16_t nb_tx_free_last; >> + uint16_t nb_tx_to_clean; >> + uint32_t pkt_cnt; >> + >> + /* Start free mbuf from the next of tx_tail */ >> + tx_last = txq->tx_tail; >> + tx_id = 
swr_ring[tx_last].next_id; >> + >> + if (txq->nb_tx_free == 0 && ixgbe_xmit_cleanup(txq)) >> + return 0; >> + >> + nb_tx_to_clean = txq->nb_tx_free; >> + nb_tx_free_last = txq->nb_tx_free; >> + if (!free_cnt) >> + free_cnt = txq->nb_tx_desc; >> + >> + /* Loop through swr_ring to count the amount of >> + * freeable mubfs and packets. >> + */ >> + for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { >> + for (i = 0; i < nb_tx_to_clean && >> + pkt_cnt < free_cnt && >> + tx_id != tx_last; i++) { >> + if (swr_ring[tx_id].mbuf != NULL) { >> + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); >> + swr_ring[tx_id].mbuf = NULL; >> + >> + /* >> + * last segment in the packet, >> + * increment packet count >> + */ >> + pkt_cnt += (swr_ring[tx_id].last_id == tx_id); >> + } >> + >> + tx_id = swr_ring[tx_id].next_id; >> + } >> + >> + if (txq->tx_rs_thresh > txq->nb_tx_desc - >> + txq->nb_tx_free || tx_id == tx_last) >> + break; >> + >> + if (pkt_cnt < free_cnt) { >> + if (ixgbe_xmit_cleanup(txq)) >> + break; >> + >> + nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; >> + nb_tx_free_last = txq->nb_tx_free; >> + } >> + } >> + >> + return (int)pkt_cnt; >> +} >> + >> +static int >> +ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq, >> + uint32_t free_cnt) >> +{ >> + int i, n, cnt; >> + >> + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) >> + free_cnt = txq->nb_tx_desc; >> + >> + cnt = free_cnt - free_cnt % txq->tx_rs_thresh; >> + >> + for (i = 0; i < cnt; i += n) { >> + if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) >> + break; >> + >> + n = ixgbe_tx_free_bufs(txq); >> + >> + if (n == 0) >> + break; >> + } >> + >> + return i; >> +} >> + >> +static int >> +ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused, >> + uint32_t free_cnt __rte_unused) >> +{ >> + return -ENOTSUP; >> +} >> + >> +int >> +ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) >> +{ >> + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; >> + if (txq->offloads == 0 && >> 
+#ifdef RTE_LIBRTE_SECURITY >> + !(txq->using_ipsec) && >> +#endif >> + txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) >> +#ifdef RTE_IXGBE_INC_VECTOR >> + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && >> + (rte_eal_process_type() != RTE_PROC_PRIMARY || >> + txq->sw_ring_v != NULL)) >> + return ixgbe_tx_done_cleanup_vec(txq, free_cnt); >> +#endif >> + return ixgbe_tx_done_cleanup_simple(txq, free_cnt); >> + >> + return ixgbe_tx_done_cleanup_full(txq, free_cnt); >> +} >> + > > Missing curly parentheses in the 'if' blocks are causing confusion on which > return path to take. > > the above code is like this: > if (txq->offloads == 0 && ...) > if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && ...) > return ixgbe_tx_done_cleanup_vec(txq, free_cnt); > return ixgbe_tx_done_cleanup_simple(txq, free_cnt); <----- [*] > return ixgbe_tx_done_cleanup_full(txq, free_cnt); > > It is not clear, and looks like wrong based on indentation, when to get the > [*] > path above. > > I will add curly parentheses while merging. > > Btw, why we need "#ifdef RTE_IXGBE_INC_VECTOR" here, can't we remove it? >
Since 'ixgbe_tx_done_cleanup_vec()' already implemented in this file, instead of vector specific files, I am removing the ifdef. So making changes [1] and function becomes [2]. Please validate it in next-net. [1] diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index a2e85ed5b..f41dc13d5 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -2403,14 +2403,15 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) #ifdef RTE_LIBRTE_SECURITY !(txq->using_ipsec) && #endif - txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) -#ifdef RTE_IXGBE_INC_VECTOR + txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) { if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && (rte_eal_process_type() != RTE_PROC_PRIMARY || - txq->sw_ring_v != NULL)) + txq->sw_ring_v != NULL)) { return ixgbe_tx_done_cleanup_vec(txq, free_cnt); -#endif - return ixgbe_tx_done_cleanup_simple(txq, free_cnt); + } else { + return ixgbe_tx_done_cleanup_simple(txq, free_cnt); + } + } return ixgbe_tx_done_cleanup_full(txq, free_cnt); } [2] int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) { struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; if (txq->offloads == 0 && #ifdef RTE_LIBRTE_SECURITY !(txq->using_ipsec) && #endif txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) { if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && (rte_eal_process_type() != RTE_PROC_PRIMARY || txq->sw_ring_v != NULL)) { return ixgbe_tx_done_cleanup_vec(txq, free_cnt); } else { return ixgbe_tx_done_cleanup_simple(txq, free_cnt); } } return ixgbe_tx_done_cleanup_full(txq, free_cnt); }