On 3/30/2023 7:29 AM, Feifei Wang wrote:
> Define the specific function implementation for the ixgbe driver.
> Currently, recycle buffer mode supports the 128-bit vector path
> and can be enabled in both fast free and no fast free mode.
> 
> Suggested-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
> Signed-off-by: Feifei Wang <feifei.wa...@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c |   1 +
>  drivers/net/ixgbe/ixgbe_ethdev.h |   3 +
>  drivers/net/ixgbe/ixgbe_rxtx.c   | 153 +++++++++++++++++++++++++++++++
>  drivers/net/ixgbe/ixgbe_rxtx.h   |   4 +
>  4 files changed, 161 insertions(+)
> 

What do you think about extracting the buf_recycle related code in drivers into
its own file? This may help make maintainership of the code easier.
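
For example (purely illustrative -- the file name and split below are my own
guess, not something the series has to follow), a small dedicated header could
keep the recycle entry points together while ixgbe_rxtx.c keeps only the
descriptor handling:

    /* hypothetical drivers/net/ixgbe/ixgbe_buf_recycle.h */
    #include <stdint.h>

    /* Rx queue buffer-recycle info, as introduced by this series in ethdev. */
    struct rte_eth_rxq_buf_recycle_info;

    /* Stash freed Tx mbufs into the Rx buffer ring described by
     * rxq_buf_recycle_info; the implementation would live in ixgbe_buf_recycle.c.
     */
    uint16_t ixgbe_tx_buf_stash_vec(void *tx_queue,
                    struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);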

<...>

> +uint16_t
> +ixgbe_tx_buf_stash_vec(void *tx_queue,
> +             struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
> +{
> +     struct ixgbe_tx_queue *txq = tx_queue;
> +     struct ixgbe_tx_entry *txep;
> +     struct rte_mbuf **rxep;
> +     struct rte_mbuf *m[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
> +     int i, j, n;
> +     uint32_t status;
> +     uint16_t avail = 0;
> +     uint16_t buf_ring_size = rxq_buf_recycle_info->buf_ring_size;
> +     uint16_t mask = rxq_buf_recycle_info->buf_ring_size - 1;
> +     uint16_t refill_request = rxq_buf_recycle_info->refill_request;
> +     uint16_t refill_head = *rxq_buf_recycle_info->refill_head;
> +     uint16_t receive_tail = *rxq_buf_recycle_info->receive_tail;
> +
> +     /* Get available recycling Rx buffers. */
> +     avail = (buf_ring_size - (refill_head - receive_tail)) & mask;
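
(A quick worked example with my own numbers, just to show how the mask handles
the wrap for a power-of-two ring: with buf_ring_size = 128, refill_head = 10
and receive_tail = 120, (128 - (10 - 120)) & 127 comes out as 110 free slots.)
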
> +
> +     /* Check Tx free thresh and Rx available space. */
> +     if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)
> +             return 0;
> +
> +     /* check DD bits on threshold descriptor */
> +     status = txq->tx_ring[txq->tx_next_dd].wb.status;
> +     if (!(status & IXGBE_ADVTXD_STAT_DD))
> +             return 0;
> +
> +     n = txq->tx_rs_thresh;
> +
> +     /* Buffer recycling does not support Rx buffer ring wraparound.
> +      * There are two cases for this:
> +      *
> +      * case 1: The refill head of the Rx buffer ring needs to be aligned
> +      * with the buffer ring size. In this case, the number of Tx freeing
> +      * buffers should be equal to refill_request.
> +      *
> +      * case 2: The refill head of the Rx buffer ring does not need to be
> +      * aligned with the buffer ring size. In this case, the update of the
> +      * refill head cannot exceed the Rx buffer ring size.
> +      */
> +     if (refill_request != n ||
> +             (!refill_request && (refill_head + n > buf_ring_size)))
> +             return 0;
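
(Also just for illustration, following the comment above: with
refill_request = 32 the recycle only proceeds when exactly
n = tx_rs_thresh = 32 buffers are being freed at once, and with
refill_request = 0 it is meant to bail out whenever refill_head + n would run
past buf_ring_size -- e.g. refill_head = 100, n = 32, buf_ring_size = 128
gives 132 > 128, so no recycling on that call.)
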
> +
> +     /* First buffer to free from S/W ring is at index
> +      * tx_next_dd - (tx_rs_thresh-1).
> +      */
> +     txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
> +     rxep = rxq_buf_recycle_info->buf_ring;
> +     rxep += refill_head;
> +
> +     if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
> +             /* Directly put mbufs from Tx to Rx. */
> +             for (i = 0; i < n; i++, rxep++, txep++)
> +                     *rxep = txep[0].mbuf;
> +     } else {
> +             for (i = 0, j = 0; i < n; i++) {
> +                     /* Avoid txq contains buffers from expected mempoo. */

mempool (unless trying to introduce a new concept :)

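In case it helps to spell the check out, here is a rough sketch (my guess, not
the patch code) of how validating one Tx mbuf before stashing it could look in
the no-fast-free path, with "mp" standing for the mempool the Rx queue expects:

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    static inline int
    stash_one_checked(struct rte_mbuf *tx_mbuf, struct rte_mbuf **rx_slot,
                    struct rte_mempool *mp)
    {
            /* Drop our reference; prefree_seg returns the mbuf only when it
             * held the last reference, so the segment can be reused directly.
             */
            struct rte_mbuf *m = rte_pktmbuf_prefree_seg(tx_mbuf);

            if (m == NULL || m->pool != mp)
                    return -1;      /* caller falls back to the normal free path */

            *rx_slot = m;           /* safe to hand over to the Rx buffer ring */
            return 0;
    }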