On Thu, Jan 16, 2025 at 06:10:38PM +0100, Petr Oros wrote:
> 
> On 16. 01. 25 16:39, Maciej Fijalkowski wrote:
> > Introduce a new helper, ice_put_rx_mbuf(), that goes through the frags
> > gathered for the current frame and calls ice_put_rx_buf() on each of
> > them. The current logic, which was supposed to simplify and optimize the
> > driver by going through a batch of all buffers processed in the current
> > NAPI instance, turned out to be broken for jumbo frames under the very
> > heavy load generated by multi-threaded iperf and an nginx/wrk pair
> > between server and client. The delay introduced by the approach we are
> > dropping is simply too big; the decision about page recycling/releasing
> > has to be taken as quickly as possible.
> > 
> > While at it, address an error path of ice_add_xdp_frag() - we have been
> > missing the buffer put there since day 1.
> > 
> > As a nice side effect we get rid of the annoying and repetitive three-liner:
> > 
> >     xdp->data = NULL;
> >     rx_ring->first_desc = ntc;
> >     rx_ring->nr_frags = 0;
> > 
> > by embedding it within introduced routine.
> > 
> > Fixes: 1dc1a7e7f410 ("ice: Centrallize Rx buffer recycling")
> > Reported-and-tested-by: Xu Du <[email protected]>
> > Co-developed-by: Jacob Keller <[email protected]>
> > Signed-off-by: Jacob Keller <[email protected]>
> > Signed-off-by: Maciej Fijalkowski <[email protected]>
> > ---
> >   drivers/net/ethernet/intel/ice/ice_txrx.c | 67 +++++++++++++----------
> >   1 file changed, 38 insertions(+), 29 deletions(-)
> > 
> > diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
> > index 5d2d7736fd5f..501df1bc881d 100644
> > --- a/drivers/net/ethernet/intel/ice/ice_txrx.c
> > +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
> > @@ -1103,6 +1103,37 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
> >     rx_buf->page = NULL;
> >   }
> > +static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
> > +                       u32 *xdp_xmit)
> > +{
> > +   u32 nr_frags = rx_ring->nr_frags + 1;
> > +   u32 idx = rx_ring->first_desc;
> > +   u32 cnt = rx_ring->count;
> > +   struct ice_rx_buf *buf;
> > +
> > +   for (int i = 0; i < nr_frags; i++) {
> > +           buf = &rx_ring->rx_buf[idx];
> > +
> > +           if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
> > +                   ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
> > +                   *xdp_xmit |= buf->act;
> > +           } else if (buf->act & ICE_XDP_CONSUMED) {
> > +                   buf->pagecnt_bias++;
> > +           } else if (buf->act == ICE_XDP_PASS) {
> > +                   ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
> > +           }
> > +
> > +           ice_put_rx_buf(rx_ring, buf);
> > +
> > +           if (++idx == cnt)
> > +                   idx = 0;
> > +   }
> > +
> > +   xdp->data = NULL;
> > +   rx_ring->first_desc = ntc;
> ntc is not declared in this scope

Yikes! That happens when you change things right before submission. Thanks
for being faster than the build bots.

I'll wait 24h and send a v2.
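
FWIW, one way to fix it would be to pass ntc down from ice_clean_rx_irq();
below is a rough, untested sketch of the helper with that extra parameter.
The ntc argument is just for illustration (v2 may well look different) -
callers would then do e.g. ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc):

static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
                            u32 *xdp_xmit, u32 ntc)
{
        u32 nr_frags = rx_ring->nr_frags + 1;
        u32 idx = rx_ring->first_desc;
        u32 cnt = rx_ring->count;
        struct ice_rx_buf *buf;

        for (int i = 0; i < nr_frags; i++) {
                buf = &rx_ring->rx_buf[idx];

                if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
                        ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
                        *xdp_xmit |= buf->act;
                } else if (buf->act & ICE_XDP_CONSUMED) {
                        buf->pagecnt_bias++;
                } else if (buf->act == ICE_XDP_PASS) {
                        ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
                }

                ice_put_rx_buf(rx_ring, buf);

                if (++idx == cnt)
                        idx = 0;
        }

        /* ntc now comes from the caller instead of a local variable that
         * only exists inside ice_clean_rx_irq()
         */
        xdp->data = NULL;
        rx_ring->first_desc = ntc;
        rx_ring->nr_frags = 0;
}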

> > +   rx_ring->nr_frags = 0;
> > +}
> > +
> >   /**
> >    * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
> >    * @rx_ring: Rx descriptor ring to transact packets on
> > @@ -1120,7 +1151,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >     unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
> >     unsigned int offset = rx_ring->rx_offset;
> >     struct xdp_buff *xdp = &rx_ring->xdp;
> > -   u32 cached_ntc = rx_ring->first_desc;
> >     struct ice_tx_ring *xdp_ring = NULL;
> >     struct bpf_prog *xdp_prog = NULL;
> >     u32 ntc = rx_ring->next_to_clean;
> > @@ -1128,7 +1158,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >     u32 xdp_xmit = 0;
> >     u32 cached_ntu;
> >     bool failure;
> > -   u32 first;
> >     xdp_prog = READ_ONCE(rx_ring->xdp_prog);
> >     if (xdp_prog) {
> > @@ -1190,6 +1219,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >                     xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
> >                     xdp_buff_clear_frags_flag(xdp);
> >             } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
> > +                   ice_put_rx_mbuf(rx_ring, xdp, NULL);
> >                     break;
> >             }
> >             if (++ntc == cnt)
> > @@ -1205,9 +1235,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >             total_rx_bytes += xdp_get_buff_len(xdp);
> >             total_rx_pkts++;
> > -           xdp->data = NULL;
> > -           rx_ring->first_desc = ntc;
> > -           rx_ring->nr_frags = 0;
> > +           ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit);
> > +
> >             continue;
> >   construct_skb:
> >             if (likely(ice_ring_uses_build_skb(rx_ring)))
> > @@ -1221,14 +1250,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >                     if (unlikely(xdp_buff_has_frags(xdp)))
> >                             ice_set_rx_bufs_act(xdp, rx_ring,
> >                                                 ICE_XDP_CONSUMED);
> > -                   xdp->data = NULL;
> > -                   rx_ring->first_desc = ntc;
> > -                   rx_ring->nr_frags = 0;
> > -                   break;
> >             }
> > -           xdp->data = NULL;
> > -           rx_ring->first_desc = ntc;
> > -           rx_ring->nr_frags = 0;
> > +           ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit);
> > +
> > +           if (!skb)
> > +                   break;
> >             stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
> >             if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
> > @@ -1257,23 +1283,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
> >             total_rx_pkts++;
> >     }
> > -   first = rx_ring->first_desc;
> > -   while (cached_ntc != first) {
> > -           struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
> > -
> > -           if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
> > -                   ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
> > -                   xdp_xmit |= buf->act;
> > -           } else if (buf->act & ICE_XDP_CONSUMED) {
> > -                   buf->pagecnt_bias++;
> > -           } else if (buf->act == ICE_XDP_PASS) {
> > -                   ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
> > -           }
> > -
> > -           ice_put_rx_buf(rx_ring, buf);
> > -           if (++cached_ntc >= cnt)
> > -                   cached_ntc = 0;
> > -   }
> >     rx_ring->next_to_clean = ntc;
> >     /* return up to cleaned_count buffers to hardware */
> >     failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
> 
