Fix the indentation that was deliberately left untouched in the previous patch to make that change easier to review.
This patch must be squashed into the previous one before merging.

Signed-off-by: Thomas Monjalon <[email protected]>
---
 drivers/net/mlx5/mlx5_rx.c      | 146 ++++++++++++++++----------------
 drivers/net/mlx5/mlx5_rxq.c     |  32 +++----
 drivers/net/mlx5/mlx5_trigger.c |  18 ++--
 3 files changed, 97 insertions(+), 99 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 6d4dd85e66..12c4bb10bd 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -1071,84 +1071,84 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, 
uint16_t pkts_n)
                rte_prefetch0(cqe);
                rte_prefetch0(wqe);
                if (seg->pool) {
-               /* Allocate buf from the same pool. */
-               rep = rte_mbuf_raw_alloc(seg->pool);
-               if (unlikely(rep == NULL)) {
-                       ++rxq->stats.rx_nombuf;
-                       if (!pkt) {
-                               /*
-                                * no buffers before we even started,
-                                * bail out silently.
-                                */
-                               break;
-                       }
-                       while (pkt != seg) {
-                               MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
-                               rep = NEXT(pkt);
-                               NEXT(pkt) = NULL;
-                               NB_SEGS(pkt) = 1;
-                               rte_mbuf_raw_free(pkt);
-                               pkt = rep;
-                       }
-                       rq_ci >>= sges_n;
-                       ++rq_ci;
-                       rq_ci <<= sges_n;
-                       break;
-               }
-               if (!pkt) {
-                       cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
-                       len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask,
-                                              &mcqe, &skip_cnt, false, NULL);
-                       if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
-                               /* We drop packets with non-critical errors */
-                               rte_mbuf_raw_free(rep);
-                               if (len == MLX5_CRITICAL_ERROR_CQE_RET) {
-                                       rq_ci = rxq->rq_ci << sges_n;
+                       /* Allocate buf from the same pool. */
+                       rep = rte_mbuf_raw_alloc(seg->pool);
+                       if (unlikely(rep == NULL)) {
+                               ++rxq->stats.rx_nombuf;
+                               if (!pkt) {
+                                       /*
+                                        * no buffers before we even started,
+                                        * bail out silently.
+                                        */
                                        break;
                                }
-                               /* Skip specified amount of error CQEs packets 
*/
+                               while (pkt != seg) {
+                                       MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
+                                       rep = NEXT(pkt);
+                                       NEXT(pkt) = NULL;
+                                       NB_SEGS(pkt) = 1;
+                                       rte_mbuf_raw_free(pkt);
+                                       pkt = rep;
+                               }
                                rq_ci >>= sges_n;
-                               rq_ci += skip_cnt;
+                               ++rq_ci;
                                rq_ci <<= sges_n;
-                               MLX5_ASSERT(!pkt);
-                               continue;
-                       }
-                       if (len == 0) {
-                               rte_mbuf_raw_free(rep);
                                break;
                        }
-                       pkt = seg;
-                       MLX5_ASSERT(len >= (int)(rxq->crc_present << 2));
-                       pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
-                       if (rxq->cqe_comp_layout && mcqe)
-                               cqe = &rxq->title_cqe;
-                       rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
-                       if (rxq->crc_present)
-                               len -= RTE_ETHER_CRC_LEN;
-                       PKT_LEN(pkt) = len;
-                       if (cqe->lro_num_seg > 1) {
-                               mlx5_lro_update_hdr
-                                       (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
-                                        mcqe, rxq, len);
-                               pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
-                               pkt->tso_segsz = len / cqe->lro_num_seg;
+                       if (!pkt) {
+                               cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
+                               len = mlx5_rx_poll_len(rxq, cqe, cqe_n, 
cqe_mask,
+                                                      &mcqe, &skip_cnt, false, 
NULL);
+                               if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
+                                       /* We drop packets with non-critical 
errors */
+                                       rte_mbuf_raw_free(rep);
+                                       if (len == MLX5_CRITICAL_ERROR_CQE_RET) 
{
+                                               rq_ci = rxq->rq_ci << sges_n;
+                                               break;
+                                       }
+                                       /* Skip specified amount of error CQEs 
packets */
+                                       rq_ci >>= sges_n;
+                                       rq_ci += skip_cnt;
+                                       rq_ci <<= sges_n;
+                                       MLX5_ASSERT(!pkt);
+                                       continue;
+                               }
+                               if (len == 0) {
+                                       rte_mbuf_raw_free(rep);
+                                       break;
+                               }
+                               pkt = seg;
+                               MLX5_ASSERT(len >= (int)(rxq->crc_present << 
2));
+                               pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+                               if (rxq->cqe_comp_layout && mcqe)
+                                       cqe = &rxq->title_cqe;
+                               rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
+                               if (rxq->crc_present)
+                                       len -= RTE_ETHER_CRC_LEN;
+                               PKT_LEN(pkt) = len;
+                               if (cqe->lro_num_seg > 1) {
+                                       mlx5_lro_update_hdr
+                                               (rte_pktmbuf_mtod(pkt, uint8_t 
*), cqe,
+                                                mcqe, rxq, len);
+                                       pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
+                                       pkt->tso_segsz = len / cqe->lro_num_seg;
+                               }
                        }
-               }
-               tail = seg;
-               DATA_LEN(rep) = DATA_LEN(seg);
-               PKT_LEN(rep) = PKT_LEN(seg);
-               SET_DATA_OFF(rep, DATA_OFF(seg));
-               PORT(rep) = PORT(seg);
-               (*rxq->elts)[idx] = rep;
-               /*
-                * Fill NIC descriptor with the new buffer. The lkey and size
-                * of the buffers are already known, only the buffer address
-                * changes.
-                */
-               wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
-               /* If there's only one MR, no need to replace LKey in WQE. */
-               if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
-                       wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
+                       tail = seg;
+                       DATA_LEN(rep) = DATA_LEN(seg);
+                       PKT_LEN(rep) = PKT_LEN(seg);
+                       SET_DATA_OFF(rep, DATA_OFF(seg));
+                       PORT(rep) = PORT(seg);
+                       (*rxq->elts)[idx] = rep;
+                       /*
+                        * Fill NIC descriptor with the new buffer. The lkey 
and size
+                        * of the buffers are already known, only the buffer 
address
+                        * changes.
+                        */
+                       wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, 
uintptr_t));
+                       /* If there's only one MR, no need to replace LKey in 
WQE. */
+                       if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) 
> 1))
+                               wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
                }
                if (len > DATA_LEN(seg)) {
                        if (seg->pool)
@@ -1159,8 +1159,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, 
uint16_t pkts_n)
                        continue;
                }
                if (seg->pool) {
-               DATA_LEN(seg) = len;
-               data_seg_len += len;
+                       DATA_LEN(seg) = len;
+                       data_seg_len += len;
                }
                PKT_LEN(pkt) = RTE_MIN(PKT_LEN(pkt), data_seg_len);
 #ifdef MLX5_PMD_SOFT_COUNTERS
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 3fae189fa4..6ca29f7543 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -152,22 +152,22 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
                struct rte_mbuf *buf;
 
                if (seg->mp) {
-               buf = rte_pktmbuf_alloc(seg->mp);
-               if (buf == NULL) {
-                       if (rxq_ctrl->share_group == 0)
-                               DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
-                                       RXQ_PORT_ID(rxq_ctrl),
-                                       rxq_ctrl->rxq.idx);
-                       else
-                               DRV_LOG(ERR, "share group %u queue %u empty 
mbuf pool",
-                                       rxq_ctrl->share_group,
-                                       rxq_ctrl->share_qid);
-                       rte_errno = ENOMEM;
-                       goto error;
-               }
-               /* Only vectored Rx routines rely on headroom size. */
-               MLX5_ASSERT(!has_vec_support ||
-                           DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
+                       buf = rte_pktmbuf_alloc(seg->mp);
+                       if (buf == NULL) {
+                               if (rxq_ctrl->share_group == 0)
+                                       DRV_LOG(ERR, "port %u queue %u empty 
mbuf pool",
+                                               RXQ_PORT_ID(rxq_ctrl),
+                                               rxq_ctrl->rxq.idx);
+                               else
+                                       DRV_LOG(ERR, "share group %u queue %u 
empty mbuf pool",
+                                               rxq_ctrl->share_group,
+                                               rxq_ctrl->share_qid);
+                               rte_errno = ENOMEM;
+                               goto error;
+                       }
+                       /* Only vectored Rx routines rely on headroom size. */
+                       MLX5_ASSERT(!has_vec_support ||
+                                   DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
                } else {
                        buf = seg->null_mbuf;
                }
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 5b04d9a234..ac966c51b4 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -164,16 +164,14 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
                seg = &rxq_ctrl->rxq.rxseg[s];
                mp = seg->mp;
                if (mp) { /* Regular segment */
-               bool is_extmem = (rte_pktmbuf_priv_flags(mp) &
-                            RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
-               ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
-                                              is_extmem);
-               if (ret < 0 && rte_errno != EEXIST)
-                       goto error;
-               ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
-                                                    mp);
-               if (ret < 0)
-                       goto error;
+                       bool is_extmem = (rte_pktmbuf_priv_flags(mp) &
+                                       RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+                       ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp, 
is_extmem);
+                       if (ret < 0 && rte_errno != EEXIST)
+                               goto error;
+                       ret = 
mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl, mp);
+                       if (ret < 0)
+                               goto error;
                } else { /* NULL segment used in selective Rx */
                        seg->null_mbuf = mlx5_alloc_null_mbuf(seg->length);
                        if (seg->null_mbuf == NULL) {
-- 
2.54.0

Reply via email to