PMD handles fast path completions in the Rx handler and control path
completions in the interrupt handler. They both process completions
from the same fastpath completion queue. There is a potential race
condition when these two paths are processing completions from the
same queue and trying to update the Rx producer at the same time.

Add a fastpath Rx lock between these two paths to close this race.

Fixes: 540a211084a7 ("bnx2x: driver core")
Cc: sta...@dpdk.org

Signed-off-by: Rasesh Mody <rm...@marvell.com>
---
 drivers/net/bnx2x/bnx2x.c      | 12 ++++++++++++
 drivers/net/bnx2x/bnx2x.h      |  3 +++
 drivers/net/bnx2x/bnx2x_rxtx.c |  8 +++++++-
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index d38da4f60..7ea98b936 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -1167,6 +1167,10 @@ static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
        if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
                     MAX_RCQ_ENTRIES(rxq)))
                rx_cq_cons_sb++;
+
+       PMD_RX_LOG(DEBUG, "hw CQ cons = %d, sw CQ cons = %d",
+                  rx_cq_cons_sb, rxq->rx_cq_head);
+
        return rxq->rx_cq_head != rx_cq_cons_sb;
 }
 
@@ -1249,9 +1253,12 @@ static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, 
struct bnx2x_fastpath *fp)
        uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
 
+       rte_spinlock_lock(&(fp)->rx_mtx);
+
        rxq = sc->rx_queues[fp->index];
        if (!rxq) {
                PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index);
+               rte_spinlock_unlock(&(fp)->rx_mtx);
                return 0;
        }
 
@@ -1326,9 +1333,14 @@ static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, 
struct bnx2x_fastpath *fp)
        rxq->rx_cq_head = sw_cq_cons;
        rxq->rx_cq_tail = sw_cq_prod;
 
+       PMD_RX_LOG(DEBUG, "BD prod = %d, sw CQ prod = %d",
+                  bd_prod_fw, sw_cq_prod);
+
        /* Update producers */
        bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
 
+       rte_spinlock_unlock(&(fp)->rx_mtx);
+
        return sw_cq_cons != hw_cq_cons;
 }
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 3383c7675..1dbc98197 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -360,6 +360,9 @@ struct bnx2x_fastpath {
        /* pointer back to parent structure */
        struct bnx2x_softc *sc;
 
+       /* Used to synchronize fastpath Rx access */
+       rte_spinlock_t rx_mtx;
+
        /* status block */
        struct bnx2x_dma                 sb_dma;
        union bnx2x_host_hc_status_block status_block;
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index b52f023ea..c8bb202d6 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -357,6 +357,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts)
        uint16_t len, pad;
        struct rte_mbuf *rx_mb = NULL;
 
+       rte_spinlock_lock(&(fp)->rx_mtx);
+
        /* Add memory barrier as status block fields can change. This memory
         * barrier will flush out all the read/write operations to status block
         * generated before the barrier. It will ensure stale data is not read
@@ -379,8 +381,10 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts)
         */
        rmb();
 
-       if (sw_cq_cons == hw_cq_cons)
+       if (sw_cq_cons == hw_cq_cons) {
+               rte_spinlock_unlock(&(fp)->rx_mtx);
                return 0;
+       }
 
        while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
 
@@ -461,6 +465,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts)
 
        bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
 
+       rte_spinlock_unlock(&(fp)->rx_mtx);
+
        return nb_rx;
 }
 
-- 
2.18.0

Reply via email to