Hi Ruifeng,

On 16/07/2020 06:19, Ruifeng Wang wrote:
Coverity complains about the unchecked return value of rte_rcu_qsbr_dq_enqueue.
By default, the defer queue size is big enough to hold all tbl8 groups. When
enqueue fails, return an error to the user to indicate a system issue.

Coverity issue: 360832
Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")

Signed-off-by: Ruifeng Wang <ruifeng.w...@arm.com>
---
  lib/librte_lpm/rte_lpm.c | 16 +++++++++++-----
  1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 2db9e16a2..a6d3a7894 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
        return group_idx;
  }
-static void
+static int
  tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
  {
        struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
        struct __rte_lpm *internal_lpm;
+       int rc = 0;
internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
        if (internal_lpm->v == NULL) {
@@ -552,9 +553,13 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
                                __ATOMIC_RELAXED);
        } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
                /* Push into QSBR defer queue. */
-               rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+               rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
                                (void *)&tbl8_group_start);

On failure, rte_rcu_qsbr_dq_enqueue() returns 1 and sets rte_errno. Consequently, the rc value is propagated through delete_depth_big() to rte_lpm_delete(), so on failure the latter returns "1", which conflicts with the LPM API contract:
"0 on success, negative value otherwise"
I would suggest returning -rte_errno here when rc is equal to 1.
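For clarity, a rough sketch of what I have in mind for the DQ branch of tbl8_free() (just a suggestion, assuming rte_errno.h is already available in rte_lpm.c):

	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Push into QSBR defer queue. */
		rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
				(void *)&tbl8_group_start);
		if (rc != 0) {
			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
			/* rte_rcu_qsbr_dq_enqueue() returns 1 and sets
			 * rte_errno on failure; convert it to a negative
			 * errno so that rte_lpm_delete() keeps its
			 * "0 on success, negative value otherwise" contract.
			 */
			rc = -rte_errno;
		}
	}

That way delete_depth_big() can keep returning rc unchanged.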


+               if (rc != 0)
+                       RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
        }
+
+       return rc;
  }
static __rte_noinline int32_t
@@ -1041,6 +1046,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
        uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
                        tbl8_range, i;
        int32_t tbl8_recycle_index;
+       int rc = 0;
/*
         * Calculate the index into tbl24 and range. Note: All depths larger
@@ -1097,7 +1103,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                 */
                lpm->tbl24[tbl24_index].valid = 0;
                __atomic_thread_fence(__ATOMIC_RELEASE);
-               tbl8_free(lpm, tbl8_group_start);
+               rc = tbl8_free(lpm, tbl8_group_start);
        } else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
                struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -1113,10 +1119,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
                __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                                __ATOMIC_RELAXED);
                __atomic_thread_fence(__ATOMIC_RELEASE);
-               tbl8_free(lpm, tbl8_group_start);
+               rc = tbl8_free(lpm, tbl8_group_start);
        }
  #undef group_idx
-       return 0;
+       return (int32_t)rc;
  }
/*


--
Regards,
Vladimir
