Coverity issue: 490942
Coverity issue: 490943
Coverity issue: 490946
Coverity issue: 490947
Coverity issue: 490949
Coverity issue: 490950
Coverity issue: 490951
Coverity issue: 490952
Coverity issue: 490953
Coverity issue: 490954
Coverity issue: 490955
Coverity issue: 490957
Coverity issue: 490958
Coverity issue: 490959
Fixes: a1c5ffa13b2c ("net/nbl: add channel layer")
Fixes: dc955cd24c8f ("net/nbl: add coexistence mode")
Fixes: 93b38df5a2ec ("net/nbl: support basic configuration")

Signed-off-by: Dimon Zhao <[email protected]>
---
 drivers/net/nbl/nbl_common/nbl_userdev.c | 11 +++++++++--
 drivers/net/nbl/nbl_dev/nbl_dev.c        |  4 ++--
 drivers/net/nbl/nbl_hw/nbl_channel.c     | 13 +++++++------
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/drivers/net/nbl/nbl_common/nbl_userdev.c b/drivers/net/nbl/nbl_common/nbl_userdev.c
index 75e0e2884b..c0bc995b64 100644
--- a/drivers/net/nbl/nbl_common/nbl_userdev.c
+++ b/drivers/net/nbl/nbl_common/nbl_userdev.c
@@ -715,7 +715,11 @@ int nbl_pci_map_device(struct nbl_adapter *adapter)
        }
 
        common->eventfd = fd;
-       ioctl(common->devfd, NBL_DEV_USER_GET_BAR_SIZE, &bar_size);
+       ret = ioctl(common->devfd, NBL_DEV_USER_GET_BAR_SIZE, &bar_size);
+       if (ret) {
+               NBL_LOG(ERR, "nbl userdev get bar size failed");
+               goto close_eventfd;
+       }
 
        if (!ret) {
                pci_dev->mem_resource[0].addr = nbl_userdev_mmap(common->devfd, 0, bar_size);
@@ -744,12 +748,15 @@ void nbl_pci_unmap_device(struct nbl_adapter *adapter)
 {
        struct rte_pci_device *pci_dev = adapter->pci_dev;
        struct nbl_common_info *common = &adapter->common;
+       int ret = 0;
 
        if (NBL_IS_NOT_COEXISTENCE(common))
                return rte_pci_unmap_device(pci_dev);
 
        rte_mem_unmap(pci_dev->mem_resource[0].addr, pci_dev->mem_resource[0].len);
-       ioctl(common->devfd, NBL_DEV_USER_CLEAR_EVENTFD, 0);
+       ret = ioctl(common->devfd, NBL_DEV_USER_CLEAR_EVENTFD, 0);
+       if (ret)
+               NBL_LOG(ERR, "nbl userdev clear eventfd failed, ret: %d", ret);
        close(common->eventfd);
        close(common->nl_socket_route);
 
diff --git a/drivers/net/nbl/nbl_dev/nbl_dev.c b/drivers/net/nbl/nbl_dev/nbl_dev.c
index 8a4a776b22..b4cc241def 100644
--- a/drivers/net/nbl/nbl_dev/nbl_dev.c
+++ b/drivers/net/nbl/nbl_dev/nbl_dev.c
@@ -18,11 +18,11 @@ int nbl_dev_configure(struct rte_eth_dev *eth_dev)
        struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
        int ret;
 
-       NBL_LOG(DEBUG, "Begin to configure the device, state: %d", adapter->state);
-
        if (dev_data == NULL || adapter == NULL)
                return -EINVAL;
 
+       NBL_LOG(DEBUG, "Begin to configure the device, state: %d", adapter->state);
+
        if (rx_mq_mode != RTE_ETH_MQ_RX_NONE && rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
                NBL_LOG(ERR, "Rx mq mode %d is not supported", rx_mq_mode);
                return -ENOTSUP;
diff --git a/drivers/net/nbl/nbl_hw/nbl_channel.c b/drivers/net/nbl/nbl_hw/nbl_channel.c
index 7cc214df01..654e56f200 100644
--- a/drivers/net/nbl/nbl_hw/nbl_channel.c
+++ b/drivers/net/nbl/nbl_hw/nbl_channel.c
@@ -36,7 +36,7 @@ static int nbl_chan_init_tx_queue(union nbl_chan_info *chan_info)
                goto req_wait_queue_failed;
        }
 
-       size = chan_info->mailbox.num_txq_entries * chan_info->mailbox.txq_buf_size;
+       size = (u64)chan_info->mailbox.num_txq_entries * (u64)chan_info->mailbox.txq_buf_size;
        txq->buf = nbl_alloc_dma_mem(&txq->buf_mem, size);
        if (!txq->buf) {
                NBL_LOG(ERR, "Allocate memory for chan tx buffer arrays failed");
@@ -66,7 +66,7 @@ static int nbl_chan_init_rx_queue(union nbl_chan_info *chan_info)
                return -ENOMEM;
        }
 
-       size = chan_info->mailbox.num_rxq_entries * chan_info->mailbox.rxq_buf_size;
+       size = (u64)chan_info->mailbox.num_rxq_entries * (u64)chan_info->mailbox.rxq_buf_size;
        rxq->buf = nbl_alloc_dma_mem(&rxq->buf_mem, size);
        if (!rxq->buf) {
                NBL_LOG(ERR, "Allocate memory for chan rx buffer arrays failed");
@@ -163,7 +163,7 @@ static int nbl_chan_prepare_rx_bufs(struct nbl_channel_mgt *chan_mgt,
        desc = rxq->desc;
        for (i = 0; i < chan_info->mailbox.num_rxq_entries - 1; i++) {
                desc[i].flags = NBL_CHAN_RX_DESC_AVAIL;
-               desc[i].buf_addr = rxq->buf_mem.pa + i * chan_info->mailbox.rxq_buf_size;
+               desc[i].buf_addr = rxq->buf_mem.pa + (u64)i * (u64)chan_info->mailbox.rxq_buf_size;
                desc[i].buf_len = chan_info->mailbox.rxq_buf_size;
        }
 
@@ -324,7 +324,8 @@ static void nbl_chan_advance_rx_ring(struct nbl_channel_mgt *chan_mgt,
        rx_desc = NBL_CHAN_RX_DESC(rxq, next_to_use);
 
        rx_desc->flags = NBL_CHAN_RX_DESC_AVAIL;
-       rx_desc->buf_addr = rxq->buf_mem.pa + chan_info->mailbox.rxq_buf_size * next_to_use;
+       rx_desc->buf_addr = rxq->buf_mem.pa +
+                               (u64)chan_info->mailbox.rxq_buf_size * (u64)next_to_use;
        rx_desc->buf_len = chan_info->mailbox.rxq_buf_size;
 
        rte_wmb();
@@ -376,8 +377,8 @@ static uint16_t nbl_chan_update_txqueue(union nbl_chan_info *chan_info,
 
        txq = &chan_info->mailbox.txq;
        next_to_use = txq->next_to_use;
-       va = (u8 *)txq->buf + next_to_use * chan_info->mailbox.txq_buf_size;
-       pa = txq->buf_mem.pa + next_to_use * chan_info->mailbox.txq_buf_size;
+       va = (u8 *)txq->buf + (u64)next_to_use * (u64)chan_info->mailbox.txq_buf_size;
+       pa = txq->buf_mem.pa + (u64)next_to_use * (u64)chan_info->mailbox.txq_buf_size;
        tx_desc = NBL_CHAN_TX_DESC(txq, next_to_use);
 
        tx_desc->dstid = dstid;
-- 
2.34.1
