Normally the queue memzones should be freed when the device is
closed. But they are not freed when the device setup goes through
the following call chains:

rte_eth_bond_slave_remove
-->__eth_bond_slave_remove_lock_free
---->slave_remove
------>rte_eth_dev_internal_reset
-------->rte_eth_dev_rx_queue_config
---------->eth_dev_rx_queue_config
------------>ice_rx_queue_release
rte_eth_dev_close
-->ice_dev_close
---->ice_free_queues
------>ice_rx_queue_release
      (not called because nb_rx_queues and nb_tx_queues are 0)

Also, when the number of queues is reconfigured to a smaller value,
the memzones of the queues with the higher indexes are no longer
tracked, which leads to a memory leak. So release the memzone when
releasing a queue.
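
For illustration, a minimal sketch of the leaking call sequence
(bonded_port_id and slave_port_id are placeholders for an already
configured bonding setup; this is not a complete test case):

    #include <rte_ethdev.h>
    #include <rte_eth_bond.h>

    /* Removing the slave resets its queue config: slave_remove ->
     * rte_eth_dev_internal_reset releases the queues and zeroes
     * nb_rx_queues/nb_tx_queues without freeing the ring memzones. */
    rte_eth_bond_slave_remove(bonded_port_id, slave_port_id);

    /* ice_free_queues then loops over nb_rx_queues == 0, so
     * rte_eth_dma_zone_free() is never reached for the old rings. */
    rte_eth_dev_close(slave_port_id);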

Fixes: 460d1679586e ("drivers/net: delete HW rings while freeing queues")
Cc: sta...@dpdk.org

Signed-off-by: Yunjian Wang <wangyunj...@huawei.com>
---
 drivers/net/ice/ice_fdir_filter.c | 2 --
 drivers/net/ice/ice_rxtx.c        | 8 ++++++--
 drivers/net/ice/ice_rxtx.h        | 2 ++
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 7ba65b9b04..82adb1fc8b 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -651,10 +651,8 @@ ice_fdir_teardown(struct ice_pf *pf)
 
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
-       rte_eth_dma_zone_free(eth_dev, "fdir_tx_ring", ICE_FDIR_QUEUE_ID);
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
-       rte_eth_dma_zone_free(eth_dev, "fdir_rx_ring", ICE_FDIR_QUEUE_ID);
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f047..2fc2883059 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1135,6 +1135,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
@@ -1190,6 +1191,7 @@ ice_rx_queue_release(void *rxq)
 
        q->rx_rel_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
 
@@ -1336,6 +1338,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = nb_desc;
        txq->tx_rs_thresh = tx_rs_thresh;
        txq->tx_free_thresh = tx_free_thresh;
@@ -1386,6 +1389,7 @@ ice_tx_queue_release(void *txq)
 
        q->tx_rel_mbufs(q);
        rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
        rte_free(q);
 }
 
@@ -2080,7 +2084,6 @@ ice_free_queues(struct rte_eth_dev *dev)
                        continue;
                ice_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
@@ -2089,7 +2092,6 @@ ice_free_queues(struct rte_eth_dev *dev)
                        continue;
                ice_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
-               rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
        dev->data->nb_tx_queues = 0;
 }
@@ -2136,6 +2138,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
                return -ENOMEM;
        }
 
+       txq->mz = tz;
        txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
        txq->queue_id = ICE_FDIR_QUEUE_ID;
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
@@ -2194,6 +2197,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
                return -ENOMEM;
        }
 
+       rxq->mz = rz;
        rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
        rxq->queue_id = ICE_FDIR_QUEUE_ID;
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b10db0874d..903c99a640 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,6 +89,7 @@ struct ice_rx_queue {
        ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
        ice_rx_release_mbufs_t rx_rel_mbufs;
        uint64_t offloads;
+       const struct rte_memzone *mz;
 };
 
 struct ice_tx_entry {
@@ -133,6 +134,7 @@ struct ice_tx_queue {
        bool tx_deferred_start; /* don't start this queue in dev start */
        bool q_set; /* indicate if tx queue has been configured */
        ice_tx_release_mbufs_t tx_rel_mbufs;
+       const struct rte_memzone *mz;
 };
 
 /* Offload features */
-- 
2.23.0
