From: Kamil Rytarowski <kamil.rytarow...@caviumnetworks.com>

Signed-off-by: Maciej Czekaj <maciej.czekaj at caviumnetworks.com>
Signed-off-by: Kamil Rytarowski <kamil.rytarowski at caviumnetworks.com>
Signed-off-by: Zyta Szpak <zyta.szpak at semihalf.com>
Signed-off-by: Slawomir Rosek <slawomir.rosek at semihalf.com>
Signed-off-by: Radoslaw Biernacki <rad at semihalf.com>
Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
---
 drivers/net/thunderx/nicvf_ethdev.h | 39 +++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 34447e0..e162364 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -87,6 +87,17 @@ nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
        return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
 }

+static inline uint16_t
+nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
+{
+       uint16_t global_qidx = local_qidx;
+
+       if (nic->sqs_mode)
+               global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+
+       return global_qidx;
+}
+
 /*
  * Simple phy2virt functions assuming mbufs are in a single huge page
  * V = P + offset
@@ -104,4 +115,32 @@ nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
        return (phys_addr_t)(virt - mbuf_phys_off);
 }

+static inline void
+nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
+              uint16_t *tx_end)
+{
+       uint16_t tmp;
+
+       *tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
+                                   MAX_SND_QUEUES_PER_QS);
+       tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
+                            MAX_SND_QUEUES_PER_QS) - 1;
+       *tx_end = dev->data->nb_tx_queues ?
+               RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
+}
+
+static inline void
+nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
+              uint16_t *rx_end)
+{
+       uint16_t tmp;
+
+       *rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
+                                   MAX_RCV_QUEUES_PER_QS);
+       tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
+                            MAX_RCV_QUEUES_PER_QS) - 1;
+       *rx_end = dev->data->nb_rx_queues ?
+               RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
+}
+
 #endif /* __THUNDERX_NICVF_ETHDEV_H__  */
-- 
1.9.1

Reply via email to