- rxq_info_get
 - txq_info_get
 - rx_queue_count
 - rx_descriptor_done
 - rx_descriptor_status
 - tx_descriptor_status

Signed-off-by: Jingjing Wu <jingjing...@intel.com>
---
 doc/guides/nics/intel_vf.rst |  16 ++++-
 drivers/net/avf/avf_ethdev.c |   8 +++
 drivers/net/avf/avf_rxtx.c   | 146 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/avf/avf_rxtx.h   |   8 +++
 4 files changed, 176 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 1e83bf6..3adb684 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -28,8 +28,8 @@
     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-I40E/IXGBE/IGB Virtual Function Driver
-======================================
+Intel Virtual Function Driver
+=============================
 
 Supported Intel® Ethernet Controllers (see the *DPDK Release Notes* for details)
 support the following modes of operation in a virtualized environment:
@@ -93,6 +93,18 @@ and the Physical Function operates on the global resources on behalf of the Virt
 For this out-of-band communication, an SR-IOV enabled NIC provides a memory buffer for each Virtual Function,
 which is called a "Mailbox".
 
+Intel® Ethernet Adaptive Virtual Function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Adaptive Virtual Function (AVF) is an SR-IOV Virtual Function that presents the same device id (8086:1889) on different Intel Ethernet Controllers.
+The AVF driver is a VF driver that supports current and future Intel devices without requiring a VM update. Because the driver is adaptive,
+each new release of the VF driver can enable additional advanced features in the VM, in a device-agnostic way, whenever the underlying
+hardware supports them, without ever compromising the base functionality. AVF provides a generic hardware interface, and the interface
+between the AVF driver and a compliant PF driver is specified.
+
+Intel products starting from the Ethernet Controller 710 Series support Adaptive Virtual Function.
+
+Virtual Functions are generated in the usual way, and the resources assigned to a VF depend on the NIC infrastructure.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index d3946d6..550bead 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -132,7 +132,15 @@ static const struct eth_dev_ops avf_eth_dev_ops = {
        .reta_query                 = avf_dev_rss_reta_query,
        .rss_hash_update            = avf_dev_rss_hash_update,
        .rss_hash_conf_get          = avf_dev_rss_hash_conf_get,
+       .rxq_info_get               = avf_dev_rxq_info_get,
+       .txq_info_get               = avf_dev_txq_info_get,
+       .rx_queue_count             = avf_dev_rxq_count,
+       .rx_descriptor_done         = avf_dev_rx_desc_done,
+       .rx_descriptor_status       = avf_dev_rx_desc_status,
+       .tx_descriptor_status       = avf_dev_tx_desc_status,
        .mtu_set                    = avf_dev_mtu_set,
+       /* TODO: Get statistics/xstatistics */
+       /* TODO: Rx interrupt */
 };
 
 static int
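
For reference (not part of the patch): applications reach these new callbacks through the generic ethdev API. A minimal sketch, assuming a configured and started port with at least one Rx and one Tx queue; the function name here is illustrative:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_queue_state(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rxq_info;
	struct rte_eth_txq_info txq_info;

	/* Dispatch to avf_dev_rxq_info_get()/avf_dev_txq_info_get(). */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rxq_info) == 0)
		printf("rxq %u: %u descriptors, rx_free_thresh %u\n",
		       queue_id, rxq_info.nb_desc,
		       rxq_info.conf.rx_free_thresh);
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &txq_info) == 0)
		printf("txq %u: %u descriptors, tx_rs_thresh %u\n",
		       queue_id, txq_info.nb_desc,
		       txq_info.conf.tx_rs_thresh);

	/* Dispatches to avf_dev_rxq_count(). */
	printf("rxq %u: %d used descriptors\n", queue_id,
	       rte_eth_rx_queue_count(port_id, queue_id));
}
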
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index 95992fc..b3fe550 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -1384,4 +1384,150 @@ avf_set_tx_function(struct rte_eth_dev *dev)
 {
        dev->tx_pkt_burst = avf_xmit_pkts;
        dev->tx_pkt_prepare = avf_prep_pkts;
+}
+
+void
+avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                struct rte_eth_rxq_info *qinfo)
+{
+       struct avf_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mp;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_drop_en = TRUE;
+       qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                struct rte_eth_txq_info *qinfo)
+{
+       struct avf_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_free_thresh = txq->free_thresh;
+       qinfo->conf.tx_rs_thresh = txq->rs_thresh;
+       qinfo->conf.txq_flags = txq->txq_flags;
+       qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/* Get the number of used descriptors of an Rx queue */
+uint32_t
+avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define AVF_RXQ_SCAN_INTERVAL 4
+       volatile union avf_rx_desc *rxdp;
+       struct avf_rx_queue *rxq;
+       uint16_t desc = 0;
+
+       rxq = dev->data->rx_queues[queue_id];
+       rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+       while ((desc < rxq->nb_rx_desc) &&
+               ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+               AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
+                               (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
+               /* Advance in steps of 4 descriptors, testing only the DD
+                * bit of the first descriptor in each group, to avoid
+                * polling too frequently and degrading performance.
+                */
+               desc += AVF_RXQ_SCAN_INTERVAL;
+               rxdp += AVF_RXQ_SCAN_INTERVAL;
+               if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+                       rxdp = &(rxq->rx_ring[rxq->rx_tail +
+                                       desc - rxq->nb_rx_desc]);
+       }
+
+       return desc;
+}
+
+int
+avf_dev_rx_desc_done(void *rx_queue, uint16_t offset)
+{
+       volatile union avf_rx_desc *rxdp;
+       struct avf_rx_queue *rxq = rx_queue;
+       uint16_t desc;
+       int ret;
+
+       if (unlikely(offset >= rxq->nb_rx_desc)) {
+               PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
+               return 0;
+       }
+
+       desc = rxq->rx_tail + offset;
+       if (desc >= rxq->nb_rx_desc)
+               desc -= rxq->nb_rx_desc;
+
+       rxdp = &(rxq->rx_ring[desc]);
+
+       ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+                 AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
+                (1 << AVF_RX_DESC_STATUS_DD_SHIFT));
+
+       return ret;
+}
+
+int
+avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
+{
+       struct avf_rx_queue *rxq = rx_queue;
+       volatile uint64_t *status;
+       uint64_t mask;
+       uint32_t desc;
+
+       if (unlikely(offset >= rxq->nb_rx_desc))
+               return -EINVAL;
+
+       if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       desc = rxq->rx_tail + offset;
+       if (desc >= rxq->nb_rx_desc)
+               desc -= rxq->nb_rx_desc;
+
+       status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+       mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
+               << AVF_RXD_QW1_STATUS_SHIFT);
+       if (*status & mask)
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
+{
+       struct avf_tx_queue *txq = tx_queue;
+       volatile uint64_t *status;
+       uint64_t mask, expect;
+       uint32_t desc;
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
+       desc = txq->tx_tail + offset;
+       /* Go to the next descriptor with the RS bit; HW reports status only there. */
+       desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
+               txq->rs_thresh;
+       if (desc >= txq->nb_tx_desc) {
+               desc -= txq->nb_tx_desc;
+               if (desc >= txq->nb_tx_desc)
+                       desc -= txq->nb_tx_desc;
+       }
+
+       status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+       mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
+       expect = rte_cpu_to_le_64(
+                AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
+       if ((*status & mask) == expect)
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
 }
\ No newline at end of file
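
Also for reference (not part of the patch): the descriptor status callbacks above back rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(), which let an application probe ring occupancy without receiving or transmitting packets. A hedged sketch of the usual idiom; function names, port/queue ids and thresholds are illustrative:

#include <rte_ethdev.h>

/* Nonzero when at least "budget" packets (budget >= 1) are ready on the
 * Rx queue (avf_dev_rx_desc_status() underneath): descriptors complete
 * in order, so if the one at offset budget - 1 is DONE, all earlier
 * descriptors are done as well.
 */
static int
rx_burst_ready(uint16_t port_id, uint16_t queue_id, uint16_t budget)
{
	return rte_eth_rx_descriptor_status(port_id, queue_id,
					    budget - 1) ==
	       RTE_ETH_RX_DESC_DONE;
}

/* Nonzero when the Tx descriptor at "offset" has completed, e.g. to
 * decide whether mbuf cleanup is worthwhile (avf_dev_tx_desc_status()
 * underneath).
 */
static int
tx_slot_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	return rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
	       RTE_ETH_TX_DESC_DONE;
}
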
diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h
index de98ce3..c52bd5f 100644
--- a/drivers/net/avf/avf_rxtx.h
+++ b/drivers/net/avf/avf_rxtx.h
@@ -180,6 +180,14 @@ uint16_t avf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
 void avf_set_rx_function(struct rte_eth_dev *dev);
 void avf_set_tx_function(struct rte_eth_dev *dev);
+void avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                         struct rte_eth_rxq_info *qinfo);
+void avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                         struct rte_eth_txq_info *qinfo);
+uint32_t avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int avf_dev_rx_desc_done(void *rx_queue, uint16_t offset);
+int avf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
+int avf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
 
 static inline
 void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,
-- 
2.4.11
