Signed-off-by: Jingjing Wu <jingjing...@intel.com>
---
 config/common_base                      |   5 +
 drivers/net/Makefile                    |   1 +
 drivers/net/avf/Makefile                |  58 ++++
 drivers/net/avf/avf.h                   | 214 ++++++++++++++
 drivers/net/avf/avf_ethdev.c            | 482 ++++++++++++++++++++++++++++++++
 drivers/net/avf/avf_log.h               |   2 +-
 drivers/net/avf/avf_vchnl.c             | 336 ++++++++++++++++++++++
 drivers/net/avf/rte_pmd_avf_version.map |   4 +
 mk/rte.app.mk                           |   1 +
 9 files changed, 1102 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/avf/Makefile
 create mode 100644 drivers/net/avf/avf.h
 create mode 100644 drivers/net/avf/avf_ethdev.c
 create mode 100644 drivers/net/avf/avf_vchnl.c
 create mode 100644 drivers/net/avf/rte_pmd_avf_version.map

diff --git a/config/common_base b/config/common_base
index e74febe..ce4d9bb 100644
--- a/config/common_base
+++ b/config/common_base
@@ -226,6 +226,11 @@ CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
 CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
 
 #
+# Compile burst-oriented AVF PMD driver
+#
+CONFIG_RTE_LIBRTE_AVF_PMD=n
+
+#
 # Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
 #
 CONFIG_RTE_LIBRTE_MLX4_PMD=n
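
The new option defaults to "n", so the driver is not built unless it is
enabled in the target configuration before compiling, for example:

    CONFIG_RTE_LIBRTE_AVF_PMD=y
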
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ef09b4e..688b8ee 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -37,6 +37,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD),d)
 endif
 
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
 DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
 DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
 DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
diff --git a/drivers/net/avf/Makefile b/drivers/net/avf/Makefile
new file mode 100644
index 0000000..40d0a0f
--- /dev/null
+++ b/drivers/net/avf/Makefile
@@ -0,0 +1,58 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avf.a
+
+CFLAGS += -O3
+
+EXPORT_MAP := rte_pmd_avf_version.map
+
+LIBABIVER := 1
+
+OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_adminq.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/avf/avf.h b/drivers/net/avf/avf.h
new file mode 100644
index 0000000..3d3e0dc
--- /dev/null
+++ b/drivers/net/avf/avf.h
@@ -0,0 +1,214 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AVF_ETHDEV_H_
+#define _AVF_ETHDEV_H_
+
+#include <rte_kvargs.h>
+
+#define AVF_AQ_LEN               32
+#define AVF_AQ_BUF_SZ            4096
+#define AVF_RESET_WAIT_CNT       50
+#define AVF_BUF_SIZE_MIN         1024
+#define AVF_FRAME_SIZE_MAX       9728
+#define AVF_QUEUE_BASE_ADDR_UNIT 128
+
+#define AVF_MAX_NUM_QUEUES       16
+/* Vlan table size */
+#define AVF_VLAN_TB_SIZE               (4096 / (CHAR_BIT * sizeof(uint32_t)))
+
+#define AVF_NUM_MACADDR_MAX      64
+
+#define AVF_DEFAULT_RX_PTHRESH      8
+#define AVF_DEFAULT_RX_HTHRESH      8
+#define AVF_DEFAULT_RX_WTHRESH      0
+
+#define AVF_DEFAULT_RX_FREE_THRESH  32
+
+#define AVF_DEFAULT_TX_PTHRESH      32
+#define AVF_DEFAULT_TX_HTHRESH      0
+#define AVF_DEFAULT_TX_WTHRESH      0
+
+#define AVF_DEFAULT_TX_FREE_THRESH  32
+#define AVF_DEFAULT_TX_RS_THRESH 32
+
+#define AVF_BASIC_OFFLOAD_CAPS  ( \
+       VF_BASE_MODE_OFFLOADS | \
+       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \
+       VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
+#define AVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
+#define AVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define AVF_ITR_INDEX_DEFAULT          0
+#define AVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define AVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
+
+/* The overhead from MTU to max frame size.
+ * Considering QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define AVF_VLAN_TAG_SIZE               4
+#define AVF_ETH_OVERHEAD \
+       (ETHER_HDR_LEN + ETHER_CRC_LEN + AVF_VLAN_TAG_SIZE * 2)
+
+struct avf_adapter;
+struct avf_rx_queue;
+struct avf_tx_queue;
+
+/* Structure that defines a VSI, associated with an adapter. */
+struct avf_vsi {
+       struct avf_adapter *adapter; /* Backreference to associated adapter */
+       uint16_t vsi_id;
+       uint16_t nb_qps;         /* Number of queue pairs VSI can occupy */
+       uint16_t nb_used_qps;    /* Number of queue pairs VSI uses */
+       uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
+       uint16_t base_vector;
+       uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
+};
+
+/* TODO: is it correct to assume the max number is 16? */
+#define AVF_MAX_MSIX_VECTORS   16
+
+/* Structure to store private data specific for VF instance. */
+struct avf_info {
+       uint16_t num_queue_pairs;
+       uint16_t max_pkt_len; /* Maximum packet length */
+       uint16_t mac_num;     /* Number of MAC addresses */
+       uint32_t vlan[AVF_VLAN_TB_SIZE]; /* VLAN bit map */
+       bool promisc_unicast_enabled;
+       bool promisc_multicast_enabled;
+
+       struct virtchnl_version_info virtchnl_version;
+       struct virtchnl_vf_resource *vf_res; /* VF resource */
+       struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+       volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+       uint32_t cmd_retval; /* return value of the cmd response from PF */
+       uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+       /* Event from pf */
+       bool dev_closed;
+       bool link_up;
+       enum virtchnl_link_speed link_speed;
+
+       struct avf_vsi vsi;
+       bool vf_reset;
+       uint64_t flags;
+
+       uint8_t *rss_lut;
+       uint8_t *rss_key;
+       uint16_t nb_msix;   /* number of MSI-X interrupts on Rx */
+       uint16_t msix_base; /* MSI-X vector base */
+       uint16_t rxq_map[AVF_MAX_MSIX_VECTORS]; /* queue bitmask per vector */
+};
+
+#define AVF_MAX_PKT_TYPE 256
+
+/* Structure to store private data for each VF instance. */
+struct avf_adapter {
+       struct avf_hw hw;
+       struct rte_eth_dev *eth_dev;
+       struct avf_info vf;
+};
+
+/* AVF_DEV_PRIVATE_TO */
+#define AVF_DEV_PRIVATE_TO_ADAPTER(adapter) \
+       ((struct avf_adapter *)adapter)
+#define AVF_DEV_PRIVATE_TO_VF(adapter) \
+       (&((struct avf_adapter *)adapter)->vf)
+#define AVF_DEV_PRIVATE_TO_HW(adapter) \
+       (&((struct avf_adapter *)adapter)->hw)
+
+/* AVF_VSI_TO */
+#define AVF_VSI_TO_HW(vsi) \
+       (&(((struct avf_vsi *)vsi)->adapter->hw))
+#define AVF_VSI_TO_VF(vsi) \
+       (&(((struct avf_vsi *)vsi)->adapter->vf))
+#define AVF_VSI_TO_ETH_DEV(vsi) \
+       (((struct avf_vsi *)vsi)->adapter->eth_dev)
+
+static inline void
+avf_init_adminq_parameter(struct avf_hw *hw)
+{
+       hw->aq.num_arq_entries = AVF_AQ_LEN;
+       hw->aq.num_asq_entries = AVF_AQ_LEN;
+       hw->aq.arq_buf_size = AVF_AQ_BUF_SZ;
+       hw->aq.asq_buf_size = AVF_AQ_BUF_SZ;
+}
+
+static inline uint16_t
+avf_calc_itr_interval(int16_t interval)
+{
+       if (interval < 0 || interval > AVF_QUEUE_ITR_INTERVAL_MAX)
+               interval = AVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+       /* Convert to hardware count, as writing each 1 represents 2 us */
+       return interval / 2;
+}
+
+/* structure used for sending and checking response of virtchnl ops */
+struct avf_cmd_info {
+       enum virtchnl_ops ops;
+       uint8_t *in_args;       /* buffer for sending */
+       uint32_t in_args_size;  /* buffer size for sending */
+       uint8_t *out_buffer;    /* buffer for response */
+       uint32_t out_size;      /* buffer size for response */
+};
+
+/* Clear the current command. Only call this after
+ * _atomic_set_cmd has succeeded.
+ */
+static inline void
+_clear_cmd(struct avf_info *vf)
+{
+       rte_wmb();
+       vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+       vf->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
+}
+
+/* Check whether a cmd is pending in execution; if not, set the new command. */
+static inline int
+_atomic_set_cmd(struct avf_info *vf, enum virtchnl_ops ops)
+{
+       int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+       if (!ret)
+               PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+       return !ret;
+}
+
+int avf_check_api_version(struct avf_adapter *adapter);
+int avf_get_vf_resource(struct avf_adapter *adapter);
+void avf_handle_virtchnl_msg(struct rte_eth_dev *dev);
+#endif /* _AVF_ETHDEV_H_ */
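
For reference, the command helpers above are meant to be paired as in the
following simplified sketch (this mirrors what avf_execute_vf_cmd() in
avf_vchnl.c below does; polling/interrupt handling and error paths trimmed):

    struct avf_cmd_info args;

    args.ops = VIRTCHNL_OP_VERSION;
    args.in_args = (uint8_t *)&version;
    args.in_args_size = sizeof(version);
    args.out_buffer = vf->aq_resp;
    args.out_size = AVF_AQ_BUF_SZ;

    if (_atomic_set_cmd(vf, args.ops))  /* refuse if a cmd is still pending */
            return -1;
    avf_aq_send_msg_to_pf(hw, args.ops, AVF_SUCCESS,
                          args.in_args, args.in_args_size, NULL);
    /* ... wait for the PF response, which fills vf->cmd_retval ... */
    _clear_cmd(vf);                     /* release the pending-command slot */
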
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
new file mode 100644
index 0000000..ba31b47
--- /dev/null
+++ b/drivers/net/avf/avf_ethdev.c
@@ -0,0 +1,482 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+
+int avf_logtype_init;
+int avf_logtype_driver;
+static const struct rte_pci_id pci_id_avf_map[] = {
+       { RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
+       { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops avf_eth_dev_ops = {
+};
+
+static int
+avf_check_vf_reset_done(struct avf_hw *hw)
+{
+       int i, reset;
+
+       for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
+               reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
+                       AVFGEN_RSTAT_VFR_STATE_MASK;
+               reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
+               if (reset == VIRTCHNL_VFR_VFACTIVE ||
+                   reset == VIRTCHNL_VFR_COMPLETED)
+                       break;
+               rte_delay_ms(20);
+       }
+
+       if (i >= AVF_RESET_WAIT_CNT)
+               return -1;
+
+       return 0;
+}
+
+static int
+avf_init_vf(struct rte_eth_dev *dev)
+{
+       int i, err, bufsz;
+       struct avf_adapter *adapter =
+               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       uint16_t interval =
+               avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
+
+       err = avf_set_mac_type(hw);
+       if (err) {
+               PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+               goto err;
+       }
+
+       err = avf_check_vf_reset_done(hw);
+       if (err) {
+               PMD_INIT_LOG(ERR, "VF is still resetting");
+               goto err;
+       }
+
+       avf_init_adminq_parameter(hw);
+       err = avf_init_adminq(hw);
+       if (err) {
+               PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+               goto err;
+       }
+
+       vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
+       if (!vf->aq_resp) {
+               PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+               goto err_aq;
+       }
+       if (avf_check_api_version(adapter) != 0) {
+               PMD_INIT_LOG(ERR, "check_api version failed");
+               goto err_api;
+       }
+
+       bufsz = sizeof(struct virtchnl_vf_resource) +
+               (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+       vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+       if (!vf->vf_res) {
+               PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+               goto err_api;
+       }
+       if (avf_get_vf_resource(adapter) != 0) {
+               PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
+               goto err_alloc;
+       }
+       /* Allocate memory for RSS info */
+       if (vf->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               vf->rss_key = rte_zmalloc("rss_key",
+                                         vf->vf_res->rss_key_size, 0);
+               if (!vf->rss_key) {
+                       PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
+                       goto err_rss;
+               }
+               vf->rss_lut = rte_zmalloc("rss_lut",
+                                         vf->vf_res->rss_lut_size, 0);
+               if (!vf->rss_lut) {
+                       PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
+                       goto err_rss;
+               }
+       }
+       return 0;
+err_rss:
+       rte_free(vf->rss_key);
+       rte_free(vf->rss_lut);
+err_alloc:
+       rte_free(vf->vf_res);
+       vf->vsi_res = NULL;
+err_api:
+       rte_free(vf->aq_resp);
+err_aq:
+       avf_shutdown_adminq(hw);
+err:
+       return -1;
+}
+
+/* Enable default admin queue interrupt setting */
+static inline void
+avf_enable_irq0(struct avf_hw *hw)
+{
+       /* Enable admin queue interrupt trigger */
+       AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
+
+       AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
+                                           AVFINT_DYN_CTL01_CLEARPBA_MASK |
+                                           AVFINT_DYN_CTL01_ITR_INDX_MASK);
+
+       AVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+avf_disable_irq0(struct avf_hw *hw)
+{
+       /* Disable all interrupt types */
+       AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
+       AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+                     AVFINT_DYN_CTL01_ITR_INDX_MASK);
+       AVF_WRITE_FLUSH(hw);
+}
+
+static void
+avf_dev_interrupt_handler(void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t icr0;
+
+       avf_disable_irq0(hw);
+
+       /* read out interrupt causes */
+       icr0 = AVF_READ_REG(hw, AVFINT_ICR01);
+
+       /* No interrupt event indicated */
+       if (!(icr0 & AVFINT_ICR01_INTEVENT_MASK)) {
+               PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
+               goto done;
+       }
+
+       if (icr0 & AVFINT_ICR01_ADMINQ_MASK) {
+               PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
+               avf_handle_virtchnl_msg(dev);
+       }
+
+done:
+       avf_enable_irq0(hw);
+}
+
+static int
+avf_dev_init(struct rte_eth_dev *eth_dev)
+{
+       struct avf_adapter *adapter =
+               AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* assign ops func pointer */
+       eth_dev->dev_ops = &avf_eth_dev_ops;
+
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+       hw->vendor_id = pci_dev->id.vendor_id;
+       hw->device_id = pci_dev->id.device_id;
+       hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+       hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+       hw->bus.bus_id = pci_dev->addr.bus;
+       hw->bus.device = pci_dev->addr.devid;
+       hw->bus.func = pci_dev->addr.function;
+       hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+       hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+       adapter->eth_dev = eth_dev;
+
+       if (avf_init_vf(eth_dev) != 0) {
+               PMD_INIT_LOG(ERR, "Init vf failed");
+               return -1;
+       }
+
+       /* copy mac addr */
+       eth_dev->data->mac_addrs = rte_zmalloc(
+                                       "avf_mac",
+                                       ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
+                                       0);
+       if (!eth_dev->data->mac_addrs) {
+               PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+                            " store MAC addresses",
+                            ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
+               return -ENOMEM;
+       }
+       /* If the MAC address is not configured by the host,
+        * generate a random one.
+        */
+       if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+               eth_random_addr(hw->mac.addr);
+       ether_addr_copy((struct ether_addr *)hw->mac.addr,
+                       &eth_dev->data->mac_addrs[0]);
+
+       /* register callback func to eal lib */
+       rte_intr_callback_register(&pci_dev->intr_handle,
+                                  avf_dev_interrupt_handler,
+                                  (void *)eth_dev);
+
+       /* enable uio intr after callback register */
+       rte_intr_enable(&pci_dev->intr_handle);
+
+       /* configure and enable device interrupt */
+       avf_enable_irq0(hw);
+
+       return 0;
+}
+
+static void
+avf_dev_close(struct rte_eth_dev *dev)
+{
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+       avf_shutdown_adminq(hw);
+       /* disable uio intr before callback unregister */
+       rte_intr_disable(intr_handle);
+
+       /* unregister callback func from eal lib */
+       rte_intr_callback_unregister(intr_handle,
+                                    avf_dev_interrupt_handler, dev);
+       avf_disable_irq0(hw);
+}
+
+static int
+avf_dev_uninit(struct rte_eth_dev *dev)
+{
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return -EPERM;
+
+       dev->dev_ops = NULL;
+       dev->rx_pkt_burst = NULL;
+       dev->tx_pkt_burst = NULL;
+       if (hw->adapter_stopped == 0)
+               avf_dev_close(dev);
+
+       rte_free(vf->vf_res);
+       vf->vsi_res = NULL;
+       vf->vf_res = NULL;
+
+       rte_free(vf->aq_resp);
+       vf->aq_resp = NULL;
+
+       rte_free(dev->data->mac_addrs);
+       dev->data->mac_addrs = NULL;
+
+       if (vf->rss_lut) {
+               rte_free(vf->rss_lut);
+               vf->rss_lut = NULL;
+       }
+       if (vf->rss_key) {
+               rte_free(vf->rss_key);
+               vf->rss_key = NULL;
+       }
+
+       return 0;
+}
+
+static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+                            struct rte_pci_device *pci_dev)
+{
+       return rte_eth_dev_pci_generic_probe(pci_dev,
+               sizeof(struct avf_adapter), avf_dev_init);
+}
+
+static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
+{
+       return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
+}
+
+/* Adaptive virtual function driver struct */
+static struct rte_pci_driver rte_avf_pmd = {
+       .id_table = pci_id_avf_map,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                    RTE_PCI_DRV_IOVA_AS_VA,
+       .probe = eth_avf_pci_probe,
+       .remove = eth_avf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
+RTE_INIT(avf_init_log);
+static void
+avf_init_log(void)
+{
+       avf_logtype_init = rte_log_register("pmd.avf.init");
+       if (avf_logtype_init >= 0)
+               rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
+       avf_logtype_driver = rte_log_register("pmd.avf.driver");
+       if (avf_logtype_driver >= 0)
+               rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
+}
+
+/* memory func for base code */
+enum avf_status_code
+avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
+                      struct avf_dma_mem *mem,
+                      u64 size,
+                      u32 alignment)
+{
+       const struct rte_memzone *mz = NULL;
+       char z_name[RTE_MEMZONE_NAMESIZE];
+
+       if (!mem)
+               return AVF_ERR_PARAM;
+
+       snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
+       mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
+                                        alignment, RTE_PGSIZE_2M);
+       if (!mz)
+               return AVF_ERR_NO_MEMORY;
+
+       mem->size = size;
+       mem->va = mz->addr;
+       mem->pa = mz->phys_addr;
+       mem->zone = (const void *)mz;
+       PMD_DRV_LOG(DEBUG,
+                   "memzone %s allocated with physical address: %"PRIu64,
+                   mz->name, mem->pa);
+
+       return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
+                  struct avf_dma_mem *mem)
+{
+       if (!mem)
+               return AVF_ERR_PARAM;
+
+       PMD_DRV_LOG(DEBUG,
+                   "memzone %s to be freed with physical address: %"PRIu64,
+                   ((const struct rte_memzone *)mem->zone)->name, mem->pa);
+       rte_memzone_free((const struct rte_memzone *)mem->zone);
+       mem->zone = NULL;
+       mem->va = NULL;
+       mem->pa = (u64)0;
+
+       return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
+                       struct avf_virt_mem *mem,
+                       u32 size)
+{
+       if (!mem)
+               return AVF_ERR_PARAM;
+
+       mem->size = size;
+       mem->va = rte_zmalloc("avf", size, 0);
+
+       if (mem->va)
+               return AVF_SUCCESS;
+       else
+               return AVF_ERR_NO_MEMORY;
+}
+
+enum avf_status_code
+avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
+                   struct avf_virt_mem *mem)
+{
+       if (!mem)
+               return AVF_ERR_PARAM;
+
+       rte_free(mem->va);
+       mem->va = NULL;
+
+       return AVF_SUCCESS;
+}
+
+/* spinlock func for base code */
+void
+avf_init_spinlock_d(struct avf_spinlock *sp)
+{
+       rte_spinlock_init(&sp->spinlock);
+}
+
+void
+avf_acquire_spinlock_d(struct avf_spinlock *sp)
+{
+       rte_spinlock_lock(&sp->spinlock);
+}
+
+void
+avf_release_spinlock_d(struct avf_spinlock *sp)
+{
+       rte_spinlock_unlock(&sp->spinlock);
+}
+
+void
+avf_destroy_spinlock_d(__rte_unused struct avf_spinlock *sp)
+{
+       return;
+}
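
The DMA helpers above are the memory callbacks used by the shared base code;
a minimal usage sketch (size and alignment picked only for illustration):

    struct avf_dma_mem mem;

    if (avf_allocate_dma_mem_d(hw, &mem, AVF_AQ_BUF_SZ, 4096) != AVF_SUCCESS)
            return AVF_ERR_NO_MEMORY;
    /* hardware is given mem.pa, the driver accesses the buffer via mem.va */
    avf_free_dma_mem_d(hw, &mem);       /* frees the backing memzone */
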
diff --git a/drivers/net/avf/avf_log.h b/drivers/net/avf/avf_log.h
index 431f0f3..25e853b 100644
--- a/drivers/net/avf/avf_log.h
+++ b/drivers/net/avf/avf_log.h
@@ -37,7 +37,7 @@
 extern int avf_logtype_init;
 #define PMD_INIT_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, avf_logtype_init, "%s(): " fmt "\n", \
-               __func__, ##args)
+               __func__, ## args)
 #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 
 extern int avf_logtype_driver;
diff --git a/drivers/net/avf/avf_vchnl.c b/drivers/net/avf/avf_vchnl.c
new file mode 100644
index 0000000..214ddf9
--- /dev/null
+++ b/drivers/net/avf/avf_vchnl.c
@@ -0,0 +1,336 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS  10
+
+/* Read data from the admin queue to get a msg from the PF driver */
+static enum avf_status_code
+avf_read_msg_from_pf(struct avf_adapter *adapter, uint16_t buf_len,
+                    uint8_t *buf)
+{
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+       struct avf_arq_event_info event;
+       enum virtchnl_ops opcode;
+       int ret;
+
+       event.buf_len = buf_len;
+       event.msg_buf = buf;
+       ret = avf_clean_arq_element(hw, &event, NULL);
+       /* Can't read any msg from adminQ */
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Can't read msg from AQ");
+               return ret;
+       }
+
+       opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+       vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32(
+                       event.desc.cookie_low);
+
+       PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
+                   opcode, vf->cmd_retval);
+
+       if (opcode != vf->pend_cmd)
+               PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+                           vf->pend_cmd, opcode);
+
+       return AVF_SUCCESS;
+}
+
+static int
+avf_execute_vf_cmd(struct avf_adapter *adapter, struct avf_cmd_info *args)
+{
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+       struct avf_arq_event_info event_info;
+       enum avf_status_code ret;
+       int err = 0;
+       int i = 0;
+
+       if (_atomic_set_cmd(vf, args->ops))
+               return -1;
+
+       ret = avf_aq_send_msg_to_pf(hw, args->ops, AVF_SUCCESS,
+                                   args->in_args, args->in_args_size, NULL);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+               _clear_cmd(vf);
+               return -1;
+       }
+
+       switch (args->ops) {
+       case VIRTCHNL_OP_RESET_VF:
+               /* no need to wait for response */
+               _clear_cmd(vf);
+               break;
+       case VIRTCHNL_OP_VERSION:
+       case VIRTCHNL_OP_GET_VF_RESOURCES:
+               /* for init virtchnl ops, need to poll the response */
+               do {
+                       ret = avf_read_msg_from_pf(adapter, args->out_size,
+                                                  args->out_buffer);
+                       if (ret == AVF_SUCCESS)
+                               break;
+                       rte_delay_ms(ASQ_DELAY_MS);
+               } while (i++ < MAX_TRY_TIMES);
+               if (i >= MAX_TRY_TIMES ||
+                   vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+                       err = -1;
+                       PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+                                   " for cmd %d", vf->cmd_retval, args->ops);
+               }
+               _clear_cmd(vf);
+               break;
+
+       default:
+               /* For other virtchnl ops in running time,
+                * wait for the cmd done flag.
+                */
+               do {
+                       if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
+                               break;
+                       rte_delay_ms(ASQ_DELAY_MS);
+                       /* If no msg is read, or a sys event is read, keep polling */
+               } while (i++ < MAX_TRY_TIMES);
+               /* If no response is received, clear the command */
+               if (i >= MAX_TRY_TIMES  ||
+                   vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+                       err = -1;
+                       PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+                                   " for cmd %d", vf->cmd_retval, args->ops);
+                       _clear_cmd(vf);
+               }
+               break;
+       }
+
+       return err;
+}
+
+void
+avf_handle_virtchnl_msg(struct rte_eth_dev *dev)
+{
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct avf_arq_event_info info;
+       uint16_t pending, aq_opc;
+       enum virtchnl_ops msg_opc;
+       enum avf_status_code msg_ret;
+       int ret;
+
+       info.buf_len = AVF_AQ_BUF_SZ;
+       if (!vf->aq_resp) {
+               PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+               return;
+       }
+       info.msg_buf = vf->aq_resp;
+
+       pending = 1;
+       while (pending) {
+               ret = avf_clean_arq_element(hw, &info, &pending);
+
+               if (ret != AVF_SUCCESS) {
+                       PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
+                                   " ret: %d", ret);
+                       break;
+               }
+               aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+               /* For messages sent from PF to VF, the opcode is stored in
+                * cookie_high of struct avf_aq_desc, while the return error
+                * code is stored in cookie_low; both are set by the PF driver.
+                */
+               msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+                                                 info.desc.cookie_high);
+               msg_ret = (enum avf_status_code)rte_le_to_cpu_32(
+                                                 info.desc.cookie_low);
+               switch (aq_opc) {
+               case avf_aqc_opc_send_msg_to_vf:
+                       if (msg_opc == VIRTCHNL_OP_EVENT) {
+                               /* TODO */
+                       } else {
+                               /* check the read msg is the expected one */
+                               if (msg_opc == vf->pend_cmd) {
+                                       vf->cmd_retval = msg_ret;
+                                       /* prevent compiler reordering */
+                                       rte_compiler_barrier();
+                                       _clear_cmd(vf);
+                               } else
+                                       PMD_DRV_LOG(ERR, "command mismatch,"
+                                                   " expect %u, get %u",
+                                                   vf->pend_cmd, msg_opc);
+                               PMD_DRV_LOG(DEBUG, "adminq response is received,"
+                                            " opcode = %d", msg_opc);
+                       }
+                       break;
+               default:
+                       PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+                                   aq_opc);
+                       break;
+               }
+       }
+}
+
+#define VIRTCHNL_VERSION_MAJOR_START 1
+#define VIRTCHNL_VERSION_MINOR_START 1
+
+/**
+ * avf_check_api_version
+ * @adapter: pointer to the adapter
+ *
+ * Check the API version, waiting until it is read from the admin queue or fails
+ */
+int
+avf_check_api_version(struct avf_adapter *adapter)
+{
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_version_info version, *pver;
+       struct avf_cmd_info args;
+       int err;
+
+       version.major = VIRTCHNL_VERSION_MAJOR;
+       version.minor = VIRTCHNL_VERSION_MINOR;
+
+       args.ops = VIRTCHNL_OP_VERSION;
+       args.in_args = (uint8_t *)&version;
+       args.in_args_size = sizeof(version);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = AVF_AQ_BUF_SZ;
+
+       err = avf_execute_vf_cmd(adapter, &args);
+       if (err) {
+               PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
+               return err;
+       }
+
+       pver = (struct virtchnl_version_info *)args.out_buffer;
+       vf->virtchnl_version = *pver;
+
+       if ((vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START) ||
+           ((vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START) &&
+            (vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START))) {
+               PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
+                            " than (%u.%u) to support Adaptive VF",
+                            VIRTCHNL_VERSION_MAJOR_START,
+                            VIRTCHNL_VERSION_MINOR_START);
+               return -1;
+       } else if ((vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR) ||
+                  ((vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR) &&
+                   (vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR))) {
+               PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+                            vf->virtchnl_version.major,
+                            vf->virtchnl_version.minor,
+                            VIRTCHNL_VERSION_MAJOR,
+                            VIRTCHNL_VERSION_MINOR);
+               return -1;
+       }
+
+       PMD_DRV_LOG(DEBUG, "Peer is a supported PF host");
+       return 0;
+}
+
+int
+avf_get_vf_resource(struct avf_adapter *adapter)
+{
+       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+       struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+       struct avf_cmd_info args;
+       uint32_t caps, len;
+       int err, i;
+
+       args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
+       args.out_buffer = vf->aq_resp;
+       args.out_size = AVF_AQ_BUF_SZ;
+
+       /* TODO: basic offload capabilities, need to
+        * add advanced/optional offload capabilities
+        */
+
+       caps = AVF_BASIC_OFFLOAD_CAPS;
+
+       args.in_args = (uint8_t *)&caps;
+       args.in_args_size = sizeof(caps);
+
+       err = avf_execute_vf_cmd(adapter, &args);
+
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to execute command of "
+                                "OP_GET_VF_RESOURCE");
+               return -1;
+       }
+
+       len = sizeof(struct virtchnl_vf_resource) +
+                     AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+
+       rte_memcpy(vf->vf_res, args.out_buffer,
+                  RTE_MIN(args.out_size, len));
+       /* parse VF config message back from PF */
+       avf_parse_hw_config(hw, vf->vf_res);
+       for (i = 0; i < vf->vf_res->num_vsis; i++) {
+               if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+                       vf->vsi_res = &vf->vf_res->vsi_res[i];
+       }
+
+       if (!vf->vsi_res) {
+               PMD_INIT_LOG(ERR, "no LAN VSI found");
+               return -1;
+       }
+
+       vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+       vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+       vf->vsi.adapter = adapter;
+
+       return 0;
+}
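
Follow-up patches are expected to layer the runtime virtchnl ops on top of
avf_execute_vf_cmd(). A hedged sketch of such a caller, assuming the standard
VIRTCHNL_OP_ENABLE_QUEUES opcode and struct virtchnl_queue_select from the
shared virtchnl definitions (the helper name avf_enable_queues is illustrative
and not part of this patch):

    static int
    avf_enable_queues(struct avf_adapter *adapter)
    {
            struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
            struct virtchnl_queue_select queue_select;
            struct avf_cmd_info args;
            int err;

            memset(&queue_select, 0, sizeof(queue_select));
            queue_select.vsi_id = vf->vsi_res->vsi_id;
            /* bitmap of queue pairs to enable */
            queue_select.rx_queues = (1 << vf->vsi_res->num_queue_pairs) - 1;
            queue_select.tx_queues = queue_select.rx_queues;

            args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
            args.in_args = (uint8_t *)&queue_select;
            args.in_args_size = sizeof(queue_select);
            args.out_buffer = vf->aq_resp;
            args.out_size = AVF_AQ_BUF_SZ;

            /* default branch of avf_execute_vf_cmd(): the interrupt handler
             * stores cmd_retval and clears pend_cmd when the PF answers
             */
            err = avf_execute_vf_cmd(adapter, &args);
            if (err)
                    PMD_DRV_LOG(ERR, "Failed to execute OP_ENABLE_QUEUES");
            return err;
    }
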
diff --git a/drivers/net/avf/rte_pmd_avf_version.map b/drivers/net/avf/rte_pmd_avf_version.map
new file mode 100644
index 0000000..179140f
--- /dev/null
+++ b/drivers/net/avf/rte_pmd_avf_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+       local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 6a6a745..584c168 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -120,6 +120,7 @@ _LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK)  += -lrte_mempool_stack
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET)  += -lrte_pmd_af_packet
 _LDLIBS-$(CONFIG_RTE_LIBRTE_ARK_PMD)        += -lrte_pmd_ark
 _LDLIBS-$(CONFIG_RTE_LIBRTE_AVP_PMD)        += -lrte_pmd_avp
+_LDLIBS-$(CONFIG_RTE_LIBRTE_AVF_PMD)        += -lrte_pmd_avf
 _LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD)      += -lrte_pmd_bnx2x -lz
 _LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD)       += -lrte_pmd_bnxt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND)       += -lrte_pmd_bond
-- 
2.4.11
