This patch does the following for the ixgbe PF and VF:
- Setup NIC to generate MSI-X interrupts
- Set the IVAR registers to map interrupt causes to vectors
- Implement interrupt enable/disable functions
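
A minimal usage sketch for these hooks (not part of this patch),
assuming the eal/ethdev Rx-interrupt patches from earlier in this
series; the helper name, ids and burst size are illustrative only:

  #include <rte_ethdev.h>
  #include <rte_interrupts.h>
  #include <rte_mbuf.h>

  #define BURST_SIZE 32

  /* Sleep until traffic arrives on (port_id, queue_id), then poll. */
  static void
  rx_intr_loop(uint8_t port_id, uint16_t queue_id)
  {
          struct rte_mbuf *pkts[BURST_SIZE];
          struct rte_epoll_event event;
          uint16_t i, n;

          /* bind the queue interrupt eventfd to this lcore's epoll set */
          rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                                    RTE_INTR_EVENT_ADD, NULL);
          for (;;) {
                  rte_eth_dev_rx_intr_enable(port_id, queue_id);
                  rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
                  rte_eth_dev_rx_intr_disable(port_id, queue_id);
                  /* drain in polling mode until the queue is empty */
                  while ((n = rte_eth_rx_burst(port_id, queue_id,
                                               pkts, BURST_SIZE)) > 0)
                          for (i = 0; i < n; i++)
                                  rte_pktmbuf_free(pkts[i]); /* or process */
          }
  }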

Signed-off-by: Danny Zhou <danny.zhou at intel.com>
Signed-off-by: Yong Liu <yong.liu at intel.com>
Signed-off-by: Cunming Liang <cunming.liang at intel.com>
---
v10 changes
 - return an actual error code rather than -1

v9 changes
 - move queue-vec mapping init from dev_configure to dev_start

v8 changes
 - add vfio-msi/vfio-legacy and uio-legacy support

v7 changes
 - add condition check when intr vector is not enabled

v6 changes
 - fill queue-vector mapping table

v5 changes
 - Rebase the patchset onto the HEAD

v3 changes
 - Remove spinlock from PMD

v2 changes
 - Consolidate review comments related to coding style

 drivers/net/ixgbe/ixgbe_ethdev.c | 484 ++++++++++++++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h |   4 +
 2 files changed, 476 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 0d9f9b2..bcec971 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -82,6 +82,9 @@
  */
 #define IXGBE_FC_LO    0x40

+/* Default minimum inter-interrupt interval for EITR configuration */
+#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
+
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680

@@ -171,6 +174,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -183,11 +187,14 @@ static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf

 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -197,6 +204,15 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+               void *param);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+               uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+                uint16_t queue_id);
+static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                uint8_t queue, uint8_t msix_vector);
+static void ixgbevf_configure_msix(struct rte_eth_dev *dev);

 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -214,6 +230,14 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);

+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+                                       uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+                                       uint16_t queue_id);
+static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                               uint8_t queue, uint8_t msix_vector);
+static void ixgbe_configure_msix(struct rte_eth_dev *dev);
+
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                uint16_t queue_idx, uint16_t tx_rate);
 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
@@ -262,7 +286,7 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
  */
 #define UPDATE_VF_STAT(reg, last, cur)                         \
 {                                                               \
-       u32 latest = IXGBE_READ_REG(hw, reg);                   \
+       uint32_t latest = IXGBE_READ_REG(hw, reg);                   \
        cur += latest - last;                                   \
        last = latest;                                          \
 }
@@ -343,6 +367,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+       .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
@@ -402,8 +428,11 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
+       .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
+       .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
 };
@@ -899,12 +928,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

-       rte_intr_callback_register(&(pci_dev->intr_handle),
-               ixgbe_dev_interrupt_handler, (void *)eth_dev);
-
-       /* enable uio intr after callback register */
-       rte_intr_enable(&(pci_dev->intr_handle));
-
        /* enable support intr */
        ixgbe_enable_intr(eth_dev);

@@ -1457,6 +1480,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t intr_vector = 0;
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
@@ -1489,6 +1514,28 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);

+       /* check and configure queue intr-vector mapping */
+       if (dev->data->dev_conf.intr_conf.rxq != 0)
+               intr_vector = dev->data->nb_rx_queues;
+
+       if (rte_intr_efd_enable(intr_handle, intr_vector))
+               return -1;
+
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   dev->data->nb_rx_queues * sizeof(int),
+                                   0);
+               if (intr_handle->intr_vec == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                    " intr_vec", dev->data->nb_rx_queues);
+                       return -ENOMEM;
+               }
+       }
+
+       /* configure MSI-X for sleep until Rx interrupt */
+       ixgbe_configure_msix(dev);
+
        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);

@@ -1561,8 +1608,23 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 skip_link_setup:

        /* check if lsc interrupt is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0)
-               ixgbe_dev_lsc_interrupt_setup(dev);
+       if (dev->data->dev_conf.intr_conf.lsc != 0) {
+               if (rte_intr_allow_others(intr_handle)) {
+                       rte_intr_callback_register(intr_handle,
+                                                  ixgbe_dev_interrupt_handler,
+                                                  (void *)dev);
+                       ixgbe_dev_lsc_interrupt_setup(dev);
+               } else
+                       PMD_INIT_LOG(INFO, "lsc won't enable because of"
+                                    " no intr multiplex");
+       }
+
+       /* check if rxq interrupt is enabled */
+       if (dev->data->dev_conf.intr_conf.rxq != 0)
+               ixgbe_dev_rxq_interrupt_setup(dev);
+
+       /* enable uio/vfio intr/eventfd mapping */
+       rte_intr_enable(intr_handle);

        /* resume enabled intr since hw reset */
        ixgbe_enable_intr(dev);
@@ -1619,6 +1681,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int vf;

        PMD_INIT_FUNC_TRACE();
@@ -1626,6 +1689,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        /* disable interrupts */
        ixgbe_disable_intr(hw);

+       /* disable intr eventfd mapping */
+       rte_intr_disable(intr_handle);
+
        /* reset the NIC */
        ixgbe_pf_reset_hw(hw);
        hw->adapter_stopped = FALSE;
@@ -1661,6 +1727,12 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        memset(filter_info->fivetuple_mask, 0,
                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec != NULL) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }

 /*
@@ -2252,6 +2324,28 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
        return 0;
 }

+/**
+ * It adds the Rx queue interrupt causes to the interrupt mask.
+ * It will be called once only during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       intr->mask |= IXGBE_EICR_RTX_QUEUE;
+
+       return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -2278,10 +2372,10 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        PMD_DRV_LOG(INFO, "eicr %x", eicr);

        intr->flags = 0;
-       if (eicr & IXGBE_EICR_LSC) {
-               /* set flag for async link update */
+
+       /* set flag for async link update */
+       if (eicr & IXGBE_EICR_LSC)
                intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-       }

        if (eicr & IXGBE_EICR_MAILBOX)
                intr->flags |= IXGBE_FLAG_MAILBOX;
@@ -2289,6 +2383,30 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        return 0;
 }

+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       uint32_t eicr;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* clear all cause mask */
+       ixgbevf_intr_disable(hw);
+
+       /* read-on-clear nic registers here */
+       eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+       PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+       intr->flags = 0;
+
+       /* set flag for async link update */
+       if (eicr & IXGBE_EICR_LSC)
+               intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+       return 0;
+}
+
 /**
  * It gets and then prints the link status.
  *
@@ -2384,6 +2502,18 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        return 0;
 }

+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_DRV_LOG(DEBUG, "enable intr immediately");
+       ixgbevf_intr_enable(hw);
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+       return 0;
+}
+
 /**
  * Interrupt handler which shall be registered for alarm callback for delayed
  * handling specific interrupt to wait for the stable nic state. As the
@@ -2445,6 +2575,15 @@ ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
        ixgbe_dev_interrupt_action(dev);
 }

+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                                                       void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       ixgbevf_dev_interrupt_get_status(dev);
+       ixgbevf_dev_interrupt_action(dev);
+}
+
 static int
 ixgbe_dev_led_on(struct rte_eth_dev *dev)
 {
@@ -2943,6 +3082,19 @@ ixgbevf_intr_disable(struct ixgbe_hw *hw)
        IXGBE_WRITE_FLUSH(hw);
 }

+static void
+ixgbevf_intr_enable(struct ixgbe_hw *hw)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       /* VF enable interrupt autoclean */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -2975,6 +3127,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t intr_vector = 0;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
        int err, mask = 0;

        PMD_INIT_FUNC_TRACE();
@@ -3005,6 +3160,41 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)

        ixgbevf_dev_rxtx_start(dev);

+       /* check and configure queue intr-vector mapping */
+       if (dev->data->dev_conf.intr_conf.rxq != 0)
+               intr_vector = dev->data->nb_rx_queues;
+
+       if (rte_intr_efd_enable(intr_handle, intr_vector))
+               return -1;
+
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   dev->data->nb_rx_queues * sizeof(int), 0);
+               if (intr_handle->intr_vec == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                    " intr_vec", dev->data->nb_rx_queues);
+                       return -ENOMEM;
+               }
+       }
+
+       ixgbevf_configure_msix(dev);
+
+       if (dev->data->dev_conf.intr_conf.lsc != 0) {
+               if (rte_intr_allow_others(intr_handle))
+                       rte_intr_callback_register(intr_handle,
+                                       ixgbevf_dev_interrupt_handler,
+                                       (void *)dev);
+               else
+                       PMD_INIT_LOG(INFO, "lsc won't enable because of"
+                                    " no intr multiplex");
+       }
+
+       rte_intr_enable(intr_handle);
+
+       /* Re-enable interrupt for VF */
+       ixgbevf_intr_enable(hw);
+
        return 0;
 }

@@ -3012,6 +3202,7 @@ static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

@@ -3028,12 +3219,23 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
        dev->data->scattered_rx = 0;

        ixgbe_dev_clear_queues(dev);
+
+       /* disable intr eventfd mapping */
+       rte_intr_disable(intr_handle);
+
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec != NULL) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }

 static void
 ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();

@@ -3043,6 +3245,12 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+       pci_dev = dev->pci_dev;
+       if (pci_dev->intr_handle.intr_vec) {
+               rte_free(pci_dev->intr_handle.intr_vec);
+               pci_dev->intr_handle.intr_vec = NULL;
+       }
 }

 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -3542,6 +3750,258 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }

+
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+       mask |= (1 << queue_id);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+       mask &= ~(1 << queue_id);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+       return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
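+       /* queues 0-15 are masked through the shared intr->mask (EIMS);
+        * queues 16-63 go through EIMS_EX(0) and EIMS_EX(1) */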
+       if (queue_id < 16) {
+               ixgbe_disable_intr(hw);
+               intr->mask |= (1 << queue_id);
+               ixgbe_enable_intr(dev);
+       } else if (queue_id < 32) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+               mask |= (1 << queue_id);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+       } else if (queue_id < 64) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+               mask |= (1 << (queue_id - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       if (queue_id < 16) {
+               ixgbe_disable_intr(hw);
+               intr->mask &= ~(1 << queue_id);
+               ixgbe_enable_intr(dev);
+       } else if (queue_id < 32) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+               mask &= ~(1 << queue_id);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+       } else if (queue_id < 64) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+               mask &= ~(1 << (queue_id - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+
+       return 0;
+}
+
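+/**
+ * Set a VF IVAR entry, mapping an interrupt cause to a vector
+ *
+ * @param hw
+ *  pointer to ixgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes (VTIVAR_MISC)
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ *  the vector to map to the corresponding queue
+ */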
+static void
+ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                       uint8_t queue, uint8_t msix_vector)
+{
+       uint32_t tmp, idx;
+       if (direction == -1) {
+               /* other causes */
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+               tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+               tmp &= ~0xFF;
+               tmp |= msix_vector;
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
+       } else {
+               /* rx or tx cause */
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+               idx = ((16 * (queue & 1)) + (8 * direction));
+               tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+               tmp &= ~(0xFF << idx);
+               tmp |= (msix_vector << idx);
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
+       }
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ *  pointer to ixgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ *  the vector to map to the corresponding queue
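+ *
+ * On 82599/X540 each IVAR register holds four 8-bit entries covering two
+ * queues: Rx(2n) in bits 7:0, Tx(2n) in bits 15:8, Rx(2n+1) in bits 23:16
+ * and Tx(2n+1) in bits 31:24. For example, queue 5/Rx (direction 0) maps
+ * to bits 23:16 of IVAR(2): idx = 16 * (5 & 1) + 8 * 0 = 16.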
+ */
+static void
+ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                          uint8_t queue, uint8_t msix_vector)
+{
+       uint32_t tmp, idx;
+
+       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (direction == -1)
+                       direction = 0;
+               idx = (((direction * 64) + queue) >> 2) & 0x1F;
+               tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
+               tmp &= ~(0xFF << (8 * (queue & 0x3)));
+               tmp |= (msix_vector << (8 * (queue & 0x3)));
+               IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
+       } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
+                       (hw->mac.type == ixgbe_mac_X540)) {
+               if (direction == -1) {
+                       /* other causes */
+                       idx = ((queue & 1) * 8);
+                       tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+                       tmp &= ~(0xFF << idx);
+                       tmp |= (msix_vector << idx);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
+               } else {
+                       /* rx or tx causes */
+                       idx = ((16 * (queue & 1)) + (8 * direction));
+                       tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+                       tmp &= ~(0xFF << idx);
+                       tmp |= (msix_vector << idx);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
+               }
+       }
+}
+
+static void
+ixgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t q_idx;
+       uint32_t vector_idx = 0;
+
+       /* won't configure msix register if no mapping is done
+        * between intr vector and event fd */
+       if (!rte_intr_dp_is_en(intr_handle))
+               return;
+
+       /* Configure all RX queues of VF */
+       for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+               /* Force all queues to use vector 0,
+                * as IXGBE_VF_MAXMSIVECTOR = 1 */
+               ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+               intr_handle->intr_vec[q_idx] = vector_idx;
+       }
+
+       /* Configure VF other-cause ivar */
+       ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ */
+static void
+ixgbe_configure_msix(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t queue_id, vec = 0;
+       uint32_t mask;
+       uint32_t gpie;
+
+       /* won't configure msix register if no mapping is done
+        * between intr vector and event fd */
+       if (!rte_intr_dp_is_en(intr_handle))
+               return;
+
+       /* setup GPIE for MSI-x mode */
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+               IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+       /*
+        * auto clearing and auto setting corresponding bits in EIMS
+        * when MSI-X interrupt is triggered
+        */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+       else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /*
+        * Populate the IVAR table and set the ITR values to the
+        * corresponding register.
+        */
+       for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+            queue_id++) {
+               /* by default, 1:1 mapping */
+               ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+               intr_handle->intr_vec[queue_id] = vec;
+               if (vec < intr_handle->nb_efd - 1)
+                       vec++;
+       }
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+                                  intr_handle->max_intr - 1);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_set_ivar_map(hw, -1, 1, intr_handle->max_intr - 1);
+               break;
+       default:
+               break;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id),
+                        IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+
+       /* set up to autoclear timer, and the vectors */
+       mask = IXGBE_EIMS_ENABLE_MASK;
+       mask &= ~(IXGBE_EIMS_OTHER |
+                 IXGBE_EIMS_MAILBOX |
+                 IXGBE_EIMS_LSC);
+
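+       /* OTHER/mailbox/LSC are excluded from EIAC so their EICR bits are
+        * not auto-cleared and the slow-path handler can still read them */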
+       IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
        uint16_t queue_idx, uint16_t tx_rate)
 {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 19237b8..cccef46 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -117,6 +117,9 @@
        ETH_RSS_IPV6_TCP_EX | \
        ETH_RSS_IPV6_UDP_EX)

+#define IXGBE_VF_IRQ_ENABLE_MASK        3          /* enable IRQ vectors 0 and 1 */
+#define IXGBE_VF_MAXMSIVECTOR           1
+
 /*
  * Information about the fdir mode.
  */
@@ -328,6 +331,7 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

 int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);

 int ixgbe_dev_rx_init(struct rte_eth_dev *dev);

-- 
1.8.1.4
