> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Pawel Wodkowski
> Sent: Thursday, February 19, 2015 11:55 PM
> To: dev at dpdk.org
> Subject: [dpdk-dev] [PATCH v4 4/7] move rte_eth_dev_check_mq_mode() logic to driver
> 
> Function rte_eth_dev_check_mq_mode() is driver specific. It should be
> done in the PF configuration phase. This patch moves the igb/ixgbe
> driver-specific mq check and SRIOV configuration code to the driver part.
> It also rewrites the log messages to be shorter and more descriptive.
> 
> Signed-off-by: Pawel Wodkowski <pawelx.wodkowski at intel.com>
> ---
>  lib/librte_ether/rte_ethdev.c       | 197 -----------------------------------
>  lib/librte_pmd_e1000/igb_ethdev.c   |  43 ++++++++
>  lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 105 ++++++++++++++++++-
>  lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   5 +-
>  lib/librte_pmd_ixgbe/ixgbe_pf.c     | 202 +++++++++++++++++++++++++++++++-----
>  5 files changed, 327 insertions(+), 225 deletions(-)
> 
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> index 02b9cda..8e9da3b 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> @@ -863,7 +863,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>                       "Failed to allocate %u bytes needed to store "
>                       "MAC addresses",
>                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);
> -             return -ENOMEM;
> +             diag = -ENOMEM;
> +             goto error;
>       }
>       /* Copy the permanent MAC address */
>       ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
> @@ -876,7 +877,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>               PMD_INIT_LOG(ERR,
>                       "Failed to allocate %d bytes needed to store MAC
> addresses",
>                       ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
> -             return -ENOMEM;
> +             diag = -ENOMEM;
> +             goto error;
>       }
> 
>       /* initialize the vfta */
> @@ -886,7 +888,13 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>       memset(hwstrip, 0, sizeof(*hwstrip));
> 
>       /* initialize PF if max_vfs not zero */
> -     ixgbe_pf_host_init(eth_dev);
> +     diag = ixgbe_pf_host_init(eth_dev);
> +     if (diag < 0) {
> +             PMD_INIT_LOG(ERR,
> +                     "Failed to allocate %d bytes needed to store MAC
> addresses",
> +                     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
> +             goto error;
> +     }
> 
>       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
>       /* let hardware know driver is loaded */
> @@ -918,6 +926,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>       ixgbe_enable_intr(eth_dev);
> 
>       return 0;
> +
> +error:
> +     rte_free(eth_dev->data->hash_mac_addrs);
> +     rte_free(eth_dev->data->mac_addrs);
> +     return diag;
>  }
> 
> 
> @@ -1434,7 +1447,93 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>       struct ixgbe_interrupt *intr =
>               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
> 
> +     struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
> +     struct rte_eth_dev_info dev_info;
> +     int retval;
> +
>       PMD_INIT_FUNC_TRACE();
> +     retval = ixgbe_pf_configure_mq_sriov(dev);
> +     if (retval <= 0)
> +             return retval;
> +
> +     uint16_t nb_rx_q = dev->data->nb_rx_queues;
> +     uint16_t nb_tx_q = dev->data->nb_rx_queues;
> +
> +     /* For DCB we need to obtain maximum number of queues
> dinamically,
> +      * as this depends on max VF exported in PF. */
> +     if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
> +             (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
> +             /* Use dev_infos_get field as this might be pointer to PF or
> VF. */
> +             (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
Why not call ixgbe_dev_info_get() directly? Also, it looks like only max_rx_queues
and max_tx_queues are used below, so maybe hw->mac.max_rx_queues and
hw->mac.max_tx_queues could be used instead of calling a function.
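Roughly along these lines, just as a sketch of the second idea (assuming the hw
pointer is fetched via IXGBE_DEV_PRIVATE_TO_HW() as elsewhere in ixgbe_dev_configure();
not tested):

	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Queue limits straight from the MAC data, no dev_infos_get call. */
	uint16_t max_rx_q = (uint16_t)hw->mac.max_rx_queues;
	uint16_t max_tx_q = (uint16_t)hw->mac.max_tx_queues;

The DCB checks below would then compare nb_rx_q/nb_tx_q against
max_rx_q/max_tx_q instead of dev_info.max_rx_queues/max_tx_queues.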

> +     }
> +
> +     /* For vmdq+dcb mode check our configuration before we go further
> */
> +     if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
> +             const struct rte_eth_vmdq_dcb_conf *conf;
> +
> +             if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
> +                     PMD_INIT_LOG(ERR, " VMDQ+DCB,
> nb_rx_q != %d\n",
> +                                     ETH_VMDQ_DCB_NUM_QUEUES);
> +                     return (-EINVAL);
> +             }
> +             conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
> +             if (conf->nb_queue_pools != ETH_16_POOLS &&
> +                             conf->nb_queue_pools != ETH_32_POOLS) {
> +                     PMD_INIT_LOG(ERR, " VMDQ+DCB selected, "
> +                                     "number of RX queue pools must
> be %d or %d\n",
> +                                     ETH_16_POOLS, ETH_32_POOLS);
> +                     return (-EINVAL);
> +             }
> +     } else if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
> +             /* For DCB mode check out configuration before we go
> further */
> +             const struct rte_eth_dcb_rx_conf *conf;
> +
> +             if (nb_rx_q != dev_info.max_rx_queues) {
> +                     PMD_INIT_LOG(ERR, " DCB, number of RX
> queues != %d\n",
> +                                     ETH_DCB_NUM_QUEUES);
The check uses dev_info.max_rx_queues, while the log message prints ETH_DCB_NUM_QUEUES.
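A consistent version might look like this (sketch only, keeping the patch's style;
print whichever limit the check is really meant to enforce):

	if (nb_rx_q != dev_info.max_rx_queues) {
		PMD_INIT_LOG(ERR, " DCB, number of RX queues != %d\n",
				dev_info.max_rx_queues);
		return (-EINVAL);
	}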

> +                     return (-EINVAL);
> +             }
> +             conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
> +             if (conf->nb_tcs != ETH_4_TCS &&
> +                        conf->nb_tcs != ETH_8_TCS) {
> +                     PMD_INIT_LOG(ERR, " DCB, number of RX TC must
> be %d or %d\n",
> +                                     ETH_4_TCS, ETH_8_TCS);
> +                     return (-EINVAL);
> +             }
> +     }
> +
> +     if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
> +             const struct rte_eth_vmdq_dcb_tx_conf *conf;
> +
> +             if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
> +                     PMD_INIT_LOG(ERR, " VMDQ+DCB, number of TX
> queues != %d\n",
> +                                     ETH_VMDQ_DCB_NUM_QUEUES);
> +                     return (-EINVAL);
> +             }
> +             conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
> +             if (conf->nb_queue_pools != ETH_16_POOLS &&
> +                        conf->nb_queue_pools != ETH_32_POOLS) {
> +                     PMD_INIT_LOG(ERR, " VMDQ+DCB selected, "
> +                                     "number of TX qqueue pools must
Typo: qqueue->queue

> be %d or %d\n",
> +                                     ETH_16_POOLS, ETH_32_POOLS);
> +                     return (-EINVAL);
> +             }
> +     } else if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
> +             const struct rte_eth_dcb_tx_conf *conf;
> +
> +             if (nb_tx_q != dev_info.max_tx_queues) {
> +                     PMD_INIT_LOG(ERR, " DCB, number of queues must
> be %d\n",
> +                                     ETH_DCB_NUM_QUEUES);

Same mismatch here: the check uses dev_info.max_tx_queues, while the log message prints ETH_DCB_NUM_QUEUES.

> +                     return (-EINVAL);
> +             }
> +             conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
> +             if (conf->nb_tcs != ETH_4_TCS &&
> +                        conf->nb_tcs != ETH_8_TCS) {
> +                     PMD_INIT_LOG(ERR, " DCB, number of TX TC must
> be %d or %d\n",
> +                                     ETH_4_TCS, ETH_8_TCS);
> +                     return (-EINVAL);
> +             }
> +     }
> 
>       /* set flag to update link status after init */
>       intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> index 1383194..e70a6e8 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> @@ -348,11 +348,14 @@ void ixgbe_vlan_hw_strip_enable_all(struct
> rte_eth_dev *dev);
> 
>  void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
> 
> -void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
> +int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
> 
>  void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
> 
> +int ixgbe_pf_configure_mq_sriov(struct rte_eth_dev *dev);
> +
>  int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
> 
>  uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t
> orig_val);
> +
>  #endif /* _IXGBE_ETHDEV_H_ */
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
> index 4103e97..a7b9333 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
> @@ -91,7 +91,7 @@ ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
>       return 0;
>  }
> 
> -void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
> +int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
>  {
>       struct ixgbe_vf_info **vfinfo =
>               IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data-
> >dev_private);
> @@ -101,39 +101,31 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
>          IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
>       struct ixgbe_hw *hw =
>               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
> +     int retval;
>       uint16_t vf_num;
> -     uint8_t nb_queue;
> 
>       PMD_INIT_FUNC_TRACE();
> 
> -     RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
> -     if (0 == (vf_num = dev_num_vf(eth_dev)))
> -             return;
> +     /* Fill sriov structure using default configuration. */
> +     retval = ixgbe_pf_configure_mq_sriov(eth_dev);
> +     if (retval != 0) {
> +             if (retval < 0)
> +                     PMD_INIT_LOG(ERR, " Setting up SRIOV with default
> device "
> +                                     "configuration should not fail. This is 
> a
> BUG.");
> +             return 0;
> +     }
> 
> +     vf_num = dev_num_vf(eth_dev);
>       *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) *
> vf_num, 0);
> -     if (*vfinfo == NULL)
> -             rte_panic("Cannot allocate memory for private VF data\n");
> +     if (*vfinfo == NULL) {
> +             PMD_INIT_LOG(ERR, "Cannot allocate memory for private VF
> data.");
> +             return (-ENOMEM);
> +     }
> 
>       memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
>       memset(uta_info,0,sizeof(struct ixgbe_uta_info));
>       hw->mac.mc_filter_type = 0;
> 
> -     if (vf_num >= ETH_32_POOLS) {
> -             nb_queue = 2;
> -             RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
> -     } else if (vf_num >= ETH_16_POOLS) {
> -             nb_queue = 4;
> -             RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
> -     } else {
> -             nb_queue = 8;
> -             RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
> -     }
> -
> -     RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
> -     RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
> -     RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
> -     RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
> (uint16_t)(vf_num * nb_queue);
> -
>       ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
> 
>       /* init_mailbox_params */
> @@ -142,7 +134,169 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
>       /* set mb interrupt mask */
>       ixgbe_mb_intr_setup(eth_dev);
> 
> -     return;
> +     return 0;
> +}
> +
> +
> +/*
> + * Function that make SRIOV configuration, based on device
> +configuration,
> + * number of requested queues and number of VF created.
> + * Function returns:
> + * 1 - SRIOV is not enabled (no VF created)
> + * 0 - proper SRIOV configuration found.
> + * -EINVAL - no suitable SRIOV configuration found.
> + */
> +int
> +ixgbe_pf_configure_mq_sriov(struct rte_eth_dev *dev) {

If this function is called by ixgbe_pf_host_init(), it is in the initialization
process and the dev_conf in data is meaningless. Is the following check still
necessary? Maybe it's better to use different functions in the configure and
init phases.
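For example, the init path could keep only the dev_conf-independent defaults in a
small helper (function name is made up here; the body is just a sketch based on
the code this patch removes from ixgbe_pf_host_init()):

	static void
	ixgbe_pf_sriov_set_defaults(struct rte_eth_dev *dev, uint16_t vf_num)
	{
		struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
		uint8_t nb_queue;

		/* Same default pool layout as the old ixgbe_pf_host_init() code. */
		if (vf_num >= ETH_32_POOLS) {
			nb_queue = 2;
			sriov->active = ETH_64_POOLS;
		} else if (vf_num >= ETH_16_POOLS) {
			nb_queue = 4;
			sriov->active = ETH_32_POOLS;
		} else {
			nb_queue = 8;
			sriov->active = ETH_16_POOLS;
		}

		sriov->nb_rx_q_per_pool = nb_queue;
		sriov->nb_tx_q_per_pool = nb_queue;
		sriov->def_vmdq_idx = vf_num;
		sriov->def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
	}

ixgbe_pf_host_init() would then call only that helper, while
ixgbe_pf_configure_mq_sriov() (called from ixgbe_dev_configure()) keeps all the
dev_conf based validation.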

> +     struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
> +     struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
> +     uint16_t vf_num;
> +
> +     vf_num = dev_num_vf(dev);
> +     if (vf_num == 0) {
> +             memset(sriov, 0, sizeof(*sriov));
> +             return 1;
> +     }
> +
> +     /* Check multi-queue mode. */
> +     if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
> +             (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS)
> ||
> +             (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
> +             /* SRIOV only works in VMDq enable mode */
> +             PMD_INIT_LOG(ERR, " SRIOV active, "
> +                             "invlaid VMDQ rx mode (%u) or tx (%u)
> mode.",
> +                             dev_conf->rxmode.mq_mode, dev_conf-
> >txmode.mq_mode);
> +             return (-EINVAL);
> +     }
> +
> +     switch (dev_conf->rxmode.mq_mode) {
> +     case ETH_MQ_RX_VMDQ_DCB:
> +             if (vf_num <= ETH_16_POOLS)
> +                     sriov->nb_rx_q_per_pool = 8;
> +             else if (vf_num <= ETH_32_POOLS)
> +                     sriov->nb_rx_q_per_pool = 4;
> +             else {
> +                     PMD_INIT_LOG(ERR,
> +                             "DCB (SRIOV active) - VF count (%d) must be
> less or equal 32.",
> +                             vf_num);
> +                     return (-EINVAL);
> +             }
> +
> +             if (dev->data->nb_rx_queues < sriov->nb_rx_q_per_pool) {
> +                     PMD_INIT_LOG(WARNING,
> +                             "DCB (SRIOV active) rx queues (%d) count is
> not equal %d.",
> +                             dev->data->nb_rx_queues,
> +                             sriov->nb_rx_q_per_pool);
> +             }
> +             break;
> +     case ETH_MQ_RX_RSS:
> +             PMD_INIT_LOG(INFO, "RSS (SRIOV active), "
> +                             "rx mq mode is changed from: mq_mode %u
> into VMDQ mq_mode %u.",
> +                             dev_conf->rxmode.mq_mode, dev->data-
> >dev_conf.rxmode.mq_mode);
> +             dev->data->dev_conf.rxmode.mq_mode =
> ETH_MQ_RX_VMDQ_RSS;
> +             /* falltrought */
> +     case ETH_MQ_RX_VMDQ_RSS:
> +             if (vf_num >= ETH_64_POOLS) {
> +                     /* FIXME: Is vf_num > 64 realy supported by
> hardware? */
> +                     PMD_INIT_LOG(ERR, "RSS (SRIOV active), "
> +                                     "VFs num must be less or equal 64.");
> +                             return (-EINVAL);
> +             } else if (vf_num >= ETH_32_POOLS) {
> +                     if (dev->data->nb_rx_queues != 1 && dev->data-
> >nb_rx_queues != 2) {
> +                             PMD_INIT_LOG(ERR, "RSS (SRIOV active, VF
> count >= 32),"
> +                                             "invalid rx queues count %d.
> It must be 1 or 2.",
> +                                             dev->data->nb_rx_queues);
> +                             return (-EINVAL);
> +                     }
> +
> +                     sriov->nb_rx_q_per_pool = dev->data-
> >nb_rx_queues;
> +             } else {
> +                     /* FIXME: is VT(16) + RSS realy supported? */
> +                     if (dev->data->nb_rx_queues != 4) {
> +                             PMD_INIT_LOG(ERR, "RSS (SRIOV active, VFs
> count < 32), "
> +                                             "invalid rx queues count %d.
> It must be 4.",
> +                                             dev->data->nb_rx_queues);
> +                             return (-EINVAL);
> +                     }
> +
> +                     sriov->nb_rx_q_per_pool = 4;
> +             }
> +             break;
> +     default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
> +             /* if nothing mq mode configure, use default scheme */
> +             if (dev->data->dev_conf.rxmode.mq_mode !=
> ETH_MQ_RX_VMDQ_ONLY) {
> +                     PMD_INIT_LOG(INFO, "Rx mq mode changed
> from %u into VMDQ %u.",
> +                                     dev->data-
> >dev_conf.rxmode.mq_mode, ETH_MQ_RX_VMDQ_ONLY);
> +
> +                     dev->data->dev_conf.rxmode.mq_mode =
> ETH_MQ_RX_VMDQ_ONLY;
> +             }
> +
> +             /* queue 0 of each pool is used. */
> +             sriov->nb_rx_q_per_pool = 1;
> +             break;
> +     }
> +
> +     switch (dev_conf->txmode.mq_mode) {
> +     case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode*/
> +             if (vf_num <= ETH_16_POOLS)
> +                     sriov->nb_tx_q_per_pool = 8;
> +             else if (vf_num <= ETH_32_POOLS)
> +                     sriov->nb_tx_q_per_pool = 4;
> +             else if (vf_num <= ETH_64_POOLS)
> +                     sriov->nb_tx_q_per_pool = 1;
> +             else {
> +                     PMD_INIT_LOG(ERR, "DCB (SRIOV active), "
> +                                     "VF count (%d) must be less or equal
> 64.",
> +                             vf_num);
> +                     return (-EINVAL);
> +             }
> +             break;
> +     default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
> +             /* if nothing mq mode configure, use default scheme */
> +             if (dev->data->dev_conf.txmode.mq_mode !=
> ETH_MQ_TX_VMDQ_ONLY) {
> +                     PMD_INIT_LOG(INFO, "Tx mq mode is changed
> from %u into VMDQ %u.",
> +                                     dev->data-
> >dev_conf.txmode.mq_mode, ETH_MQ_TX_VMDQ_ONLY);
> +
> +                     dev->data->dev_conf.txmode.mq_mode =
> ETH_MQ_TX_VMDQ_ONLY;
> +             }
> +
> +             /* queue 0 of each pool is used. */
> +             sriov->nb_tx_q_per_pool = 1;
> +             break;
> +     }
> +
> +     sriov->def_vmdq_idx = vf_num;
> +
> +     /*
> +      * Pools starts at 2xN, 4xN or 8xN
> +      */
> +     if (vf_num >= ETH_32_POOLS) {
> +             /* This must be vf_num <= ETH_64_POOLS */
> +             sriov->active = ETH_64_POOLS;
> +             sriov->def_pool_q_idx = vf_num * 2;
> +     } else if (vf_num >= ETH_16_POOLS) {
> +             sriov->active = ETH_32_POOLS;
> +             sriov->def_pool_q_idx = vf_num * 4;
> +     } else {
> +             sriov->active = ETH_16_POOLS;
> +             sriov->def_pool_q_idx = vf_num * 8;
> +     }
> +
> +     /* Check if available queus count is not less than allocated.*/
> +     if (dev->data->nb_rx_queues > sriov->nb_rx_q_per_pool) {
> +             PMD_INIT_LOG(ERR, "SRIOV active, rx queue count must
> less or equal %d.",
> +                                             sriov->nb_rx_q_per_pool);
> +                             return (-EINVAL);
> +     }
> +
> +     if (dev->data->nb_rx_queues > sriov->nb_tx_q_per_pool) {
> +             PMD_INIT_LOG(ERR, "SRIOV active, tx queue count must
> less or equal %d.",
> +                             sriov->nb_tx_q_per_pool);
> +             return (-EINVAL);
> +     }
> +
> +     return 0;
>  }
> 
>  int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
> --
> 1.9.1