In short, same comments as the TX patch, more below.

On Wed, Jan 03, 2018 at 09:16:17AM +0200, Shahaf Shuler wrote:
> Ethdev Rx offloads API has changed since:
> 
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
> 
> This commit support the new Rx offloads API.
> 
> Signed-off-by: Shahaf Shuler <shah...@mellanox.com>
> ---
>  drivers/net/mlx4/mlx4_ethdev.c | 10 ++---
>  drivers/net/mlx4/mlx4_flow.c   |  5 ++-
>  drivers/net/mlx4/mlx4_rxq.c    | 78 ++++++++++++++++++++++++++++++++++---
>  drivers/net/mlx4/mlx4_rxtx.h   |  2 +
>  4 files changed, 82 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
> index 63e00b1da..39a23ee7b 100644
> --- a/drivers/net/mlx4/mlx4_ethdev.c
> +++ b/drivers/net/mlx4/mlx4_ethdev.c
> @@ -766,13 +766,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct 
> rte_eth_dev_info *info)
>       info->max_rx_queues = max;
>       info->max_tx_queues = max;
>       info->max_mac_addrs = RTE_DIM(priv->mac);
> -     info->rx_offload_capa = 0;
>       info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
> -     if (priv->hw_csum) {
> -             info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
> -                                       DEV_RX_OFFLOAD_UDP_CKSUM |
> -                                       DEV_RX_OFFLOAD_TCP_CKSUM);
> -     }
> +     info->rx_queue_offload_capa =
> +                             mlx4_priv_get_rx_queue_offloads(priv);
> +     info->rx_offload_capa = (mlx4_priv_get_rx_port_offloads(priv) |
> +                              info->rx_queue_offload_capa);
>       if (mlx4_get_ifname(priv, &ifname) == 0)
>               info->if_index = if_nametoindex(ifname);
>       info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
> diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
> index 69025da42..96a6a6fa7 100644
> --- a/drivers/net/mlx4/mlx4_flow.c
> +++ b/drivers/net/mlx4/mlx4_flow.c
> @@ -1232,7 +1232,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, 
> uint16_t vlan)
>   * - MAC flow rules are generated from @p dev->data->mac_addrs
>   *   (@p priv->mac array).
>   * - An additional flow rule for Ethernet broadcasts is also generated.
> - * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
> + * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
>   *   is enabled and VLAN filters are configured.
>   *
>   * @param priv
> @@ -1300,7 +1300,8 @@ mlx4_flow_internal(struct priv *priv, struct 
> rte_flow_error *error)
>       };
>       struct ether_addr *rule_mac = &eth_spec.dst;
>       rte_be16_t *rule_vlan =
> -             priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
> +             (priv->dev->data->dev_conf.rxmode.offloads &
> +              DEV_RX_OFFLOAD_VLAN_FILTER) &&
>               !priv->dev->data->promiscuous ?
>               &vlan_spec.tci :
>               NULL;
> diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
> index 53313c56f..0cad28269 100644
> --- a/drivers/net/mlx4/mlx4_rxq.c
> +++ b/drivers/net/mlx4/mlx4_rxq.c
> @@ -663,6 +663,64 @@ mlx4_rxq_detach(struct rxq *rxq)
>  }
>  
>  /**
> + * Returns the per-queue supported offloads.
> + *
> + * @param priv
> + *   Pointer to private structure.
> + *
> + * @return
> + *   Supported Rx offloads.
> + */
> +uint64_t
> +mlx4_priv_get_rx_queue_offloads(struct priv *priv)

You should drop "priv", e.g.:

 mlx4_get_rx_queue_offloads()

> +{
> +     uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
> +
> +     if (priv->hw_csum)
> +             offloads |= DEV_RX_OFFLOAD_CHECKSUM;
> +     return offloads;
> +}
> +
> +/**
> + * Returns the per-port supported offloads.
> + *
> + * @param priv
> + *   Pointer to private strucute.

strucute -> structure

> + *
> + * @return
> + *   Supported Rx offloads.
> + */
> +uint64_t
> +mlx4_priv_get_rx_port_offloads(struct priv *priv __rte_unused)

Same comment about "priv".

Using (void) instead of __rte_unused for consistency with the rest of the
PMD code is fine by the way. A subsequent patch can convert them all at once
to __rte_unused if needed.

> +{
> +     uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
> +
> +     return offloads;
> +}
> +
> +/**
> + * Checks if the per-queue offload configuration is valid.
> + *
> + * @param priv
> + *   Pointer to private structure.
> + * @param offloads
> + *   Per-queue offloads configuration.
> + *
> + * @return
> + *   1 if the configuration is valid, 0 otherwise.

Better described as "Nonzero when configuration is valid."

> + */
> +static int
> +priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)

Ditto, with the same suggestion as for TX:

 [mlx4_]check_rx_queue_offloads()

> +{
> +     uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
> +     uint64_t port_supp_offloads = mlx4_priv_get_rx_port_offloads(priv);
> +
> +     if (((port_offloads ^ offloads) & port_supp_offloads))
> +             return 0;
> +     return 1;
> +}

Same comment and questions regarding this condition as for the TX patch
(mandatory/requested/supported).

> +
> +/**
>   * DPDK callback to configure a Rx queue.
>   *
>   * @param dev
> @@ -707,6 +765,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
> idx, uint16_t desc,
>       (void)conf; /* Thresholds configuration (ignored). */
>       DEBUG("%p: configuring queue %u for %u descriptors",
>             (void *)dev, idx, desc);
> +     if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
> +             rte_errno = ENOTSUP;
> +             ERROR("%p: Rx queue offloads 0x%lx don't match port "
> +                   "offloads 0x%lx or supported offloads 0x%lx",
> +                   (void *)dev, conf->offloads,
> +                   dev->data->dev_conf.rxmode.offloads,
> +                   (mlx4_priv_get_rx_port_offloads(priv) |
> +                    mlx4_priv_get_rx_queue_offloads(priv)));

Should use "%" PRIx64 instead of "%lx".

> +             return -rte_errno;
> +     }
>       if (idx >= dev->data->nb_rx_queues) {
>               rte_errno = EOVERFLOW;
>               ERROR("%p: queue index out of range (%u >= %u)",
> @@ -746,10 +814,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
> idx, uint16_t desc,
>               .elts_n = rte_log2_u32(desc),
>               .elts = elts,
>               /* Toggle Rx checksum offload if hardware supports it. */
> -             .csum = (priv->hw_csum &&
> -                      dev->data->dev_conf.rxmode.hw_ip_checksum),
> -             .csum_l2tun = (priv->hw_csum_l2tun &&
> -                            dev->data->dev_conf.rxmode.hw_ip_checksum),
> +             .csum = priv->hw_csum &&
> +                     (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
> +             .csum_l2tun = priv->hw_csum_l2tun &&
> +                           (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
>               .l2tun_offload = priv->hw_csum_l2tun,
>               .stats = {
>                       .idx = idx,
> @@ -761,7 +829,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
> idx, uint16_t desc,
>       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
>           (mb_len - RTE_PKTMBUF_HEADROOM)) {
>               ;
> -     } else if (dev->data->dev_conf.rxmode.enable_scatter) {
> +     } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
>               uint32_t size =
>                       RTE_PKTMBUF_HEADROOM +
>                       dev->data->dev_conf.rxmode.max_rx_pkt_len;
> diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
> index 91971c4fb..bcb76ee27 100644
> --- a/drivers/net/mlx4/mlx4_rxtx.h
> +++ b/drivers/net/mlx4/mlx4_rxtx.h
> @@ -166,6 +166,8 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
> idx,
>                       const struct rte_eth_rxconf *conf,
>                       struct rte_mempool *mp);
>  void mlx4_rx_queue_release(void *dpdk_rxq);
> +uint64_t mlx4_priv_get_rx_port_offloads(struct priv *priv);
> +uint64_t mlx4_priv_get_rx_queue_offloads(struct priv *priv);

Declarations should come in the same order as in mlx4_rxq.c.

>  
>  /* mlx4_rxtx.c */
>  
> -- 
> 2.12.0
> 

-- 
Adrien Mazarguil
6WIND

Reply via email to