Re: [dpdk-dev] [PATCH v2 05/15] net/mlx5: support tunnel inner checksum offloads

2018-04-11 Thread Xueming(Steven) Li
Hi Nelio,

> -----Original Message-----
> From: Nélio Laranjeiro 
> Sent: Tuesday, April 10, 2018 11:28 PM
> To: Xueming(Steven) Li 
> Cc: Shahaf Shuler ; dev@dpdk.org
> Subject: Re: [PATCH v2 05/15] net/mlx5: support tunnel inner checksum offloads
> 
> On Tue, Apr 10, 2018 at 09:34:05PM +0800, Xueming Li wrote:
> > This patch supports tunnel inner checksum offloads. By creating tunnel
> > flow, once a tunnel packet type (RTE_PTYPE_TUNNEL_xxx) is identified,
> 
> Where is the code creating the tunnel flow?

Wording issue, I'll remove "By creating tunnel flow". Also, this patch is
actually a cleanup of the tunnel checksum handling; I'll update the commit message.

> 
> > PKT_RX_IP_CKSUM_XXX and PKT_RX_L4_CKSUM_XXX represent the checksum result
> > of the inner headers; the outer L3 and L4 header checksums are always valid
> > as soon as a tunnel is identified. If no tunnel is identified,
> > PKT_RX_IP_CKSUM_XXX and PKT_RX_L4_CKSUM_XXX represent the checksum result
> > of the outer L3 and L4 headers.
> >
> > Signed-off-by: Xueming Li 
> > ---
> >  drivers/net/mlx5/mlx5_flow.c |  7 +--
> >  drivers/net/mlx5/mlx5_rxq.c  |  2 --
> >  drivers/net/mlx5/mlx5_rxtx.c | 18 --
> >  drivers/net/mlx5/mlx5_rxtx.h |  1 -
> >  4 files changed, 9 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> > index 65d7a9b62..b3ad6dc85 100644
> > --- a/drivers/net/mlx5/mlx5_flow.c
> > +++ b/drivers/net/mlx5/mlx5_flow.c
> > @@ -829,6 +829,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
> >  /**
> >   * Validate items.
> >   *
> > + * @param dev
> > + *   Pointer to Ethernet device.
> >   * @param[in] items
> >   *   Pattern specification (list terminated by the END pattern item).
> >   * @param[out] error
> > @@ -840,7 +842,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
> >   *   0 on success, a negative errno value otherwise and rte_errno is set.
> >   */
> >  static int
> > -mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
> > +mlx5_flow_convert_items_validate(struct rte_eth_dev *dev __rte_unused,
> > +const struct rte_flow_item items[],
> >  struct rte_flow_error *error,
> >  struct mlx5_flow_parse *parser)
> >  {
> > @@ -1146,7 +1149,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
> > ret = mlx5_flow_convert_actions(dev, actions, error, parser);
> > if (ret)
> > return ret;
> > -   ret = mlx5_flow_convert_items_validate(items, error, parser);
> > +   ret = mlx5_flow_convert_items_validate(dev, items, error, parser);
> > if (ret)
> > return ret;
> > mlx5_flow_convert_finalise(parser);
> 
> I don't understand the necessity of the two hunks above.
> 
> > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> > index 351acfc0f..073732e16 100644
> > --- a/drivers/net/mlx5/mlx5_rxq.c
> > +++ b/drivers/net/mlx5/mlx5_rxq.c
> > @@ -1045,8 +1045,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
> > }
> > /* Toggle RX checksum offload if hardware supports it. */
> > tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
> > -   tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
> > -   priv->config.tunnel_en);
> > tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
> > /* Configure VLAN stripping. */
> > tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
> > diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> > index d061dfc8a..285b2dbf0 100644
> > --- a/drivers/net/mlx5/mlx5_rxtx.c
> > +++ b/drivers/net/mlx5/mlx5_rxtx.c
> > @@ -41,7 +41,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
> >  uint16_t cqe_cnt, uint32_t *rss_hash);
> >
> >  static __rte_always_inline uint32_t
> > -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
> > +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
> >
> >  uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
> > [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
> > @@ -1728,8 +1728,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
> >  /**
> >   * Translate RX completion flags to offload flags.
> >   *
> > - * @param[in] rxq
> > - *   Pointer to RX queue structure.
> >   * @param[in] cqe
> >   *   Pointer to CQE.
> >   *
> > @@ -1737,7 +1735,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
> >   *   Offload flags (ol_flags) for struct rte_mbuf.
> >   */
> >  static inline uint32_t
> > -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
> > +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
> >  {
> > uint32_t ol_flags = 0;
> > uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
> > @@ -1749,14 +1747,6 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)

Re: [dpdk-dev] [PATCH v2 05/15] net/mlx5: support tunnel inner checksum offloads

2018-04-10 Thread Nélio Laranjeiro
On Tue, Apr 10, 2018 at 09:34:05PM +0800, Xueming Li wrote:
> This patch supports tunnel inner checksum offloads. By creating tunnel
> flow, once a tunnel packet type (RTE_PTYPE_TUNNEL_xxx) is identified,

Where is the code creating the tunnel flow?

> PKT_RX_IP_CKSUM_XXX and PKT_RX_L4_CKSUM_XXX represent the checksum result of
> the inner headers; the outer L3 and L4 header checksums are always valid as
> soon as a tunnel is identified. If no tunnel is identified, PKT_RX_IP_CKSUM_XXX
> and PKT_RX_L4_CKSUM_XXX represent the checksum result of the outer L3 and L4
> headers.
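
For illustration, this is roughly how an application consumes these flags per
received mbuf (a minimal sketch; the helper name is made up and the mbuf is
assumed to come from rte_eth_rx_burst()):

    #include <stdio.h>
    #include <rte_mbuf.h>

    /* With a tunnel packet type reported, the checksum flags refer to the
     * inner headers; otherwise they refer to the outer ones. */
    static void
    show_rx_csum(const struct rte_mbuf *m)
    {
            int tunneled = !!(m->packet_type & RTE_PTYPE_TUNNEL_MASK);

            printf("%s IP csum %s, L4 csum %s\n",
                   tunneled ? "inner" : "outer",
                   (m->ol_flags & PKT_RX_IP_CKSUM_GOOD) ? "good" : "not good",
                   (m->ol_flags & PKT_RX_L4_CKSUM_GOOD) ? "good" : "not good");
    }
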
> 
> Signed-off-by: Xueming Li 
> ---
>  drivers/net/mlx5/mlx5_flow.c |  7 +--
>  drivers/net/mlx5/mlx5_rxq.c  |  2 --
>  drivers/net/mlx5/mlx5_rxtx.c | 18 --
>  drivers/net/mlx5/mlx5_rxtx.h |  1 -
>  4 files changed, 9 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 65d7a9b62..b3ad6dc85 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -829,6 +829,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
>  /**
>   * Validate items.
>   *
> + * @param dev
> + *   Pointer to Ethernet device.
>   * @param[in] items
>   *   Pattern specification (list terminated by the END pattern item).
>   * @param[out] error
> @@ -840,7 +842,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
>   *   0 on success, a negative errno value otherwise and rte_errno is set.
>   */
>  static int
> -mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
> +mlx5_flow_convert_items_validate(struct rte_eth_dev *dev __rte_unused,
> +  const struct rte_flow_item items[],
>struct rte_flow_error *error,
>struct mlx5_flow_parse *parser)
>  {
> @@ -1146,7 +1149,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
>   ret = mlx5_flow_convert_actions(dev, actions, error, parser);
>   if (ret)
>   return ret;
> - ret = mlx5_flow_convert_items_validate(items, error, parser);
> + ret = mlx5_flow_convert_items_validate(dev, items, error, parser);
>   if (ret)
>   return ret;
>   mlx5_flow_convert_finalise(parser);

I don't understand the necessity of the two hunks above.

> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 351acfc0f..073732e16 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1045,8 +1045,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>   }
>   /* Toggle RX checksum offload if hardware supports it. */
>   tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
> - tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
> - priv->config.tunnel_en);
>   tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
>   /* Configure VLAN stripping. */
>   tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
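
For context, conf->offloads carries DEV_RX_OFFLOAD_CHECKSUM when the
application requests the offload at configure time. A minimal sketch (the
helper and port_id are placeholders, error handling omitted):

    #include <rte_ethdev.h>

    /* Hypothetical helper: request Rx checksum offload so the PMD enables
     * rxq.csum as in the hunk above. On DPDK releases of this era,
     * rxmode.ignore_offload_bitfield may also need to be set. */
    static int
    enable_rx_csum(uint16_t port_id)
    {
            struct rte_eth_conf conf = { 0 };

            conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
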
> diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> index d061dfc8a..285b2dbf0 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -41,7 +41,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
>uint16_t cqe_cnt, uint32_t *rss_hash);
>  
>  static __rte_always_inline uint32_t
> -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
> +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
>  
>  uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
>   [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
> @@ -1728,8 +1728,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
>  /**
>   * Translate RX completion flags to offload flags.
>   *
> - * @param[in] rxq
> - *   Pointer to RX queue structure.
>   * @param[in] cqe
>   *   Pointer to CQE.
>   *
> @@ -1737,7 +1735,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
>   *   Offload flags (ol_flags) for struct rte_mbuf.
>   */
>  static inline uint32_t
> -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
> +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
>  {
>   uint32_t ol_flags = 0;
>   uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
> @@ -1749,14 +1747,6 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
>   TRANSPOSE(flags,
> MLX5_CQE_RX_L4_HDR_VALID,
> PKT_RX_L4_CKSUM_GOOD);
> - if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
> - ol_flags |=
> - TRANSPOSE(flags,
> -   MLX5_CQE_RX_L3_HDR_VALID,
> -   PKT_RX_IP_CKSUM_GOOD) |
> - TRANSPOSE(flags,
> -   MLX5_CQE_RX_L4_HDR_VALID,
> -
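
Taken together, rxq_cq_to_ol_flags() after this patch reduces to roughly the
following (reconstructed from the hunks above and the driver's TRANSPOSE
macro; the quoted diff is cut off by the archive, so the closing lines are
assumed):

    static inline uint32_t
    rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
    {
            uint32_t ol_flags = 0;
            uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

            /* Single translation path: the same CQE validity bits now
             * describe the innermost headers the NIC checked, whether or
             * not the packet was recognized as tunneled. */
            ol_flags =
                    TRANSPOSE(flags, MLX5_CQE_RX_L3_HDR_VALID,
                              PKT_RX_IP_CKSUM_GOOD) |
                    TRANSPOSE(flags, MLX5_CQE_RX_L4_HDR_VALID,
                              PKT_RX_L4_CKSUM_GOOD);
            return ol_flags;
    }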