> The default behavior is to NOT support the old ptype behavior,
> but enabling the configuration option allows the old ptype style
> to be supported.
> 
> Add support for the old behavior, until we have a cleaner solution,
> via a configuration option CONFIG_RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR,
> which defaults to not set.
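
Restating my understanding of the mechanism before commenting: with the make
build, CONFIG_RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR=y in config/common_base ends
up as an RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR define in rte_config.h, so the
driver code is gated at compile time roughly like this (sketch only, the
helper below is mine and not part of the patch):

    #include <rte_config.h>

    static inline int
    old_ptype_behavior_enabled(void)
    {
    #ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
        return 1; /* built with CONFIG_RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR=y */
    #else
        return 0; /* default: option left at 'n', old ptype path compiled out */
    #endif
    }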

I think it was explained why we had to disable ptype recognition for vector RX
(vRX); please see here:
http://dpdk.org/browse/dpdk/commit/drivers/net/ixgbe/ixgbe_rxtx_vec.c?id=d9a2009a81089093645fea2e04b51dd37edf3e6f
I think that introducing a compile-time option to re-enable incomplete
and not fully correct functionality is a very bad approach.
So NACK.
Konstantin  
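
PS: for reference, the runtime alternative already exists - an application can
ask the PMD whether the active RX function reports packet types at all, and
only then trust mbuf->packet_type. Rough sketch (error handling trimmed, the
helper name is mine):

    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static int
    port_reports_l3_ptypes(uint8_t port_id)
    {
        uint32_t ptypes[16];
        int i, nb;

        /* returns 0 when the driver reports no ptypes at all,
         * e.g. the ixgbe vector RX path */
        nb = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                                              ptypes, RTE_DIM(ptypes));
        for (i = 0; i < nb && i < (int)RTE_DIM(ptypes); i++)
            if (ptypes[i] != RTE_PTYPE_UNKNOWN)
                return 1; /* mbuf->packet_type carries L3 info */

        return 0;
    }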

> 
> Signed-off-by: Keith Wiles <keith.wiles at intel.com>
> ---
>  config/common_base                 |  2 ++
>  drivers/net/ixgbe/ixgbe_ethdev.c   |  6 +++++
>  drivers/net/ixgbe/ixgbe_rxtx_vec.c | 52 +++++++++++++++++++++++++++++++++++---
>  3 files changed, 57 insertions(+), 3 deletions(-)
> 
> diff --git a/config/common_base b/config/common_base
> index bdde2e7..05e69bc 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -160,6 +160,8 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
>  CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
>  CONFIG_RTE_IXGBE_INC_VECTOR=y
>  CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y
> +# Enable to restore old ptype behavior
> +CONFIG_RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR=n
> 
>  #
>  # Compile burst-oriented I40E PMD driver
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> index e11a431..068b92b 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -3076,7 +3076,13 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
>       if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
>           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
>           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +         dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
> +         dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
> +         dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
> +#else
>           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
> +#endif
>               return ptypes;
>       return NULL;
>  }
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
> index 12190d2..2e0d50b 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
> @@ -228,6 +228,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>                       );
>       __m128i dd_check, eop_check;
>       uint8_t vlan_flags;
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +     __m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF,
> +                                       0xFFFFFFFF, 0xFFFF07F0);
> +#endif
> 
>       /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
>       nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
> @@ -268,8 +272,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>               13, 12,      /* octet 12~13, 16 bits data_len */
>               0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
>               13, 12,      /* octet 12~13, low 16 bits pkt_len */
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             0xFF, 0xFF,  /* skip high 16 bits pkt_type */
> +             1,           /* octet 1, 8 bits pkt_type field */
> +             0            /* octet 0, 4 bits offset 4 pkt_type field */
> +#else
>               0xFF, 0xFF,  /* skip 32 bit pkt_type */
>               0xFF, 0xFF
> +#endif
>               );
> 
>       /* Cache is empty -> need to scan the buffer rings, but first move
> @@ -291,6 +301,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>       for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
>                       pos += RTE_IXGBE_DESCS_PER_LOOP,
>                       rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             __m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
> +#endif
>               __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
>               __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
>               __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
> @@ -301,18 +314,30 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
> 
>               /* Read desc statuses backwards to avoid race condition */
>               /* A.1 load 4 pkts desc */
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
> +#else
>               descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
> -
> +#endif
>               /* B.2 copy 2 mbuf point into rx_pkts  */
>               _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
> 
>               /* B.1 load 1 mbuf point */
>               mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
> 
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
> +
> +             /* B.1 load 2 mbuf point */
> +             descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
> +             descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
> +#else
>               descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
> +
>               /* B.1 load 2 mbuf point */
>               descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
>               descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
> +#endif
> 
>               /* B.2 copy 2 mbuf point into rx_pkts  */
>               _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
> @@ -324,6 +349,16 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>                       rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
>               }
> 
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             /* A* mask out 0~3 bits RSS type */
> +             descs[3] = _mm_and_si128(descs0[3], desc_mask);
> +             descs[2] = _mm_and_si128(descs0[2], desc_mask);
> +
> +             /* A* mask out 0~3 bits RSS type */
> +             descs[1] = _mm_and_si128(descs0[1], desc_mask);
> +             descs[0] = _mm_and_si128(descs0[0], desc_mask);
> +#endif
> +
>               /* avoid compiler reorder optimization */
>               rte_compiler_barrier();
> 
> @@ -331,22 +366,33 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
>               pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
>               pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
> 
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +#else
>               /* D.1 pkt 1,2 convert format from desc to pktmbuf */
>               pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
>               pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
> -
> +#endif
>               /* C.1 4=>2 filter staterr info only */
>               sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
>               /* C.1 4=>2 filter staterr info only */
>               sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
> 
>               /* set ol_flags with vlan packet type */
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             desc_to_olflags_v(descs0, vlan_flags, &rx_pkts[pos]);
> +#else
>               desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
> -
> +#endif
>               /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
>               pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
>               pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
> 
> +#ifdef RTE_IXGBE_ENABLE_OLD_PTYPE_BEHAVIOR
> +             /* D.1 pkt 1,2 convert format from desc to pktmbuf */
> +             pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
> +             pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
> +#endif
> +
>               /* C.2 get 4 pkts staterr value  */
>               zero = _mm_xor_si128(dd_check, dd_check);
>               staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
> --
> 2.1.4
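
For completeness, my reading of the new desc_mask (0xFFFF07F0) above, based
only on the mask value and the patch's own "mask out 0~3 bits RSS type"
comments - scalar sketch, the function name is mine:

    #include <stdint.h>

    static inline uint32_t
    ixgbe_mask_ptype_dword(uint32_t desc_lo_dword)
    {
        /* clear bits 0-3 (RSS type) and bits 11-15, keep the remaining
         * packet-type bits and the upper 16 bits of the lower dword, so
         * the later shuffle copies a clean value into mbuf->packet_type */
        return desc_lo_dword & 0xFFFF07F0u;
    }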
