[dpdk-dev] [PATCH] eal: fix c++ compilation issue with rte_delay_us()

2016-10-03 Thread Konstantin Ananyev
When compiling with C++, the compiler treats
void (*rte_delay_us)(unsigned int us);
as a definition of a global variable,
so subsequent linking with librte_eal fails.

Fixes: b4d63fb62240 ("eal: customize delay function")

Steps to reproduce:

$ cat rttm1.cpp

#include <rte_eal.h>
#include <rte_cycles.h>
#include <iostream>

using namespace std;

int main(int argc, char *argv[])
{
int ret = rte_eal_init(argc, argv);
rte_delay_us(1);
cout << "return code ";
cout << ret;
return ret;
}

$ g++ -m64 -I/${RTE_SDK}/${RTE_TARGET}/include -c  -o rttm1.o rttm1.cpp
$ gcc -m64 -pthread -o rttm1 rttm1.o -ldl -Wl,-lstdc++ \
  -L/${RTE_SDK}/${RTE_TARGET}/lib -Wl,-lrte_eal
.../librte_eal.a(eal_common_timer.o):
(.bss+0x0): multiple definition of `rte_delay_us'
rttm1.o:(.bss+0x0): first defined here
collect2: error: ld returned 1 exit status

$ nm rttm1.o | grep rte_delay_us
0092 t _GLOBAL__sub_I_rte_delay_us
 B rte_delay_us
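
For context: in C, an uninitialized file-scope declaration such as
"void (*rte_delay_us)(unsigned int us);" is a tentative definition and
gets merged across translation units, but C++ has no tentative
definitions, so every TU that includes the header emits its own
definition of the variable. A minimal sketch of the C++-safe pattern
(the NULL initializer below is purely illustrative):

/* header (rte_cycles.h): pure declaration, no storage allocated */
extern void (*rte_delay_us)(unsigned int us);

/* exactly one .c file (eal_common_timer.c) provides the single
 * definition; the initializer shown here is illustrative only */
void (*rte_delay_us)(unsigned int us) = NULL;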


Signed-off-by: Konstantin Ananyev 
---
 lib/librte_eal/common/include/generic/rte_cycles.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/librte_eal/common/include/generic/rte_cycles.h 
b/lib/librte_eal/common/include/generic/rte_cycles.h
index 96a2da9..00103ca 100644
--- a/lib/librte_eal/common/include/generic/rte_cycles.h
+++ b/lib/librte_eal/common/include/generic/rte_cycles.h
@@ -188,7 +188,7 @@ rte_get_timer_hz(void)
  * @param us
  *   The number of microseconds to wait.
  */
-void
+extern void
 (*rte_delay_us)(unsigned int us);

 /**
-- 
2.4.3



[dpdk-dev] [PATCH] ixgbe: fix missed packet types.

2016-06-15 Thread Konstantin Ananyev
ixgbe PMD RX function(s) miss packet types that are:
 - correctly recognised by the underlying HW;
 - marked as supported by ixgbe_dev_supported_ptypes_get().

Fixes: 9586ebd358d5 ("ixgbe: replace some offload flags with packet type")
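
The macro values added by this patch encode the packet-type bits
reported in the RX descriptor. A hedged sketch of how such values
typically drive the ptype translation on the RX path (table name,
size and mask below are illustrative, not the exact patch code):

/* hypothetical lookup table indexed by the descriptor's type bits */
static const uint32_t ptype_table[0x80] = {
	[IXGBE_PACKET_TYPE_IPV4] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IXGBE_PACKET_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	/* ... one entry per IXGBE_PACKET_TYPE_* value ... */
};

static inline uint32_t
rxd_pkt_info_to_ptype(uint32_t pkt_info)
{
	return ptype_table[pkt_info & 0x7F]; /* mask is illustrative */
}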

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 180 +++--
 1 file changed, 138 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9c6eaf2..64dc17b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -909,24 +909,40 @@ end_of_tx:
  *  RX functions
  *
  **/
-#define IXGBE_PACKET_TYPE_IPV4  0X01
-#define IXGBE_PACKET_TYPE_IPV4_TCP  0X11
-#define IXGBE_PACKET_TYPE_IPV4_UDP  0X21
-#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
-#define IXGBE_PACKET_TYPE_IPV4_EXT  0X03
-#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
-#define IXGBE_PACKET_TYPE_IPV6  0X04
-#define IXGBE_PACKET_TYPE_IPV6_TCP  0X14
-#define IXGBE_PACKET_TYPE_IPV6_UDP  0X24
-#define IXGBE_PACKET_TYPE_IPV6_EXT  0X0C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP  0X1C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP  0X2C
-#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+
+#define IXGBE_PACKET_TYPE_ETHER                  0X00
+#define IXGBE_PACKET_TYPE_IPV4                   0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP               0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP               0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP              0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT               0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP           0X13
+#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP           0X23
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP          0X43
+#define IXGBE_PACKET_TYPE_IPV6                   0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP               0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP               0X24
+#define IXGBE_PACKET_TYPE_IPV6_SCTP              0X44
+#define IXGBE_PACKET_TYPE_IPV6_EXT               0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP           0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP           0X2C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP          0X4C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6              0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP          0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP          0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP         0X45
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6          0X07
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP      0X17
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP      0X27
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP     0X47
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT          0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP      0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP      0X2D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP     0X4D
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT      0X0F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP  0X1F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP  0X2F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F

 #define IXGBE_PACKET_TYPE_NVGRE   0X00
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4  0X01
@@ -934,13 +950,17 @@ end_of_tx:
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP  0X21
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT  0X03
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP  0X13
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP  0X23
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
 #define IXGBE_PACKET_TYPE_NVGRE_IPV6  0X04
 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP  0X14
 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP  0X24
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT  0X0C
 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP  0X1C
 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP  0X2C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
@@ -954,13 +974,17 @@ end_of_tx:
 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP  0xA1
 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
 #define IXGBE_PACKET_TYPE_VXLA

[dpdk-dev] [PATCH] l3fwd: fix incorrect size for destination port values

2016-03-31 Thread Konstantin Ananyev
Fixes: dc81ebbacaeb ("lpm: extend IPv4 next hop field")

Originally l3fwd used a 16-bit value to store the dest_port value.
To accommodate the 24-bit nexthop, dest_port was increased to 32 bits,
though some further packet-processing code remained unchanged and
still expects dest_port to be 16 bits.
That is not correct and can cause invalid l3fwd behaviour or even
a process crash/hang on some input packet patterns.
For the fix, I chose the simplest approach and restored dest_port
as a 16-bit value, plus the necessary conversions from 32-bit to
16-bit values after lpm_lookupx4.
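
The 32-to-16-bit conversion after lpm_lookupx4 relies on a saturating
pack; a minimal standalone sketch of the same trick (function and
variable names are illustrative):

#include <emmintrin.h>
#include <stdint.h>

/* pack four 32-bit next hops into four 16-bit dest ports;
 * _mm_packs_epi32 narrows with signed saturation, which is safe
 * here because valid port ids fit into 16 bits */
static inline void
pack_dports(__m128i hops32, uint16_t dprt[4])
{
	__m128i x = _mm_packs_epi32(hops32, hops32);
	*(uint64_t *)dprt = _mm_cvtsi128_si64(x); /* low 4x16 bits */
}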

Signed-off-by: Konstantin Ananyev 
---
 examples/l3fwd/l3fwd_em_hlm_sse.h |  6 +++---
 examples/l3fwd/l3fwd_em_sse.h |  2 +-
 examples/l3fwd/l3fwd_lpm.h|  4 ++--
 examples/l3fwd/l3fwd_lpm_sse.h| 12 
 examples/l3fwd/l3fwd_sse.h|  8 
 5 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/examples/l3fwd/l3fwd_em_hlm_sse.h 
b/examples/l3fwd/l3fwd_em_hlm_sse.h
index ee0211f..5001c72 100644
--- a/examples/l3fwd/l3fwd_em_hlm_sse.h
+++ b/examples/l3fwd/l3fwd_em_hlm_sse.h
@@ -38,7 +38,7 @@

 static inline __attribute__((always_inline)) void
 em_get_dst_port_ipv4x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
-   uint8_t portid, uint32_t dst_port[8])
+   uint8_t portid, uint16_t dst_port[8])
 {
int32_t ret[8];
union ipv4_5tuple_host key[8];
@@ -162,7 +162,7 @@ get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,

 static inline __attribute__((always_inline)) void
 em_get_dst_port_ipv6x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
-   uint8_t portid, uint32_t dst_port[8])
+   uint8_t portid, uint16_t dst_port[8])
 {
int32_t ret[8];
union ipv6_5tuple_host key[8];
@@ -289,7 +289,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf 
**pkts_burst,
uint8_t portid, struct lcore_conf *qconf)
 {
int32_t j;
-   uint32_t dst_port[MAX_PKT_BURST];
+   uint16_t dst_port[MAX_PKT_BURST];

/*
 * Send nb_rx - nb_rx%8 packets
diff --git a/examples/l3fwd/l3fwd_em_sse.h b/examples/l3fwd/l3fwd_em_sse.h
index e2fe932..c0a9725 100644
--- a/examples/l3fwd/l3fwd_em_sse.h
+++ b/examples/l3fwd/l3fwd_em_sse.h
@@ -102,7 +102,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf 
**pkts_burst,
uint8_t portid, struct lcore_conf *qconf)
 {
int32_t j;
-   uint32_t dst_port[MAX_PKT_BURST];
+   uint16_t dst_port[MAX_PKT_BURST];

for (j = 0; j < nb_rx; j++)
dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h
index fc10235..a43c507 100644
--- a/examples/l3fwd/l3fwd_lpm.h
+++ b/examples/l3fwd/l3fwd_lpm.h
@@ -34,14 +34,14 @@
 #ifndef __L3FWD_LPM_H__
 #define __L3FWD_LPM_H__

-static inline uint32_t
+static inline uint8_t
 lpm_get_ipv4_dst_port(void *ipv4_hdr,  uint8_t portid, void *lookup_struct)
 {
uint32_t next_hop;
struct rte_lpm *ipv4_l3fwd_lookup_struct =
(struct rte_lpm *)lookup_struct;

-   return (uint32_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+   return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
		&next_hop) == 0) ? next_hop : portid);
 }
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index d64d6d2..538fe3d 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -145,9 +145,9 @@ static inline void
 processx4_step2(const struct lcore_conf *qconf,
__m128i dip,
uint32_t ipv4_flag,
-   uint32_t portid,
+   uint8_t portid,
struct rte_mbuf *pkt[FWDSTEP],
-   uint32_t dprt[FWDSTEP])
+   uint16_t dprt[FWDSTEP])
 {
rte_xmm_t dst;
const  __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
@@ -158,7 +158,11 @@ processx4_step2(const struct lcore_conf *qconf,

/* if all 4 packets are IPV4. */
if (likely(ipv4_flag)) {
-   rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
+   rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dst.u32,
+   portid);
+   /* get rid of unused upper 16 bit for each dport. */
+   dst.x = _mm_packs_epi32(dst.x, dst.x);
+   *(uint64_t *)dprt = dst.u64[0];
} else {
dst.x = dip;
dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0], dst.u32[0], 
portid);
@@ -177,7 +181,7 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf 
**pkts_burst,
uint8_t portid, struct lcore_conf *qconf)
 {
int32_t j;
-   uint32_t dst_port[MAX_PKT_BURST];
+   uint16_t dst_port[MAX_PKT_BURST];
__m128i dip[MAX_PKT_BURST / FWDSTEP];
uint32_t ipv4_flag[M

[dpdk-dev] [PATCH] ixgbe: fix vector RX can't always set packet_type properly

2016-03-22 Thread Konstantin Ananyev
Fixes: 39625417585 ("mbuf: redefine packet type")

Current vector RX can't always set packet_type properly.
To be more specific:
a) it never sets RTE_PTYPE_L2_ETHER;
b) it doesn't handle the tunnel ipv4/ipv6 case correctly;
c) it doesn't check whether IXGBE_RXDADV_PKTTYPE_ETQF is set or not.
While a) is pretty easy to fix, b) and c) are not that straightforward
in terms of SIMD ops (especially b).
So far I wasn't able to make vector RX support packet_type properly
without a noticeable performance loss.
So for now, just remove that functionality entirely from vector RX and
update dev_supported_ptypes_get().
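
For reference, a hedged scalar sketch of check c) that the vector path
would have to replicate (decode_l3_l4() is a hypothetical helper):

/* ETQF-matched descriptors carry a filter index instead of a
 * decodable packet type, so the type bits must not be parsed */
static inline uint32_t
rxd_to_ptype(uint32_t pkt_info)
{
	if (pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)
		return RTE_PTYPE_UNKNOWN;
	return RTE_PTYPE_L2_ETHER | decode_l3_l4(pkt_info);
}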

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c   |  4 +---
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 34 +++---
 2 files changed, 12 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c1a8630..29527de 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2962,9 +2962,7 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
-   dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
-   dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
-   dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+   dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
return ptypes;
return NULL;
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index ccd93c7..5040704 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -220,8 +220,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_pkts,
0, 0/* ignore pkt_type field */
);
__m128i dd_check, eop_check;
-   __m128i desc_mask = _mm_set_epi32(0x, 0x,
- 0x, 0x07F0);

/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
@@ -259,9 +257,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_pkts,
13, 12,  /* octet 12~13, 16 bits data_len */
0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
13, 12,  /* octet 12~13, low 16 bits pkt_len */
-   0xFF, 0xFF,  /* skip high 16 bits pkt_type */
-   1,   /* octet 1, 8 bits pkt_type field */
-   0/* octet 0, 4 bits offset 4 pkt_type field */
+   0xFF, 0xFF,  /* skip 32 bit pkt_type */
+   0xFF, 0xFF
);

/* Cache is empty -> need to scan the buffer rings, but first move
@@ -278,7 +275,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_pkts,
for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
pos += RTE_IXGBE_DESCS_PER_LOOP,
rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
-   __m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
@@ -289,7 +285,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_pkts,

/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
-   descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+   descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));

/* B.2 copy 2 mbuf point into rx_pkts  */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -297,10 +293,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_pkts,
/* B.1 load 1 mbuf point */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);

-   descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+   descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
/* B.1 load 2 mbuf point */
-   descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
-   descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+   descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+   descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

/* B.2 copy 2 mbuf point into rx_pkts  */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
@@ -312,14 +308,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_pkts,
		rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
}

-   /* A* mask out 0~3 bits RSS type */
-   descs

[dpdk-dev] [PATCH v2] ACL: fix build for native-icc target on haswell fails

2015-11-20 Thread Konstantin Ananyev
On HSW box with icc 16.0.0 build for x86_64-native-linuxapp-icc fails with:
icc: command line warning #10120: overriding '-march=native' with '-msse4.1'
...
dpdk.org/x86_64-native-linuxapp-icc/include/rte_memcpy.h(96): error: identifier 
"__m256i" is undefined

The reason is that icc treats "-march=native ... -msse4.1"
in a different way than gcc and clang:
for icc it means overriding all flags enabled by
'-march=native' with '-msse4.1',
even when '-march=native' is a superset of '-msse4.1'.
To overcome the problem, add a check whether the SSE4.1 compilation
flag is already enabled; if yes, then there is no need to add '-msse4.1'.
A similar change is made for the avx2 compilation option.
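
Note that the compiler itself predefines __SSE4_1__/__AVX2__ whenever
the corresponding ISA is enabled (by -march=native or otherwise); the
Makefile check below keys off RTE_MACHINE_CPUFLAG_* in CFLAGS instead,
but an equivalent probe can be sketched in C:

/* hedged sketch: compile-time detection of already-enabled ISAs */
#if defined(__SSE4_1__)
/* adding -msse4.1 would be redundant (and harmful with icc) */
#endif
#if defined(__AVX2__)
/* -mavx2 / -march=core-avx2 is already in effect */
#endif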

Fixes: 074f54ad03ee ("acl: fix build and runtime for default target")

Reported-by: Declan Doherty 
Reported-by: Sergio Gonzalez Monroy 
Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/Makefile | 31 +--
 1 file changed, 21 insertions(+), 10 deletions(-)

v2 changes:
- merged with latest mainline

diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile
index 27f91d5..897237d 100644
--- a/lib/librte_acl/Makefile
+++ b/lib/librte_acl/Makefile
@@ -48,31 +48,42 @@ SRCS-$(CONFIG_RTE_LIBRTE_ACL) += rte_acl.c
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_bld.c
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_gen.c
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_scalar.c
+
 ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_neon.c
+CFLAGS_acl_run_neon.o += -flax-vector-conversions -Wno-maybe-uninitialized
 else
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_sse.c
+#check if flag for SSE4.1 is already on, if not set it up manually
+   ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
+   CFLAGS_acl_run_sse.o += -msse4.1
+   endif
 endif

-CFLAGS_acl_run_sse.o += -msse4.1
-CFLAGS_acl_run_neon.o += -flax-vector-conversions -Wno-maybe-uninitialized
-
 #
 # If the compiler supports AVX2 instructions,
 # then add support for AVX2 classify method.
 #

-CC_AVX2_SUPPORT=$(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
-grep -q AVX2 && echo 1)
+#check if flag for AVX2 is already on, if not set it up manually
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
+   CC_AVX2_SUPPORT=1
+else
+   CC_AVX2_SUPPORT=\
+   $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
+   grep -q AVX2 && echo 1)
+   ifeq ($(CC_AVX2_SUPPORT), 1)
+   ifeq ($(CC), icc)
+   CFLAGS_acl_run_avx2.o += -march=core-avx2
+   else
+   CFLAGS_acl_run_avx2.o += -mavx2
+   endif
+   endif
+endif

 ifeq ($(CC_AVX2_SUPPORT), 1)
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_avx2.c
CFLAGS_rte_acl.o += -DCC_AVX2_SUPPORT
-   ifeq ($(CC), icc)
-   CFLAGS_acl_run_avx2.o += -march=core-avx2
-   else
-   CFLAGS_acl_run_avx2.o += -mavx2
-   endif
 endif

 # install this header file
-- 
1.8.5.3



[dpdk-dev] [PATCH] ACL: fix build for native-icc target on haswell fails

2015-11-17 Thread Konstantin Ananyev
On HSW box with icc 16.0.0 build for x86_64-default-linuxapp-icc fails with:
icc: command line warning #10120: overriding '-march=native' with '-msse4.1'
...
dpdk.org/x86_64-native-linuxapp-icc/include/rte_memcpy.h(96): error: identifier 
"__m256i" is undefined

The reason is that icc treats "-march=native ... -msse4.1"
in a different way than gcc and clang:
for icc it means overriding all flags enabled by
'-march=native' with '-msse4.1',
even when '-march=native' is a superset of '-msse4.1'.
To overcome the problem, add a check whether the SSE4.1 compilation
flag is already enabled; if yes, then there is no need to add '-msse4.1'.
A similar change is made for the avx2 compilation option.

Fixes: 074f54ad03ee ("acl: fix build and runtime for default target")

Reported-by: Declan Doherty 
Reported-by: Sergio Gonzalez Monroy 
Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/Makefile | 27 +++
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile
index 7a1cf8a..ff63a0c 100644
--- a/lib/librte_acl/Makefile
+++ b/lib/librte_acl/Makefile
@@ -50,24 +50,35 @@ SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_gen.c
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_scalar.c
 SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_sse.c

-CFLAGS_acl_run_sse.o += -msse4.1
+#check if flag for SSE4.1 is already on, if not set it up manually
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
+   CFLAGS_acl_run_sse.o += -msse4.1
+endif

 #
 # If the compiler supports AVX2 instructions,
 # then add support for AVX2 classify method.
 #

-CC_AVX2_SUPPORT=$(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
-grep -q AVX2 && echo 1)
+#check if flag for AVX2 is already on, if not set it up manually
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
+   CC_AVX2_SUPPORT=1
+else
+   CC_AVX2_SUPPORT=\
+   $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
+   grep -q AVX2 && echo 1)
+   ifeq ($(CC_AVX2_SUPPORT), 1)
+   ifeq ($(CC), icc)
+   CFLAGS_acl_run_avx2.o += -march=core-avx2
+   else
+   CFLAGS_acl_run_avx2.o += -mavx2
+   endif
+   endif
+endif

 ifeq ($(CC_AVX2_SUPPORT), 1)
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_avx2.c
CFLAGS_rte_acl.o += -DCC_AVX2_SUPPORT
-   ifeq ($(CC), icc)
-   CFLAGS_acl_run_avx2.o += -march=core-avx2
-   else
-   CFLAGS_acl_run_avx2.o += -mavx2
-   endif
 endif

 # install this header file
-- 
1.8.5.3



[dpdk-dev] [PATCHv7 2/2] ixgbe: fix TX hang when RS distance exceeds HW limit

2015-11-10 Thread Konstantin Ananyev
One of the ways to reproduce the issue:

testpmd  -- -i --txqflags=0
testpmd> set fwd txonly
testpmd> set txpkts 64,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
testpmd> set txsplit rand
testpmd> start

After some time TX on ixgbe queue will hang,
and all packet transmission on that queue will stop.

This bug was first reported and investigated by
Vlad Zolotarov :
"We can reproduce this issue when stressed the xmit path with a lot of highly
fragmented TCP frames (packets with up to 33 fragments with non-headers
fragments as small as 4 bytes) with all offload features enabled."

The root cause is that ixgbe_xmit_pkts() in some cases violates the HW rule
that the distance between TDs with RS bit set should not exceed 40 TDs.

From the latest 82599 spec update:
"When WTHRESH is set to zero, the software device driver should set the RS bit
in the Tx descriptors with the EOP bit set and at least once in the 40
descriptors."

The fix is to make sure that the distance between TDs with RS bit set
never exceeds the HW limit.
As part of that fix, tx_rs_thresh for the ixgbe PMD is not allowed to be
greater than 32, to comply with HW restrictions.

With that fix, a slight slowdown for the full-featured ixgbe TX path
might be observed (from our testing - up to 4%).

ixgbe simple TX path is unaffected by that patch.
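
A condensed sketch of the resulting RS-bit policy in the TX loop,
simplified from the diff below (txp tracks the last descriptor of the
previous packet):

/* before reserving descriptors for the current packet: if adding it
 * would push the unflagged distance past tx_rs_thresh, set RS on the
 * previous packet's last descriptor */
if (txp != NULL && nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
	txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);

/* ... fill the descriptors for the current packet ... */

if (txq->nb_tx_used >= txq->tx_rs_thresh) {
	cmd_type_len |= IXGBE_TXD_CMD_RS; /* flag this packet itself */
	txq->nb_tx_used = 0;
	txp = NULL;                       /* nothing pending */
} else {
	txp = txd;                        /* remember last descriptor */
}

/* after the burst: make sure the tail packet is flagged too */
if (txp != NULL)
	txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);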

Reported-by: Vlad Zolotarov 
Signed-off-by: Konstantin Ananyev 
---
 app/test/test_pmd_perf.c |  8 
 doc/guides/rel_notes/release_2_2.rst |  7 +++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 32 +++-
 3 files changed, 38 insertions(+), 9 deletions(-)

v5 changes:
- rework the patch to avoid setting RS bit on every EOP descriptor
 (while that approach is valid, it causes significant slowdown
  on the TX path: up to 25%).

v6 changes:
- fix pmd_perf_autotest
- fix error description
- update RN

v7 changes:
- move vN changes after the changed file list

diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 1fd6843..ef9262c 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -841,10 +841,10 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
port_conf.rxmode.enable_scatter = 0;
return 0;
} else if (!strcmp(mode, "scalar")) {
-   /* bulk alloc rx, simple tx */
-   tx_conf.txq_flags = 0xf01;
-   tx_conf.tx_rs_thresh = 128;
-   tx_conf.tx_free_thresh = 128;
+   /* bulk alloc rx, full-featured tx */
+   tx_conf.txq_flags = 0;
+   tx_conf.tx_rs_thresh = 32;
+   tx_conf.tx_free_thresh = 32;
port_conf.rxmode.hw_ip_checksum = 1;
port_conf.rxmode.enable_scatter = 0;
return 0;
diff --git a/doc/guides/rel_notes/release_2_2.rst 
b/doc/guides/rel_notes/release_2_2.rst
index 59dda59..62e225b 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -134,6 +134,13 @@ Drivers

   VF needs the PF interrupt support initialized even if not started.

+* **ixgbe: Fixed TX hang when RS distance exceeds HW limit.**
+
+  Fixed an issue when TX queue can hang when a lot of highly fragmented
+  packets have to be sent.
+  As part of that fix, tx_rs_thresh for ixgbe PMD is not allowed to be greater
+  than 32, to comply with HW restrictions.
+
 * **i40e: Fixed base driver allocation when not using first numa node.**

   Fixed i40e issue that occurred when a DPDK application didn't initialize
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5561195..ca6fb69 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -572,7 +572,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct ixgbe_tx_entry *sw_ring;
struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
-   volatile union ixgbe_adv_tx_desc *txd;
+   volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
@@ -595,6 +595,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txr = txq->tx_ring;
tx_id   = txq->tx_tail;
	txe = &sw_ring[tx_id];
+   txp = NULL;

/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
@@ -638,6 +639,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 */
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

+   if (txp != NULL &&
+   nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+   /* set RS on the previous packet in the burst */
+   txp->read.cmd_type_len |=
+   rte_cpu_to_le_32(IXGBE_TXD_C

[dpdk-dev] [PATCHv7 1/2] testpmd: add ability to split outgoing packets

2015-11-10 Thread Konstantin Ananyev
For CSUM forwarding mode, add the ability to copy & split an outgoing
packet into a new mbuf that consists of multiple segments.
For TXONLY and CSUM forwarding modes, add the ability to make the number
of segments in the outgoing packet vary on a per-packet basis.
The number of segments and the size of each segment are controlled by
the 'set txpkts' command.
The split policy is controlled by the 'set txsplit' command.
Possible values are: on | off | rand.
That allows increasing test coverage for the TX PMD codepaths.
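
A hedged sketch of the per-packet split decision this enables (names
follow the patch's testpmd additions but should be treated as
illustrative):

/* decide how many segments the next outgoing packet gets */
static inline uint8_t
tx_nb_segs(uint8_t cfg_nb_segs, enum tx_pkt_split split)
{
	if (split == TX_PKT_SPLIT_RND)  /* 'set txsplit rand' */
		return rte_rand() % cfg_nb_segs + 1;
	return cfg_nb_segs;             /* 'on': always use all segments */
}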

Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c  |  57 +-
 app/test-pmd/config.c   |  61 +++
 app/test-pmd/csumonly.c | 163 +++-
 app/test-pmd/testpmd.c  |   3 +
 app/test-pmd/testpmd.h  |  10 ++
 app/test-pmd/txonly.c   |  13 ++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  21 +++-
 7 files changed, 319 insertions(+), 9 deletions(-)

v6 changes:
- fix typos
- testpmd guide: fix invalid command description

v7 changes:
- move vN changes after the changed file list

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index c637198..a92fe0b 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -199,7 +199,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"clear port (info|stats|xstats|fdir|stat_qmap) 
(port_id|all)\n"
"Clear information for port_id, or all.\n\n"

-   "show config (rxtx|cores|fwd)\n"
+   "show config (rxtx|cores|fwd|txpkts)\n"
"Display the given configuration.\n\n"

"read rxd (port_id) (queue_id) (rxd_id)\n"
@@ -246,7 +246,12 @@ static void cmd_help_long_parsed(void *parsed_result,

"set txpkts (x[,y]*)\n"
"Set the length of each segment of TXONLY"
-   " packets.\n\n"
+   " and optionally CSUM packets.\n\n"
+
+   "set txsplit (off|on|rand)\n"
+   "Set the split policy for the TX packets."
+   " Right now only applicable for CSUM and TXONLY"
+   " modes\n\n"

"set corelist (x[,y]*)\n"
"Set the list of forwarding cores.\n\n"
@@ -2621,6 +2626,47 @@ cmdline_parse_inst_t cmd_set_txpkts = {
},
 };

+/* *** SET COPY AND SPLIT POLICY ON TX PACKETS *** */
+
+struct cmd_set_txsplit_result {
+   cmdline_fixed_string_t cmd_keyword;
+   cmdline_fixed_string_t txsplit;
+   cmdline_fixed_string_t mode;
+};
+
+static void
+cmd_set_txsplit_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+   struct cmd_set_txsplit_result *res;
+
+   res = parsed_result;
+   set_tx_pkt_split(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_set_txsplit_keyword =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+cmd_keyword, "set");
+cmdline_parse_token_string_t cmd_set_txsplit_name =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+txsplit, "txsplit");
+cmdline_parse_token_string_t cmd_set_txsplit_mode =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+mode, NULL);
+
+cmdline_parse_inst_t cmd_set_txsplit = {
+   .f = cmd_set_txsplit_parsed,
+   .data = NULL,
+   .help_str = "set txsplit on|off|rand",
+   .tokens = {
+   (void *)&cmd_set_txsplit_keyword,
+   (void *)&cmd_set_txsplit_name,
+   (void *)&cmd_set_txsplit_mode,
+   NULL,
+   },
+};
+
 /* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
 struct cmd_rx_vlan_filter_all_result {
cmdline_fixed_string_t rx_vlan;
@@ -5233,6 +5279,8 @@ static void cmd_showcfg_parsed(void *parsed_result,
fwd_lcores_config_display();
else if (!strcmp(res->what, "fwd"))
fwd_config_display();
+   else if (!strcmp(res->what, "txpkts"))
+   show_tx_pkt_segments();
 }

 cmdline_parse_token_string_t cmd_showcfg_show =
@@ -5241,12 +5289,12 @@ cmdline_parse_token_string_t cmd_showcfg_port =
TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config");
 cmdline_parse_token_string_t cmd_showcfg_what =
TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what,
-"rxtx#cores#fwd");
+"rxtx#cores#fwd#txpk

[dpdk-dev] [PATCHv7 0/2] ixgbe: fix TX hang when RS distance exceeds HW limit

2015-11-10 Thread Konstantin Ananyev
The first patch contains changes in testpmd that allow reproducing the issue.
The second patch is the actual fix.

Konstantin Ananyev (2):
  testpmd: add ability to split outgoing packets
  ixgbe: fix TX hang when RS distance exceeds HW limit

 app/test-pmd/cmdline.c  |  57 +-
 app/test-pmd/config.c   |  61 +++
 app/test-pmd/csumonly.c | 163 +++-
 app/test-pmd/testpmd.c  |   3 +
 app/test-pmd/testpmd.h  |  10 ++
 app/test-pmd/txonly.c   |  13 ++-
 app/test/test_pmd_perf.c|   8 +-
 doc/guides/rel_notes/release_2_2.rst|   7 ++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  21 +++-
 drivers/net/ixgbe/ixgbe_rxtx.c  |  32 +-
 10 files changed, 357 insertions(+), 18 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCHv6 2/2] ixgbe: fix TX hang when RS distance exceeds HW limit

2015-11-09 Thread Konstantin Ananyev
One of the ways to reproduce the issue:

testpmd  -- -i --txqflags=0
testpmd> set fwd txonly
testpmd> set txpkts 64,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
testpmd> set txsplit rand
testpmd> start

After some time TX on ixgbe queue will hang,
and all packet transmission on that queue will stop.

This bug was first reported and investigated by
Vlad Zolotarov :
"We can reproduce this issue when stressed the xmit path with a lot of highly
fragmented TCP frames (packets with up to 33 fragments with non-headers
fragments as small as 4 bytes) with all offload features enabled."

The root cause is that ixgbe_xmit_pkts() in some cases violates the HW rule
that the distance between TDs with RS bit set should not exceed 40 TDs.

From the latest 82599 spec update:
"When WTHRESH is set to zero, the software device driver should set the RS bit
in the Tx descriptors with the EOP bit set and at least once in the 40
descriptors."

The fix is to make sure that the distance between TDs with RS bit set
never exceeds the HW limit.
As part of that fix, tx_rs_thresh for the ixgbe PMD is not allowed to be
greater than 32, to comply with HW restrictions.

With that fix, a slight slowdown for the full-featured ixgbe TX path
might be observed (from our testing - up to 4%).

ixgbe simple TX path is unaffected by that patch.

v5 changes:
- rework the patch to avoid setting RS bit on every EOP descriptor
 (while that approach is valid, it causes significant slowdown
  on the TX path: up to 25%).

v6 changes:
- fix pmd_perf_autotest
- fix error description
- update RN

Reported-by: Vlad Zolotarov 
Signed-off-by: Konstantin Ananyev 
---
 app/test/test_pmd_perf.c |  8 
 doc/guides/rel_notes/release_2_2.rst |  7 +++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 32 +++-
 3 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 1fd6843..ef9262c 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -841,10 +841,10 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
port_conf.rxmode.enable_scatter = 0;
return 0;
} else if (!strcmp(mode, "scalar")) {
-   /* bulk alloc rx, simple tx */
-   tx_conf.txq_flags = 0xf01;
-   tx_conf.tx_rs_thresh = 128;
-   tx_conf.tx_free_thresh = 128;
+   /* bulk alloc rx, full-featured tx */
+   tx_conf.txq_flags = 0;
+   tx_conf.tx_rs_thresh = 32;
+   tx_conf.tx_free_thresh = 32;
port_conf.rxmode.hw_ip_checksum = 1;
port_conf.rxmode.enable_scatter = 0;
return 0;
diff --git a/doc/guides/rel_notes/release_2_2.rst 
b/doc/guides/rel_notes/release_2_2.rst
index 59dda59..62e225b 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -134,6 +134,13 @@ Drivers

   VF needs the PF interrupt support initialized even if not started.

+* **ixgbe: Fixed TX hang when RS distance exceeds HW limit.**
+
+  Fixed an issue when TX queue can hang when a lot of highly fragmented
+  packets have to be sent.
+  As part of that fix, tx_rs_thresh for ixgbe PMD is not allowed to be greater
+  than 32, to comply with HW restrictions.
+
 * **i40e: Fixed base driver allocation when not using first numa node.**

   Fixed i40e issue that occurred when a DPDK application didn't initialize
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5561195..ca6fb69 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -572,7 +572,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct ixgbe_tx_entry *sw_ring;
struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
-   volatile union ixgbe_adv_tx_desc *txd;
+   volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
@@ -595,6 +595,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txr = txq->tx_ring;
tx_id   = txq->tx_tail;
	txe = &sw_ring[tx_id];
+   txp = NULL;

/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
@@ -638,6 +639,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 */
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

+   if (txp != NULL &&
+   nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+   /* set RS on the previous packet in the burst */
+   txp->read.cmd_type_len |=
+   rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
/*
 * The number 

[dpdk-dev] [PATCHv6 1/2] testpmd: add ability to split outgoing packets

2015-11-09 Thread Konstantin Ananyev
For CSUM forwarding mode, add the ability to copy & split an outgoing
packet into a new mbuf that consists of multiple segments.
For TXONLY and CSUM forwarding modes, add the ability to make the number
of segments in the outgoing packet vary on a per-packet basis.
The number of segments and the size of each segment are controlled by
the 'set txpkts' command.
The split policy is controlled by the 'set txsplit' command.
Possible values are: on | off | rand.
That allows increasing test coverage for the TX PMD codepaths.

v6 changes:
- fix typos
- testpmd guide: fix invalid command description

Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c  |  57 +-
 app/test-pmd/config.c   |  61 +++
 app/test-pmd/csumonly.c | 163 +++-
 app/test-pmd/testpmd.c  |   3 +
 app/test-pmd/testpmd.h  |  10 ++
 app/test-pmd/txonly.c   |  13 ++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  21 +++-
 7 files changed, 319 insertions(+), 9 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index c637198..a92fe0b 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -199,7 +199,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"clear port (info|stats|xstats|fdir|stat_qmap) 
(port_id|all)\n"
"Clear information for port_id, or all.\n\n"

-   "show config (rxtx|cores|fwd)\n"
+   "show config (rxtx|cores|fwd|txpkts)\n"
"Display the given configuration.\n\n"

"read rxd (port_id) (queue_id) (rxd_id)\n"
@@ -246,7 +246,12 @@ static void cmd_help_long_parsed(void *parsed_result,

"set txpkts (x[,y]*)\n"
"Set the length of each segment of TXONLY"
-   " packets.\n\n"
+   " and optionally CSUM packets.\n\n"
+
+   "set txsplit (off|on|rand)\n"
+   "Set the split policy for the TX packets."
+   " Right now only applicable for CSUM and TXONLY"
+   " modes\n\n"

"set corelist (x[,y]*)\n"
"Set the list of forwarding cores.\n\n"
@@ -2621,6 +2626,47 @@ cmdline_parse_inst_t cmd_set_txpkts = {
},
 };

+/* *** SET COPY AND SPLIT POLICY ON TX PACKETS *** */
+
+struct cmd_set_txsplit_result {
+   cmdline_fixed_string_t cmd_keyword;
+   cmdline_fixed_string_t txsplit;
+   cmdline_fixed_string_t mode;
+};
+
+static void
+cmd_set_txsplit_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+   struct cmd_set_txsplit_result *res;
+
+   res = parsed_result;
+   set_tx_pkt_split(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_set_txsplit_keyword =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+cmd_keyword, "set");
+cmdline_parse_token_string_t cmd_set_txsplit_name =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+txsplit, "txsplit");
+cmdline_parse_token_string_t cmd_set_txsplit_mode =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+mode, NULL);
+
+cmdline_parse_inst_t cmd_set_txsplit = {
+   .f = cmd_set_txsplit_parsed,
+   .data = NULL,
+   .help_str = "set txsplit on|off|rand",
+   .tokens = {
+   (void *)&cmd_set_txsplit_keyword,
+   (void *)&cmd_set_txsplit_name,
+   (void *)&cmd_set_txsplit_mode,
+   NULL,
+   },
+};
+
 /* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
 struct cmd_rx_vlan_filter_all_result {
cmdline_fixed_string_t rx_vlan;
@@ -5233,6 +5279,8 @@ static void cmd_showcfg_parsed(void *parsed_result,
fwd_lcores_config_display();
else if (!strcmp(res->what, "fwd"))
fwd_config_display();
+   else if (!strcmp(res->what, "txpkts"))
+   show_tx_pkt_segments();
 }

 cmdline_parse_token_string_t cmd_showcfg_show =
@@ -5241,12 +5289,12 @@ cmdline_parse_token_string_t cmd_showcfg_port =
TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config");
 cmdline_parse_token_string_t cmd_showcfg_what =
TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what,
-"rxtx#cores#fwd");
+"rxtx#cores#fwd#txpkts");

 cmdline_parse_inst_t cmd_showcfg = {
.f = cmd_

[dpdk-dev] [PATCHv6 0/2] ixgbe: fix TX hang when RS distance exceeds HW limit

2015-11-09 Thread Konstantin Ananyev
The first patch contains changes in testpmd that allow reproducing the issue.
The second patch is the actual fix.

Konstantin Ananyev (2):
  testpmd: add ability to split outgoing packets
  ixgbe: fix TX hang when RS distance exceeds HW limit

 app/test-pmd/cmdline.c  |  57 +-
 app/test-pmd/config.c   |  61 +++
 app/test-pmd/csumonly.c | 163 +++-
 app/test-pmd/testpmd.c  |   3 +
 app/test-pmd/testpmd.h  |  10 ++
 app/test-pmd/txonly.c   |  13 ++-
 app/test/test_pmd_perf.c|   8 +-
 doc/guides/rel_notes/release_2_2.rst|   7 ++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  21 +++-
 drivers/net/ixgbe/ixgbe_rxtx.c  |  32 +-
 10 files changed, 357 insertions(+), 18 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCHv5 2/2] ixgbe: fix TX hang when RS distance exceeds HW limit

2015-11-09 Thread Konstantin Ananyev
One of the ways to reproduce the issue:

testpmd  -- -i --txqflags=0
testpmd> set fwd txonly
testpmd> set txpkts 64,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4
testpmd> set txsplit rand
testpmd> start

After some time TX on ixgbe queue will hang,
and all packet transmission on that queue will stop.

This bug was first reported and investigated by
Vlad Zolotarov :
"We can reproduce this issue when stressed the xmit path with a lot of highly
fragmented TCP frames (packets with up to 33 fragments with non-headers
fragments as small as 4 bytes) with all offload features enabled."

The root cause is that ixgbe_xmit_pkts() in some cases violates the HW rule
that the distance between TDs with RS bit set should not exceed 40 TDs.

From the latest 82599 spec update:
"When WTHRESH is set to zero, the software device driver should set the RS bit
in the Tx descriptors with the EOP bit set and at least once in the 40
descriptors."

The fix is to make sure that the distance between TDs with RS bit set
never exceeds the HW limit.

With that fix, a slight slowdown for the full-featured ixgbe TX path
might be observed (from our testing - up to 4%).

ixgbe simple TX path is unaffected by that patch.

Reported-by: Vlad Zolotarov 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 32 +++-
 1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 5561195..80cae5e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -572,7 +572,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct ixgbe_tx_entry *sw_ring;
struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
-   volatile union ixgbe_adv_tx_desc *txd;
+   volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
@@ -595,6 +595,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txr = txq->tx_ring;
tx_id   = txq->tx_tail;
	txe = &sw_ring[tx_id];
+   txp = NULL;

/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
@@ -638,6 +639,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 */
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

+   if (txp != NULL &&
+   nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+   /* set RS on the previous packet in the burst */
+   txp->read.cmd_type_len |=
+   rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
/*
 * The number of descriptors that must be allocated for a
 * packet is the number of segments of that packet, plus 1
@@ -840,10 +847,18 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

/* Update txq RS bit counters */
txq->nb_tx_used = 0;
-   }
+   txp = NULL;
+   } else
+   txp = txd;
+
txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
}
+
 end_of_tx:
+   /* set RS on last packet in the burst */
+   if (txp != NULL)
+   txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
rte_wmb();

/*
@@ -2019,9 +2034,16 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
-"of TX descriptors minus 2. (tx_rs_thresh=%u "
-"port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-(int)dev->data->port_id, (int)queue_idx);
+   "of TX descriptors minus 2. (tx_rs_thresh=%u "
+   "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+   (int)dev->data->port_id, (int)queue_idx);
+   return -(EINVAL);
+   }
+   if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+   PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than %u. "
+   "(tx_rs_thresh=%u port=%d queue=%d)",
+   DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
+   (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_free_thresh >= (nb_desc - 3)) {
-- 
1.8.5.3



[dpdk-dev] [PATCHv5 1/2] testpmd: add ability to split outgoing packets

2015-11-09 Thread Konstantin Ananyev
For CSUM forwarding mode, add the ability to copy & split an outgoing
packet into a new mbuf that consists of multiple segments.
For TXONLY and CSUM forwarding modes, add the ability to make the number
of segments in the outgoing packet vary on a per-packet basis.
The number of segments and the size of each segment are controlled by
the 'set txpkts' command.
The split policy is controlled by the 'set txsplit' command.
Possible values are: on | off | rand.
That allows increasing test coverage for the TX PMD codepaths.

Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c  |  57 +-
 app/test-pmd/config.c   |  61 +++
 app/test-pmd/csumonly.c | 163 +++-
 app/test-pmd/testpmd.c  |   3 +
 app/test-pmd/testpmd.h  |  10 ++
 app/test-pmd/txonly.c   |  13 ++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  11 +-
 7 files changed, 309 insertions(+), 9 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index c637198..a92fe0b 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -199,7 +199,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"clear port (info|stats|xstats|fdir|stat_qmap) 
(port_id|all)\n"
"Clear information for port_id, or all.\n\n"

-   "show config (rxtx|cores|fwd)\n"
+   "show config (rxtx|cores|fwd|txpkts)\n"
"Display the given configuration.\n\n"

"read rxd (port_id) (queue_id) (rxd_id)\n"
@@ -246,7 +246,12 @@ static void cmd_help_long_parsed(void *parsed_result,

"set txpkts (x[,y]*)\n"
"Set the length of each segment of TXONLY"
-   " packets.\n\n"
+   " and optionally CSUM packets.\n\n"
+
+   "set txsplit (off|on|rand)\n"
+   "Set the split policy for the TX packets."
+   " Right now only applicable for CSUM and TXONLY"
+   " modes\n\n"

"set corelist (x[,y]*)\n"
"Set the list of forwarding cores.\n\n"
@@ -2621,6 +2626,47 @@ cmdline_parse_inst_t cmd_set_txpkts = {
},
 };

+/* *** SET COPY AND SPLIT POLICY ON TX PACKETS *** */
+
+struct cmd_set_txsplit_result {
+   cmdline_fixed_string_t cmd_keyword;
+   cmdline_fixed_string_t txsplit;
+   cmdline_fixed_string_t mode;
+};
+
+static void
+cmd_set_txsplit_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+   struct cmd_set_txsplit_result *res;
+
+   res = parsed_result;
+   set_tx_pkt_split(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_set_txsplit_keyword =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+cmd_keyword, "set");
+cmdline_parse_token_string_t cmd_set_txsplit_name =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+txsplit, "txsplit");
+cmdline_parse_token_string_t cmd_set_txsplit_mode =
+   TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result,
+mode, NULL);
+
+cmdline_parse_inst_t cmd_set_txsplit = {
+   .f = cmd_set_txsplit_parsed,
+   .data = NULL,
+   .help_str = "set txsplit on|off|rand",
+   .tokens = {
+   (void *)&cmd_set_txsplit_keyword,
+   (void *)&cmd_set_txsplit_name,
+   (void *)&cmd_set_txsplit_mode,
+   NULL,
+   },
+};
+
 /* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
 struct cmd_rx_vlan_filter_all_result {
cmdline_fixed_string_t rx_vlan;
@@ -5233,6 +5279,8 @@ static void cmd_showcfg_parsed(void *parsed_result,
fwd_lcores_config_display();
else if (!strcmp(res->what, "fwd"))
fwd_config_display();
+   else if (!strcmp(res->what, "txpkts"))
+   show_tx_pkt_segments();
 }

 cmdline_parse_token_string_t cmd_showcfg_show =
@@ -5241,12 +5289,12 @@ cmdline_parse_token_string_t cmd_showcfg_port =
TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config");
 cmdline_parse_token_string_t cmd_showcfg_what =
TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what,
-"rxtx#cores#fwd");
+"rxtx#cores#fwd#txpkts");

 cmdline_parse_inst_t cmd_showcfg = {
.f = cmd_showcfg_parsed,
.data = NULL,
-   .help_str = "show config 

[dpdk-dev] [PATCHv5 0/2] ixgbe: fix TX hang when RS distance exceeds HW limit

2015-11-09 Thread Konstantin Ananyev
The first patch contains changes in testpmd that allow reproducing the issue.
The second patch is the actual fix.

Konstantin Ananyev (2):
  testpmd: add ability to split outgoing packets
  ixgbe: fix TX hang when RS distance exceeds HW limit

 app/test-pmd/cmdline.c  |  57 +-
 app/test-pmd/config.c   |  61 +++
 app/test-pmd/csumonly.c | 163 +++-
 app/test-pmd/testpmd.c  |   3 +
 app/test-pmd/testpmd.h  |  10 ++
 app/test-pmd/txonly.c   |  13 ++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  11 +-
 drivers/net/ixgbe/ixgbe_rxtx.c  |  32 +-
 8 files changed, 336 insertions(+), 14 deletions(-)

-- 
1.8.5.3



[dpdk-dev] [PATCH] app/testpmd: add 'show (rxq|txq)' command description into UG and cmdline help

2015-11-02 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c  | 3 +++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst | 7 +++
 2 files changed, 10 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0afdc96..ae9be81 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -199,6 +199,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"clear port (info|stats|xstats|fdir|stat_qmap) 
(port_id|all)\n"
"Clear information for port_id, or all.\n\n"

+   "show (rxq|txq) info (port_id) (queue_id)\n"
+   "Display information for configured RX/TX 
queue.\n\n"
+
"show config (rxtx|cores|fwd)\n"
"Display the given configuration.\n\n"

diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst 
b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2d0599d..ad9889d 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -205,6 +205,13 @@ For example::

testpmd> clear port stats all

+show (rxq|txq)
+~~~~~~~~~~~~~~
+
+Display information for a given port's RX/TX queue::
+
+   testpmd> show (rxq|txq) info (port_id) (queue_id)
+
 show config
 ~~~~~~~~~~~

-- 
1.8.5.3



[dpdk-dev] [PATCHv7 9/9] doc: release notes update for queue_info_get() and (rx|tx)_desc_limit

2015-10-27 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 doc/guides/rel_notes/release_2_2.rst | 13 +
 1 file changed, 13 insertions(+)

diff --git a/doc/guides/rel_notes/release_2_2.rst 
b/doc/guides/rel_notes/release_2_2.rst
index de6916e..aff6306 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -11,6 +11,11 @@ New Features

 * **Added vhost-user multiple queue support.**

+* **Add new API into rte_ethdev to retrieve RX/TX queue information.**
+
+  *  Add the ability for the upper layer to query RX/TX queue information.
+  *  Add into rte_eth_dev_info new fields to represent information about
+ RX/TX descriptors min/max/align numbers per queue for the device.

 Resolved Issues
 ---
@@ -98,6 +103,11 @@ API Changes

 * The devargs union field virtual is renamed to virt for C++ compatibility.

+* New functions rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get()
+  are introduced.
+
+* New fields rx_desc_lim and tx_desc_lim are added into rte_eth_dev_info
+  structure.

 ABI Changes
 ---
@@ -108,6 +118,9 @@ ABI Changes
 * The ethdev flow director entries for SCTP were changed.
   It was already done in 2.1 for CONFIG_RTE_NEXT_ABI.

+* New fields rx_desc_lim and tx_desc_lim were added into rte_eth_dev_info
+  structure.
+
 * The mbuf structure was changed to support unified packet type.
   It was already done in 2.1 for CONFIG_RTE_NEXT_ABI.

-- 
1.8.5.3
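
A minimal usage sketch of the new API described above (error handling
abbreviated; the printed fields match those used by testpmd):

#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_rxq(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;
	printf("mempool=%s nb_desc=%hu scattered=%u\n",
		(qinfo.mp != NULL) ? qinfo.mp->name : "NULL",
		qinfo.nb_desc, qinfo.scattered_rx);
}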



[dpdk-dev] [PATCHv7 8/9] testpmd: add new command to display RX/TX queue information

2015-10-27 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 77 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 127 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0f8f48f..ea2b8a8 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5305,6 +5305,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8910,6 +8957,7 @@ cmdline_parse_ctx_t main_ctx[] = {
	(cmdline_parse_inst_t *)&cmd_help_long,
	(cmdline_parse_inst_t *)&cmd_quit,
	(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
	(cmdline_parse_inst_t *)&cmd_showportall,
	(cmdline_parse_inst_t *)&cmd_showcfg,
	(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cf2aa6e..aad2ab6 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,69 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rxq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve information for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_txq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve information for port: %hhu, "
+   

[dpdk-dev] [PATCHv7 7/9] vmxnet3: add HW specific desc_lim data into dev_info

2015-10-27 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c 
b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a70be5c..3745b7d 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -677,6 +677,18 @@ vmxnet3_dev_info_get(__attribute__((unused))struct 
rte_eth_dev *dev, struct rte_
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = VMXNET3_RX_RING_MAX_SIZE,
+   .nb_min = VMXNET3_DEF_RX_RING_SIZE,
+   .nb_align = 1,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = VMXNET3_TX_RING_MAX_SIZE,
+   .nb_min = VMXNET3_DEF_TX_RING_SIZE,
+   .nb_align = 1,
+   };
 }

 /* return 0 means link status changed, -1 means not changed */
-- 
1.8.5.3
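
The point of advertising these limits is that callers can validate a
requested ring size before queue setup; a hedged sketch of such a
check (the helper name is illustrative):

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* return 0 if nb_desc satisfies the advertised descriptor limits */
static int
check_nb_desc(uint16_t nb_desc, const struct rte_eth_desc_lim *lim)
{
	if (nb_desc > lim->nb_max || nb_desc < lim->nb_min ||
			(nb_desc % lim->nb_align) != 0)
		return -EINVAL;
	return 0;
}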



[dpdk-dev] [PATCHv7 4/9] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-27 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/e1000/e1000_ethdev.h | 36 
 drivers/net/e1000/em_ethdev.c| 14 
 drivers/net/e1000/em_rxtx.c  | 71 +++-
 drivers/net/e1000/igb_ethdev.c   | 22 +
 drivers/net/e1000/igb_rxtx.c | 66 -
 5 files changed, 156 insertions(+), 53 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -108,6 +108,30 @@
ETH_RSS_IPV6_TCP_EX | \
ETH_RSS_IPV6_UDP_EX)

+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC 32
+#define E1000_MAX_RING_DESC 4096
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define E1000_ALIGN 128
+
+#define IGB_RXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);

 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 912f5dd..0cbc228 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove  = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+   .rxq_info_get = em_rxq_info_get,
+   .txq_info_get = em_txq_info_get,
 };

 /**
@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)

dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = E1000_MAX_RING_DESC,
+   .nb_min = E1000_MIN_RING_DESC,
+   .nb_align = EM_RXD_ALIGN,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = E1000_MAX_RING_DESC,
+   .nb_min = E1000_MIN_RING_DESC,
+   .nb_align = EM_TXD_ALIGN,
+   };
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return (nb_rx);
 }

-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define EM_MAX_BUF_SIZE 16384
 #define EM_RCTL_FLXBUF_STEP 1024

@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
/*
 * Validate number of transmit descriptors.
 * It must not exceed hardware maximum, and must
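
To make the 128-byte rule above concrete: assuming the usual 16-byte
e1000 descriptor layouts (sizeof(struct e1000_rx_desc) == 16), the
alignment constants work out to E1000_ALIGN / 16 = 8, i.e. valid ring
sizes are the multiples of 8 between 32 and 4096. A stand-alone sketch
checking that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned align_bytes = 128; /* RDLEN/TDLEN granularity */
	const unsigned desc_size = 16;    /* assumed descriptor size */
	const unsigned nb_align = align_bytes / desc_size; /* = 8 */
	unsigned nb;

	for (nb = 32; nb <= 4096; nb++)
		if (nb % nb_align == 0 && (nb * desc_size) % align_bytes != 0)
			printf("unexpected: %u\n", nb); /* never fires */

	printf("nb_align = %u descriptors\n", nb_align);
	return 0;
}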

[dpdk-dev] [PATCHv7 3/9] ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-27 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 23 ++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 
 drivers/net/ixgbe/ixgbe_rxtx.c   | 68 +---
 drivers/net/ixgbe/ixgbe_rxtx.h   | 21 +
 4 files changed, 93 insertions(+), 25 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2918c..4769bb0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {

 };

+static const struct rte_eth_desc_lim rx_desc_lim = {
+   .nb_max = IXGBE_MAX_RING_DESC,
+   .nb_min = IXGBE_MIN_RING_DESC,
+   .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+   .nb_max = IXGBE_MAX_RING_DESC,
+   .nb_min = IXGBE_MIN_RING_DESC,
+   .nb_align = IXGBE_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_configure= ixgbe_dev_configure,
.dev_start= ixgbe_dev_start,
@@ -456,6 +468,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.timesync_enable  = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -494,6 +508,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length   = ixgbevf_get_reg_length,
.get_reg  = ixgbevf_get_regs,
@@ -2396,6 +2412,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = rx_desc_lim;
+   dev_info->tx_desc_lim = tx_desc_lim;
+
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
@@ -2449,6 +2469,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = rx_desc_lim;
+   dev_info->tx_desc_lim = tx_desc_lim;
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..d16f476 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -351,6 +351,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..ba08588 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1821,25 +1821,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  **/

 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- *  (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
  * to the old one.
@@ -2007,9 +1988,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 * It mus

[dpdk-dev] [PATCHv7 2/9] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-27 Thread Konstantin Ananyev
This patch assumes that the patch:
i40e: fix wrong alignment for the number of HW descriptors
already applied.

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c| 14 ++
 drivers/net/i40e/i40e_ethdev.h|  5 +
 drivers/net/i40e/i40e_ethdev_vf.c | 12 
 drivers/net/i40e/i40e_rxtx.c  | 37 +
 4 files changed, 68 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..cbc1985 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -283,6 +283,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rxq_info_get = i40e_rxq_info_get,
+   .txq_info_get = i40e_txq_info_get,
.mirror_rule_set  = i40e_mirror_rule_set,
.mirror_rule_reset= i40e_mirror_rule_reset,
.timesync_enable  = i40e_timesync_enable,
@@ -1674,6 +1676,18 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOOFFLOADS,
};

+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
if (pf->flags & I40E_FLAG_VMDQ) {
dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
dev_info->vmdq_queue_base = dev_info->max_rx_queues;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..4748392 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -502,6 +502,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..5dad12d 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1756,6 +1756,18 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
 }

 static void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 260e580..fa1451e 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3063,3 +3063,40 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+   qinfo->nb_desc = rxq->nb_rx_desc;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.5.3
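
A side note on the driver-side contract the i40e patch above follows:
the ethdev layer zeroes the qinfo structure before dispatching to the
PMD (see the memset in rte_eth_rx_queue_info_get in the ethdev patch
of this series, archived below), so a callback only fills in the
fields it knows about. A hypothetical minimal callback, where
struct mydrv_rx_queue is an assumed driver-private type:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Assumed driver-private RX queue layout for this sketch. */
struct mydrv_rx_queue {
	struct rte_mempool *mp;
	uint16_t nb_desc;
};

static void
mydrv_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct mydrv_rx_queue *rxq = dev->data->rx_queues[queue_id];

	/* qinfo arrives zeroed from the ethdev layer. */
	qinfo->mp = rxq->mp;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->scattered_rx = dev->data->scattered_rx;
}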



[dpdk-dev] [PATCHv7 0/9] ethdev: add new API to retrieve RX/TX queue information

2015-10-27 Thread Konstantin Ananyev
Add the ability for the upper layer to query:
1) configured RX/TX queue information.
2) information about RX/TX descriptors min/max/align
numbers per queue for the device.
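
As a minimal sketch of how an application might consume the new
descriptor limits before queue setup (illustration only, not part of
the series; port_id and the requested size are assumptions):

#include <rte_ethdev.h>

/* Fit a requested RX ring size to the limits the device advertises in
 * dev_info.rx_desc_lim; assumes the DPDK 2.2-era API where port ids
 * are uint8_t and rte_eth_dev_info_get() returns void. */
static uint16_t
fit_rx_ring_size(uint8_t port_id, uint16_t requested)
{
	struct rte_eth_dev_info dev_info;
	const struct rte_eth_desc_lim *lim;

	rte_eth_dev_info_get(port_id, &dev_info);
	lim = &dev_info.rx_desc_lim;

	if (requested > lim->nb_max)
		requested = lim->nb_max;
	if (lim->nb_align > 1)
		requested -= requested % lim->nb_align; /* round down */
	if (requested < lim->nb_min)
		requested = lim->nb_min; /* assumes nb_min itself is aligned */
	return requested;
}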

v2 changes:
- Add formal check for the qinfo input parameter.
- As suggested rename 'rx_qinfo/tx_qinfo' to 'rxq_info/txq_info'

v3 changes:
- Updated rte_ether_version.map
- Merged with latest changes

v4 changes:
- rte_ether_version.map: move new functions into DPDK_2.1 sub-space.

v5 changes:
- addressed previous code-review comments
- rte_ether_version.map: move new functions into DPDK_2.2 sub-space.
- added new fields into rte_eth_dev_info

v6 changes:
- respin to comply with latest dpdk.org
- update release_notes, section "New Features"

v7 changes:
- update release notes, sections: "API Changes", "ABI Changes"

Konstantin Ananyev (9):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
  ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
  e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
  fm10k: add HW specific desc_lim data into dev_info
  cxgbe: add HW specific desc_lim data into dev_info
  vmxnet3: add HW specific desc_lim data into dev_info
  testpmd: add new command to display RX/TX queue information
  doc: release notes update for queue_info_get() and (rx|tx)_desc_limit

 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 77 ++
 app/test-pmd/testpmd.h |  2 +
 doc/guides/rel_notes/release_2_2.rst   | 13 ++
 drivers/net/cxgbe/cxgbe_ethdev.c   |  9 
 drivers/net/e1000/e1000_ethdev.h   | 36 ++
 drivers/net/e1000/em_ethdev.c  | 14 ++
 drivers/net/e1000/em_rxtx.c| 71 
 drivers/net/e1000/igb_ethdev.c | 22 +
 drivers/net/e1000/igb_rxtx.c   | 66 +-
 drivers/net/fm10k/fm10k_ethdev.c   | 11 +
 drivers/net/i40e/i40e_ethdev.c | 14 ++
 drivers/net/i40e/i40e_ethdev.h |  5 ++
 drivers/net/i40e/i40e_ethdev_vf.c  | 12 +
 drivers/net/i40e/i40e_rxtx.c   | 37 +++
 drivers/net/ixgbe/ixgbe_ethdev.c   | 23 +
 drivers/net/ixgbe/ixgbe_ethdev.h   |  6 +++
 drivers/net/ixgbe/ixgbe_rxtx.c | 68 +--
 drivers/net/ixgbe/ixgbe_rxtx.h | 21 +
 drivers/net/vmxnet3/vmxnet3_ethdev.c   | 12 +
 lib/librte_ether/rte_ethdev.c  | 68 +++
 lib/librte_ether/rte_ethdev.h  | 85 +-
 lib/librte_ether/rte_ether_version.map |  8 
 23 files changed, 648 insertions(+), 80 deletions(-)

-- 
1.8.5.3



[dpdk-dev] [PATCHv6 9/9] doc: release notes update for queue_info_get()

2015-10-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 doc/guides/rel_notes/release_2_2.rst | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
index 4f75cff..33ea399 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -9,6 +9,11 @@ New Features
   *  Added support for Jumbo Frames.
   *  Optimize forwarding performance for Chelsio T5 40GbE cards.

+* **Add new API into rte_ethdev to retrieve RX/TX queue information.**
+
+  *  Add the ability for the upper layer to query RX/TX queue information.
+  *  Add into rte_eth_dev_info new fields to represent information about
+ RX/TX descriptors min/max/align numbers per queue for the device.

 Resolved Issues
 ---
@@ -94,6 +99,8 @@ API Changes
 * The deprecated ring PMD functions are removed:
   rte_eth_ring_pair_create() and rte_eth_ring_pair_attach().

+* New functions rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get()
+  are introduced.

 ABI Changes
 ---
-- 
1.8.5.3



[dpdk-dev] [PATCHv6 8/9] testpmd: add new command to display RX/TX queue information

2015-10-22 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 77 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 127 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0f8f48f..ea2b8a8 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5305,6 +5305,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8910,6 +8957,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cf2aa6e..aad2ab6 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,69 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rxq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve information for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_txq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed t

[dpdk-dev] [PATCHv6 7/9] vmxnet3: add HW specific desc_lim data into dev_info

2015-10-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a70be5c..3745b7d 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -677,6 +677,18 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = VMXNET3_RX_RING_MAX_SIZE,
+   .nb_min = VMXNET3_DEF_RX_RING_SIZE,
+   .nb_align = 1,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = VMXNET3_TX_RING_MAX_SIZE,
+   .nb_min = VMXNET3_DEF_TX_RING_SIZE,
+   .nb_align = 1,
+   };
 }

 /* return 0 means link status changed, -1 means not changed */
-- 
1.8.5.3



[dpdk-dev] [PATCHv6 6/9] cxgbe: add HW specific desc_lim data into dev_info

2015-10-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/cxgbe/cxgbe_ethdev.c | 9 +
 1 file changed, 9 insertions(+)

diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index a8e057b..920e071 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -141,6 +141,12 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
struct adapter *adapter = pi->adapter;
int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

+   static const struct rte_eth_desc_lim cxgbe_desc_lim = {
+   .nb_max = CXGBE_MAX_RING_DESC_SIZE,
+   .nb_min = CXGBE_MIN_RING_DESC_SIZE,
+   .nb_align = 1,
+   };
+
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
device_info->max_rx_queues = max_queues;
@@ -162,6 +168,9 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
   DEV_TX_OFFLOAD_TCP_TSO;

device_info->reta_size = pi->rss_size;
+
+   device_info->rx_desc_lim = cxgbe_desc_lim;
+   device_info->tx_desc_lim = cxgbe_desc_lim;
 }

 static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
-- 
1.8.5.3



[dpdk-dev] [PATCHv6 5/9] fm10k: add HW specific desc_lim data into dev_info

2015-10-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/fm10k/fm10k_ethdev.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index a69c990..9588dab 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -964,6 +964,17 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
ETH_TXQ_FLAGS_NOOFFLOADS,
};

+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = FM10K_MAX_RX_DESC,
+   .nb_min = FM10K_MIN_RX_DESC,
+   .nb_align = FM10K_MULT_RX_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = FM10K_MAX_TX_DESC,
+   .nb_min = FM10K_MIN_TX_DESC,
+   .nb_align = FM10K_MULT_TX_DESC,
+   };
 }

 static int
-- 
1.8.5.3



[dpdk-dev] [PATCHv6 4/9] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/e1000/e1000_ethdev.h | 36 
 drivers/net/e1000/em_ethdev.c| 14 
 drivers/net/e1000/em_rxtx.c  | 71 +++-
 drivers/net/e1000/igb_ethdev.c   | 22 +
 drivers/net/e1000/igb_rxtx.c | 66 -
 5 files changed, 156 insertions(+), 53 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -108,6 +108,30 @@
ETH_RSS_IPV6_TCP_EX | \
ETH_RSS_IPV6_UDP_EX)

+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC 32
+#define E1000_MAX_RING_DESC 4096
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define E1000_ALIGN 128
+
+#define IGB_RXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);

 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 912f5dd..0cbc228 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove  = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+   .rxq_info_get = em_rxq_info_get,
+   .txq_info_get = em_txq_info_get,
 };

 /**
@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)

dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = E1000_MAX_RING_DESC,
+   .nb_min = E1000_MIN_RING_DESC,
+   .nb_align = EM_RXD_ALIGN,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = E1000_MAX_RING_DESC,
+   .nb_min = E1000_MIN_RING_DESC,
+   .nb_align = EM_TXD_ALIGN,
+   };
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return (nb_rx);
 }

-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define EM_MAX_BUF_SIZE 16384
 #define EM_RCTL_FLXBUF_STEP 1024

@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
/*
 * Validate number of transmit descriptors.
 * It must not exceed hardware maximum, and must

[dpdk-dev] [PATCHv6 3/9] ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 23 ++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 
 drivers/net/ixgbe/ixgbe_rxtx.c   | 68 +---
 drivers/net/ixgbe/ixgbe_rxtx.h   | 21 +
 4 files changed, 93 insertions(+), 25 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2918c..4769bb0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {

 };

+static const struct rte_eth_desc_lim rx_desc_lim = {
+   .nb_max = IXGBE_MAX_RING_DESC,
+   .nb_min = IXGBE_MIN_RING_DESC,
+   .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+   .nb_max = IXGBE_MAX_RING_DESC,
+   .nb_min = IXGBE_MIN_RING_DESC,
+   .nb_align = IXGBE_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_configure= ixgbe_dev_configure,
.dev_start= ixgbe_dev_start,
@@ -456,6 +468,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.timesync_enable  = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -494,6 +508,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length   = ixgbevf_get_reg_length,
.get_reg  = ixgbevf_get_regs,
@@ -2396,6 +2412,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = rx_desc_lim;
+   dev_info->tx_desc_lim = tx_desc_lim;
+
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
@@ -2449,6 +2469,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = rx_desc_lim;
+   dev_info->tx_desc_lim = tx_desc_lim;
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..d16f476 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -351,6 +351,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..ba08588 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1821,25 +1821,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  **/

 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- *  (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
  * to the old one.
@@ -2007,9 +1988,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 * It mus

[dpdk-dev] [PATCHv6 2/9] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-22 Thread Konstantin Ananyev
This patch assumes that the patch:
i40e: fix wrong alignment for the number of HW descriptors
already applied.

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c| 14 ++
 drivers/net/i40e/i40e_ethdev.h|  5 +
 drivers/net/i40e/i40e_ethdev_vf.c | 12 
 drivers/net/i40e/i40e_rxtx.c  | 37 +
 4 files changed, 68 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..cbc1985 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -283,6 +283,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rxq_info_get = i40e_rxq_info_get,
+   .txq_info_get = i40e_txq_info_get,
.mirror_rule_set  = i40e_mirror_rule_set,
.mirror_rule_reset= i40e_mirror_rule_reset,
.timesync_enable  = i40e_timesync_enable,
@@ -1674,6 +1676,18 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOOFFLOADS,
};

+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
if (pf->flags & I40E_FLAG_VMDQ) {
dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
dev_info->vmdq_queue_base = dev_info->max_rx_queues;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..4748392 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -502,6 +502,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..5dad12d 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1756,6 +1756,18 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
 }

 static void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 260e580..fa1451e 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3063,3 +3063,40 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+   qinfo->nb_desc = rxq->nb_rx_desc;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.5.3



[dpdk-dev] [PATCHv6 1/9] ethdev: add new API to retrieve RX/TX queue information

2015-10-22 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

Add the ability for the upper layer to query RX/TX queue information.
Add into rte_eth_dev_info new fields to represent information about
RX/TX descriptors min/max/align numbers per queue for the device.

Add new structures:
struct rte_eth_rxq_info
struct rte_eth_txq_info

new functions:
rte_eth_rx_queue_info_get
rte_eth_tx_queue_info_get

into the rte_ethdev API.

Left extra free space in the queue info structures,
so extra fields could be added later without ABI breakage.

Add new fields:
rx_desc_lim
tx_desc_lim
into rte_eth_dev_info.
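
For illustration (a sketch, not part of the patch): a minimal caller
of the new RX-side function, assuming an initialized port with at
least one configured RX queue:

#include <stdio.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>

static void
dump_rxq0(uint8_t port_id)
{
	struct rte_eth_rxq_info qinfo;
	int rc = rte_eth_rx_queue_info_get(port_id, 0, &qinfo);

	if (rc != 0) {
		/* -ENODEV: bad port; -EINVAL: bad queue or NULL qinfo;
		 * -ENOTSUP: PMD does not implement rxq_info_get. */
		printf("rxq info unavailable: %d\n", rc);
		return;
	}
	printf("nb_desc=%u scattered=%u mempool=%s\n",
		(unsigned)qinfo.nb_desc, (unsigned)qinfo.scattered_rx,
		qinfo.mp != NULL ? qinfo.mp->name : "none");
}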

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.c  | 68 +++
 lib/librte_ether/rte_ethdev.h  | 85 +-
 lib/librte_ether/rte_ether_version.map |  8 
 3 files changed, 159 insertions(+), 2 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f593f6e..d18ecb5 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1447,6 +1447,19 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
return -EINVAL;
}

+   if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
+   nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
+   nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
+
+   PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
+   "should be: <= %hu, = %hu, and a product of %hu\n",
+   nb_rx_desc,
+   dev_info.rx_desc_lim.nb_max,
+   dev_info.rx_desc_lim.nb_min,
+   dev_info.rx_desc_lim.nb_align);
+   return -EINVAL;
+   }
+
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;

@@ -1786,11 +1799,18 @@ void
 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
 {
struct rte_eth_dev *dev;
+   const struct rte_eth_desc_lim lim = {
+   .nb_max = UINT16_MAX,
+   .nb_min = 0,
+   .nb_align = 1,
+   };

VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];

memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+   dev_info->rx_desc_lim = lim;
+   dev_info->tx_desc_lim = lim;

FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
@@ -3221,6 +3241,54 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
 }

 int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_rx_queues) {
+   PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_tx_queues) {
+   PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 struct ether_addr *mc_addr_set,
 uint32_t nb_mc_addr)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 8a8c82b..4d7b6f2 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -653,6 +653,15 @@ struct rte_eth_txconf {
 };

 /**
+ * A structure contains information about HW descriptor ring limitations.
+ */
+struct rte_eth_desc_lim {
+   uint16_t nb_max;   /**< Max allowed number of descriptors. */
+   uint16_t nb_min;   /**< Min allowed number of descriptors. */
+   uint16_t nb_align; /**< Number of descriptors should be aligned to. */
+};
+
+/**
  * This enum indicates the flow control mode
  */
 enum rte_eth_fc_mode {
@@ -837,6 +846,8 @@ struct rte_eth_dev_info {
uint16_t vmdq_queue_base; /**< First queue ID for VMDQ pools. */
uint16_t vmdq_queue_num;  /**< Q

[dpdk-dev] [PATCHv6 0/9] ethdev: add new API to retrieve RX/TX queue information

2015-10-22 Thread Konstantin Ananyev
Add the ability for the upper layer to query:
1) configured RX/TX queue information.
2) information about RX/TX descriptors min/max/align
numbers per queue for the device.
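
A matching TX-side usage sketch (assumptions: a configured port and
queue, DPDK 2.2-era uint8_t port ids):

#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_txq(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	printf("txq %u: nb_desc=%u pthresh=%u hthresh=%u wthresh=%u\n",
		(unsigned)queue_id, (unsigned)qinfo.nb_desc,
		(unsigned)qinfo.conf.tx_thresh.pthresh,
		(unsigned)qinfo.conf.tx_thresh.hthresh,
		(unsigned)qinfo.conf.tx_thresh.wthresh);
}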

v2 changes:
- Add formal check for the qinfo input parameter.
- As suggested rename 'rx_qinfo/tx_qinfo' to 'rxq_info/txq_info'

v3 changes:
- Updated rte_ether_version.map
- Merged with latest changes

v4 changes:
- rte_ether_version.map: move new functions into DPDK_2.1 sub-space.

v5 changes:
- addressed previous code-review comments
- rte_ether_version.map: move new functions into DPDK_2.2 sub-space.
- added new fields into rte_eth_dev_info

v6 changes:
- respin to comply with latest dpdk.org
- update release_notes

Konstantin Ananyev (9):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
  ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
  e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
  testpmd: add new command to display RX/TX queue information
  fm10k: add HW specific desc_lim data into dev_info
  cxgbe: add HW specific desc_lim data into dev_info
  vmxnet3: add HW specific desc_lim data into dev_info
  doc: release notes update for queue_info_get()

 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 77 ++
 app/test-pmd/testpmd.h |  2 +
 doc/guides/rel_notes/release_2_2.rst   |  7 +++
 drivers/net/cxgbe/cxgbe_ethdev.c   |  9 
 drivers/net/e1000/e1000_ethdev.h   | 36 ++
 drivers/net/e1000/em_ethdev.c  | 14 ++
 drivers/net/e1000/em_rxtx.c| 71 
 drivers/net/e1000/igb_ethdev.c | 22 +
 drivers/net/e1000/igb_rxtx.c   | 66 +-
 drivers/net/fm10k/fm10k_ethdev.c   | 11 +
 drivers/net/i40e/i40e_ethdev.c | 14 ++
 drivers/net/i40e/i40e_ethdev.h |  5 ++
 drivers/net/i40e/i40e_ethdev_vf.c  | 12 +
 drivers/net/i40e/i40e_rxtx.c   | 37 +++
 drivers/net/ixgbe/ixgbe_ethdev.c   | 23 +
 drivers/net/ixgbe/ixgbe_ethdev.h   |  6 +++
 drivers/net/ixgbe/ixgbe_rxtx.c | 68 +--
 drivers/net/ixgbe/ixgbe_rxtx.h | 21 +
 drivers/net/vmxnet3/vmxnet3_ethdev.c   | 12 +
 lib/librte_ether/rte_ethdev.c  | 68 +++
 lib/librte_ether/rte_ethdev.h  | 85 +-
 lib/librte_ether/rte_ether_version.map |  8 
 23 files changed, 642 insertions(+), 80 deletions(-)

-- 
1.8.5.3



[dpdk-dev] [PATCHv5 8/8] testpmd: add new command to display RX/TX queue information

2015-10-01 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 77 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 127 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0f8f48f..ea2b8a8 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5305,6 +5305,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8910,6 +8957,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index cf2aa6e..aad2ab6 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,69 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rxq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve information for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_txq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed t

[dpdk-dev] [PATCHv5 7/8] vmxnet3: add HW specific desc_lim data into dev_info

2015-10-01 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 12 
 1 file changed, 12 insertions(+)

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index a70be5c..3745b7d 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -677,6 +677,18 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = VMXNET3_RX_RING_MAX_SIZE,
+   .nb_min = VMXNET3_DEF_RX_RING_SIZE,
+   .nb_align = 1,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = VMXNET3_TX_RING_MAX_SIZE,
+   .nb_min = VMXNET3_DEF_TX_RING_SIZE,
+   .nb_align = 1,
+   };
 }

 /* return 0 means link status changed, -1 means not changed */
-- 
1.8.3.1



[dpdk-dev] [PATCHv5 5/8] fm10k: add HW specific desc_lim data into dev_info

2015-10-01 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/fm10k/fm10k_ethdev.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index a69c990..9588dab 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -964,6 +964,17 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
ETH_TXQ_FLAGS_NOOFFLOADS,
};

+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = FM10K_MAX_RX_DESC,
+   .nb_min = FM10K_MIN_RX_DESC,
+   .nb_align = FM10K_MULT_RX_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = FM10K_MAX_TX_DESC,
+   .nb_min = FM10K_MIN_TX_DESC,
+   .nb_align = FM10K_MULT_TX_DESC,
+   };
 }

 static int
-- 
1.8.3.1



[dpdk-dev] [PATCHv5 4/8] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-01 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/e1000/e1000_ethdev.h | 36 
 drivers/net/e1000/em_ethdev.c| 14 
 drivers/net/e1000/em_rxtx.c  | 71 +++-
 drivers/net/e1000/igb_ethdev.c   | 22 +
 drivers/net/e1000/igb_rxtx.c | 66 -
 5 files changed, 156 insertions(+), 53 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -108,6 +108,30 @@
ETH_RSS_IPV6_TCP_EX | \
ETH_RSS_IPV6_UDP_EX)

+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC 32
+#define E1000_MAX_RING_DESC 4096
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define E1000_ALIGN 128
+
+#define IGB_RXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);

 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 912f5dd..0cbc228 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove  = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+   .rxq_info_get = em_rxq_info_get,
+   .txq_info_get = em_txq_info_get,
 };

 /**
@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)

dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = E1000_MAX_RING_DESC,
+   .nb_min = E1000_MIN_RING_DESC,
+   .nb_align = EM_RXD_ALIGN,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = E1000_MAX_RING_DESC,
+   .nb_min = E1000_MIN_RING_DESC,
+   .nb_align = EM_TXD_ALIGN,
+   };
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return (nb_rx);
 }

-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define EM_MAX_BUF_SIZE 16384
 #define EM_RCTL_FLXBUF_STEP 1024

@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
/*
 * Validate number of transmit des

[dpdk-dev] [PATCHv5 3/8] ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-01 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 23 ++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 
 drivers/net/ixgbe/ixgbe_rxtx.c   | 68 +---
 drivers/net/ixgbe/ixgbe_rxtx.h   | 21 +
 4 files changed, 93 insertions(+), 25 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2918c..4769bb0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {

 };

+static const struct rte_eth_desc_lim rx_desc_lim = {
+   .nb_max = IXGBE_MAX_RING_DESC,
+   .nb_min = IXGBE_MIN_RING_DESC,
+   .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+   .nb_max = IXGBE_MAX_RING_DESC,
+   .nb_min = IXGBE_MIN_RING_DESC,
+   .nb_align = IXGBE_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_configure= ixgbe_dev_configure,
.dev_start= ixgbe_dev_start,
@@ -456,6 +468,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.timesync_enable  = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -494,6 +508,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length   = ixgbevf_get_reg_length,
.get_reg  = ixgbevf_get_regs,
@@ -2396,6 +2412,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = rx_desc_lim;
+   dev_info->tx_desc_lim = tx_desc_lim;
+
dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
@@ -2449,6 +2469,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = rx_desc_lim;
+   dev_info->tx_desc_lim = tx_desc_lim;
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..d16f476 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -351,6 +351,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..ba08588 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1821,25 +1821,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct 
rte_mbuf **rx_pkts,
  **/

 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 
128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- *  (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
  * to the old one.
@@ -2007,9 +1988

[dpdk-dev] [PATCHv5 2/8] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

2015-10-01 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

This patch assumes that the patch:
i40e: fix wrong alignment for the number of HW descriptors
has already been applied.

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c| 14 ++
 drivers/net/i40e/i40e_ethdev.h|  5 +
 drivers/net/i40e/i40e_ethdev_vf.c | 12 
 drivers/net/i40e/i40e_rxtx.c  | 37 +
 4 files changed, 68 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..cbc1985 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -283,6 +283,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rxq_info_get = i40e_rxq_info_get,
+   .txq_info_get = i40e_txq_info_get,
.mirror_rule_set  = i40e_mirror_rule_set,
.mirror_rule_reset= i40e_mirror_rule_reset,
.timesync_enable  = i40e_timesync_enable,
@@ -1674,6 +1676,18 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOOFFLOADS,
};

+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
if (pf->flags & I40E_FLAG_VMDQ) {
dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
dev_info->vmdq_queue_base = dev_info->max_rx_queues;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..4748392 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -502,6 +502,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c 
b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..5dad12d 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1756,6 +1756,18 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = I40E_MAX_RING_DESC,
+   .nb_min = I40E_MIN_RING_DESC,
+   .nb_align = I40E_ALIGN_RING_DESC,
+   };
 }

 static void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 7a30c44..78b2f9b 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3065,3 +3065,40 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+   qinfo->nb_desc = rxq->nb_rx_desc;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.3.1



[dpdk-dev] [PATCHv5 1/8] ethdev: add new API to retrieve RX/TX queue information

2015-10-01 Thread Konstantin Ananyev
From: "Ananyev, Konstantin" <konstantin.anan...@intel.com>

Add the ability for the upper layer to query RX/TX queue information.
Add new fields into rte_eth_dev_info to represent information about
RX/TX descriptor min/max/align numbers per queue for the device.

Add new structures:
struct rte_eth_rxq_info
struct rte_eth_txq_info

new functions:
rte_eth_rx_queue_info_get
rte_eth_tx_queue_info_get

into the rte_ethdev API.

Left extra free space in the queue info structures,
so extra fields could be added later without ABI breakage.

Add new fields:
rx_desc_lim
tx_desc_lim
into rte_eth_dev_info.
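
For illustration, a minimal usage sketch of the new API (the helper name
show_rxq0_info and the chosen port/queue ids are illustrative only):

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_rxq0_info(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxq_info qinfo;

	/* per-device descriptor limits, reported via the new fields */
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("RXD limits: min=%u max=%u align=%u\n",
		dev_info.rx_desc_lim.nb_min,
		dev_info.rx_desc_lim.nb_max,
		dev_info.rx_desc_lim.nb_align);

	/* per-queue runtime information for RX queue 0 */
	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
		printf("rxq 0: nb_desc=%u scattered=%u mempool=%s\n",
			qinfo.nb_desc, qinfo.scattered_rx,
			qinfo.mp == NULL ? "NULL" : qinfo.mp->name);
}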

v2 changes:
- Add formal check for the qinfo input parameter.
- As suggested rename 'rx_qinfo/tx_qinfo' to 'rxq_info/txq_info'

v3 changes:
- Updated rte_ether_version.map
- Merged with latest changes

v4 changes:
- rte_ether_version.map: move new functions into DPDK_2.1 sub-space.

v5 changes:
- addressed previous code-review comments
- rte_ether_version.map: move new functions into DPDK_2.2 sub-space.
- added new fields into rte_eth_dev_info

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.c  | 68 +++
 lib/librte_ether/rte_ethdev.h  | 85 +-
 lib/librte_ether/rte_ether_version.map |  8 
 3 files changed, 159 insertions(+), 2 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index b309309..66bd074 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1447,6 +1447,19 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t 
rx_queue_id,
return -EINVAL;
}

+   if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
+   nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
+   nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
+
+   PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
+   "should be: <= %hu, = %hu, and a product of %hu\n",
+   nb_rx_desc,
+   dev_info.rx_desc_lim.nb_max,
+   dev_info.rx_desc_lim.nb_min,
+   dev_info.rx_desc_lim.nb_align);
+   return -EINVAL;
+   }
+
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;

@@ -1786,11 +1799,18 @@ void
 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
 {
struct rte_eth_dev *dev;
+   const struct rte_eth_desc_lim lim = {
+   .nb_max = UINT16_MAX,
+   .nb_min = 0,
+   .nb_align = 1,
+   };

VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];

memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+   dev_info->rx_desc_lim = lim;
+   dev_info->tx_desc_lim = lim;

FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
@@ -3449,6 +3469,54 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t 
queue_id,
 }

 int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_rx_queues) {
+   PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_tx_queues) {
+   PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 struct ether_addr *mc_addr_set,
 uint32_t nb_mc_addr)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index fa06554..2cb9bc2 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -653,6 +653,15 @@ struct rte_eth_txconf {
 };

 /**
+ * A structure contains information about HW descriptor ring limitations.
+ */
+struct rte_eth_desc_lim {
+   uint16_t nb_max;   /**< Max allowed number of descriptors. */

[dpdk-dev] [PATCHv5 0/8] ethdev: add new API to retrieve RX/TX queue information

2015-10-01 Thread Konstantin Ananyev
Add the ability for the upper layer to query:
1) configured RX/TX queue information.
2) information about RX/TX descriptors min/max/align
numbers per queue for the device.

Right now 1) is implemented for:
ixgbe, i40e, e1000 PMDs.

Ananyev, Konstantin (5):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rxq|txq)_info_get
  ixgbe: add support for eth_(rxq|txq)_info_get
  e1000: add support for eth_(rxq|txq)_info_get
  testpmd: add new command to display RX/TX queue information

Konstantin Ananyev (3):
  fm10k: add HW specific desc_lim data into dev_info
  cxgbe: add HW specific desc_lim data into dev_info
  vmxnet3: add HW specific desc_lim data into dev_info

 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 77 ++
 app/test-pmd/testpmd.h |  2 +
 drivers/net/cxgbe/cxgbe_ethdev.c   |  9 
 drivers/net/e1000/e1000_ethdev.h   | 36 ++
 drivers/net/e1000/em_ethdev.c  | 14 ++
 drivers/net/e1000/em_rxtx.c| 71 
 drivers/net/e1000/igb_ethdev.c | 22 +
 drivers/net/e1000/igb_rxtx.c   | 66 +-
 drivers/net/fm10k/fm10k_ethdev.c   | 11 +
 drivers/net/i40e/i40e_ethdev.c | 14 ++
 drivers/net/i40e/i40e_ethdev.h |  5 ++
 drivers/net/i40e/i40e_ethdev_vf.c  | 12 +
 drivers/net/i40e/i40e_rxtx.c   | 37 +++
 drivers/net/ixgbe/ixgbe_ethdev.c   | 23 +
 drivers/net/ixgbe/ixgbe_ethdev.h   |  6 +++
 drivers/net/ixgbe/ixgbe_rxtx.c | 68 +--
 drivers/net/ixgbe/ixgbe_rxtx.h | 21 +
 drivers/net/vmxnet3/vmxnet3_ethdev.c   | 12 +
 lib/librte_ether/rte_ethdev.c  | 68 +++
 lib/librte_ether/rte_ethdev.h  | 85 +-
 lib/librte_ether/rte_ether_version.map |  8 
 22 files changed, 635 insertions(+), 80 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCH] i40e: fix wrong alignment for the number of HW descriptors

2015-09-30 Thread Konstantin Ananyev
According to XL710 datasheet:
RX QLEN restrictions: When the PXE_MODE flag in the GLLAN_RCTL_0
register is cleared, the QLEN must be whole number of 32
descriptors.
TX QLEN restrictions: When the PXE_MODE flag in the GLLAN_RCTL_0
register is cleared, the QLEN must be whole number of 32
descriptors.

So make sure that, for both RX and TX queues, the number of HW descriptors
is a multiple of 32.
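
For illustration, a caller could round a requested ring size into the
valid range like this (a sketch only: the helper name is made up, the
I40E_* macros are the ones this patch adds to i40e_rxtx.h, and
RTE_MAX/RTE_MIN/RTE_ALIGN_CEIL come from rte_common.h):

#include <rte_common.h>

/* Round up to a multiple of 32, then clamp to [64, 4096]. */
static uint16_t
i40e_valid_ring_size(uint16_t requested)
{
	uint16_t n = RTE_ALIGN_CEIL(requested, I40E_ALIGN_RING_DESC);

	return RTE_MIN(RTE_MAX(n, (uint16_t)I40E_MIN_RING_DESC),
			(uint16_t)I40E_MAX_RING_DESC);
}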

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_rxtx.c | 26 +-
 drivers/net/i40e/i40e_rxtx.h |  6 ++
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..260e580 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -57,9 +57,6 @@
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"

-#define I40E_MIN_RING_DESC 64
-#define I40E_MAX_RING_DESC 4096
-#define I40E_ALIGN 128
 #define DEFAULT_TX_RS_THRESH   32
 #define DEFAULT_TX_FREE_THRESH 32
 #define I40E_MAX_PKT_TYPE  256
@@ -68,6 +65,9 @@

 #define I40E_DMA_MEM_ALIGN 4096

+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define I40E_RING_BASE_ALIGN   128
+
 #define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)

@@ -2126,9 +2126,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"index exceeds the maximum");
return I40E_ERR_PARAM;
}
-   if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
-   (nb_desc > I40E_MAX_RING_DESC) ||
-   (nb_desc < I40E_MIN_RING_DESC)) {
+   if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+   (nb_desc > I40E_MAX_RING_DESC) ||
+   (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
"invalid", nb_desc);
return I40E_ERR_PARAM;
@@ -2338,9 +2338,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
return I40E_ERR_PARAM;
}

-   if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
-   (nb_desc > I40E_MAX_RING_DESC) ||
-   (nb_desc < I40E_MIN_RING_DESC)) {
+   if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
+   (nb_desc > I40E_MAX_RING_DESC) ||
+   (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
"invalid", nb_desc);
return I40E_ERR_PARAM;
@@ -2537,10 +2537,10 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,

 #ifdef RTE_LIBRTE_XEN_DOM0
return rte_memzone_reserve_bounded(z_name, ring_size,
-   socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+   socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
 #else
return rte_memzone_reserve_aligned(z_name, ring_size,
-   socket_id, 0, I40E_ALIGN);
+   socket_id, 0, I40E_RING_BASE_ALIGN);
 #endif
 }

@@ -2554,10 +2554,10 @@ i40e_memzone_reserve(const char *name, uint32_t len, 
int socket_id)
return mz;
 #ifdef RTE_LIBRTE_XEN_DOM0
mz = rte_memzone_reserve_bounded(name, len,
-   socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+   socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
 #else
mz = rte_memzone_reserve_aligned(name, len,
-   socket_id, 0, I40E_ALIGN);
+   socket_id, 0, I40E_RING_BASE_ALIGN);
 #endif
return mz;
 }
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 4385142..3d9884d 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -51,6 +51,12 @@
 #define I40E_RXBUF_SZ_1024 1024
 #define I40E_RXBUF_SZ_2048 2048

+/* In non-PXE mode, QLEN must be a multiple of 32 descriptors. */
+#define I40E_ALIGN_RING_DESC 32
+
+#define I40E_MIN_RING_DESC 64
+#define I40E_MAX_RING_DESC 4096
+
 enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
-- 
1.8.5.3



[dpdk-dev] [PATCH v4] ixgbe_pmd: enforce RS bit on every EOP descriptor for devices newer than 82598

2015-09-11 Thread Konstantin Ananyev
Hi Vlad,

>> Unfortunately we are seeing a huge performance drop with that patch:
>> On my box bi-directional traffic (64B packet) over one port can't reach even 
>> 11 Mpps.
>Konstantin, could you clarify - you saw "only" 11 Mpps with v3 of this patch which 
>doesn't change the rs_thresh and only sets RS on every packet? What is the 
>performance in the same test without this patch? 

Yes, that's with your latest patch - v4.
I am seeing:
vector RX + full-featured TX over 1 port:
orig_code   14.74 Mpps
your_patch: 10.6 Mpps

Actually, while we speaking about it,
could you try another patch for that issue on your test environment,
see below.
It seems to fix the problem in our test environment.
It is based on the observation that it is OK not to set RS on each EOP
as long as the distance between TDs with the RS bit set doesn't exceed
the size of the on-die descriptor queue (40 descriptors).
For example, with the default tx_rs_thresh of 32, RS is then guaranteed
at least once every 32 descriptors, well within that 40-descriptor limit.

With that approach I also see a slight performance drop,
but it is much less than with your approach:
with the same conditions it can do 14.2 Mpps over 1 port.

Thanks
Konstantin


Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 25 +++--
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 91023b9..a7a32ad 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -573,7 +573,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct ixgbe_tx_entry *sw_ring;
struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
-   volatile union ixgbe_adv_tx_desc *txd;
+   volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
@@ -596,6 +596,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txr = txq->tx_ring;
tx_id   = txq->tx_tail;
txe = &sw_ring[tx_id];
+   txp = NULL;

/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
@@ -639,6 +640,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 */
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

+   if (txp != NULL &&
+   nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+   /* set RS on the previous packet in the burst */
+   txp->read.cmd_type_len |=
+   rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
/*
 * The number of descriptors that must be allocated for a
 * packet is the number of segments of that packet, plus 1
@@ -843,8 +850,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txq->nb_tx_used = 0;
}
txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+   txp = txd;
}
+
 end_of_tx:
+   /* set RS on last packet in the burst */
+   if (txp != NULL)
+   txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+   
rte_wmb();

/*
@@ -2124,11 +2137,11 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
-   if (tx_rs_thresh >= (nb_desc - 2)) {
-   PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
-"of TX descriptors minus 2. (tx_rs_thresh=%u "
-"port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-(int)dev->data->port_id, (int)queue_idx);
+   if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+   PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than %u. "
+   "(tx_rs_thresh=%u port=%d queue=%d)",
+   DEFAULT_TX_FREE_THRESH, (unsigned int)tx_rs_thresh,
+   (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_free_thresh >= (nb_desc - 3)) {
-- 
1.8.3.1



[dpdk-dev] [PATCH] ixgbe: fix RX is not working properly when buffer address is not word aligned

2015-08-03 Thread Konstantin Ananyev
Niantic HW expects Header Buffer Address in the RXD to be word aligned.
So, if the mbuf's buf_physaddr is not word aligned,
the RX path will not work properly.
Right now, in the ixgbe PMD we always set both the Packet Buffer
Address (PBA) and the Header Buffer Address (HBA) to the same value:
buf_physaddr + RTE_PKTMBUF_HEADROOM.
As the ixgbe PMD doesn't support the header split feature anyway,
the issue can be fixed simply by always setting the HBA in the RXD to zero.
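
In other words, a sketch of the RXD programming after this change (it
mirrors the diff below; variable names as in ixgbe_rxtx.c):

dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
rxdp->read.hdr_addr = 0;        /* header split unused, so HBA can be 0 */
rxdp->read.pkt_addr = dma_addr; /* PBA keeps the (possibly unaligned) value */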

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 8 
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 6 ++
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a0c8847..94967c5 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1183,7 +1183,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool 
reset_mbuf)

/* populate the descriptors */
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
-   rxdp[i].read.hdr_addr = dma_addr;
+   rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}

@@ -1414,7 +1414,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-   rxdp->read.hdr_addr = dma_addr;
+   rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;

/*
@@ -1741,7 +1741,7 @@ next_desc:
rxe->mbuf = nmb;

rxm->data_off = RTE_PKTMBUF_HEADROOM;
-   rxdp->read.hdr_addr = dma;
+   rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma;
} else
rxe->mbuf = NULL;
@@ -3633,7 +3633,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
-   rxd->read.hdr_addr = dma_addr;
+   rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index 6c1647e..e1750c1 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -56,6 +56,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
RTE_PKTMBUF_HEADROOM);
__m128i dma_addr0, dma_addr1;

+   const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
+
rxdp = rxq->rx_ring + rxq->rxrearm_start;

/* Pull 'n' more MBUFs into the software ring */
@@ -108,6 +110,10 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

+   /* set Header Buffer Address to zero */
+   dma_addr0 =  _mm_and_si128(dma_addr0, hba_msk);
+   dma_addr1 =  _mm_and_si128(dma_addr1, hba_msk);
+
/* flush desc with pa dma_addr */
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 2/2] ixgbe: fix vector scatter RX could produce wrong nb_segs value

2015-07-28 Thread Konstantin Ananyev
Fixes: cf4b4708a88a ("ixgbe: improve slow-path perf with vector scattered Rx")

v2 changes:
- updated commit message with 'Fixes'.

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index 6c1647e..1c16dec 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -497,6 +497,8 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_bufs,
else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
+
+   start->nb_segs--;
while (secondlast->next != end)
secondlast = secondlast->next;
secondlast->data_len -= (rxq->crc_len -
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 1/2] ixgbe: fix scalar scatter RX doesn't take into account CRC length

2015-07-28 Thread Konstantin Ananyev
For the 2.1 release, in an attempt to minimize the number of RX routines
to support, the ixgbe scatter and ixgbe LRO RX routines were merged into
one that can handle both cases.
However, I completely missed the fact that while LRO can only be used
when HW CRC strip is enabled, scatter RX should work in both cases
(HW CRC strip on/off).
This patch restores the missing functionality: the CRC length is now
subtracted from the packet, and if the last segment holds nothing but
(part of) the CRC, it is freed and the remainder trimmed from the
previous segment.

Fixes: 9d8a92628f21 ("ixgbe: remove simple scalar scattered Rx method")

v2 changes:
- updated commit message with 'Fixes'.

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a0c8847..a1b25aa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1817,6 +1817,25 @@ next_desc:
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
staterr);

+   /*
+* Deal with the case when HW CRC strip is disabled.
+* That can't happen when LRO is enabled, but still could
+* happen for scattered RX mode.
+*/
+   first_seg->pkt_len -= rxq->crc_len;
+   if (unlikely(rxm->data_len <= rxq->crc_len)) {
+   struct rte_mbuf *lp;
+
+   for (lp = first_seg; lp->next != rxm; lp = lp->next)
+   ;
+
+   first_seg->nb_segs--;
+   lp->data_len -= rxq->crc_len - rxm->data_len;
+   lp->next = NULL;
+   rte_pktmbuf_free_seg(rxm);
+   } else
+   rxm->data_len -= rxq->crc_len;
+
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 0/2] ixgbe: Two fixes for RX scatter functions.

2015-07-28 Thread Konstantin Ananyev
Konstantin Ananyev (2):
  ixgbe: fix scalar scatter RX doesn't take into account CRC length
  ixgbe: fix vector scatter RX could produce wrong nb_segs value

 drivers/net/ixgbe/ixgbe_rxtx.c | 19 +++
 drivers/net/ixgbe/ixgbe_rxtx_vec.c |  2 ++
 2 files changed, 21 insertions(+)

-- 
1.8.3.1



[dpdk-dev] [PATCH 2/2] ixgbe: fix vector scatter RX could produce wrong nb_segs value

2015-07-28 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index 6c1647e..1c16dec 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -497,6 +497,8 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct 
rte_mbuf **rx_bufs,
else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
+
+   start->nb_segs--;
while (secondlast->next != end)
secondlast = secondlast->next;
secondlast->data_len -= (rxq->crc_len -
-- 
1.8.3.1



[dpdk-dev] [PATCH 1/2] ixgbe: fix scalar scatter RX doesn't take into account CRC length

2015-07-28 Thread Konstantin Ananyev
For the 2.1 release, in an attempt to minimize the number of RX routines
to support, the ixgbe scatter and ixgbe LRO RX routines were merged into
one that can handle both cases.
However, I completely missed the fact that while LRO can only be used
when HW CRC strip is enabled, scatter RX should work in both cases
(HW CRC strip on/off).
This patch restores the missing functionality.

Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a0c8847..a1b25aa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1817,6 +1817,25 @@ next_desc:
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
staterr);

+   /*
+* Deal with the case when HW CRC strip is disabled.
+* That can't happen when LRO is enabled, but still could
+* happen for scattered RX mode.
+*/
+   first_seg->pkt_len -= rxq->crc_len;
+   if (unlikely(rxm->data_len <= rxq->crc_len)) {
+   struct rte_mbuf *lp;
+
+   for (lp = first_seg; lp->next != rxm; lp = lp->next)
+   ;
+
+   first_seg->nb_segs--;
+   lp->data_len -= rxq->crc_len - rxm->data_len;
+   lp->next = NULL;
+   rte_pktmbuf_free_seg(rxm);
+   } else
+   rxm->data_len -= rxq->crc_len;
+
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
-- 
1.8.3.1



[dpdk-dev] [PATCH 0/2] ixgbe: Two fixes for RX scatter functions.

2015-07-28 Thread Konstantin Ananyev
Konstantin Ananyev (2):
  ixgbe: fix scalar scatter RX doesn't take into account CRC length
  ixgbe: fix vector scatter RX could produce wrong nb_segs value

 drivers/net/ixgbe/ixgbe_rxtx.c | 19 +++
 drivers/net/ixgbe/ixgbe_rxtx_vec.c |  2 ++
 2 files changed, 21 insertions(+)

-- 
1.8.3.1



[dpdk-dev] [PATCH] ixgbe: fix RX queue vector fields are not reset properly

2015-07-27 Thread Konstantin Ananyev
As Steve pointed out, "ixgbe: fix release queue mbufs" is not complete:
at queue stop we don't reset the vector-related RX queue fields to their
initial values.

Reported-by: Cunming Liang 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index cbb16b6..a0c8847 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2400,6 +2400,11 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, 
struct ixgbe_rx_queue *rxq)
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+   rxq->rxrearm_start = 0;
+   rxq->rxrearm_nb = 0;
+#endif
 }

 int __attribute__((cold))
-- 
1.8.3.1



[dpdk-dev] [PATCH] rte_ethdev.h: fix to make sure that all functions have C linkage

2015-07-24 Thread Konstantin Ananyev
Move #ifdef __cplusplus to the end of the file.
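
That is, the header then follows the usual guard pattern, so that every
public declaration gets C linkage when compiled as C++:

#ifdef __cplusplus
extern "C" {
#endif

/* ... all public rte_ethdev declarations ... */

#ifdef __cplusplus
}
#endif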

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.h | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index c901a2c..9b50858 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3793,10 +3793,6 @@ int rte_eth_dev_get_eeprom(uint8_t port_id, struct 
rte_dev_eeprom_info *info);
  */
 int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);

-#ifdef __cplusplus
-}
-#endif
-
 /**
  * Set the list of multicast addresses to filter on an Ethernet device.
  *
@@ -3882,4 +3878,9 @@ extern int rte_eth_timesync_read_rx_timestamp(uint8_t 
port_id,
  */
 extern int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
  struct timespec *timestamp);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RTE_ETHDEV_H_ */
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 5/5] ixgbe: remove awkward typecasts from ixgbe SSE PMD

2015-07-24 Thread Konstantin Ananyev
The vector/SSE PMD used a different element type for the TX queue sw_ring
entries. This led to lots of typecasts in the code, which required careful
bracketing and so invited subtle errors.
For example, in the original code:
txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
instead needs to be written as:
txe = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[i];

We can eliminate this problem by having two software ring pointers in the
structure, one for each of the two element types.

v2 changes:
- fix remaining wrong typecast.

Signed-off-by: Bruce Richardson 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.h |  5 -
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 22 ++
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index befdc3a..1557438 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -193,7 +193,10 @@ struct ixgbe_tx_queue {
/** TX ring virtual address. */
volatile union ixgbe_adv_tx_desc *tx_ring;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
-   struct ixgbe_tx_entry *sw_ring;  /**< virtual address of SW ring. */
+   union {
+   struct ixgbe_tx_entry *sw_ring;     /**< address of SW ring for scalar PMD */
+   struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
+   };
volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
uint16_t nb_tx_desc;    /**< number of TX descriptors. */
uint16_t tx_tail;       /**< current value of TDT reg. */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index d3da31d..9c5390e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -608,8 +608,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 * first buffer to free from S/W ring is at index
 * tx_next_dd - (tx_rs_thresh-1)
 */
-   txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
-   (n - 1)];
+   txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
@@ -678,7 +677,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf 
**tx_pkts,

tx_id = txq->tx_tail;
txdp = >tx_ring[tx_id];
-   txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
+   txep = &txq->sw_ring_v[tx_id];

txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

@@ -699,7 +698,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf 
**tx_pkts,

/* avoid reach the end of ring */
txdp = &(txq->tx_ring[tx_id]);
-   txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
+   txep = &txq->sw_ring_v[tx_id];
}

tx_backlog_entry(txep, tx_pkts, nb_commit);
@@ -735,14 +734,14 @@ ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue 
*txq)
for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
 i != txq->tx_tail;
 i = (i + 1) & max_desc) {
-   txe = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[i];
+   txe = &txq->sw_ring_v[i];
rte_pktmbuf_free_seg(txe->mbuf);
}
txq->nb_tx_free = max_desc;

/* reset tx_entry */
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
+   txe = &txq->sw_ring_v[i];
txe->mbuf = NULL;
}
 }
@@ -772,8 +771,8 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
return;

if (txq->sw_ring != NULL) {
-   rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
-   txq->sw_ring = NULL;
+   rte_free(txq->sw_ring_v - 1);
+   txq->sw_ring_v = NULL;
}
 }

@@ -781,7 +780,7 @@ static void __attribute__((cold))
 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 {
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
-   struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
+   struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
uint16_t i;

/* Zero out HW ring memory */
@@ -838,12 +837,11 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 int __attribute__((cold))
 ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
 {
-   if (txq->sw_ring == NULL)
+   if (txq->sw_ring_v == NULL)
return -1;

/* leave the first one for overflow */
-   txq->sw_ring = (struct ixgbe_tx_entry *)
-   ((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
+   txq->sw_ring_v = txq->sw_ring_v + 1;
txq->ops = &vec_txq_ops;

return 0;
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 4/5] ixgbe: rename tx queue release function for consistency

2015-07-24 Thread Konstantin Ananyev
The function inside the vector/SSE poll-mode driver for releasing
the mbufs on the TX queues had the same name as another function
inside the regular PMD. To keep consistency and avoid confusion,
rename the vector PMD version to have a "_vec" suffix.

Signed-off-by: Bruce Richardson 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index c01da7b..d3da31d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -722,7 +722,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 }

 static void __attribute__((cold))
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
 {
unsigned i;
struct ixgbe_tx_entry_v *txe;
@@ -812,7 +812,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 }

 static const struct ixgbe_txq_ops vec_txq_ops = {
-   .release_mbufs = ixgbe_tx_queue_release_mbufs,
+   .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
 };
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 3/5] ixgbe: fix bug on release of mbufs from queue

2015-07-24 Thread Konstantin Ananyev
The calculations of what mbufs were valid in the RX and TX queues were
incorrect when freeing the mbufs for the vector PMD. This led to crashes
due to invalid reference counts when mbuf debugging was turned on, and
possibly other more subtle problems (such as mbufs being freed when in use)
in other cases.

To fix this, the following changes were made:
* correct counts and post-loop values in the TX release function for the
  vector code.
* create a new separate RX release function for the RX vector code, since
  the tracking of which mbufs are valid is different for that code path
  (see the sketch after this list)
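
Roughly, the separate vector RX release looks like this (a sketch of the
idea only, reconstructed from the fields already in ixgbe_rx_queue, not
necessarily the exact code of this patch):

static void
ixgbe_rx_queue_release_mbufs_vec_sketch(struct ixgbe_rx_queue *rxq)
{
	const uint16_t mask = rxq->nb_rx_desc - 1;
	uint16_t i;

	/* nothing was ever rearmed: no mbufs to free */
	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* only descriptors outside the not-yet-rearmed window
	 * [rxrearm_start, rxrearm_start + rxrearm_nb) own mbufs;
	 * a blind full-ring walk would double-free */
	for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
		rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}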

Fixes: c95584dc2b18 ("ixgbe: new vectorized functions for Rx/Tx")

v2 changes:
- Make sure that rx_using_sse is reset to zero if a scalar RX function was chosen.
- Fix checkpatch.pl errors.

Signed-off-by: Bruce Richardson 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 20 +++
 drivers/net/ixgbe/ixgbe_rxtx.h |  3 +++
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 52 +-
 3 files changed, 58 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 75c..db2454c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2270,6 +2270,14 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 {
unsigned i;

+#ifdef RTE_IXGBE_INC_VECTOR
+   /* SSE Vector driver has a different way of releasing mbufs. */
+   if (rxq->rx_using_sse) {
+   ixgbe_rx_queue_release_mbufs_vec(rxq);
+   return;
+   }
+#endif
+
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf != NULL) {
@@ -3925,6 +3933,7 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 
vector, s8 type)
 void __attribute__((cold))
 ixgbe_set_rx_function(struct rte_eth_dev *dev)
 {
+   uint16_t i, rx_using_sse;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;

@@ -4013,6 +4022,17 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)

dev->rx_pkt_burst = ixgbe_recv_pkts;
}
+
+   /* Propagate information about RX function choice through all queues. */
+
+   rx_using_sse =
+   (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+   dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+   rxq->rx_using_sse = rx_using_sse;
+   }
 }

 /**
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 64e6bb9..befdc3a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -121,6 +121,8 @@ struct ixgbe_rx_queue {
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
 #endif
+   uint16_t rx_using_sse;
+   /**< indicates that vector RX is in use */
 #ifdef RTE_IXGBE_INC_VECTOR
uint16_t rxrearm_nb;    /**< number of remaining to be re-armed */
uint16_t rxrearm_start; /**< the idx we start the re-arming from */
@@ -284,6 +286,7 @@ uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
+void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);

 #ifdef RTE_IXGBE_INC_VECTOR

diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index 47ff990..c01da7b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -726,27 +726,45 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 {
unsigned i;
struct ixgbe_tx_entry_v *txe;
-   uint16_t nb_free, max_desc;
+   const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

-   if (txq->sw_ring != NULL) {
-   /* release the used mbufs in sw_ring */
-   nb_free = txq->nb_tx_free;
-   max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-   for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
-nb_free < max_desc && i != txq->tx_tail;
-i = (i + 1) & max_desc) {
-   txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
-   if (txe->mbuf != NULL)
-   rte_pktmbuf_free_seg(txe->mbuf);
-   }
-   /* reset tx_entry */
-   for (i = 0; i < txq->nb_tx_desc; i++) {
-   txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[

[dpdk-dev] [PATCHv2 2/5] ixgbe: fix comments on rx_queue fields

2015-07-24 Thread Konstantin Ananyev
The two fields for vector RX rearming in the rx queue structure were
incorrectly labelled. Switching the comments on each around makes things
clearer.

Signed-off-by: Bruce Richardson 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 0e6ad93..64e6bb9 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -122,8 +122,8 @@ struct ixgbe_rx_queue {
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
 #endif
 #ifdef RTE_IXGBE_INC_VECTOR
-   uint16_t rxrearm_nb;    /**< the idx we start the re-arming from */
-   uint16_t rxrearm_start; /**< number of remaining to be re-armed */
+   uint16_t rxrearm_nb;    /**< number of remaining to be re-armed */
+   uint16_t rxrearm_start; /**< the idx we start the re-arming from */
 #endif
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 1/5] Revert "ixgbe: check mbuf refcnt when clearing a ring"

2015-07-24 Thread Konstantin Ananyev
This reverts commit b35d0d80f0a809939549ddde99c1a76b7e38bff3.
The bug fix was incorrect as it did not take into account that
mbufs that were previously freed may have since been re-allocated.

Signed-off-by: Bruce Richardson 
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 3 +--
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 8 +---
 2 files changed, 2 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index af7e222..75c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2272,8 +2272,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)

if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
-   if (rxq->sw_ring[i].mbuf != NULL &&
-   
rte_mbuf_refcnt_read(rxq->sw_ring[i].mbuf)) {
+   if (rxq->sw_ring[i].mbuf != NULL) {
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
rxq->sw_ring[i].mbuf = NULL;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c 
b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index d3ac74a..47ff990 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -736,13 +736,7 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 nb_free < max_desc && i != txq->tx_tail;
 i = (i + 1) & max_desc) {
txe = (struct ixgbe_tx_entry_v *)>sw_ring[i];
-   /*
-* Check for already freed packets.
-* Note: ixgbe_tx_free_bufs does not NULL after free,
-* so we actually have to check the reference count.
-*/
-   if (txe->mbuf != NULL &&
-   rte_mbuf_refcnt_read(txe->mbuf) != 0)
+   if (txe->mbuf != NULL)
rte_pktmbuf_free_seg(txe->mbuf);
}
/* reset tx_entry */
-- 
1.8.3.1



[dpdk-dev] [PATCHv2 0/5] ixgbe: fix mbuf release on RX and TX

2015-07-24 Thread Konstantin Ananyev
Konstantin has correctly pointed out that the previously applied fix:
b35d0d80f0a8 ("ixgbe: check mbuf refcnt when clearing a ring")
is not a proper fix for the reported issue at all.
Ref: http://permalink.gmane.org/gmane.comp.networking.dpdk.devel/21932

This patch set reverts the original fix, and applies a better fix for the
issue, as well as performing other cleanups in the code in question, to
try and avoid future issues.

v2 changes:
- Make sure that rx_using_sse is reset to zero if a scalar RX function was chosen.
- Fix checkpatch.pl errors.
- Fix remaining wrong typecast.

Konstantin Ananyev (5):
  Revert "ixgbe: check mbuf refcnt when clearing a ring"
  ixgbe: fix comments on rx_queue fields
  ixgbe: fix bug on release of mbufs from queue
  ixgbe: rename tx queue release function for consistency
  ixgbe: remove awkward typecasts from ixgbe SSE PMD

 drivers/net/ixgbe/ixgbe_rxtx.c | 23 ++-
 drivers/net/ixgbe/ixgbe_rxtx.h | 12 --
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 80 +-
 3 files changed, 75 insertions(+), 40 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCHv4 5/5] testpmd: add new command to display RX/TX queue information

2015-07-22 Thread Konstantin Ananyev
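The new command can be used like this (illustrative session, port 0,
queue 0):

testpmd> show rxq info 0 0
testpmd> show txq info 0 0
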
Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 
 app/test-pmd/config.c  | 67 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 117 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 8ab4687..29180de 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5304,6 +5304,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8913,6 +8960,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 1d29146..f1fd6b1 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,73 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rxq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve inforamtion for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\nMax possible number of RXDs: %hu", qinfo.max_desc);
+   printf("\nMin possible number of RXDs: %hu", qinfo.min_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_txq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_tx_queue_inf

[dpdk-dev] [PATCHv4 4/5] e1000: add support for eth_(rxq|txq)_info_get

2015-07-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/e1000/e1000_ethdev.h | 12 
 drivers/net/e1000/em_ethdev.c|  2 ++
 drivers/net/e1000/em_rxtx.c  | 38 ++
 drivers/net/e1000/igb_ethdev.c   |  4 
 drivers/net/e1000/igb_rxtx.c | 36 
 5 files changed, 92 insertions(+)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..7ee013a 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -307,6 +307,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +349,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);

 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index d8c04e7..7cb8b0e 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove  = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+   .rxq_info_get = em_rxq_info_get,
+   .txq_info_get = em_txq_info_get,
 };

 /**
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..5778723 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1881,3 +1881,41 @@ eth_em_tx_init(struct rte_eth_dev *dev)
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct em_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = EM_MAX_RING_DESC;
+   qinfo->min_desc = EM_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct em_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = EM_MAX_RING_DESC;
+   qinfo->min_desc = EM_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index ddc7186..436405c 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -307,6 +307,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.rss_hash_conf_get= eth_igb_rss_hash_conf_get,
.filter_ctrl  = eth_igb_filter_ctrl,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+   .rxq_info_get = igb_rxq_info_get,
+   .txq_info_get = igb_txq_info_get,
.timesync_enable  = igb_timesync_enable,
.timesync_disable = igb_timesync_disable,
.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -337,6 +339,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.tx_queue_setup   = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+   .rxq_info_get = igb_rxq_info_get,
+   .txq_info_get = igb_txq_info_get,
.mac_addr_set = igbvf_default_mac_addr_set,
.get_reg_length   = igbvf_get_reg_length,
.get_reg  = igbvf_get_regs,
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 3a31b21..5f047ca 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -2516,3 +2516,39 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
}

 }
+
+void
+igb_rxq_info_get(struct rte_eth

[dpdk-dev] [PATCHv4 3/5] ixgbe: add support for eth_(rxq|txq)_info_get

2015-07-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  4 
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 42 
 3 files changed, 52 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3a8cff0..053279e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -431,6 +431,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.timesync_enable  = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -466,6 +468,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length   = ixgbevf_get_reg_length,
.get_reg  = ixgbevf_get_regs,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c16c11d..190c34a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -347,6 +347,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, 
uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9b2d637..f910fb8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -4689,6 +4689,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, 
uint16_t tx_queue_id)
return 0;
 }

+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct ixgbe_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct ixgbe_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
-- 
1.8.3.1



[dpdk-dev] [PATCHv4 2/5] i40e: add support for eth_(rxq|txq)_info_get

2015-07-22 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c |  2 ++
 drivers/net/i40e/i40e_ethdev.h |  5 +
 drivers/net/i40e/i40e_rxtx.c   | 42 ++
 3 files changed, 49 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 40b0526..6815b6c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -283,6 +283,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rxq_info_get = i40e_rxq_info_get,
+   .txq_info_get = i40e_txq_info_get,
.mirror_rule_set  = i40e_mirror_rule_set,
.mirror_rule_reset= i40e_mirror_rule_reset,
.timesync_enable  = i40e_timesync_enable,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..4748392 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -502,6 +502,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 891a221..fadf3e8 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3352,3 +3352,45 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.3.1



[dpdk-dev] [PATCHv4 1/5] ethdev: add new API to retrieve RX/TX queue information

2015-07-22 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.

Add new structures:
struct rte_eth_rxq_info
struct rte_eth_txq_info

new functions:
rte_eth_rx_queue_info_get
rte_eth_tx_queue_info_get

into the rte_ethdev API.

Left extra free space in the queue info structures,
so extra fields could be added later without ABI breakage.
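
For illustration, a minimal caller-side sketch of the new API (hypothetical
application code, not part of this patch; assumes the port and queue are
already configured):

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_rxq_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int rc;

	/* Zeroes qinfo, then fills it via the PMD's rxq_info_get
	 * callback; returns a negative errno value on failure. */
	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("rxq info failed: %d\n", rc);
		return;
	}

	printf("port %hhu rxq %hu: %hu RXDs (min %hu, max %hu), mempool %s\n",
		port_id, queue_id, qinfo.nb_desc, qinfo.min_desc,
		qinfo.max_desc,
		qinfo.mp == NULL ? "NULL" : qinfo.mp->name);
}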

v2 changes:
- Add formal check for the qinfo input parameter.
- As suggested rename 'rx_qinfo/tx_qinfo' to 'rxq_info/txq_info'

v3 changes:
- Updated rte_ether_version.map
- Merged with latest changes

v4 changes:
- rte_ether_version.map: move new functions into DPDK_2.1 sub-space.

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.c  | 54 +
 lib/librte_ether/rte_ethdev.h  | 87 +++---
 lib/librte_ether/rte_ether_version.map |  2 +
 3 files changed, 137 insertions(+), 6 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 94104ce..a94c119 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -3341,6 +3341,60 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t 
queue_id,
 }

 int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_rx_queues) {
+   PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_tx_queues) {
+   PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 struct ether_addr *mc_addr_set,
 uint32_t nb_mc_addr)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index c901a2c..0c6705e 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -960,6 +960,30 @@ struct rte_eth_xstats {
uint64_t value;
 };

+/**
+ * Ethernet device RX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_rxq_info {
+   struct rte_mempool *mp; /**< mempool used by that queue. */
+   struct rte_eth_rxconf conf; /**< queue config parameters. */
+   uint8_t scattered_rx;   /**< scattered packets RX supported. */
+   uint16_t nb_desc;   /**< configured number of RXDs. */
+   uint16_t max_desc;  /**< max allowed number of RXDs. */
+   uint16_t min_desc;  /**< min allowed number of RXDs. */
+} __rte_cache_aligned;
+
+/**
+ * Ethernet device TX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_txq_info {
+   struct rte_eth_txconf conf; /**< queue config parameters. */
+   uint16_t nb_desc;   /**< configured number of TXDs. */
+   uint16_t max_desc;  /**< max allowed number of TXDs. */
+   uint16_t min_desc;  /**< min allowed number of TXDs. */
+} __rte_cache_aligned;
+
 struct rte_eth_dev;

 struct rte_eth_dev_callback;
@@ -1063,6 +1087,12 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct 
rte_eth_dev *dev,
 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
 /**< @internal Check DD bit of specific RX descriptor */

+typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
+   uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
+
+typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
+   uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
+
 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
 /**< @internal Set MTU. */

@@ -1451,9 +1481,13 @@ struct eth_dev_ops {
rss_hash_upda

[dpdk-dev] [PATCHv4 0/5] ethdev: add new API to retrieve RX/TX queue information

2015-07-22 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.
Right now supported for:
ixgbe, i40e, e1000 PMDs.
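
Callers should also be ready for PMDs that do not yet implement the new
dev_ops: the ethdev layer returns -ENOTSUP in that case. A hypothetical
caller-side check (names taken from the patches below):

#include <stdio.h>
#include <errno.h>
#include <rte_ethdev.h>

static void
check_txq_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info tinfo;
	int rc = rte_eth_tx_queue_info_get(port_id, queue_id, &tinfo);

	if (rc == -ENOTSUP)	/* PMD provides no txq_info_get callback */
		printf("txq info not supported by this PMD\n");
	else if (rc == -EINVAL)	/* bad port/queue id or NULL qinfo */
		printf("invalid port or queue id\n");
	else if (rc == 0)
		printf("tx_free_thresh: %hu\n", tinfo.conf.tx_free_thresh);
}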

Konstantin Ananyev (5):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rxq|txq)_info_get
  ixgbe: add support for eth_(rxq|txq)_info_get
  e1000: add support for eth_(rxq|txq)_info_get
  testpmd: add new command to display RX/TX queue information

 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 67 ++
 app/test-pmd/testpmd.h |  2 +
 drivers/net/e1000/e1000_ethdev.h   | 12 +
 drivers/net/e1000/em_ethdev.c  |  2 +
 drivers/net/e1000/em_rxtx.c| 38 +++
 drivers/net/e1000/igb_ethdev.c |  4 ++
 drivers/net/e1000/igb_rxtx.c   | 36 ++
 drivers/net/i40e/i40e_ethdev.c |  2 +
 drivers/net/i40e/i40e_ethdev.h |  5 ++
 drivers/net/i40e/i40e_rxtx.c   | 42 
 drivers/net/ixgbe/ixgbe_ethdev.c   |  4 ++
 drivers/net/ixgbe/ixgbe_ethdev.h   |  6 +++
 drivers/net/ixgbe/ixgbe_rxtx.c | 42 
 lib/librte_ether/rte_ethdev.c  | 54 +
 lib/librte_ether/rte_ethdev.h  | 87 +++---
 lib/librte_ether/rte_ether_version.map |  2 +
 17 files changed, 447 insertions(+), 6 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCHv3 5/5] testpmd: add new command to display RX/TX queue information

2015-07-20 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 
 app/test-pmd/config.c  | 67 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 117 insertions(+)
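
For context, once this patch is applied the new command is invoked from the
testpmd prompt as follows (a hypothetical session; port 0, queue 0):

testpmd> show rxq info 0 0
testpmd> show txq info 0 0

Each command prints the fields filled in by rte_eth_(rx|tx)_queue_info_get()
via rx_queue_infos_display()/tx_queue_infos_display() below.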

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 8ab4687..29180de 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5304,6 +5304,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8913,6 +8960,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 1d29146..f1fd6b1 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,73 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rxq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve inforamtion for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\nMax possible number of RXDs: %hu", qinfo.max_desc);
+   printf("\nMin possible number of RXDs: %hu", qinfo.min_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_txq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_tx_queue_inf

[dpdk-dev] [PATCHv3 3/5] ixgbe: add support for eth_(rxq|txq)_info_get

2015-07-20 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  4 
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 42 
 3 files changed, 52 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3a8cff0..053279e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -431,6 +431,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.timesync_enable  = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -466,6 +468,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg_length   = ixgbevf_get_reg_length,
.get_reg  = ixgbevf_get_regs,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c16c11d..190c34a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -347,6 +347,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, 
uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9b2d637..f910fb8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -4689,6 +4689,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, 
uint16_t tx_queue_id)
return 0;
 }

+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct ixgbe_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct ixgbe_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
-- 
1.8.3.1



[dpdk-dev] [PATCHv3 2/5] i40e: add support for eth_(rxq|txq)_info_get

2015-07-20 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c |  2 ++
 drivers/net/i40e/i40e_ethdev.h |  5 +
 drivers/net/i40e/i40e_rxtx.c   | 42 ++
 3 files changed, 49 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 40b0526..6815b6c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -283,6 +283,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rxq_info_get = i40e_rxq_info_get,
+   .txq_info_get = i40e_txq_info_get,
.mirror_rule_set  = i40e_mirror_rule_set,
.mirror_rule_reset= i40e_mirror_rule_reset,
.timesync_enable  = i40e_timesync_enable,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..4748392 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -502,6 +502,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 891a221..fadf3e8 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3352,3 +3352,45 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.3.1



[dpdk-dev] [PATCHv3 1/5] ethdev: add new API to retrieve RX/TX queue information

2015-07-20 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.

Add new structures:
struct rte_eth_rxq_info
struct rte_eth_txq_info

new functions:
rte_eth_rx_queue_info_get
rte_eth_tx_queue_info_get

into the rte_ethdev API.

Left extra free space in the queue info structures,
so extra fields could be added later without ABI breakage.
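
The ABI note above works because the structures are declared
__rte_cache_aligned: sizeof() is rounded up to a full cache line, so the
tail padding can absorb new members later. A hedged sketch of what such an
extension could look like (the appended field is purely hypothetical):

struct rte_eth_rxq_info {
	struct rte_mempool *mp;     /**< mempool used by that queue. */
	struct rte_eth_rxconf conf; /**< queue config parameters. */
	uint8_t scattered_rx;       /**< scattered packets RX supported. */
	uint16_t nb_desc;           /**< configured number of RXDs. */
	uint16_t max_desc;          /**< max allowed number of RXDs. */
	uint16_t min_desc;          /**< min allowed number of RXDs. */
	/* tail padding up to the cache-line boundary; a later release
	 * could append e.g. 'uint16_t rx_buf_size;' (hypothetical) here
	 * without changing sizeof(struct rte_eth_rxq_info). */
} __rte_cache_aligned;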

v2 changes:
- Add formal check for the qinfo input parameter.
- As suggested rename 'rx_qinfo/tx_qinfo' to 'rxq_info/txq_info'

v3 changes:
- Updated rte_ether_version.map 
- Merged with latest changes

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.c  | 54 +
 lib/librte_ether/rte_ethdev.h  | 87 +++---
 lib/librte_ether/rte_ether_version.map |  2 +
 3 files changed, 137 insertions(+), 6 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 94104ce..a94c119 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -3341,6 +3341,60 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t 
queue_id,
 }

 int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_rx_queues) {
+   PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_tx_queues) {
+   PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 struct ether_addr *mc_addr_set,
 uint32_t nb_mc_addr)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index c901a2c..0c6705e 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -960,6 +960,30 @@ struct rte_eth_xstats {
uint64_t value;
 };

+/**
+ * Ethernet device RX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_rxq_info {
+   struct rte_mempool *mp; /**< mempool used by that queue. */
+   struct rte_eth_rxconf conf; /**< queue config parameters. */
+   uint8_t scattered_rx;   /**< scattered packets RX supported. */
+   uint16_t nb_desc;   /**< configured number of RXDs. */
+   uint16_t max_desc;  /**< max allowed number of RXDs. */
+   uint16_t min_desc;  /**< min allowed number of RXDs. */
+} __rte_cache_aligned;
+
+/**
+ * Ethernet device TX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_txq_info {
+   struct rte_eth_txconf conf; /**< queue config parameters. */
+   uint16_t nb_desc;   /**< configured number of TXDs. */
+   uint16_t max_desc;  /**< max allowed number of TXDs. */
+   uint16_t min_desc;  /**< min allowed number of TXDs. */
+} __rte_cache_aligned;
+
 struct rte_eth_dev;

 struct rte_eth_dev_callback;
@@ -1063,6 +1087,12 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct 
rte_eth_dev *dev,
 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
 /**< @internal Check DD bit of specific RX descriptor */

+typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
+   uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
+
+typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
+   uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
+
 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
 /**< @internal Set MTU. */

@@ -1451,9 +1481,13 @@ struct eth_dev_ops {
rss_hash_update_t rss_hash_update;
/** Get current RSS hash configuration. */
rss_hash_

[dpdk-dev] [PATCHv3 0/5] ethdev: add new API to retrieve RX/TX queue information

2015-07-20 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.
Right now supported for:
ixgbe, i40e, e1000 PMDs.

Konstantin Ananyev (5):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rxq|txq)_info_get
  ixgbe: add support for eth_(rxq|txq)_info_get
  e1000: add support for eth_(rxq|txq)_info_get
  testpmd: add new command to display RX/TX queue information

 app/test-pmd/cmdline.c | 48 +++
 app/test-pmd/config.c  | 67 ++
 app/test-pmd/testpmd.h |  2 +
 drivers/net/e1000/e1000_ethdev.h   | 12 +
 drivers/net/e1000/em_ethdev.c  |  2 +
 drivers/net/e1000/em_rxtx.c| 38 +++
 drivers/net/e1000/igb_ethdev.c |  4 ++
 drivers/net/e1000/igb_rxtx.c   | 36 ++
 drivers/net/i40e/i40e_ethdev.c |  2 +
 drivers/net/i40e/i40e_ethdev.h |  5 ++
 drivers/net/i40e/i40e_rxtx.c   | 42 
 drivers/net/ixgbe/ixgbe_ethdev.c   |  4 ++
 drivers/net/ixgbe/ixgbe_ethdev.h   |  6 +++
 drivers/net/ixgbe/ixgbe_rxtx.c | 42 
 lib/librte_ether/rte_ethdev.c  | 54 +
 lib/librte_ether/rte_ethdev.h  | 87 +++---
 lib/librte_ether/rte_ether_version.map |  2 +
 17 files changed, 447 insertions(+), 6 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCHv2 5/5] testpmd: add new command to display RX/TX queue information

2015-06-18 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 
 app/test-pmd/config.c  | 67 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 117 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 8142910..a178801 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5235,6 +5235,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8793,6 +8840,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 52917c7..5f76740 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,73 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rxq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve inforamtion for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\nMax possible number of RXDs: %hu", qinfo.max_desc);
+   printf("\nMin possible number of RXDs: %hu", qinfo.min_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_txq_info qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_tx_queue_inf

[dpdk-dev] [PATCHv2 4/5] e1000: add support for eth_(rxq|txq)_info_get

2015-06-18 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/e1000/e1000_ethdev.h | 12 
 drivers/net/e1000/em_ethdev.c|  2 ++
 drivers/net/e1000/em_rxtx.c  | 38 ++
 drivers/net/e1000/igb_ethdev.c   |  4 
 drivers/net/e1000/igb_rxtx.c | 36 
 5 files changed, 92 insertions(+)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index c451faa..57a4017 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -302,6 +302,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -337,4 +343,10 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a306c55..1b6d2d0 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove  = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+   .rxq_info_get = em_rxq_info_get,
+   .txq_info_get = em_txq_info_get,
 };

 /**
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index fdc825f..fc1c5f2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1862,3 +1862,41 @@ eth_em_tx_init(struct rte_eth_dev *dev)
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct em_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = EM_MAX_RING_DESC;
+   qinfo->min_desc = EM_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct em_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = EM_MAX_RING_DESC;
+   qinfo->min_desc = EM_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 24c7510..92ef01e 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -274,6 +274,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.rss_hash_conf_get= eth_igb_rss_hash_conf_get,
.filter_ctrl  = eth_igb_filter_ctrl,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+   .rxq_info_get = igb_rxq_info_get,
+   .txq_info_get = igb_txq_info_get,
 };

 /*
@@ -295,6 +297,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.tx_queue_setup   = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+   .rxq_info_get = igb_rxq_info_get,
+   .txq_info_get = igb_txq_info_get,
 };

 /**
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 43d6703..704d414 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -2394,3 +2394,39 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
}

 }
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct igb_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = IGB_MAX_RING_DESC;
+   qinfo->

[dpdk-dev] [PATCHv2 3/5] ixgbe: add support for eth_(rxq|txq)_info_get

2015-06-18 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  4 
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 42 
 3 files changed, 52 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7414a2e..99eaf26 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
 };

 /*
@@ -412,6 +414,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rxq_info_get = ixgbe_rxq_info_get,
+   .txq_info_get = ixgbe_txq_info_get,
 };

 /**
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 19237b8..25d76d2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -343,6 +343,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, 
uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 4f9ab22..68ebc52 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -4501,6 +4501,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, 
uint16_t tx_queue_id)
return 0;
 }

+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct ixgbe_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct ixgbe_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
-- 
1.8.5.3



[dpdk-dev] [PATCHv2 2/5] i40e: add support for eth_(rxq|txq)_info_get

2015-06-18 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c |  2 ++
 drivers/net/i40e/i40e_ethdev.h |  5 +
 drivers/net/i40e/i40e_rxtx.c   | 42 ++
 3 files changed, 49 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2ada502..c691fd6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -262,6 +262,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rxq_info_get = i40e_rxq_info_get,
+   .txq_info_get = i40e_txq_info_get,
 };

 static struct eth_driver rte_i40e_pmd = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 587ee71..c012cda 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -479,6 +479,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 2de0ac4..e31b99d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2706,3 +2706,45 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.5.3



[dpdk-dev] [PATCHv2 1/5] ethdev: add new API to retrieve RX/TX queue information

2015-06-18 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.

Add new structures:
struct rte_eth_rxq_info
struct rte_eth_txq_info

new functions:
rte_eth_rx_queue_info_get
rte_eth_tx_queue_info_get

into the rte_ethdev API.

Left extra free space in the queue info structures,
so extra fields could be added later without ABI breakage.

v2 changes:

- Add formal check for the qinfo input parameter.
- As suggested rename 'rx_qinfo/tx_qinfo' to 'rxq_info/txq_info'

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.c | 54 ++
 lib/librte_ether/rte_ethdev.h | 77 ++-
 2 files changed, 130 insertions(+), 1 deletion(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index e13fde5..7dfe72a 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -3629,6 +3629,60 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t 
queue_id,
 }

 int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_rx_queues) {
+   PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   if (qinfo == NULL)
+   return -EINVAL;
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_tx_queues) {
+   PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
+
+   memset(qinfo, 0, sizeof(*qinfo));
+   dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 struct ether_addr *mc_addr_set,
 uint32_t nb_mc_addr)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 04c192d..5dd4c01 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -942,6 +942,30 @@ struct rte_eth_xstats {
uint64_t value;
 };

+/**
+ * Ethernet device RX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_rxq_info {
+   struct rte_mempool *mp; /**< mempool used by that queue. */
+   struct rte_eth_rxconf conf; /**< queue config parameters. */
+   uint8_t scattered_rx;   /**< scattered packets RX supported. */
+   uint16_t nb_desc;   /**< configured number of RXDs. */
+   uint16_t max_desc;  /**< max allowed number of RXDs. */
+   uint16_t min_desc;  /**< min allowed number of RXDs. */
+} __rte_cache_aligned;
+
+/**
+ * Ethernet device TX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_txq_info {
+   struct rte_eth_txconf conf; /**< queue config parameters. */
+   uint16_t nb_desc;   /**< configured number of TXDs. */
+   uint16_t max_desc;  /**< max allowed number of TXDs. */
+   uint16_t min_desc;  /**< min allowed number of TXDs. */
+} __rte_cache_aligned;
+
 struct rte_eth_dev;

 struct rte_eth_dev_callback;
@@ -1045,6 +1069,12 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct 
rte_eth_dev *dev,
 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
 /**< @Check DD bit of specific RX descriptor */

+typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
+   uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
+
+typedef void (*eth_txq_info_get_t)(struct rte_eth_dev *dev,
+   uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo);
+
 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
 /**< @internal Set MTU. */

@@ -1389,8 +1419,13 @@ struct eth_dev_ops {
rss_hash_update_t rss_hash_update;
/** Get current RSS hash configuration. */
rss_hash_conf_get_t rss_hash_conf_get;
-   eth_filter_ctrl_t  filter_ctrl;  /**< common filter 
control*/

[dpdk-dev] [PATCHv2 0/5] ethdev: add new API to retrieve RX/TX queue information

2015-06-18 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.
Right now supported for:
ixgbe, i40e, e1000 PMDs.

Konstantin Ananyev (5):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rxq|txq)_info_get
  ixgbe: add support for eth_(rxq|txq)_info_get
  e1000: add support for eth_(rxq|txq)_info_get
  testpmd: add new command to display RX/TX queue information

 app/test-pmd/cmdline.c   | 48 +
 app/test-pmd/config.c| 67 ++
 app/test-pmd/testpmd.h   |  2 ++
 drivers/net/e1000/e1000_ethdev.h | 12 +++
 drivers/net/e1000/em_ethdev.c|  2 ++
 drivers/net/e1000/em_rxtx.c  | 38 
 drivers/net/e1000/igb_ethdev.c   |  4 +++
 drivers/net/e1000/igb_rxtx.c | 36 +++
 drivers/net/i40e/i40e_ethdev.c   |  2 ++
 drivers/net/i40e/i40e_ethdev.h   |  5 +++
 drivers/net/i40e/i40e_rxtx.c | 42 ++
 drivers/net/ixgbe/ixgbe_ethdev.c |  4 +++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 
 drivers/net/ixgbe/ixgbe_rxtx.c   | 42 ++
 lib/librte_ether/rte_ethdev.c| 54 
 lib/librte_ether/rte_ethdev.h| 77 +++-
 16 files changed, 440 insertions(+), 1 deletion(-)

-- 
1.8.5.3



[dpdk-dev] [PATCH 5/5] testpmd: add new command to display RX/TX queue information

2015-06-17 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test-pmd/cmdline.c | 48 
 app/test-pmd/config.c  | 67 ++
 app/test-pmd/testpmd.h |  2 ++
 3 files changed, 117 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 8142910..a178801 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5235,6 +5235,53 @@ cmdline_parse_inst_t cmd_showport = {
},
 };

+/* *** SHOW QUEUE INFO *** */
+struct cmd_showqueue_result {
+   cmdline_fixed_string_t show;
+   cmdline_fixed_string_t type;
+   cmdline_fixed_string_t what;
+   uint8_t portnum;
+   uint16_t queuenum;
+};
+
+static void
+cmd_showqueue_parsed(void *parsed_result,
+   __attribute__((unused)) struct cmdline *cl,
+   __attribute__((unused)) void *data)
+{
+   struct cmd_showqueue_result *res = parsed_result;
+
+   if (!strcmp(res->type, "rxq"))
+   rx_queue_infos_display(res->portnum, res->queuenum);
+   else if (!strcmp(res->type, "txq"))
+   tx_queue_infos_display(res->portnum, res->queuenum);
+}
+
+cmdline_parse_token_string_t cmd_showqueue_show =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show");
+cmdline_parse_token_string_t cmd_showqueue_type =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq");
+cmdline_parse_token_string_t cmd_showqueue_what =
+   TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
+cmdline_parse_token_num_t cmd_showqueue_portnum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+cmdline_parse_token_num_t cmd_showqueue_queuenum =
+   TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
+
+cmdline_parse_inst_t cmd_showqueue = {
+   .f = cmd_showqueue_parsed,
+   .data = NULL,
+   .help_str = "show rxq|txq info  ",
+   .tokens = {
+   (void *)&cmd_showqueue_show,
+   (void *)&cmd_showqueue_type,
+   (void *)&cmd_showqueue_what,
+   (void *)&cmd_showqueue_portnum,
+   (void *)&cmd_showqueue_queuenum,
+   NULL,
+   },
+};
+
 /* *** READ PORT REGISTER *** */
 struct cmd_read_reg_result {
cmdline_fixed_string_t read;
@@ -8793,6 +8840,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_showport,
+   (cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 52917c7..8bfacdb 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -293,6 +293,73 @@ nic_stats_mapping_display(portid_t port_id)
 }

 void
+rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_rx_qinfo qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
+   if (rc != 0) {
+   printf("Failed to retrieve inforamtion for port: %hhu, "
+   "RX queue: %hu\nerror desc: %s(%d)\n",
+   port_id, queue_id, strerror(-rc), rc);
+   return;
+   }
+
+   printf("\n%s Infos for port %-2u, RX queue %-2u %s",
+  info_border, port_id, queue_id, info_border);
+
+   printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
+   printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
+   printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
+   printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
+   printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
+   printf("\nRX drop packets: %s",
+   (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
+   printf("\nRX deferred start: %s",
+   (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
+   printf("\nRX scattered packets: %s",
+   (qinfo.scattered_rx != 0) ? "on" : "off");
+   printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
+   printf("\nMax possible number of RXDs: %hu", qinfo.max_desc);
+   printf("\nMin possible number of RXDs: %hu", qinfo.min_desc);
+   printf("\n");
+}
+
+void
+tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_tx_qinfo qinfo;
+   int32_t rc;
+   static const char *info_border = "*";
+
+   rc = rte_eth_tx_queue_inf

[dpdk-dev] [PATCH 4/5] e1000: add support for eth_(rx|tx)_qinfo_get

2015-06-17 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/e1000/e1000_ethdev.h | 12 
 drivers/net/e1000/em_ethdev.c|  2 ++
 drivers/net/e1000/em_rxtx.c  | 38 ++
 drivers/net/e1000/igb_ethdev.c   |  4 
 drivers/net/e1000/igb_rxtx.c | 36 
 5 files changed, 92 insertions(+)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index c451faa..6001066 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -302,6 +302,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

+void igb_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo);
+
+void igb_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -337,4 +343,10 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

+void em_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo);
+
+void em_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo);
+
 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a306c55..d9188a4 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove  = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
+   .rx_qinfo_get = em_rx_qinfo_get,
+   .tx_qinfo_get = em_tx_qinfo_get,
 };

 /**
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index fdc825f..39d6b25 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1862,3 +1862,41 @@ eth_em_tx_init(struct rte_eth_dev *dev)
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo)
+{
+   struct em_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = EM_MAX_RING_DESC;
+   qinfo->min_desc = EM_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo)
+{
+   struct em_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = EM_MAX_RING_DESC;
+   qinfo->min_desc = EM_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 24c7510..1e11918 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -274,6 +274,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.rss_hash_conf_get= eth_igb_rss_hash_conf_get,
.filter_ctrl  = eth_igb_filter_ctrl,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+   .rx_qinfo_get = igb_rx_qinfo_get,
+   .tx_qinfo_get = igb_tx_qinfo_get,
 };

 /*
@@ -295,6 +297,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.tx_queue_setup   = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+   .rx_qinfo_get = igb_rx_qinfo_get,
+   .tx_qinfo_get = igb_tx_qinfo_get,
 };

 /**
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 43d6703..48d58f6 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -2394,3 +2394,39 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
}

 }
+
+void
+igb_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo)
+{
+   struct igb_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = IGB_MAX_RING_DESC;
+   qinfo->

[dpdk-dev] [PATCH 3/5] ixgbe: add support for eth_(rx|tx)_qinfo_get

2015-06-17 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  4 
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 42 
 3 files changed, 52 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7414a2e..bed33b0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rss_hash_conf_get= ixgbe_dev_rss_hash_conf_get,
.filter_ctrl  = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rx_qinfo_get = ixgbe_rx_qinfo_get,
+   .tx_qinfo_get = ixgbe_tx_qinfo_get,
 };

 /*
@@ -412,6 +414,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove  = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+   .rx_qinfo_get = ixgbe_rx_qinfo_get,
+   .tx_qinfo_get = ixgbe_tx_qinfo_get,
 };

 /**
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 19237b8..03ac00d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -343,6 +343,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, 
uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo);
+
+void ixgbe_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 4f9ab22..23d7053 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -4501,6 +4501,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, 
uint16_t tx_queue_id)
return 0;
 }

+void
+ixgbe_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo)
+{
+   struct ixgbe_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo)
+{
+   struct ixgbe_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = IXGBE_MAX_RING_DESC;
+   qinfo->min_desc = IXGBE_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
-- 
1.8.5.3



[dpdk-dev] [PATCH 2/5] i40e: add support for eth_(rx|tx)_qinfo_get

2015-06-17 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 drivers/net/i40e/i40e_ethdev.c |  2 ++
 drivers/net/i40e/i40e_ethdev.h |  5 +++++
 drivers/net/i40e/i40e_rxtx.c   | 42 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 49 insertions(+)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2ada502..38a05d7 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -262,6 +262,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.udp_tunnel_add   = i40e_dev_udp_tunnel_add,
.udp_tunnel_del   = i40e_dev_udp_tunnel_del,
.filter_ctrl  = i40e_dev_filter_ctrl,
+   .rx_qinfo_get = i40e_rx_qinfo_get,
+   .tx_qinfo_get = i40e_tx_qinfo_get,
 };

 static struct eth_driver rte_i40e_pmd = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 587ee71..88576b7 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -479,6 +479,11 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
  enum rte_filter_op filter_op,
  void *arg);

+void i40e_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo);
+void i40e_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo);
+
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
(&((struct i40e_adapter *)adapter)->pf)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 2de0ac4..c471db4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2706,3 +2706,45 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)

return I40E_SUCCESS;
 }
+
+void
+i40e_rx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo)
+{
+   struct i40e_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mp;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+
+   qinfo->nb_desc = rxq->nb_rx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_tx_qinfo_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo)
+{
+   struct i40e_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+   qinfo->max_desc = I40E_MAX_RING_DESC;
+   qinfo->min_desc = I40E_MIN_RING_DESC;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+   qinfo->conf.txq_flags = txq->txq_flags;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
1.8.5.3



[dpdk-dev] [PATCH 1/5] ethdev: add new API to retrieve RX/TX queue information

2015-06-17 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.

Add new structures:
struct rte_eth_rx_qinfo
struct rte_eth_tx_qinfo

new functions:
rte_eth_rx_queue_info_get
rte_eth_tx_queue_info_get

into the rte_ethdev API.

Left extra free space in the qinfo structures,
so extra fields could be added later without ABI breakage.
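
For illustration, a minimal caller sketch (the helper name and output
format are mine, not part of the patch; the call and the qinfo fields
are the ones added here), assuming a configured port whose PMD
implements the new callbacks:

#include <stdio.h>
#include <rte_ethdev.h>

/* Query and print RX queue details; returns 0, or the negative errno
 * from rte_eth_rx_queue_info_get() (-EINVAL for a bad port/queue id,
 * -ENOTSUP if the PMD does not provide rx_qinfo_get). */
static int
show_rxq_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rx_qinfo qinfo;
	int rc;

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0)
		return rc;

	printf("port %u rxq %u: nb_desc=%u (min=%u, max=%u), "
		"free_thresh=%u, drop_en=%u, scattered_rx=%u\n",
		port_id, queue_id, qinfo.nb_desc, qinfo.min_desc,
		qinfo.max_desc, qinfo.conf.rx_free_thresh,
		qinfo.conf.rx_drop_en, qinfo.scattered_rx);
	return 0;
}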

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_ether/rte_ethdev.c | 48 +++
 lib/librte_ether/rte_ethdev.h | 77 ++-
 2 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index e13fde5..6b9a7ef 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -3629,6 +3629,54 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t 
queue_id,
 }

 int
+rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_rx_qinfo *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   memset(qinfo, 0, sizeof(*qinfo));
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_rx_queues) {
+   PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_qinfo_get, -ENOTSUP);
+   dev->dev_ops->rx_qinfo_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
+rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+   struct rte_eth_tx_qinfo *qinfo)
+{
+   struct rte_eth_dev *dev;
+
+   memset(qinfo, 0, sizeof(*qinfo));
+
+   if (!rte_eth_dev_is_valid_port(port_id)) {
+   PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+   return -EINVAL;
+   }
+
+   dev = &rte_eth_devices[port_id];
+   if (queue_id >= dev->data->nb_tx_queues) {
+   PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+   return -EINVAL;
+   }
+
+   FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_qinfo_get, -ENOTSUP);
+   dev->dev_ops->tx_qinfo_get(dev, queue_id, qinfo);
+   return 0;
+}
+
+int
 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
 struct ether_addr *mc_addr_set,
 uint32_t nb_mc_addr)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 04c192d..45afdd3 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -942,6 +942,30 @@ struct rte_eth_xstats {
uint64_t value;
 };

+/**
+ * Ethernet device RX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_rx_qinfo {
+   struct rte_mempool *mp; /**< mempool used by that queue. */
+   struct rte_eth_rxconf conf; /**< queue config parameters. */
+   uint8_t scattered_rx;   /**< scattered packets RX supported. */
+   uint16_t nb_desc;   /**< configured number of RXDs. */
+   uint16_t max_desc;  /**< max allowed number of RXDs. */
+   uint16_t min_desc;  /**< min allowed number of RXDs. */
+} __rte_cache_aligned;
+
+/**
+ * Ethernet device TX queue information structure.
+ * Used to retrieve information about a configured queue.
+ */
+struct rte_eth_tx_qinfo {
+   struct rte_eth_txconf conf; /**< queue config parameters. */
+   uint16_t nb_desc;   /**< configured number of TXDs. */
+   uint16_t max_desc;  /**< max allowed number of TXDs. */
+   uint16_t min_desc;  /**< min allowed number of TXDs. */
+} __rte_cache_aligned;
+
 struct rte_eth_dev;

 struct rte_eth_dev_callback;
@@ -1045,6 +1069,12 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct 
rte_eth_dev *dev,
 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
 /**< @internal Check DD bit of specific RX descriptor */

+typedef void (*eth_rx_qinfo_get_t)(struct rte_eth_dev *dev,
+   uint16_t rx_queue_id, struct rte_eth_rx_qinfo *qinfo);
+
+typedef void (*eth_tx_qinfo_get_t)(struct rte_eth_dev *dev,
+   uint16_t tx_queue_id, struct rte_eth_tx_qinfo *qinfo);
+
 typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
 /**< @internal Set MTU. */

@@ -1389,8 +1419,13 @@ struct eth_dev_ops {
rss_hash_update_t rss_hash_update;
/** Get current RSS hash configuration. */
rss_hash_conf_get_t rss_hash_conf_get;
-   eth_filter_ctrl_t  filter_ctrl;  /**< common filter control*/
+   eth_filter_ctrl_t  filter_ctrl;
+   /**< common filter control. */
eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs */
+   eth_rx_qinfo_get_t rx_qinfo_get;
+   /**< retrieve RX queue information. */
+   eth_tx_qinfo_get_t tx_qinfo_get;
+   /**< retrieve TX queue information. */

[dpdk-dev] [PATCH 0/5] ethdev: add new API to retrieve RX/TX queue information

2015-06-17 Thread Konstantin Ananyev
Add the ability for the upper layer to query RX/TX queue information.
Right now supported for:
ixgbe, i40e, e1000 PMDs.

Konstantin Ananyev (5):
  ethdev: add new API to retrieve RX/TX queue information
  i40e: add support for eth_(rx|tx)_qinfo_get
  ixgbe: add support for eth_(rx|tx)_qinfo_get
  e1000: add support for eth_(rx|tx)_qinfo_get
  testpmd: add new command to display RX/TX queue information

 app/test-pmd/cmdline.c   | 48 +
 app/test-pmd/config.c| 67 ++
 app/test-pmd/testpmd.h   |  2 ++
 drivers/net/e1000/e1000_ethdev.h | 12 +++
 drivers/net/e1000/em_ethdev.c|  2 ++
 drivers/net/e1000/em_rxtx.c  | 38 
 drivers/net/e1000/igb_ethdev.c   |  4 +++
 drivers/net/e1000/igb_rxtx.c | 36 +++
 drivers/net/i40e/i40e_ethdev.c   |  2 ++
 drivers/net/i40e/i40e_ethdev.h   |  5 +++
 drivers/net/i40e/i40e_rxtx.c | 42 ++
 drivers/net/ixgbe/ixgbe_ethdev.c |  4 +++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 
 drivers/net/ixgbe/ixgbe_rxtx.c   | 42 ++
 lib/librte_ether/rte_ethdev.c| 48 +
 lib/librte_ether/rte_ethdev.h| 77 +++-
 16 files changed, 434 insertions(+), 1 deletion(-)

-- 
1.8.5.3



[dpdk-dev] [PATCHv2 8/8] acl: add new test-cases into UT

2015-06-08 Thread Konstantin Ananyev
Add several new test cases for ACL to cover different build configurations.

Signed-off-by: Konstantin Ananyev 
---
 app/test/test_acl.c | 431 +++-
 1 file changed, 423 insertions(+), 8 deletions(-)

diff --git a/app/test/test_acl.c b/app/test/test_acl.c
index 6a032f9..b4a107d 100644
--- a/app/test/test_acl.c
+++ b/app/test/test_acl.c
@@ -47,6 +47,8 @@

 #define LEN RTE_ACL_MAX_CATEGORIES

+RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);
+
 struct rte_acl_param acl_param = {
.name = "acl_ctx",
.socket_id = SOCKET_ID_ANY,
@@ -62,6 +64,15 @@ struct rte_acl_ipv4vlan_rule acl_rule = {
.dst_port_high = UINT16_MAX,
 };

+const uint32_t ipv4_7tuple_layout[RTE_ACL_IPV4VLAN_NUM] = {
+   offsetof(struct ipv4_7tuple, proto),
+   offsetof(struct ipv4_7tuple, vlan),
+   offsetof(struct ipv4_7tuple, ip_src),
+   offsetof(struct ipv4_7tuple, ip_dst),
+   offsetof(struct ipv4_7tuple, port_src),
+};
+
+
 /* byteswap to cpu or network order */
 static void
 bswap_test_data(struct ipv4_7tuple *data, int len, int to_be)
@@ -195,13 +206,6 @@ test_classify_buid(struct rte_acl_ctx *acx,
const struct rte_acl_ipv4vlan_rule *rules, uint32_t num)
 {
int ret;
-   const uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = {
-   offsetof(struct ipv4_7tuple, proto),
-   offsetof(struct ipv4_7tuple, vlan),
-   offsetof(struct ipv4_7tuple, ip_src),
-   offsetof(struct ipv4_7tuple, ip_dst),
-   offsetof(struct ipv4_7tuple, port_src),
-   };

/* add rules to the context */
ret = rte_acl_ipv4vlan_add_rules(acx, rules, num);
@@ -212,7 +216,8 @@ test_classify_buid(struct rte_acl_ctx *acx,
}

/* try building the context */
-   ret = rte_acl_ipv4vlan_build(acx, layout, RTE_ACL_MAX_CATEGORIES);
+   ret = rte_acl_ipv4vlan_build(acx, ipv4_7tuple_layout,
+   RTE_ACL_MAX_CATEGORIES);
if (ret != 0) {
printf("Line %i: Building ACL context failed!\n", __LINE__);
return ret;
@@ -412,6 +417,414 @@ test_build_ports_range(void)
return ret;
 }

+static void
+convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
+   struct acl_ipv4vlan_rule *ro)
+{
+   ro->data = ri->data;
+
+   ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
+   ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
+   ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;
+
+   ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
+   ri->domain_mask;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
+   ri->src_mask_len;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
+   ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
+   ri->src_port_high;
+   ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
+   ri->dst_port_high;
+}
+
+/*
+ * Convert IPV4 source and destination from RTE_ACL_FIELD_TYPE_MASK to
+ * RTE_ACL_FIELD_TYPE_BITMASK.
+ */
+static void
+convert_rule_1(const struct rte_acl_ipv4vlan_rule *ri,
+   struct acl_ipv4vlan_rule *ro)
+{
+   uint32_t v;
+
+   convert_rule(ri, ro);
+   v = ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
+   RTE_ACL_MASKLEN_TO_BITMASK(v, sizeof(v));
+   v = ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 =
+   RTE_ACL_MASKLEN_TO_BITMASK(v, sizeof(v));
+}
+
+/*
+ * Convert IPV4 source and destination from RTE_ACL_FIELD_TYPE_MASK to
+ * RTE_ACL_FIELD_TYPE_RANGE.
+ */
+static void
+convert_rule_2(const struct rte_acl_ipv4vlan_rule *ri,
+   struct acl_ipv4vlan_rule *ro)
+{
+   uint32_t hi, lo, mask;
+
+   convert_rule(ri, ro);
+
+   mask = ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32;
+   mask = RTE_ACL_MASKLEN_TO_BITMASK(mask, sizeof(mask));
+   lo = ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 & mask;
+   hi = lo + ~mask;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = lo;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 = hi;
+
+   mask = ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32;
+   mask = RTE_ACL_MASKLEN_TO_BITMASK(mask, sizeof(mask));
+   lo = ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 & mask;
+   hi = lo + ~mask;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = lo;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = hi;
+}

[dpdk-dev] [PATCHv2 7/8] acl: fix ambiguity between ACL rules in UT.

2015-06-08 Thread Konstantin Ananyev
Some test rules had equal priority for the same category.
That can cause an ambiguity in the built trie and test results:
when a packet matches two such rules, which userdata is returned
depends on the trie layout.
Specify a different priority value for each rule from the same category.

Signed-off-by: Konstantin Ananyev 
---
 app/test/test_acl.h | 52 ++--
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/app/test/test_acl.h b/app/test/test_acl.h
index 4af457d..4e8ff34 100644
--- a/app/test/test_acl.h
+++ b/app/test/test_acl.h
@@ -105,7 +105,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling to 192.168.0.0/16 */
{
.data = {.userdata = 1, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 230},
.dst_addr = IPv4(192,168,0,0),
.dst_mask_len = 16,
.src_port_low = 0,
@@ -116,7 +116,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling to 192.168.1.0/24 */
{
.data = {.userdata = 2, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 3},
+   .priority = 330},
.dst_addr = IPv4(192,168,1,0),
.dst_mask_len = 24,
.src_port_low = 0,
@@ -127,7 +127,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling to 192.168.1.50 */
{
.data = {.userdata = 3, .category_mask = 
ACL_DENY_MASK,
-   .priority = 2},
+   .priority = 230},
.dst_addr = IPv4(192,168,1,50),
.dst_mask_len = 32,
.src_port_low = 0,
@@ -140,7 +140,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling from 10.0.0.0/8 */
{
.data = {.userdata = 4, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 240},
.src_addr = IPv4(10,0,0,0),
.src_mask_len = 8,
.src_port_low = 0,
@@ -151,7 +151,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling from 10.1.1.0/24 */
{
.data = {.userdata = 5, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 3},
+   .priority = 340},
.src_addr = IPv4(10,1,1,0),
.src_mask_len = 24,
.src_port_low = 0,
@@ -162,7 +162,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling from 10.1.1.1 */
{
.data = {.userdata = 6, .category_mask = 
ACL_DENY_MASK,
-   .priority = 2},
+   .priority = 240},
.src_addr = IPv4(10,1,1,1),
.src_mask_len = 32,
.src_port_low = 0,
@@ -175,7 +175,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
		/* matches all packets with lower 7 bits of VLAN tag equal to 0x64 */
{
.data = {.userdata = 7, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 260},
.vlan = 0x64,
.vlan_mask = 0x7f,
.src_port_low = 0,
@@ -186,7 +186,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets with VLAN tags that have 0x5 in them */
{
.data = {.userdata = 8, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 260},
.vlan = 0x5,
.vlan_mask = 0x5,
.src_port_low = 0,
@@ -197,7 +197,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
		/* matches all packets with VLAN tag 5 */

[dpdk-dev] [PATCHv2 5/8] acl: code dedup - introduce a new macro

2015-06-08 Thread Konstantin Ananyev
Introduce a new RTE_ACL_MASKLEN_TO_BITMASK macro, which will be used
in several places inside librte_acl and its UT.
Simplify and clean up the build_trie() code a bit.
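
A standalone worked example of the new macro (its body is copied from
this patch; the harness around it is illustrative):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define	RTE_ACL_MASKLEN_TO_BITMASK(v, s)	\
((v) == 0 ? (v) : (typeof(v))((uint64_t)-1 << ((s) * CHAR_BIT - (v))))

int
main(void)
{
	uint32_t masklen = 24;	/* a /24 prefix on a 4-byte IPv4 field */

	/* (uint64_t)-1 << (4 * CHAR_BIT - 24), truncated to uint32_t,
	 * i.e. 0xffffff00; masklen == 0 short-circuits to 0 and so
	 * avoids an undefined full-width shift. */
	printf("0x%08x\n",
		RTE_ACL_MASKLEN_TO_BITMASK(masklen, sizeof(masklen)));
	return 0;
}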

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 16 +++-
 lib/librte_acl/rte_acl.h |  3 +++
 2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index d89c66a..4d8a62f 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1262,19 +1262,9 @@ build_trie(struct acl_build_context *context, struct 
rte_acl_build_rule *head,
 * all higher bits.
 */
uint64_t mask;
-
-   if (fld->mask_range.u32 == 0) {
-   mask = 0;
-
-   /*
-* arithmetic right shift for the length of
-* the mask less the msb.
-*/
-   } else {
-   mask = -1 <<
-   (rule->config->defs[n].size *
-   CHAR_BIT - fld->mask_range.u32);
-   }
+   mask = RTE_ACL_MASKLEN_TO_BITMASK(
+   fld->mask_range.u32,
+   rule->config->defs[n].size);

/* gen a mini-trie for this field */
merge = acl_gen_mask_trie(context,
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index 8d9bbe5..bd8f892 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -122,6 +122,9 @@ enum {

 #define	RTE_ACL_INVALID_USERDATA	0

+#define	RTE_ACL_MASKLEN_TO_BITMASK(v, s)	\
+((v) == 0 ? (v) : (typeof(v))((uint64_t)-1 << ((s) * CHAR_BIT - (v))))
+
 /**
  * Miscellaneous data for ACL rule.
  */
-- 
2.4.2



[dpdk-dev] [PATCHv2 4/8] acl: fix avoid unneeded trie splitting for subset of rules.

2015-06-08 Thread Konstantin Ananyev
When rebuilding a trie for a limited rule-set,
don't try to split the rule-set even further: the rebuild pass must
consume every remaining rule, so it runs with an effectively unlimited
node budget (INT32_MAX).

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 45ee065..d89c66a 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -97,6 +97,7 @@ struct acl_build_context {
struct rte_acl_build_rule *build_rules;
struct rte_acl_config cfg;
int32_t   node_max;
+   int32_t   cur_node_max;
uint32_t  node;
uint32_t  num_nodes;
uint32_t  category_mask;
@@ -1337,7 +1338,7 @@ build_trie(struct acl_build_context *context, struct 
rte_acl_build_rule *head,
return NULL;

node_count = context->num_nodes - node_count;
-   if (node_count > context->node_max) {
+   if (node_count > context->cur_node_max) {
*last = prev;
return trie;
}
@@ -1536,7 +1537,7 @@ acl_build_index(const struct rte_acl_config *config, 
uint32_t *data_index)
 static struct rte_acl_build_rule *
 build_one_trie(struct acl_build_context *context,
struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES],
-   uint32_t n)
+   uint32_t n, int32_t node_max)
 {
struct rte_acl_build_rule *last;
struct rte_acl_config *config;
@@ -1553,6 +1554,8 @@ build_one_trie(struct acl_build_context *context,
context->data_indexes[n]);
context->tries[n].data_index = context->data_indexes[n];

+   context->cur_node_max = node_max;
+
context->bld_tries[n].trie = build_trie(context, rule_sets[n],
			&last, &context->tries[n].count);

@@ -1587,7 +1590,7 @@ acl_build_tries(struct acl_build_context *context,

num_tries = n + 1;

-   last = build_one_trie(context, rule_sets, n);
+   last = build_one_trie(context, rule_sets, n, context->node_max);
if (context->bld_tries[n].trie == NULL) {
RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
return -ENOMEM;
@@ -1618,8 +1621,11 @@ acl_build_tries(struct acl_build_context *context,
head = head->next)
head->config = config;

-   /* Rebuild the trie for the reduced rule-set. */
-   last = build_one_trie(context, rule_sets, n);
+   /*
+* Rebuild the trie for the reduced rule-set.
+* Don't try to split it any further.
+*/
+   last = build_one_trie(context, rule_sets, n, INT32_MAX);
if (context->bld_tries[n].trie == NULL || last != NULL) {
RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
return -ENOMEM;
-- 
2.4.2



[dpdk-dev] [PATCHv2 3/8] acl: add function to check build input parameters

2015-06-08 Thread Konstantin Ananyev
Move the check for build config parameters into a separate function.
Simplify the acl_calc_wildness() function.
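
A quick numeric check of the simplified RANGE arithmetic (the harness
is illustrative; the formula mirrors the new code in this patch):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* msk_val as the new code computes it for a 2-byte port field */
	uint64_t msk_val = UINT16_MAX;
	/* whole range [0, 65535]: fully wild */
	double full = ((UINT16_MAX & msk_val) - (0 & msk_val)) /
		(double)msk_val;
	/* single port [53, 53]: fully specific */
	double single = ((53 & msk_val) - (53 & msk_val)) /
		(double)msk_val;

	printf("wild([0,65535]) = %.2f, wild([53,53]) = %.2f\n",
		full, single);	/* prints 1.00 and 0.00 */
	return 0;
}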

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 107 ---
 1 file changed, 54 insertions(+), 53 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index ff3ba8b..45ee065 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1350,7 +1350,7 @@ build_trie(struct acl_build_context *context, struct 
rte_acl_build_rule *head,
return trie;
 }

-static int
+static void
 acl_calc_wildness(struct rte_acl_build_rule *head,
const struct rte_acl_config *config)
 {
@@ -1362,10 +1362,10 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
for (n = 0; n < config->num_fields; n++) {

double wild = 0;
-   uint64_t msk_val =
-   RTE_LEN2MASK(CHAR_BIT * config->defs[n].size,
+   uint32_t bit_len = CHAR_BIT * config->defs[n].size;
+   uint64_t msk_val = RTE_LEN2MASK(bit_len,
typeof(msk_val));
-   double size = CHAR_BIT * config->defs[n].size;
+   double size = bit_len;
int field_index = config->defs[n].field_index;
const struct rte_acl_field *fld = rule->f->field +
field_index;
@@ -1382,54 +1382,15 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
break;

case RTE_ACL_FIELD_TYPE_RANGE:
-   switch (rule->config->defs[n].size) {
-   case sizeof(uint8_t):
-   wild = ((double)fld->mask_range.u8 -
-   fld->value.u8) / UINT8_MAX;
-   break;
-   case sizeof(uint16_t):
-   wild = ((double)fld->mask_range.u16 -
-   fld->value.u16) / UINT16_MAX;
-   break;
-   case sizeof(uint32_t):
-   wild = ((double)fld->mask_range.u32 -
-   fld->value.u32) / UINT32_MAX;
-   break;
-   case sizeof(uint64_t):
-   wild = ((double)fld->mask_range.u64 -
-   fld->value.u64) / UINT64_MAX;
-   break;
-   default:
-   RTE_LOG(ERR, ACL,
-   "%s(rule: %u) invalid %u-th "
-   "field, type: %hhu, "
-   "unknown size: %hhu\n",
-   __func__,
-   rule->f->data.userdata,
-   n,
-   rule->config->defs[n].type,
-   rule->config->defs[n].size);
-   return -EINVAL;
-   }
+   wild = (fld->mask_range.u64 & msk_val) -
+   (fld->value.u64 & msk_val);
+   wild = wild / msk_val;
break;
-
-   default:
-   RTE_LOG(ERR, ACL,
-   "%s(rule: %u) invalid %u-th "
-   "field, unknown type: %hhu\n",
-   __func__,
-   rule->f->data.userdata,
-   n,
-   rule->config->defs[n].type);
-   return -EINVAL;
-
}

rule->wildness[field_index] = (uint32_t)(wild * 100);
}
}
-
-   return 0;
 }

 static void
@@ -1602,7 +1563,6 @@ static int
 acl_build_tries(struct acl_build_context *context,
struct rte_acl_build_rule *head)
 {
-   int32_t rc;
uint32_t n, num_tries;
struct rte_acl_config *config;
struct rte_acl_build_rule *last;
@@ -1621,9 +1581,7 @@ acl_build_tries(struct acl_build_context *context,
context->tries[0].type = RTE_ACL_FULL_TRIE;

	/* calc wildness of each field of each rule */

[dpdk-dev] [PATCHv2 2/8] acl: code cleanup - use global EAL macro, instead of creating a local copy

2015-06-08 Thread Konstantin Ananyev
Use the global RTE_LEN2MASK macro instead of the local LEN2MASK copy.
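
For reference, a self-contained sketch of what the EAL macro computes;
the macro body below is an assumed copy of the rte_common.h definition,
reproduced only so the example builds on its own:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* assumed equivalent of RTE_LEN2MASK() from rte_common.h */
#define RTE_LEN2MASK(ln, tp)	\
	((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))

int
main(void)
{
	/* mask covering the 4 low bits, produced with the requested type */
	printf("0x%08x\n", RTE_LEN2MASK(4, uint32_t));	/* 0x0000000f */
	return 0;
}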

Signed-off-by: Konstantin Ananyev 
---
 app/test-acl/main.c| 3 ++-
 lib/librte_acl/acl_bld.c   | 3 ++-
 lib/librte_acl/rte_acl.c   | 3 ++-
 lib/librte_acl/rte_acl.h   | 2 +-
 lib/librte_acl/rte_acl_osdep.h | 2 --
 5 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/app/test-acl/main.c b/app/test-acl/main.c
index 524c43a..be3d773 100644
--- a/app/test-acl/main.c
+++ b/app/test-acl/main.c
@@ -739,7 +739,8 @@ add_cb_rules(FILE *f, struct rte_acl_ctx *ctx)
return rc;
}

-   v.data.category_mask = LEN2MASK(RTE_ACL_MAX_CATEGORIES);
+   v.data.category_mask = RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES,
+   typeof(v.data.category_mask));
v.data.priority = RTE_ACL_MAX_PRIORITY - n;
v.data.userdata = n;

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index aee6ed5..ff3ba8b 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1772,7 +1772,8 @@ acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx 
*ctx,
bcx->pool.alignment = ACL_POOL_ALIGN;
bcx->pool.min_alloc = ACL_POOL_ALLOC_MIN;
bcx->cfg = *cfg;
-   bcx->category_mask = LEN2MASK(bcx->cfg.num_categories);
+   bcx->category_mask = RTE_LEN2MASK(bcx->cfg.num_categories,
+   typeof(bcx->category_mask));
bcx->node_max = node_max;

rc = sigsetjmp(bcx->pool.fail, 0);
diff --git a/lib/librte_acl/rte_acl.c b/lib/librte_acl/rte_acl.c
index b6ddeeb..a54d531 100644
--- a/lib/librte_acl/rte_acl.c
+++ b/lib/librte_acl/rte_acl.c
@@ -271,7 +271,8 @@ acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, 
uint32_t num)
 static int
 acl_check_rule(const struct rte_acl_rule_data *rd)
 {
-   if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
+   if ((RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES, typeof(rd->category_mask)) &
+   rd->category_mask) == 0 ||
rd->priority > RTE_ACL_MAX_PRIORITY ||
rd->priority < RTE_ACL_MIN_PRIORITY ||
rd->userdata == RTE_ACL_INVALID_USERDATA)
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index 3a93730..8d9bbe5 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -115,7 +115,7 @@ struct rte_acl_field {

 enum {
RTE_ACL_TYPE_SHIFT = 29,
-   RTE_ACL_MAX_INDEX = LEN2MASK(RTE_ACL_TYPE_SHIFT),
+   RTE_ACL_MAX_INDEX = RTE_LEN2MASK(RTE_ACL_TYPE_SHIFT, uint32_t),
RTE_ACL_MAX_PRIORITY = RTE_ACL_MAX_INDEX,
RTE_ACL_MIN_PRIORITY = 0,
 };
diff --git a/lib/librte_acl/rte_acl_osdep.h b/lib/librte_acl/rte_acl_osdep.h
index 81fdefb..41f7e3d 100644
--- a/lib/librte_acl/rte_acl_osdep.h
+++ b/lib/librte_acl/rte_acl_osdep.h
@@ -56,8 +56,6 @@
  * Common defines.
  */

-#define	LEN2MASK(ln)	((uint32_t)(((uint64_t)1 << (ln)) - 1))
-
 #define DIM(x) RTE_DIM(x)

 #include 
-- 
2.4.2



[dpdk-dev] [PATCHv2 1/8] acl: fix invalid rule wildness calculation for bitmask field type

2015-06-08 Thread Konstantin Ananyev
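
A hedged illustration of the bug being fixed (variable names and values
below are mine, not from the patch): reading a wide bitmask through the
8-bit member of the value union undercounts the mask bits, so a /24
IPv4 bitmask looks fully wild:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* a /24 IPv4 bitmask as seen through the 64-bit view */
	uint64_t mask_range_u64 = 0xffffff00;
	/* what the old code effectively read via mask_range.u8 */
	uint8_t mask_range_u8 = (uint8_t)mask_range_u64;
	double size = 32;	/* field width in bits (4-byte field) */

	printf("old: wild = %.2f (popcount = %d)\n",
		(size - __builtin_popcount(mask_range_u8)) / size,
		__builtin_popcount(mask_range_u8));
	printf("new: wild = %.2f (popcountll = %d)\n",
		(size - __builtin_popcountll(mask_range_u64)) / size,
		__builtin_popcountll(mask_range_u64));
	return 0;	/* old: 1.00, new: 0.25 */
}
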
Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 3801843..aee6ed5 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1362,6 +1362,9 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
for (n = 0; n < config->num_fields; n++) {

double wild = 0;
+   uint64_t msk_val =
+   RTE_LEN2MASK(CHAR_BIT * config->defs[n].size,
+   typeof(msk_val));
double size = CHAR_BIT * config->defs[n].size;
int field_index = config->defs[n].field_index;
const struct rte_acl_field *fld = rule->f->field +
@@ -1369,8 +1372,8 @@ acl_calc_wildness(struct rte_acl_build_rule *head,

switch (rule->config->defs[n].type) {
case RTE_ACL_FIELD_TYPE_BITMASK:
-   wild = (size - __builtin_popcount(
-   fld->mask_range.u8)) /
+   wild = (size - __builtin_popcountll(
+   fld->mask_range.u64 & msk_val)) /
size;
break;

-- 
2.4.2



[dpdk-dev] [PATCHv2 0/8] acl: various fixes and cleanups

2015-06-08 Thread Konstantin Ananyev
Several fixes and code cleanups for the librte_acl.
New test-cases for acl UT.

Konstantin Ananyev (8):
  acl: fix invalid rule wildness calculation for bitmask field type
  acl: code cleanup - use global EAL macro, instead of creating a local
copy
  acl: add function to check build input parameters
  acl: fix avoid unneeded trie splitting for subset of rules.
  acl: code dedup - introduce a new macro
  acl: cleanup remove unused code from acl_bld.c
  acl: fix ambiguity between ACL rules in UT.
  acl: add new test-cases into UT

 app/test-acl/main.c            |   3 +-
 app/test/test_acl.c            | 431 +-
 app/test/test_acl.h            |  52 ++---
 lib/librte_acl/acl_bld.c   | 455 +++--
 lib/librte_acl/rte_acl.c   |   3 +-
 lib/librte_acl/rte_acl.h   |   5 +-
 lib/librte_acl/rte_acl_osdep.h |   2 -
 7 files changed, 530 insertions(+), 421 deletions(-)

-- 
2.4.2



[dpdk-dev] [PATCH 8/8] ACL: add new test-cases into UT

2015-06-04 Thread Konstantin Ananyev
Add several new test cases for ACL to cover different build configurations.

Signed-off-by: Konstantin Ananyev 
---
 app/test/test_acl.c | 431 +++-
 1 file changed, 423 insertions(+), 8 deletions(-)

diff --git a/app/test/test_acl.c b/app/test/test_acl.c
index 6a032f9..3090246 100644
--- a/app/test/test_acl.c
+++ b/app/test/test_acl.c
@@ -47,6 +47,8 @@

 #define LEN RTE_ACL_MAX_CATEGORIES

+RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);
+
 struct rte_acl_param acl_param = {
.name = "acl_ctx",
.socket_id = SOCKET_ID_ANY,
@@ -62,6 +64,15 @@ struct rte_acl_ipv4vlan_rule acl_rule = {
.dst_port_high = UINT16_MAX,
 };

+const uint32_t ipv4_7tuple_layout[RTE_ACL_IPV4VLAN_NUM] = {
+   offsetof(struct ipv4_7tuple, proto),
+   offsetof(struct ipv4_7tuple, vlan),
+   offsetof(struct ipv4_7tuple, ip_src),
+   offsetof(struct ipv4_7tuple, ip_dst),
+   offsetof(struct ipv4_7tuple, port_src),
+};
+
+
 /* byteswap to cpu or network order */
 static void
 bswap_test_data(struct ipv4_7tuple *data, int len, int to_be)
@@ -195,13 +206,6 @@ test_classify_buid(struct rte_acl_ctx *acx,
const struct rte_acl_ipv4vlan_rule *rules, uint32_t num)
 {
int ret;
-   const uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = {
-   offsetof(struct ipv4_7tuple, proto),
-   offsetof(struct ipv4_7tuple, vlan),
-   offsetof(struct ipv4_7tuple, ip_src),
-   offsetof(struct ipv4_7tuple, ip_dst),
-   offsetof(struct ipv4_7tuple, port_src),
-   };

/* add rules to the context */
ret = rte_acl_ipv4vlan_add_rules(acx, rules, num);
@@ -212,7 +216,8 @@ test_classify_buid(struct rte_acl_ctx *acx,
}

/* try building the context */
-   ret = rte_acl_ipv4vlan_build(acx, layout, RTE_ACL_MAX_CATEGORIES);
+   ret = rte_acl_ipv4vlan_build(acx, ipv4_7tuple_layout,
+   RTE_ACL_MAX_CATEGORIES);
if (ret != 0) {
printf("Line %i: Building ACL context failed!\n", __LINE__);
return ret;
@@ -412,6 +417,414 @@ test_build_ports_range(void)
return ret;
 }

+static void
+convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
+   struct acl_ipv4vlan_rule *ro)
+{
+   ro->data = ri->data;
+
+   ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
+   ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
+   ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;
+
+   ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
+   ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
+   ri->domain_mask;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
+   ri->src_mask_len;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
+   ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
+   ri->src_port_high;
+   ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
+   ri->dst_port_high;
+}
+
+/*
+ * Convert IPV4 source and destination from RTE_ACL_FIELD_TYPE_MASK to
+ * RTE_ACL_FIELD_TYPE_BITMASK.
+ */
+static void
+convert_rule_1(const struct rte_acl_ipv4vlan_rule *ri,
+   struct acl_ipv4vlan_rule *ro)
+{
+   uint32_t v;
+
+   convert_rule(ri, ro);
+   v = ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
+   RTE_ACL_MASKLEN_TO_BITMASK(v, sizeof(v));
+   v = ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 =
+   RTE_ACL_MASKLEN_TO_BITMASK(v, sizeof(v));
+}
+
+/*
+ * Convert IPV4 source and destination from RTE_ACL_FIELD_TYPE_MASK to
+ * RTE_ACL_FIELD_TYPE_RANGE.
+ */
+static void
+convert_rule_2(const struct rte_acl_ipv4vlan_rule *ri,
+   struct acl_ipv4vlan_rule *ro)
+{
+   uint32_t hi, lo, mask;
+
+   convert_rule(ri, ro);
+
+   mask = ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32;
+   mask = RTE_ACL_MASKLEN_TO_BITMASK(mask, sizeof(mask));
+   lo = ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 & mask;
+   hi = lo + ~mask;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = lo;
+   ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 = hi;
+
+   mask = ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32;
+   mask = RTE_ACL_MASKLEN_TO_BITMASK(mask, sizeof(mask));
+   lo = ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 & mask;
+   hi = lo + ~mask;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = lo;
+   ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = hi;
+}

[dpdk-dev] [PATCH 7/8] ACL: fix remove ambiguity between rules at UT

2015-06-04 Thread Konstantin Ananyev
Some test rules had equal priority for the same category.
That can cause an ambiguity in the built trie and test results.
Specify a different priority value for each rule from the same category.

Signed-off-by: Konstantin Ananyev 
---
 app/test/test_acl.h | 52 ++--
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/app/test/test_acl.h b/app/test/test_acl.h
index 4af457d..4e8ff34 100644
--- a/app/test/test_acl.h
+++ b/app/test/test_acl.h
@@ -105,7 +105,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling to 192.168.0.0/16 */
{
.data = {.userdata = 1, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 230},
.dst_addr = IPv4(192,168,0,0),
.dst_mask_len = 16,
.src_port_low = 0,
@@ -116,7 +116,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling to 192.168.1.0/24 */
{
.data = {.userdata = 2, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 3},
+   .priority = 330},
.dst_addr = IPv4(192,168,1,0),
.dst_mask_len = 24,
.src_port_low = 0,
@@ -127,7 +127,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling to 192.168.1.50 */
{
.data = {.userdata = 3, .category_mask = 
ACL_DENY_MASK,
-   .priority = 2},
+   .priority = 230},
.dst_addr = IPv4(192,168,1,50),
.dst_mask_len = 32,
.src_port_low = 0,
@@ -140,7 +140,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling from 10.0.0.0/8 */
{
.data = {.userdata = 4, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 240},
.src_addr = IPv4(10,0,0,0),
.src_mask_len = 8,
.src_port_low = 0,
@@ -151,7 +151,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling from 10.1.1.0/24 */
{
.data = {.userdata = 5, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 3},
+   .priority = 340},
.src_addr = IPv4(10,1,1,0),
.src_mask_len = 24,
.src_port_low = 0,
@@ -162,7 +162,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets traveling from 10.1.1.1 */
{
.data = {.userdata = 6, .category_mask = 
ACL_DENY_MASK,
-   .priority = 2},
+   .priority = 240},
.src_addr = IPv4(10,1,1,1),
.src_mask_len = 32,
.src_port_low = 0,
@@ -175,7 +175,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
		/* matches all packets with lower 7 bits of VLAN tag equal to 0x64 */
{
.data = {.userdata = 7, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 260},
.vlan = 0x64,
.vlan_mask = 0x7f,
.src_port_low = 0,
@@ -186,7 +186,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
/* matches all packets with VLAN tags that have 0x5 in them */
{
.data = {.userdata = 8, .category_mask = 
ACL_ALLOW_MASK,
-   .priority = 2},
+   .priority = 260},
.vlan = 0x5,
.vlan_mask = 0x5,
.src_port_low = 0,
@@ -197,7 +197,7 @@ struct rte_acl_ipv4vlan_rule acl_test_rules[] = {
		/* matches all packets with VLAN tag 5 */

[dpdk-dev] [PATCH 6/8] ACL: cleanup remove unused code from acl_bld.c

2015-06-04 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 310 ---
 1 file changed, 310 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index e144503..83669ac 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -120,10 +120,6 @@ static int acl_merge_trie(struct acl_build_context 
*context,
struct rte_acl_node *node_a, struct rte_acl_node *node_b,
uint32_t level, struct rte_acl_node **node_c);

-static int acl_merge(struct acl_build_context *context,
-   struct rte_acl_node *node_a, struct rte_acl_node *node_b,
-   int move, int a_subset, int level);
-
 static void
 acl_deref_ptr(struct acl_build_context *context,
struct rte_acl_node *node, int index);
@@ -415,58 +411,6 @@ acl_intersect_type(const struct rte_acl_bitset *a_bits,
 }

 /*
- * Check if all bits in the bitset are on
- */
-static int
-acl_full(struct rte_acl_node *node)
-{
-   uint32_t n;
-   bits_t all_bits = -1;
-
-   for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
-   all_bits &= node->values.bits[n];
-   return all_bits == -1;
-}
-
-/*
- * Check if all bits in the bitset are off
- */
-static int
-acl_empty(struct rte_acl_node *node)
-{
-   uint32_t n;
-
-   if (node->ref_count == 0) {
-   for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
-   if (0 != node->values.bits[n])
-   return 0;
-   }
-   return 1;
-   } else {
-   return 0;
-   }
-}
-
-/*
- * Compute intersection of A and B
- * return 1 if there is an intersection else 0.
- */
-static int
-acl_intersect(struct rte_acl_bitset *a_bits,
-   struct rte_acl_bitset *b_bits,
-   struct rte_acl_bitset *intersect)
-{
-   uint32_t n;
-   bits_t all_bits = 0;
-
-   for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
-   intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
-   all_bits |= intersect->bits[n];
-   }
-   return all_bits != 0;
-}
-
-/*
  * Duplicate a node
  */
 static struct rte_acl_node *
@@ -534,63 +478,6 @@ acl_deref_ptr(struct acl_build_context *context,
 }

 /*
- * Exclude bitset from a node pointer
- * returns  0 if pointer was deref'd
- *  1 otherwise.
- */
-static int
-acl_exclude_ptr(struct acl_build_context *context,
-   struct rte_acl_node *node,
-   int index,
-   struct rte_acl_bitset *b_bits)
-{
-   int retval = 1;
-
-   /*
-* remove bitset from node pointer and deref
-* if the bitset becomes empty.
-*/
-   if (!acl_exclude(&node->ptrs[index].values,
-   &node->ptrs[index].values,
-   b_bits)) {
-   acl_deref_ptr(context, node, index);
-   node->ptrs[index].ptr = NULL;
-   retval = 0;
-   }
-
-   /* exclude bits from the composite bits for the node */
-   acl_exclude(&node->values, &node->values, b_bits);
-   return retval;
-}
-
-/*
- * Remove a bitset from src ptr and move remaining ptr to dst
- */
-static int
-acl_move_ptr(struct acl_build_context *context,
-   struct rte_acl_node *dst,
-   struct rte_acl_node *src,
-   int index,
-   struct rte_acl_bitset *b_bits)
-{
-   int rc;
-
-   if (b_bits != NULL)
-   if (!acl_exclude_ptr(context, src, index, b_bits))
-   return 0;
-
-   /* add src pointer to dst node */
-   rc = acl_add_ptr(context, dst, src->ptrs[index].ptr,
-   &src->ptrs[index].values);
-   if (rc < 0)
-   return rc;
-
-   /* remove ptr from src */
-   acl_exclude_ptr(context, src, index, &src->ptrs[index].values);
-   return 1;
-}
-
-/*
  * acl_exclude rte_acl_bitset from src and copy remaining pointer to dst
  */
 static int
@@ -650,203 +537,6 @@ acl_compact_node_ptrs(struct rte_acl_node *node_a)
}
 }

-/*
- * acl_merge helper routine.
- */
-static int
-acl_merge_intersect(struct acl_build_context *context,
-   struct rte_acl_node *node_a, uint32_t idx_a,
-   struct rte_acl_node *node_b, uint32_t idx_b,
-   int next_move, int level,
-   struct rte_acl_bitset *intersect_ptr)
-{
-   struct rte_acl_node *node_c;
-
-   /* Duplicate A for intersection */
-   node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
-
-   /* Remove intersection from A */
-   acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);
-
-   /*
-* Added link from A to C for all transitions
-* in the intersection
-*/
-   if (acl_add_ptr(context, node_a, node_c, intersect_ptr) < 0)
-   return -1;
-
-   /* merge B->node into C */
-   return acl_merge(context, node_c, node_b->ptrs[idx_b].ptr, next_move,
-   0, level + 1);
-}
-
-
-/*
- * Merge the children of nodes A and B together.

[dpdk-dev] [PATCH 5/8] ACL: introduce RTE_ACL_MASKLEN_TO_BITMASK macro

2015-06-04 Thread Konstantin Ananyev
Introduce a new RTE_ACL_MASKLEN_TO_BITMASK macro, which will be used
in several places inside librte_acl and its UT.
Simplify and clean up the build_trie() code a bit.

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 16 +++-
 lib/librte_acl/rte_acl.h |  3 +++
 2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 4bcf637..e144503 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1262,19 +1262,9 @@ build_trie(struct acl_build_context *context, struct 
rte_acl_build_rule *head,
 * all higher bits.
 */
uint64_t mask;
-
-   if (fld->mask_range.u32 == 0) {
-   mask = 0;
-
-   /*
-* arithmetic right shift for the length of
-* the mask less the msb.
-*/
-   } else {
-   mask = -1 <<
-   (rule->config->defs[n].size *
-   CHAR_BIT - fld->mask_range.u32);
-   }
+   mask = RTE_ACL_MASKLEN_TO_BITMASK(
+   fld->mask_range.u32,
+   rule->config->defs[n].size);

/* gen a mini-trie for this field */
merge = acl_gen_mask_trie(context,
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index 8d9bbe5..bd8f892 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -122,6 +122,9 @@ enum {

 #define	RTE_ACL_INVALID_USERDATA	0

+#define	RTE_ACL_MASKLEN_TO_BITMASK(v, s)	\
+((v) == 0 ? (v) : (typeof(v))((uint64_t)-1 << ((s) * CHAR_BIT - (v))))
+
 /**
  * Miscellaneous data for ACL rule.
  */
-- 
1.8.5.3



[dpdk-dev] [PATCH 4/8] ACL: fix rebuilding a trie for subset of rules

2015-06-04 Thread Konstantin Ananyev
When rebuilding a trie for a limited rule-set,
don't try to split the rule-set even further.

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 8315d84..4bcf637 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -97,6 +97,7 @@ struct acl_build_context {
struct rte_acl_build_rule *build_rules;
struct rte_acl_config cfg;
int32_t   node_max;
+   int32_t   cur_node_max;
uint32_t  node;
uint32_t  num_nodes;
uint32_t  category_mask;
@@ -1337,7 +1338,7 @@ build_trie(struct acl_build_context *context, struct 
rte_acl_build_rule *head,
return NULL;

node_count = context->num_nodes - node_count;
-   if (node_count > context->node_max) {
+   if (node_count > context->cur_node_max) {
*last = prev;
return trie;
}
@@ -1536,7 +1537,7 @@ acl_build_index(const struct rte_acl_config *config, 
uint32_t *data_index)
 static struct rte_acl_build_rule *
 build_one_trie(struct acl_build_context *context,
struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES],
-   uint32_t n)
+   uint32_t n, int32_t node_max)
 {
struct rte_acl_build_rule *last;
struct rte_acl_config *config;
@@ -1553,6 +1554,8 @@ build_one_trie(struct acl_build_context *context,
context->data_indexes[n]);
context->tries[n].data_index = context->data_indexes[n];

+   context->cur_node_max = node_max;
+
context->bld_tries[n].trie = build_trie(context, rule_sets[n],
			&last, &context->tries[n].count);

@@ -1587,7 +1590,7 @@ acl_build_tries(struct acl_build_context *context,

num_tries = n + 1;

-   last = build_one_trie(context, rule_sets, n);
+   last = build_one_trie(context, rule_sets, n, context->node_max);
if (context->bld_tries[n].trie == NULL) {
RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
return -ENOMEM;
@@ -1618,8 +1621,11 @@ acl_build_tries(struct acl_build_context *context,
head = head->next)
head->config = config;

-   /* Rebuild the trie for the reduced rule-set. */
-   last = build_one_trie(context, rule_sets, n);
+   /*
+* Rebuild the trie for the reduced rule-set.
+* Don't try to split it any further.
+*/
+   last = build_one_trie(context, rule_sets, n, INT32_MAX);
if (context->bld_tries[n].trie == NULL || last != NULL) {
RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
return -ENOMEM;
-- 
1.8.5.3



[dpdk-dev] [PATCH 3/8] ACL: add function to check rte_acl_build() input parameters

2015-06-04 Thread Konstantin Ananyev
Move the check for build parameters into a separate function.
Simplify the acl_calc_wildness() function.

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 107 ---
 1 file changed, 54 insertions(+), 53 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 19a4178..8315d84 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1350,7 +1350,7 @@ build_trie(struct acl_build_context *context, struct 
rte_acl_build_rule *head,
return trie;
 }

-static int
+static void
 acl_calc_wildness(struct rte_acl_build_rule *head,
const struct rte_acl_config *config)
 {
@@ -1362,10 +1362,10 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
for (n = 0; n < config->num_fields; n++) {

double wild = 0;
-   uint64_t msk_val =
-   RTE_LEN2MASK(CHAR_BIT * config->defs[n].size,
+   uint32_t bit_len = CHAR_BIT * config->defs[n].size;
+   uint64_t msk_val = RTE_LEN2MASK(bit_len,
typeof(msk_val));
-   double size = CHAR_BIT * config->defs[n].size;
+   double size = bit_len;
int field_index = config->defs[n].field_index;
const struct rte_acl_field *fld = rule->f->field +
field_index;
@@ -1382,54 +1382,15 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
break;

case RTE_ACL_FIELD_TYPE_RANGE:
-   switch (rule->config->defs[n].size) {
-   case sizeof(uint8_t):
-   wild = ((double)fld->mask_range.u8 -
-   fld->value.u8) / UINT8_MAX;
-   break;
-   case sizeof(uint16_t):
-   wild = ((double)fld->mask_range.u16 -
-   fld->value.u16) / UINT16_MAX;
-   break;
-   case sizeof(uint32_t):
-   wild = ((double)fld->mask_range.u32 -
-   fld->value.u32) / UINT32_MAX;
-   break;
-   case sizeof(uint64_t):
-   wild = ((double)fld->mask_range.u64 -
-   fld->value.u64) / UINT64_MAX;
-   break;
-   default:
-   RTE_LOG(ERR, ACL,
-   "%s(rule: %u) invalid %u-th "
-   "field, type: %hhu, "
-   "unknown size: %hhu\n",
-   __func__,
-   rule->f->data.userdata,
-   n,
-   rule->config->defs[n].type,
-   rule->config->defs[n].size);
-   return -EINVAL;
-   }
+   wild = (fld->mask_range.u64 & msk_val) -
+   (fld->value.u64 & msk_val);
+   wild = wild / msk_val;
break;
-
-   default:
-   RTE_LOG(ERR, ACL,
-   "%s(rule: %u) invalid %u-th "
-   "field, unknown type: %hhu\n",
-   __func__,
-   rule->f->data.userdata,
-   n,
-   rule->config->defs[n].type);
-   return -EINVAL;
-
}

rule->wildness[field_index] = (uint32_t)(wild * 100);
}
}
-
-   return 0;
 }

 static void
@@ -1602,7 +1563,6 @@ static int
 acl_build_tries(struct acl_build_context *context,
struct rte_acl_build_rule *head)
 {
-   int32_t rc;
uint32_t n, num_tries;
struct rte_acl_config *config;
struct rte_acl_build_rule *last;
@@ -1621,9 +1581,7 @@ acl_build_tries(struct acl_build_context *context,
context->tries[0].type = RTE_ACL_FULL_TRIE;

/* calc wildness of each field of each rule */

[dpdk-dev] [PATCH 2/8] ACL: code cleanup - use global RTE_LEN2MASK macro

2015-06-04 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test-acl/main.c| 3 ++-
 lib/librte_acl/acl_bld.c   | 3 ++-
 lib/librte_acl/rte_acl.c   | 3 ++-
 lib/librte_acl/rte_acl.h   | 2 +-
 lib/librte_acl/rte_acl_osdep.h | 2 --
 5 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/app/test-acl/main.c b/app/test-acl/main.c
index 524c43a..be3d773 100644
--- a/app/test-acl/main.c
+++ b/app/test-acl/main.c
@@ -739,7 +739,8 @@ add_cb_rules(FILE *f, struct rte_acl_ctx *ctx)
return rc;
}

-   v.data.category_mask = LEN2MASK(RTE_ACL_MAX_CATEGORIES);
+   v.data.category_mask = RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES,
+   typeof(v.data.category_mask));
v.data.priority = RTE_ACL_MAX_PRIORITY - n;
v.data.userdata = n;

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index e2db9bf..19a4178 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1772,7 +1772,8 @@ acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx 
*ctx,
bcx->pool.alignment = ACL_POOL_ALIGN;
bcx->pool.min_alloc = ACL_POOL_ALLOC_MIN;
bcx->cfg = *cfg;
-   bcx->category_mask = LEN2MASK(bcx->cfg.num_categories);
+   bcx->category_mask = RTE_LEN2MASK(bcx->cfg.num_categories,
+   typeof(bcx->category_mask));
bcx->node_max = node_max;

rc = sigsetjmp(bcx->pool.fail, 0);
diff --git a/lib/librte_acl/rte_acl.c b/lib/librte_acl/rte_acl.c
index b6ddeeb..a54d531 100644
--- a/lib/librte_acl/rte_acl.c
+++ b/lib/librte_acl/rte_acl.c
@@ -271,7 +271,8 @@ acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, 
uint32_t num)
 static int
 acl_check_rule(const struct rte_acl_rule_data *rd)
 {
-   if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
+   if ((RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES, typeof(rd->category_mask)) &
+   rd->category_mask) == 0 ||
rd->priority > RTE_ACL_MAX_PRIORITY ||
rd->priority < RTE_ACL_MIN_PRIORITY ||
rd->userdata == RTE_ACL_INVALID_USERDATA)
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index 3a93730..8d9bbe5 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -115,7 +115,7 @@ struct rte_acl_field {

 enum {
RTE_ACL_TYPE_SHIFT = 29,
-   RTE_ACL_MAX_INDEX = LEN2MASK(RTE_ACL_TYPE_SHIFT),
+   RTE_ACL_MAX_INDEX = RTE_LEN2MASK(RTE_ACL_TYPE_SHIFT, uint32_t),
RTE_ACL_MAX_PRIORITY = RTE_ACL_MAX_INDEX,
RTE_ACL_MIN_PRIORITY = 0,
 };
diff --git a/lib/librte_acl/rte_acl_osdep.h b/lib/librte_acl/rte_acl_osdep.h
index 81fdefb..41f7e3d 100644
--- a/lib/librte_acl/rte_acl_osdep.h
+++ b/lib/librte_acl/rte_acl_osdep.h
@@ -56,8 +56,6 @@
  * Common defines.
  */

-#define	LEN2MASK(ln)	((uint32_t)(((uint64_t)1 << (ln)) - 1))
-
 #define DIM(x) RTE_DIM(x)

 #include 
-- 
1.8.5.3



[dpdk-dev] [PATCH 1/8] ACL: fix invalid rule wildness calculation for RTE_ACL_FIELD_TYPE_BITMASK

2015-06-04 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl_bld.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index db23b7b..e2db9bf 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -1362,6 +1362,9 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
for (n = 0; n < config->num_fields; n++) {

double wild = 0;
+   uint64_t msk_val =
+   RTE_LEN2MASK(CHAR_BIT * config->defs[n].size,
+   typeof(msk_val));
double size = CHAR_BIT * config->defs[n].size;
int field_index = config->defs[n].field_index;
const struct rte_acl_field *fld = rule->f->field +
@@ -1369,8 +1372,8 @@ acl_calc_wildness(struct rte_acl_build_rule *head,

switch (rule->config->defs[n].type) {
case RTE_ACL_FIELD_TYPE_BITMASK:
-   wild = (size - __builtin_popcount(
-   fld->mask_range.u8)) /
+   wild = (size - __builtin_popcountll(
+   fld->mask_range.u64 & msk_val)) /
size;
break;

-- 
1.8.5.3



[dpdk-dev] [PATCH 0/8] ACL: various fixes and cleanups

2015-06-04 Thread Konstantin Ananyev
This patch-set is based on:
[PATCHv2 0/3] ACL: Fix bug in acl_merge_trie() and add a new test-case for it 
to the UT.

Konstantin Ananyev (8):
 ACL: fix invalid rule wildness calculation for RTE_ACL_FIELD_TYPE_BITMASK
 ACL: code cleanup - use global RTE_LEN2MASK macro
 ACL: add function to check rte_acl_build() input parameters
 ACL: fix rebuilding a trie for subset of rules
 ACL: introduce RTE_ACL_MASKLEN_TO_BITMASK macro
 ACL: cleanup remove unused code from acl_bld.c
 ACL: fix remove ambiguity between rules at UT
 ACL: add new test-cases into UT

 app/test-acl/main.c            |   3 +-
 app/test/test_acl.c            | 431 +-
 app/test/test_acl.h            |  52 ++---
 lib/librte_acl/acl_bld.c   | 455 +++--
 lib/librte_acl/rte_acl.c   |   3 +-
 lib/librte_acl/rte_acl.h   |   5 +-
 lib/librte_acl/rte_acl_osdep.h |   2 -
 7 files changed, 530 insertions(+), 421 deletions(-)

-- 
1.8.5.3



[dpdk-dev] [PATCHv2 3/3] ACL: remove subtree_id calculations at build stage

2015-06-03 Thread Konstantin Ananyev
v2:
- reorder code a bit to avoid gcc 5.1 warnings.

As subtree_id is not used by acl_merge_trie() any more,
there is no point in calculating and maintaining that information.

Signed-off-by: Konstantin Ananyev 
---
 lib/librte_acl/acl.h |   7 ---
 lib/librte_acl/acl_bld.c | 121 +--
 2 files changed, 13 insertions(+), 115 deletions(-)

diff --git a/lib/librte_acl/acl.h b/lib/librte_acl/acl.h
index 4dadab5..eb4930c 100644
--- a/lib/librte_acl/acl.h
+++ b/lib/librte_acl/acl.h
@@ -151,13 +151,6 @@ struct rte_acl_node {
/* free list link or pointer to duplicate node during merge */
struct rte_acl_node *prev;
/* points to node from which this node was duplicated */
-
-   uint32_t subtree_id;
-   uint32_t subtree_ref_count;
-
-};
-enum {
-   RTE_ACL_SUBTREE_NODE = 0x80000000
 };

 /*
diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 92a85df..3801843 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -117,7 +117,7 @@ struct acl_build_context {

 static int acl_merge_trie(struct acl_build_context *context,
struct rte_acl_node *node_a, struct rte_acl_node *node_b,
-   uint32_t level, uint32_t subtree_id, struct rte_acl_node **node_c);
+   uint32_t level, struct rte_acl_node **node_c);

 static int acl_merge(struct acl_build_context *context,
struct rte_acl_node *node_a, struct rte_acl_node *node_b,
@@ -386,8 +386,8 @@ acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, 
uint32_t mask)
  * Determine if A and/or B are supersets of the intersection.
  */
 static int
-acl_intersect_type(struct rte_acl_bitset *a_bits,
-   struct rte_acl_bitset *b_bits,
+acl_intersect_type(const struct rte_acl_bitset *a_bits,
+   const struct rte_acl_bitset *b_bits,
struct rte_acl_bitset *intersect)
 {
uint32_t n;
@@ -901,94 +901,6 @@ acl_resolve_leaf(struct acl_build_context *context,
 }

 /*
-* Within the existing trie structure, determine which nodes are
-* part of the subtree of the trie to be merged.
-*
-* For these purposes, a subtree is defined as the set of nodes that
-* are 1) not a superset of the intersection with the same level of
-* the merging tree, and 2) do not have any references from a node
-* outside of the subtree.
-*/
-static void
-mark_subtree(struct rte_acl_node *node,
-   struct rte_acl_bitset *level_bits,
-   uint32_t level,
-   uint32_t id)
-{
-   uint32_t n;
-
-   /* mark this node as part of the subtree */
-   node->subtree_id = id | RTE_ACL_SUBTREE_NODE;
-
-   for (n = 0; n < node->num_ptrs; n++) {
-
-   if (node->ptrs[n].ptr != NULL) {
-
-   struct rte_acl_bitset intersect_bits;
-   int intersect;
-
-   /*
-   * Item 1) :
-   * check if this child pointer is not a superset of the
-   * same level of the merging tree.
-   */
-   intersect = acl_intersect_type(&node->ptrs[n].values,
-   &level_bits[level],
-   &intersect_bits);
-
-   if ((intersect & ACL_INTERSECT_A) == 0) {
-
-   struct rte_acl_node *child = node->ptrs[n].ptr;
-
-   /*
-* reset subtree reference if this is
-* the first visit by this subtree.
-*/
-   if (child->subtree_id != id) {
-   child->subtree_id = id;
-   child->subtree_ref_count = 0;
-   }
-
-   /*
-   * Item 2) :
-   * increment the subtree reference count and if
-   * all references are from this subtree then
-   * recurse to that child
-   */
-   child->subtree_ref_count++;
-   if (child->subtree_ref_count ==
-   child->ref_count)
-   mark_subtree(child, level_bits,
-   level + 1, id);
-   }
-   }
-   }
-}
-
-/*
- * Build the set of bits that define the set of transitions
- * for each level of a trie.
- */
-static void
-build_subset_mask(struct rte_acl_node *node,
-   struct rte_acl_bitset *level_bits,
-   int level)
-{
-   uint32_t n;
-
-   /* Add this node's transitions to the set for this level */
-   for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
-   level_bits[level].bits[n] &= node->values.bits[n];

[dpdk-dev] [PATCHv2 2/3] ACL: add new test case for ranges build

2015-06-03 Thread Konstantin Ananyev
Signed-off-by: Konstantin Ananyev 
---
 app/test/test_acl.c | 147 ++--
 1 file changed, 143 insertions(+), 4 deletions(-)

diff --git a/app/test/test_acl.c b/app/test/test_acl.c
index 7119ad3..6a032f9 100644
--- a/app/test/test_acl.c
+++ b/app/test/test_acl.c
@@ -191,7 +191,8 @@ err:
 }

 static int
-test_classify_buid(struct rte_acl_ctx *acx)
+test_classify_buid(struct rte_acl_ctx *acx,
+   const struct rte_acl_ipv4vlan_rule *rules, uint32_t num)
 {
int ret;
const uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = {
@@ -203,8 +204,7 @@ test_classify_buid(struct rte_acl_ctx *acx)
};

/* add rules to the context */
-   ret = rte_acl_ipv4vlan_add_rules(acx, acl_test_rules,
-   RTE_DIM(acl_test_rules));
+   ret = rte_acl_ipv4vlan_add_rules(acx, rules, num);
if (ret != 0) {
printf("Line %i: Adding rules to ACL context failed!\n",
__LINE__);
@@ -246,7 +246,8 @@ test_classify(void)
else
rte_acl_reset_rules(acx);

-   ret = test_classify_buid(acx);
+   ret = test_classify_buid(acx, acl_test_rules,
+   RTE_DIM(acl_test_rules));
if (ret != 0) {
printf("Line %i, iter: %d: "
"Adding rules to ACL context failed!\n",
@@ -275,6 +276,142 @@ test_classify(void)
return ret;
 }

+static int
+test_build_ports_range(void)
+{
+   static const struct rte_acl_ipv4vlan_rule test_rules[] = {
+   {
+   /* match all packets. */
+   .data = {
+   .userdata = 1,
+   .category_mask = ACL_ALLOW_MASK,
+   .priority = 101,
+   },
+   .src_port_low = 0,
+   .src_port_high = UINT16_MAX,
+   .dst_port_low = 0,
+   .dst_port_high = UINT16_MAX,
+   },
+   {
+   /* match all packets with dst ports [54-65280]. */
+   .data = {
+   .userdata = 2,
+   .category_mask = ACL_ALLOW_MASK,
+   .priority = 102,
+   },
+   .src_port_low = 0,
+   .src_port_high = UINT16_MAX,
+   .dst_port_low = 54,
+   .dst_port_high = 65280,
+   },
+   {
+   /* match all packets with dst ports [0-52]. */
+   .data = {
+   .userdata = 3,
+   .category_mask = ACL_ALLOW_MASK,
+   .priority = 103,
+   },
+   .src_port_low = 0,
+   .src_port_high = UINT16_MAX,
+   .dst_port_low = 0,
+   .dst_port_high = 52,
+   },
+   {
+   /* match all packets with dst ports [53]. */
+   .data = {
+   .userdata = 4,
+   .category_mask = ACL_ALLOW_MASK,
+   .priority = 99,
+   },
+   .src_port_low = 0,
+   .src_port_high = UINT16_MAX,
+   .dst_port_low = 53,
+   .dst_port_high = 53,
+   },
+   {
+   /* match all packets with dst ports [65279-65535]. */
+   .data = {
+   .userdata = 5,
+   .category_mask = ACL_ALLOW_MASK,
+   .priority = 98,
+   },
+   .src_port_low = 0,
+   .src_port_high = UINT16_MAX,
+   .dst_port_low = 65279,
+   .dst_port_high = UINT16_MAX,
+   },
+   };
+
+   static struct ipv4_7tuple test_data[] = {
+   {
+   .proto = 6,
+   .ip_src = IPv4(10, 1, 1, 1),
+   .ip_dst = IPv4(192, 168, 0, 33),
+   .port_dst = 53,
+   .allow = 1,
+   },
+   {
+   .proto = 6,
+   .ip_src = IPv4(127, 84, 33, 1),
+   .ip_dst = IPv4(1, 2, 3, 4),
+   .port_dst = 65281,
+   .allow = 1,
+   },
+   };
+
+   struct rte_acl_ctx *acx;
+   int32_t ret, i, j;
+   uint32_t results[RTE_DIM(test_data)];
+   const uint8_t *data[RTE_DIM(test_data)];
+
+   acx = rte_acl_create(&acl_param);
