When testing ixgbe driver version 3.19.1 (from
http://sourceforge.net/projects/e1000/files/ixgbe%20stable/3.19.1/),
I observed that receive interrupts for incoming packets were delivered to
only a subset of the CPUs in the system instead of being spread across all
of them.
Here is what the device's PCIe MSI-X table shows:
[admin@oak-sh185 tmp]# ./iotools mmio_dump 0xdfedc000 0x128
0x00000000dfedc000: 0xfee15000 0x00000000 0x0000404b 0x00000000
0x00000000dfedc010: 0xfee13000 0x00000000 0x000040e5 0x00000000
0x00000000dfedc020: 0xfee12000 0x00000000 0x00004063 0x00000000
0x00000000dfedc030: 0xfee10000 0x00000000 0x0000409d 0x00000000
0x00000000dfedc040: 0xfee11000 0x00000000 0x0000406d 0x00000000
0x00000000dfedc050: 0xfee13000 0x00000000 0x00004046 0x00000000
0x00000000dfedc060: 0xfee11000 0x00000000 0x000040cd 0x00000000
0x00000000dfedc070: 0xfee14000 0x00000000 0x00004056 0x00000000
0x00000000dfedc080: 0xfee10000 0x00000000 0x000040dd 0x00000000
0x00000000dfedc090: 0xfee15000 0x00000000 0x000040ea 0x00000000
0x00000000dfedc0a0: 0xfee11000 0x00000000 0x000040bd 0x00000000
0x00000000dfedc0b0: 0xfee10000 0x00000000 0x0000404e 0x00000000
0x00000000dfedc0c0: 0xfee08000 0x00000000 0x00004072 0x00000000
0x00000000dfedc0d0: 0x58db4190 0xe2948e0e 0xb14d1a42 0x00000001
0x00000000dfedc0e0: 0x0feb86b4 0x309bfba0 0xae8548c9 0x00000001
0x00000000dfedc0f0: 0xd8f2cfb4 0x0d96e5bd 0x8ce02047 0x00000001
0x00000000dfedc100: 0x3f3c62fc 0x71e3f158 0x29261926 0x00000001
0x00000000dfedc110: 0x60f137e4 0xa0d5b53b 0x448e4366 0x00000001
0x00000000dfedc120: 0xdeadbeaf 0xdeadbeaf
The dump clearly shows that multiple MSI-X entries share the same message
address (for example, 0xfee15000 appears in entries 0 and 9, and 0xfee10000
in entries 3, 8, and 11), so those vectors all target the same CPU.
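For reference, here is a standalone sketch that decodes the dump, assuming
the standard x86 MSI address layout where bits 19:12 of 0xfeeXX000 carry the
destination APIC ID in physical destination mode:

#include <stdio.h>
#include <stdint.h>

/* Decode the destination APIC ID from an x86 MSI message address:
 * bits 19:12 select the target, i.e. 0xfeeXX000 -> XX.
 */
static unsigned int msi_dest_id(uint32_t addr)
{
	return (addr >> 12) & 0xff;
}

int main(void)
{
	/* Message addresses of the 13 valid entries in the dump above */
	static const uint32_t addrs[] = {
		0xfee15000, 0xfee13000, 0xfee12000, 0xfee10000,
		0xfee11000, 0xfee13000, 0xfee11000, 0xfee14000,
		0xfee10000, 0xfee15000, 0xfee11000, 0xfee10000,
		0xfee08000,
	};
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("entry %2u -> APIC ID 0x%02x\n",
		       i, msi_dest_id(addrs[i]));
	return 0;
}

Thirteen vectors collapse onto only seven distinct destination IDs (0x08 and
0x10 through 0x15), which matches the skewed interrupt distribution; APIC IDs
may not map 1:1 to logical CPU numbers, but the duplication is the point.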
Further debugging uncovered a bug in the HAVE_IRQ_AFFINITY_HINT code path.
The patch below should fix the problem, but I would like your comments on
the fix.
sla@alum:/work/sla/intel-drivers/ixgbe/ixgbe-3.19.1/src$ diff -Naur ixgbe_lib.c ixgbe_lib.c.ori
--- ixgbe_lib.c 2014-07-24 17:35:54.000000000 -0700
+++ ixgbe_lib.c.ori 2014-07-24 17:34:22.000000000 -0700
@@ -697,8 +697,7 @@
 	 * distribution of flows across cores, even when an FDIR flow
 	 * isn't matched.
 	 */
-	if (rss_i > 1 && (adapter->atr_sample_rate ||
-			  adapter->hw.mac.type == ixgbe_mac_82598EB)) {
+	if (rss_i > 1 && adapter->atr_sample_rate) {
 		f = &adapter->ring_feature[RING_F_FDIR];

 		rss_i = f->indices = f->limit;
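(Note: the diff above compares the modified ixgbe_lib.c against the saved
original ixgbe_lib.c.ori, so the '-' lines are the proposed new code. In the
conventional original-to-patched direction the change reads:)

-	if (rss_i > 1 && adapter->atr_sample_rate) {
+	if (rss_i > 1 && (adapter->atr_sample_rate ||
+			  adapter->hw.mac.type == ixgbe_mac_82598EB)) {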
Since the driver was not loaded with the "AtrSampleRate" module parameter,
the following code in ixgbe_param.c leaves "adapter->atr_sample_rate" set to
0 by default:
	{ /* Flow Director ATR Tx sample packet rate */
		static struct ixgbe_option opt = {
			.type = range_option,
			.name = "Software ATR Tx packet sample rate",
			.err  = "using default of "
				__MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE),
			.def  = IXGBE_DEFAULT_ATR_SAMPLE_RATE,
			.arg  = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF,
				       .max = IXGBE_MAX_ATR_SAMPLE_RATE} }
		};
		static const char atr_string[] =
			"ATR Tx Packet sample rate set to";

		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF;
		} else if (num_AtrSampleRate > bd) {
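As a quick sanity check, the resulting value can be logged near the end of
ixgbe_check_options(). This is hypothetical debug instrumentation only, not
part of the proposed fix ("bd" is the board index already in scope there):

	/* Debug aid: dump what option parsing left behind. Per the
	 * analysis above this should print atr_sample_rate 0, which
	 * means the HAVE_IRQ_AFFINITY_HINT path below never fires.
	 */
	pr_info("ixgbe: bd %d atr_sample_rate %u\n",
		bd, adapter->atr_sample_rate);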
Subsequently, the following code in ixgbe_alloc_q_vector() checks
"adapter->atr_sample_rate" and sees it set to 0. Because of that, "cpu"
keeps its initial value of -1, so cpumask_set_cpu() is never called and
the q_vector's affinity mask stays empty.
#ifdef HAVE_IRQ_AFFINITY_HINT
	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

#endif
	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
#ifdef HAVE_IRQ_AFFINITY_HINT
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
#endif
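The empty mask then becomes a useless affinity hint when the vectors are
requested. Roughly, paraphrasing the HAVE_IRQ_AFFINITY_HINT code in
ixgbe_request_msix_irqs() in ixgbe_main.c (treat the exact guard flag and
variable names as assumptions):

#ifdef HAVE_IRQ_AFFINITY_HINT
		/* If Flow Director is enabled, set interrupt affinity.
		 * With cpu == -1 above, affinity_mask was never populated,
		 * so the hint exported here is the empty set and interrupts
		 * end up clustered on a few CPUs, as seen in the dump.
		 */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
			irq_set_affinity_hint(entry->vector,
					      &q_vector->affinity_mask);
#endif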
Please provide feedback on this issue. I have checked the latest driver
(version 3.21.2) and the problem is still present there.
Thanks so much.
--Steven