[ Upstream commit 266155b2de8fb721ae353688529b2f8bcdde2f90 ]

The dumping stops prematurely: the callback argument that indicates that
all entries have been dumped (cb->args[2]) is set after iterating over the
first per-cpu list only. The dumping may also stop before the content of a
per-cpu list has been entirely dumped.

With this patch, conntrack -L dying now shows the dying list content
again.
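
For context, here is a minimal userspace sketch of the resumable dump
pattern the fix restores: the "all dumped" marker may only be set once the
outer loop over every per-cpu list has completed, and when the output
buffer fills up both the current cpu index and the current entry must be
saved so the next callback invocation resumes from that position. All
names here (dump_state, dump_lists, NR_LISTS, BUDGET) are illustrative
stand-ins, not the kernel or ctnetlink API.

#include <stdio.h>
#include <stdbool.h>

#define NR_LISTS 4      /* stand-in for nr_cpu_ids */
#define PER_LIST 3      /* entries on each per-cpu list */
#define BUDGET   5      /* entries one "skb" can hold */

/* Stand-in for cb->args[]: resume position plus the "done" flag. */
struct dump_state {
	int cpu;        /* args[0]: list to resume from */
	int entry;      /* args[1]: entry to resume from */
	bool done;      /* args[2]: everything was dumped */
};

/*
 * One dump callback invocation: emit at most BUDGET entries, resuming
 * from the position recorded in *st.  Returns the number of entries
 * emitted; 0 means the dump is complete.
 */
static int dump_lists(struct dump_state *st)
{
	int emitted = 0;
	int cpu, e;

	if (st->done)
		return 0;

	for (cpu = st->cpu; cpu < NR_LISTS; cpu++) {
		for (e = st->entry; e < PER_LIST; e++) {
			if (emitted == BUDGET) {
				/* Buffer full: remember both cpu and entry. */
				st->cpu = cpu;
				st->entry = e;
				return emitted;
			}
			printf("cpu %d entry %d\n", cpu, e);
			emitted++;
		}
		st->entry = 0;  /* the next list starts at its head */
	}

	/* Only after every list has been walked is the dump done. */
	st->done = true;
	return emitted;
}

int main(void)
{
	struct dump_state st = { 0 };
	int round = 0;

	while (dump_lists(&st) > 0)
		printf("-- end of callback invocation %d --\n", round++);

	return 0;
}

This mirrors the two hunks below: cb->args[0] = cpu is now saved when the
buffer fills up, and cb->args[2] = 1 moves out of the per-cpu loop.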

Cc: <stable@vger.kernel.org> # 3.15.x
Fixes: b7779d06 ("netfilter: conntrack: spinlock per cpu to protect special lists.")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 net/netfilter/nf_conntrack_netlink.c |    8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5857963..ef0eedd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1163,9 +1163,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
        if (cb->args[2])
                return 0;
 
-       if (cb->args[0] == nr_cpu_ids)
-               return 0;
-
        for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
                struct ct_pcpu *pcpu;
 
@@ -1194,6 +1191,7 @@ restart:
                        rcu_read_unlock();
                        if (res < 0) {
                                nf_conntrack_get(&ct->ct_general);
+                               cb->args[0] = cpu;
                                cb->args[1] = (unsigned long)ct;
                                spin_unlock_bh(&pcpu->lock);
                                goto out;
@@ -1202,10 +1200,10 @@ restart:
                if (cb->args[1]) {
                        cb->args[1] = 0;
                        goto restart;
-               } else
-                       cb->args[2] = 1;
+               }
                spin_unlock_bh(&pcpu->lock);
        }
+       cb->args[2] = 1;
 out:
        if (last)
                nf_ct_put(last);
-- 
1.7.10.4
