In netif_receive_skb_list_internal(), all of skb_defer_rx_timestamp(),
 do_xdp_generic() and enqueue_to_backlog() can lead to kfree(skb).  Thus,
 we cannot wait until after they return to remove the skb from the list;
 instead, we remove it first and, in the pass case, add it to a sublist
 afterwards.
In the case of enqueue_to_backlog() we have already decided not to pass
 when we call the function, so we do not need a sublist.

Fixes: 7da517a3bc52 ("net: core: Another step of skb receive list processing")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Edward Cree <ecree@solarflare.com>
---
 net/core/dev.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 89825c1eccdc..ce4583564e00 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4982,25 +4982,30 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 {
        struct bpf_prog *xdp_prog = NULL;
        struct sk_buff *skb, *next;
+       struct list_head sublist;
 
+       INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                net_timestamp_check(netdev_tstamp_prequeue, skb);
-               if (skb_defer_rx_timestamp(skb))
-                       /* Handled, remove from list */
-                       list_del(&skb->list);
+               list_del(&skb->list);
+               if (!skb_defer_rx_timestamp(skb))
+                       list_add_tail(&skb->list, &sublist);
        }
+       list_splice_init(&sublist, head);
 
        if (static_branch_unlikely(&generic_xdp_needed_key)) {
                preempt_disable();
                rcu_read_lock();
                list_for_each_entry_safe(skb, next, head, list) {
                        xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-                       if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
-                               /* Dropped, remove from list */
-                               list_del(&skb->list);
+                       list_del(&skb->list);
+                       if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+                               list_add_tail(&skb->list, &sublist);
                }
                rcu_read_unlock();
                preempt_enable();
+               /* Put passed packets back on main list */
+               list_splice_init(&sublist, head);
        }
 
        rcu_read_lock();
@@ -5011,9 +5016,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
                        int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
                        if (cpu >= 0) {
-                               enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-                               /* Handled, remove from list */
+                               /* Will be handled, remove from list */
                                list_del(&skb->list);
+                               enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        }
                }
        }

Reply via email to