This reverts commit fbd9a2ceba5c ("net: Add lockdep asserts to
____napi_schedule()."). While good in theory, in practice it causes
issues with various drivers, and so it can be revisited earlier in the
cycle, when those drivers can be adjusted if needed.

Link: https://lore.kernel.org/netdev/20220317192145.g23wprums5iunx6c@sx1/
Link: https://lore.kernel.org/netdev/cahmme9ohfzl6cyvh8nlgknkokmewi2gmxs_f7s8patwwc6u...@mail.gmail.com/
Link: https://lore.kernel.org/wireguard/[email protected]/
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: Jakub Kicinski <[email protected]>
Cc: Saeed Mahameed <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Signed-off-by: Jason A. Donenfeld <[email protected]>
---
 include/linux/lockdep.h | 7 -------
 net/core/dev.c          | 5 +----
 2 files changed, 1 insertion(+), 11 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0cc65d216701..467b94257105 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -329,12 +329,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_assert_none_held_once()	\
 	lockdep_assert_once(!current->lockdep_depth)
 
-/*
- * Ensure that softirq is handled within the callchain and not delayed and
- * handled by chance.
- */
-#define lockdep_assert_softirq_will_run()	\
-	lockdep_assert_once(hardirq_count() | softirq_count())
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
@@ -420,7 +414,6 @@ extern int lockdep_is_held(const void *);
 #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 #define lockdep_assert_none_held_once()	do { } while (0)
-#define lockdep_assert_softirq_will_run()	do { } while (0)
 
 #define lockdep_recursing(tsk)			(0)
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e0cc5f2020d..6cad39b73a8e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4277,9 +4277,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 {
 	struct task_struct *thread;
 
-	lockdep_assert_softirq_will_run();
-	lockdep_assert_irqs_disabled();
-
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
 		/* Paired with smp_mb__before_atomic() in
 		 * napi_enable()/dev_set_threaded().
@@ -4887,7 +4884,7 @@ int __netif_rx(struct sk_buff *skb)
 {
 	int ret;
 
-	lockdep_assert_softirq_will_run();
+	lockdep_assert_once(hardirq_count() | softirq_count());
 
 	trace_netif_rx_entry(skb);
 
 	ret = netif_rx_internal(skb);
-- 
2.35.1
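
(Not part of the patch, only context for readers unfamiliar with the reverted macro: a standalone userspace sketch of the check that lockdep_assert_softirq_will_run() performed. The mask values and helper definitions below merely model the kernel's preempt_count layout for illustration; they are not the real implementation.)

/*
 * Userspace model of the reverted check: the assertion only holds when
 * the caller runs in hardirq context or with softirqs accounted (BH
 * context), i.e. somewhere the raised NET_RX softirq is guaranteed to
 * be processed on the exit path rather than lingering until ksoftirqd
 * or the next tick.
 */
#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's preempt_count bit fields. */
#define HARDIRQ_MASK 0x000f0000u
#define SOFTIRQ_MASK 0x0000ff00u

static unsigned int preempt_count; /* stand-in for the per-CPU counter */

static unsigned int hardirq_count(void) { return preempt_count & HARDIRQ_MASK; }
static unsigned int softirq_count(void) { return preempt_count & SOFTIRQ_MASK; }

/* What lockdep_assert_softirq_will_run() boiled down to before the revert. */
static void assert_softirq_will_run(void)
{
	assert(hardirq_count() | softirq_count());
}

int main(void)
{
	preempt_count = 0x00010000;	/* pretend we are in hardirq context */
	assert_softirq_will_run();	/* passes */
	puts("hardirq context: assertion holds");

	preempt_count = 0;		/* plain task context, softirqs enabled */
	/* assert_softirq_will_run();	would fire here, as it did for some drivers */
	puts("task context: assertion would have fired");
	return 0;
}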
