On Tue, Jul 08, 2025 at 10:22:19AM -0400, Joel Fernandes wrote:
> If rcu_read_unlock_special() is invoked during irq_exit(), we can lock up
> when an IPI is issued, because the IPI itself triggers the irq_exit() path
> again, causing a recursive lockup.
> 
> This is precisely what Xiongfeng found when invoking a BPF program on
> the trace_tick_stop() tracepoint, as shown in the trace below. Fix it by
> managing the irq_work state correctly.
> 
> irq_exit()
>   __irq_exit_rcu()
>     /* in_hardirq() returns false after this */
>     preempt_count_sub(HARDIRQ_OFFSET)
>     tick_irq_exit()
>       tick_nohz_irq_exit()
>           tick_nohz_stop_sched_tick()
>             trace_tick_stop()  /* a bpf prog is hooked on this trace point */
>                  __bpf_trace_tick_stop()
>                     bpf_trace_run2()
>                           rcu_read_unlock_special()
>                               /* will send an IPI to itself */
>                             irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
> 
> A simple reproducer can also be obtained by doing the following in
> tick_irq_exit(). It will hang on boot without the patch:
> 
>   static inline void tick_irq_exit(void)
>   {
>  +    rcu_read_lock();
>  +    WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
>  +    rcu_read_unlock();
>  +
> 
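For illustration, the recursion can be modeled outside the kernel. The sketch
below is a toy userspace C program (all identifiers are invented for the
sketch; it is not kernel code): the "self-IPI" is modeled as an immediate
re-entry into the irq-exit path, which is effectively what happens once the
IPI fires and its handler exits through irq_exit() again.

  #include <stdio.h>
  #include <stdbool.h>

  static bool need_deferred_qs = true;   /* set by the BPF program in the trace */
  static int exits;

  static void toy_irq_exit(void);

  /* Stand-in for irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu). */
  static void toy_self_ipi(void)
  {
          /* The IPI handler eventually exits through irq_exit() again. */
          toy_irq_exit();
  }

  static void toy_irq_exit(void)
  {
          if (++exits > 5) {              /* the real kernel has no such bound */
                  printf("re-entered irq_exit() %d times: would lock up\n", exits);
                  return;
          }
          /* tick_irq_exit() -> ... -> rcu_read_unlock_special() */
          if (need_deferred_qs)
                  toy_self_ipi();         /* queues yet another self-IPI */
  }

  int main(void)
  {
          toy_irq_exit();
          return 0;
  }

With the patch, the state stays DEFER_QS_PENDING until the deferred QS is
actually reported in rcu_preempt_deferred_qs_irqrestore(), so the re-entered
unlock path refuses to queue another self-IPI and the cycle is broken.
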
> Reported-by: Xiongfeng Wang <wangxiongfe...@huawei.com>
> Closes: https://lore.kernel.org/all/9acd5f9f-6732-7701-6880-4b51190aa...@huawei.com/
> Tested-by: Qi Xi <xi...@huawei.com>
> Signed-off-by: Joel Fernandes <joelagn...@nvidia.com>

Reviewed-by: Paul E. McKenney <paul...@kernel.org>

> ---
>  kernel/rcu/tree.h        | 11 ++++++++++-
>  kernel/rcu/tree_plugin.h | 23 +++++++++++++++++++----
>  2 files changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index 3830c19cf2f6..f8f612269e6e 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -174,6 +174,15 @@ struct rcu_snap_record {
>       unsigned long   jiffies;        /* Track jiffies value */
>  };
>  
> +/*
> + * The IRQ work (defer_qs_iw) is used by RCU to get the scheduler's attention.
> + * It can be in one of the following states:
> + * - DEFER_QS_IDLE: An IRQ work was never scheduled.
> + * - DEFER_QS_PENDING: An IRQ work was scheduled but has not yet run.
> + */
> +#define DEFER_QS_IDLE                0
> +#define DEFER_QS_PENDING     1
> +
>  /* Per-CPU data for read-copy update. */
>  struct rcu_data {
>       /* 1) quiescent-state and grace-period handling : */
> @@ -192,7 +201,7 @@ struct rcu_data {
>                                       /*  during and after the last grace */
>                                       /* period it is aware of. */
>       struct irq_work defer_qs_iw;    /* Obtain later scheduler attention. */
> -     bool defer_qs_iw_pending;       /* Scheduler attention pending? */
> +     int defer_qs_iw_pending;        /* Scheduler attention pending? */
>       struct work_struct strict_work; /* Schedule readers for strict GPs. */
>  
>       /* 2) batch handling */
> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> index dd1c156c1759..fa7b0d854833 100644
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -486,13 +486,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
>       struct rcu_node *rnp;
>       union rcu_special special;
>  
> +     rdp = this_cpu_ptr(&rcu_data);
> +     if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
> +             rdp->defer_qs_iw_pending = DEFER_QS_IDLE;
> +
>       /*
>        * If RCU core is waiting for this CPU to exit its critical section,
>        * report the fact that it has exited.  Because irqs are disabled,
>        * t->rcu_read_unlock_special cannot change.
>        */
>       special = t->rcu_read_unlock_special;
> -     rdp = this_cpu_ptr(&rcu_data);
>       if (!special.s && !rdp->cpu_no_qs.b.exp) {
>               local_irq_restore(flags);
>               return;
> @@ -628,7 +631,18 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
>  
>       rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
>       local_irq_save(flags);
> -     rdp->defer_qs_iw_pending = false;
> +
> +     /*
> +      * Requeue the IRQ work on the next unlock to avoid the following situation:
> +      * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
> +      * 2. CPU enters new rcu_read_lock()
> +      * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
> +      * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
> +      * 5. Deferred QS reporting does not happen.
> +      */
> +     if (rcu_preempt_depth() > 0)
> +             WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
> +
>       local_irq_restore(flags);
>  }
>  
> @@ -675,7 +689,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
>                       set_tsk_need_resched(current);
>                       set_preempt_need_resched();
>                       if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
> -                         expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
> +                         expboost && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
> +                         cpu_online(rdp->cpu)) {
>                               // Get scheduler to re-evaluate and call hooks.
>                               // If !IRQ_WORK, FQS scan will eventually IPI.
>                               if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
> @@ -685,7 +700,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
>                               else
>                                       init_irq_work(&rdp->defer_qs_iw,
>                                                     rcu_preempt_deferred_qs_handler);
> -                             rdp->defer_qs_iw_pending = true;
> +                             rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
>                               irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
>                       }
>               }
> -- 
> 2.34.1
> 
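To illustrate why the handler resets the state to DEFER_QS_IDLE only when it
fires inside a read-side critical section, here is a minimal userspace model
of the two-state handshake (again not kernel code; the names and the
"delayed IPI" flow are simplifications invented for the sketch):

  #include <stdio.h>
  #include <stdbool.h>

  enum { DEFER_QS_IDLE, DEFER_QS_PENDING };

  static int defer_qs_state = DEFER_QS_IDLE;
  static int read_depth;                  /* stand-in for rcu_preempt_depth() */
  static bool qs_reported;

  /* Models rcu_preempt_deferred_qs_handler(). */
  static void toy_irq_work_handler(void)
  {
          if (read_depth > 0) {
                  /* Cannot report a QS now; let the next unlock requeue us. */
                  defer_qs_state = DEFER_QS_IDLE;
                  return;
          }
          qs_reported = true;             /* deferred QS finally reported */
          defer_qs_state = DEFER_QS_IDLE;
  }

  /* Models the irq_work-queueing part of rcu_read_unlock_special(). */
  static void toy_read_unlock(void)
  {
          read_depth--;
          if (!qs_reported && defer_qs_state != DEFER_QS_PENDING)
                  defer_qs_state = DEFER_QS_PENDING;      /* irq_work_queue_on() */
  }

  int main(void)
  {
          read_depth = 1;
          toy_read_unlock();              /* 1. queues the work (state -> PENDING)  */
          read_depth++;                   /* 2. new rcu_read_lock()                 */
          toy_irq_work_handler();         /* 3. work runs, depth > 0, reset to IDLE */
          toy_read_unlock();              /* 4. outermost unlock may requeue again  */
          toy_irq_work_handler();         /* ...and this time the QS is reported    */
          printf("qs_reported=%d\n", qs_reported);
          return 0;
  }

Without the reset in step 3, step 4 would see the state still
DEFER_QS_PENDING, skip the requeue, and the deferred QS report would not
happen from this path; that is the situation described in the handler's
comment above.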
