On Tue, Jul 29, 2025 at 02:23:13PM -0400, Steven Rostedt wrote:
> From: Steven Rostedt <rost...@goodmis.org>
> 
> Instead of using the callback_mutex to protect the linked list of
> callbacks in unwind_deferred_task_work(), use SRCU. This function is
> called every time a task that needs to record a requested stack trace
> exits. This can happen for many tasks on several CPUs at the same time.
> A mutex is a bottleneck and can cause contention, slowing down
> performance.
> 
> As the callbacks themselves are allowed to sleep, regular RCU cannot be
> used to protect the list. Instead, use SRCU, which still allows the
> callbacks to sleep while letting the list be read without holding the
> callback_mutex.
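
For context, this is the classic sleepable-reader SRCU pattern. Below is a
minimal self-contained sketch with illustrative names (the example_* names
are mine), not the actual kernel/unwind/deferred.c code:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/rculist.h>
	#include <linux/srcu.h>

	/* Sketch only: illustrative names, not kernel/unwind/deferred.c. */
	DEFINE_STATIC_SRCU(example_srcu);
	static LIST_HEAD(example_callbacks);
	static DEFINE_MUTEX(example_mutex);	/* serializes add/remove only */

	struct example_work {
		struct list_head list;
		void (*func)(struct example_work *work);
	};

	/* Read side: func() may sleep, which plain RCU would forbid. */
	static void example_run_callbacks(void)
	{
		struct example_work *work;
		int idx;

		idx = srcu_read_lock(&example_srcu);
		list_for_each_entry_srcu(work, &example_callbacks, list,
					 srcu_read_lock_held(&example_srcu))
			work->func(work);	/* sleeping here is fine under SRCU */
		srcu_read_unlock(&example_srcu, idx);
	}

	/* Update side: updaters still serialize against each other. */
	static void example_remove(struct example_work *work)
	{
		mutex_lock(&example_mutex);
		list_del_rcu(&work->list);
		mutex_unlock(&example_mutex);

		/* All SRCU readers must finish before @work may be freed. */
		synchronize_srcu(&example_srcu);
	}

The win is that the mutex shrinks to the update side: exiting tasks never
take it, and only enter the (cheap) SRCU read-side critical section.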
> 
> Link: https://lore.kernel.org/all/ca9bd83a-6c80-4ee0-a83c-224b9d60b...@efficios.com/
> 
> Cc: "Paul E. McKenney" <paul...@kernel.org>
> Suggested-by: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
> Signed-off-by: Steven Rostedt (Google) <rost...@goodmis.org>

One quite likely stupid question below.

                                                        Thanx, Paul

> ---
>  kernel/unwind/deferred.c | 27 +++++++++++++++++++++------
>  1 file changed, 21 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
> index 2311b725d691..a5ef1c1f915e 100644
> --- a/kernel/unwind/deferred.c
> +++ b/kernel/unwind/deferred.c
> @@ -41,7 +41,7 @@ static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
>  #define UNWIND_MAX_ENTRIES                                   \
>       ((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
>  
> -/* Guards adding to and reading the list of callbacks */
> +/* Guards adding to or removing from the list of callbacks */
>  static DEFINE_MUTEX(callback_mutex);
>  static LIST_HEAD(callbacks);
>  
> @@ -49,6 +49,7 @@ static LIST_HEAD(callbacks);
>  
>  /* Zero'd bits are available for assigning callback users */
>  static unsigned long unwind_mask = RESERVED_BITS;
> +DEFINE_STATIC_SRCU(unwind_srcu);
>  
>  static inline bool unwind_pending(struct unwind_task_info *info)
>  {
> @@ -174,8 +175,9 @@ static void unwind_deferred_task_work(struct callback_head *head)
>  
>       cookie = info->id.id;
>  
> -     guard(mutex)(&callback_mutex);
> -     list_for_each_entry(work, &callbacks, list) {
> +     guard(srcu)(&unwind_srcu);
> +     list_for_each_entry_srcu(work, &callbacks, list,
> +                              srcu_read_lock_held(&unwind_srcu)) {
>               if (test_bit(work->bit, &bits)) {
>                       work->func(work, &trace, cookie);
>                       if (info->cache)
> @@ -213,7 +215,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
>  {
>       struct unwind_task_info *info = &current->unwind_info;
>       unsigned long old, bits;
> -     unsigned long bit = BIT(work->bit);
> +     unsigned long bit;
>       int ret;
>  
>       *cookie = 0;
> @@ -230,6 +232,14 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
>       if (WARN_ON_ONCE(!CAN_USE_IN_NMI && in_nmi()))
>               return -EINVAL;
>  
> +     /* Do not allow cancelled works to request again */
> +     bit = READ_ONCE(work->bit);
> +     if (WARN_ON_ONCE(bit < 0))
> +             return -EINVAL;
> +
> +     /* Only need the mask now */
> +     bit = BIT(bit);
> +
>       guard(irqsave)();
>  
>       *cookie = get_cookie(info);
> @@ -281,10 +291,15 @@ void unwind_deferred_cancel(struct unwind_work *work)
>               return;
>  
>       guard(mutex)(&callback_mutex);
> -     list_del(&work->list);

What happens if unwind_deferred_task_work() finds this item right here...

> +     list_del_rcu(&work->list);

...and then unwind_deferred_request() does its WARN_ON_ONCE() check
against -1 right here?

Can't that cause UAF?
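
To make that concrete, the interleaving I have in mind (a sketch, assuming
nothing else orders these paths):

	unwind_deferred_cancel()          unwind_deferred_task_work() / _request()
	------------------------          ----------------------------------------
	list_del_rcu(&work->list);
	                                  srcu_read_lock(&unwind_srcu);
	                                  list reader still finds @work
	                                  (no grace period has elapsed yet)
	                                  READ_ONCE(work->bit) in _request()
	                                  sees the old bit, not -1
	work->bit = -1;
	__clear_bit(bit, &unwind_mask);
	synchronize_srcu(&unwind_srcu);
	   ^ waits for the SRCU list reader, but what (if anything) waits
	     for that _request() caller before @work can go away?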

This is quite possibly a stupid question because I am not seeing where to
apply this patch, so I don't know what other mechanisms might be in place.

> +     /* Do not allow any more requests and prevent callbacks */
> +     work->bit = -1;
>  
>       __clear_bit(bit, &unwind_mask);
>  
> +     synchronize_srcu(&unwind_srcu);
> +
>       guard(rcu)();
>       /* Clear this bit from all threads */
>       for_each_process_thread(g, t) {
> @@ -307,7 +322,7 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
>       work->bit = ffz(unwind_mask);
>       __set_bit(work->bit, &unwind_mask);
>  
> -     list_add(&work->list, &callbacks);
> +     list_add_rcu(&work->list, &callbacks);
>       work->func = func;
>       return 0;
>  }
> -- 
> 2.47.2
> 
> 
