From: "Paul E. McKenney" <[email protected]>

In some cases, the allocator return address is in a common function, so
that more information than this return address is desired.  For example,
a percpu_ref reference-count underflow is detected in an RCU callback
function that has access only to a block of memory that is always
allocated in percpu_ref_init(), so the return address always points into
percpu_ref_init() and is therefore unhelpful for identifying the actual
user of that memory.

This commit therefore causes the percpu_ref_switch_to_atomic_rcu()
function to use the new kmem_last_alloc_stack() function to collect
and print a stack trace upon reference-count underflow.  This requires
the kernel use the slub allocator and be built with CONFIG_STACKTRACE=y.
As always, slub debugging must be enabled one way or another, for example,
by booting with the "slub_debug=U" kernel boot parameter.

Cc: Ming Lei <[email protected]>
Cc: Jens Axboe <[email protected]>
Reported-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
---
 lib/percpu-refcount.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 8c7b21a0..ebdfa47 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -169,8 +169,6 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
        struct percpu_ref *ref = data->ref;
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
-       void *allocaddr;
-       const char *allocerr;
        int cpu;
 
        for_each_possible_cpu(cpu)
@@ -194,14 +192,26 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
 
        if (atomic_long_read(&data->count) <= 0) {
-               allocaddr = kmem_last_alloc(data);
+               void *allocaddr;
+               const char *allocerr;
+               void *allocstack[8];
+               int i;
+
+               allocaddr = kmem_last_alloc_stack(data, allocstack, ARRAY_SIZE(allocstack));
                allocerr = kmem_last_alloc_errstring(allocaddr);
-               if (allocerr)
+               if (allocerr) {
                        WARN_ONCE(1, "percpu ref (%ps) <= 0 (%ld) after switching to atomic (%s)",
                                  data->release, atomic_long_read(&data->count), allocerr);
-               else
-                       WARN_ONCE(1, "percpu ref (%ps) <= 0 (%ld) after switching to atomic (allocated at %pS)",
-                                 data->release, atomic_long_read(&data->count), allocaddr);
+               } else {
+                       pr_err("percpu ref (%ps) <= 0 (%ld) after switching to atomic (allocated at %pS)\n",
+                              data->release, atomic_long_read(&data->count), allocaddr);
+                       for (i = 0; i < ARRAY_SIZE(allocstack); i++) {
+                               if (!allocstack[i])
+                                       break;
+                               pr_err("\t%pS\n", allocstack[i]);
+                       }
+                       WARN_ON_ONCE(1);
+               }
        }
 
        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
-- 
2.9.5

Reply via email to