The batched kfree_rcu()/kvfree_rcu() path (CONFIG_KVFREE_RCU_BATCHED) manages its own per-CPU queues in struct kfree_rcu_cpu, bypassing the main RCU segmented callback list. Objects queued through this path were not visible in the debugfs pending_cbs file.
Add a kfree_rcu_pending() helper that returns the number of objects
waiting in the kfree_rcu batching layer for a given CPU, and include
this count as a "kfree_rcu" column in the debugfs output.

Example output:

  cpu    done  wait  next_ready  next  lazy  kfree_rcu
  0      0     0     0           5     5     12
  1      0     0     0           3     3     8
  total  0     0     0           8     8     20

Signed-off-by: Gustavo Luiz Duarte <[email protected]>
---
 kernel/rcu/rcu.h        |  1 +
 kernel/rcu/tree_stall.h | 17 +++++++++++------
 mm/slab_common.c        | 18 ++++++++++++++++++
 3 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index fa6d30ce73d1..a28c3c7dc4da 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -652,6 +652,7 @@ void rcu_fwd_progress_check(unsigned long j);
 void rcu_force_quiescent_state(void);
 extern struct workqueue_struct *rcu_gp_wq;
 extern struct kthread_worker *rcu_exp_gp_kworker;
+int kfree_rcu_pending(int cpu);
 void rcu_gp_slow_register(atomic_t *rgssp);
 void rcu_gp_slow_unregister(atomic_t *rgssp);
 #endif /* #else #ifdef CONFIG_TINY_RCU */
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index d9fc9bfdaf96..5fd63730d5f5 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -84,14 +84,17 @@ late_initcall(kernel_rcu_stall_sysfs_init);
 static int rcu_pending_cbs_show(struct seq_file *m, void *v)
 {
 	int cpu;
+	int kfree;
 	long done, wait, nxtrdy, nxt, lazy;
 	long total_done = 0, total_wait = 0, total_nxtrdy = 0;
 	long total_nxt = 0, total_lazy = 0;
+	int total_kfree = 0;
 	struct rcu_data *rdp;
 	struct rcu_segcblist *rsclp;
 
-	seq_printf(m, "%-8s %10s %10s %10s %10s %10s\n",
-		   "cpu", "done", "wait", "next_ready", "next", "lazy");
+	seq_printf(m, "%-8s %10s %10s %10s %10s %10s %10s\n",
+		   "cpu", "done", "wait", "next_ready", "next", "lazy",
+		   "kfree_rcu");
 
 	for_each_possible_cpu(cpu) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -105,20 +108,22 @@ static int rcu_pending_cbs_show(struct seq_file *m, void *v)
 		nxtrdy = rcu_segcblist_get_seglen(rsclp, RCU_NEXT_READY_TAIL);
 		nxt = rcu_segcblist_get_seglen(rsclp, RCU_NEXT_TAIL);
 		lazy = READ_ONCE(rdp->lazy_len);
+		kfree = kfree_rcu_pending(cpu);
 
-		seq_printf(m, "%-8d %10ld %10ld %10ld %10ld %10ld\n",
-			   cpu, done, wait, nxtrdy, nxt, lazy);
+		seq_printf(m, "%-8d %10ld %10ld %10ld %10ld %10ld %10d\n",
+			   cpu, done, wait, nxtrdy, nxt, lazy, kfree);
 
 		total_done += done;
 		total_wait += wait;
 		total_nxtrdy += nxtrdy;
 		total_nxt += nxt;
 		total_lazy += lazy;
+		total_kfree += kfree;
 	}
 
-	seq_printf(m, "%-8s %10ld %10ld %10ld %10ld %10ld\n",
+	seq_printf(m, "%-8s %10ld %10ld %10ld %10ld %10ld %10d\n",
 		   "total", total_done, total_wait, total_nxtrdy,
-		   total_nxt, total_lazy);
+		   total_nxt, total_lazy, total_kfree);
 
 	return 0;
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d5a70a831a2a..93b5d64399f2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1280,6 +1280,11 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
+int kfree_rcu_pending(int cpu)
+{
+	return 0;
+}
+
 void __init kvfree_rcu_init(void)
 {
 }
@@ -2216,4 +2221,17 @@ void __init kvfree_rcu_init(void)
 	shrinker_register(kfree_rcu_shrinker);
 }
 
+/**
+ * kfree_rcu_pending() - Return number of objects pending in kfree_rcu batches.
+ * @cpu: CPU number to query.
+ *
+ * Returns the number of objects queued in kfree_rcu()/kvfree_rcu() batches
+ * on @cpu that are waiting for a grace period. These objects are tracked
+ * separately from the main RCU callback list.
+ */
+int kfree_rcu_pending(int cpu)
+{
+	return krc_count(per_cpu_ptr(&krc, cpu));
+}
+
 #endif /* CONFIG_KVFREE_RCU_BATCHED */
-- 
2.53.0-Meta

