The WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp()) in
rcu_gp_cleanup() triggers (inexplicably, of course) every so often.
This commit therefore extracts more information.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 08b72dd02d88..6ad2ac34558f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2147,7 +2147,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq_rcu_node(rnp);
-               WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+#ifdef CONFIG_PREEMPT_RCU
+               if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) {
+                       int i;
+                       struct task_struct *t;
+
+                       pr_info("%s: grp: %d-%d level: %d ->gp_tasks %p ->exp_tasks %p &->blkd_tasks: %p offset: %u\n", __func__, rnp->grplo, rnp->grphi, rnp->level, rnp->gp_tasks, rnp->exp_tasks, &rnp->blkd_tasks, (unsigned int)offsetof(typeof(*rnp), blkd_tasks));
+                       pr_cont("\t->blkd_tasks");
+                       i = 0;
+                       list_for_each_entry(t, &rnp->blkd_tasks, rcu_node_entry) {
+                               pr_cont(" %p", t);
+                               if (++i >= 10)
+                                       break;
+                       }
+                       pr_cont("\n");
+               }
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
                WARN_ON_ONCE(rnp->qsmask);
                WRITE_ONCE(rnp->completed, rsp->gpnum);
                rdp = this_cpu_ptr(rsp->rda);
-- 
2.5.2

Reply via email to