The current Tiny SRCU implementation of srcu_read_unlock() awakens
the grace-period processing when exiting the outermost SRCU read-side
critical section.  However, not all Linux-kernel configurations and
contexts permit swake_up_one() to be invoked while interrupts are
disabled, and this can result in indefinitely extended SRCU grace periods.
This commit therefore invokes swake_up_one() only when interrupts are
enabled, and compensates for the possibly-missing wakeups by making the
grace-period workqueue handler poll the read-side nesting counts.

Reported-by: kernel test robot <[email protected]>
Reported-by: Zqiang <[email protected]>
Closes: https://lore.kernel.org/oe-lkp/[email protected]
Signed-off-by: Paul E. McKenney <[email protected]>
---
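Not part of the patch proper, but for anyone reading along, here is a
minimal sketch of the kind of reader that motivates this change.  The
my_srcu instance and example_reader() function are hypothetical; the
snippet simply shows the outermost srcu_read_unlock() being reached
with interrupts disabled, which is where an unconditional
swake_up_one() gets Tiny SRCU into trouble:

  #include <linux/irqflags.h>
  #include <linux/srcu.h>

  DEFINE_STATIC_SRCU(my_srcu);  /* hypothetical srcu_struct, illustration only */

  static void example_reader(void)
  {
          unsigned long flags;
          int idx;

          idx = srcu_read_lock(&my_srcu);
          local_irq_save(flags);  /* interrupts now disabled */

          /*
           * This is the outermost srcu_read_unlock(), so the old code
           * invoked swake_up_one() here with interrupts disabled, which
           * not all configurations and contexts permit.  With this patch
           * the wakeup is skipped, and srcu_drive_gp() instead re-checks
           * srcu_lock_nesting[] at least every HZ / 10 jiffies.
           */
          srcu_read_unlock(&my_srcu, idx);

          local_irq_restore(flags);
  }

The swait_event_timeout_exclusive() loop in the diff below means that,
in the worst case, the grace period notices the reader's completion
within roughly 100 milliseconds rather than hanging indefinitely.
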
 kernel/rcu/srcutiny.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index e3b64a5e0ec7..3450c3751ef7 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -106,15 +106,15 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
        newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
        WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
        preempt_enable();
-       if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
+       if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task() && !irqs_disabled())
                swake_up_one(&ssp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
  * Workqueue handler to drive one grace period and invoke any callbacks
- * that become ready as a result.  Single-CPU and !PREEMPTION operation
- * means that we get away with murder on synchronization.  ;-)
+ * that become ready as a result.  Single-CPU operation and preemption
+ * disabling mean that we get away with murder on synchronization.  ;-)
  */
 void srcu_drive_gp(struct work_struct *wp)
 {
@@ -141,7 +141,12 @@ void srcu_drive_gp(struct work_struct *wp)
        WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
        WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
        preempt_enable();
-       swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
+       do {
+               // Deadlock issues prevent __srcu_read_unlock() from
+               // doing an unconditional wakeup, so polling is required.
+               swait_event_timeout_exclusive(ssp->srcu_wq,
+                                             !READ_ONCE(ssp->srcu_lock_nesting[idx]), HZ / 10);
+       } while (READ_ONCE(ssp->srcu_lock_nesting[idx]));
        preempt_disable();  // Needed for PREEMPT_LAZY
        WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
        WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
-- 
2.40.1

