Add a call to rcu_promote_blocked_tasks() in sync_exp_reset_tree(), before the check for blocked tasks. This ensures that an expedited grace period waits for tasks that blocked (and were queued on their CPU's per-CPU list) before the expedited GP was initiated; without the promotion, such tasks would not yet be on rnp->blkd_tasks and would be invisible to that check.
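The tree_exp.h hunk below calls rcu_promote_blocked_tasks(), whose definition is introduced elsewhere in this series and is not part of this diff. As a rough sketch of the intended shape only (the leaf-node guard and the exact signature passed to rcu_promote_blocked_tasks_rdp() are assumptions here, not confirmed by this patch), the helper would walk the CPUs covered by a leaf rcu_node and drain each CPU's list while the caller holds the rnp lock:

	/*
	 * Sketch only, not the definition added by this series: promote
	 * tasks from the per-CPU blocked lists of every CPU covered by
	 * this leaf rcu_node onto rnp->blkd_tasks. Assumes the caller
	 * holds rnp->lock, as sync_exp_reset_tree() does at the call site.
	 */
	static void rcu_promote_blocked_tasks(struct rcu_node *rnp)
	{
		int cpu;

		raw_lockdep_assert_held_rcu_node(rnp);
		if (!rcu_is_leaf_node(rnp))
			return;	/* Only leaf nodes track blocked tasks. */
		for_each_leaf_node_possible_cpu(rnp, cpu)
			rcu_promote_blocked_tasks_rdp(per_cpu_ptr(&rcu_data, cpu),
						      rnp);
	}

Doing the promotion under the rnp lock that sync_exp_reset_tree() already holds avoids a separate lock round-trip and orders the splice before the rnp->blkd_tasks check in the expedited path.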
Signed-off-by: Joel Fernandes <[email protected]>
---
 kernel/rcu/tree.h        | 1 +
 kernel/rcu/tree_exp.h    | 5 +++++
 kernel/rcu/tree_plugin.h | 8 +++++---
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 13d5649a80fb..b71c6c1de8d3 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -501,6 +501,7 @@ static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
 static void rcu_cpu_kthread_setup(unsigned int cpu);
 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
+static void rcu_promote_blocked_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 96c49c56fc14..f6cb0e3147c4 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -141,6 +141,11 @@ static void __maybe_unused sync_exp_reset_tree(void)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
+		/*
+		 * Promote tasks from per-CPU lists before checking blkd_tasks.
+		 * This ensures expedited GPs see tasks that blocked earlier.
+		 */
+		rcu_promote_blocked_tasks(rnp);
 		/*
 		 * Need to wait for any blocked tasks as well. Note that
 		 * additional blocking tasks will also block the expedited GP
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6810f1b72d2a..ad33fdd0efe8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -810,8 +810,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
 /*
  * Promote blocked tasks from a single CPU's per-CPU list to the rnp list.
  *
- * If there are no tracked blockers (gp_tasks NULL) and this CPU
- * is still blocking the corresponding GP (bit set in qsmask), set
+ * If there are no tracked blockers (gp_tasks/exp_tasks NULL) and this CPU
+ * is still blocking the corresponding GP (bit set in qsmask/expmask), set
  * the pointer to ensure the GP machinery knows about the blocking task.
  * This handles late promotion during QS reporting, where tasks may have
  * blocked after rcu_gp_init() or sync_exp_reset_tree() ran their scans.
@@ -844,11 +844,13 @@ static void rcu_promote_blocked_tasks_rdp(struct rcu_data *rdp,
 		t->rcu_blocked_cpu = -1;
 
 		/*
-		 * Set gp_tasks if this is the first blocker and
+		 * Set gp_tasks/exp_tasks if this is the first blocker and
 		 * this CPU is still blocking the corresponding GP.
 		 */
 		if (!rnp->gp_tasks && (rnp->qsmask & rdp->grpmask))
 			WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
+		if (!rnp->exp_tasks && (rnp->expmask & rdp->grpmask))
+			WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
 	}
 	raw_spin_unlock(&rdp->blkd_lock);
 }
-- 
2.34.1