Context: The RCU Non-Callback (NOCB) infrastructure traditionally requires boot-time parameters (e.g., rcu_nocbs) to allocate masks and spawn management kthreads (rcuog/rcuo). This prevents systems from activating offloading on-demand without a reboot.
Problem: Dynamic Housekeeping Management requires CPUs to transition to NOCB mode at runtime when they are newly isolated. Without boot-time setup, the NOCB masks are unallocated, and critical kthreads are missing, preventing effective tick suppression and isolation. Solution: Refactor RCU initialization to support dynamic on-demand setup. - Introduce rcu_init_nocb_dynamic() to allocate masks and organize kthreads if the system wasn't initially configured for NOCB. - Introduce rcu_housekeeping_reconfigure() to iterate over CPUs and perform safe offload/deoffload transitions via hotplug sequences (cpu_down -> offload -> cpu_up) when a housekeeping cpuset triggers a notifier event. - Remove __init from rcu_organize_nocb_kthreads to allow runtime reconfiguration of the callback management hierarchy. This enables a true "Zero-Conf" isolation experience where any CPU can be fully isolated at runtime regardless of boot parameters. Signed-off-by: Qiliang Yuan <[email protected]> --- kernel/rcu/rcu.h | 4 +++ kernel/rcu/tree.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/rcu/tree.h | 2 +- kernel/rcu/tree_nocb.h | 31 +++++++++++++-------- 4 files changed, 100 insertions(+), 12 deletions(-) diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 9b10b57b79ada..282874443c96b 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -663,8 +663,12 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp); #endif // #else // #ifdef CONFIG_TINY_SRCU #ifdef CONFIG_RCU_NOCB_CPU +void rcu_init_nocb_dynamic(void); +void rcu_spawn_cpu_nocb_kthread(int cpu); void rcu_bind_current_to_nocb(void); #else +static inline void rcu_init_nocb_dynamic(void) { } +static inline void rcu_spawn_cpu_nocb_kthread(int cpu) { } static inline void rcu_bind_current_to_nocb(void) { } #endif diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 55df6d37145e8..84c8388cf89a1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4928,4 +4928,79 @@ void __init rcu_init(void) 
#include "tree_stall.h" #include "tree_exp.h" #include "tree_nocb.h" + +#ifdef CONFIG_SMP +static int rcu_housekeeping_reconfigure(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct housekeeping_update *upd = data; + struct task_struct *t; + int cpu; + + if (action != HK_UPDATE_MASK || upd->type != HK_TYPE_RCU) + return NOTIFY_OK; + + rcu_init_nocb_dynamic(); + + for_each_possible_cpu(cpu) { + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + bool isolated = !cpumask_test_cpu(cpu, upd->new_mask); + bool offloaded = rcu_rdp_is_offloaded(rdp); + + if (isolated && !offloaded) { + /* Transition to NOCB */ + pr_info("rcu: CPU %d transitioning to NOCB mode\n", cpu); + if (cpu_online(cpu)) { + remove_cpu(cpu); + rcu_spawn_cpu_nocb_kthread(cpu); + rcu_nocb_cpu_offload(cpu); + add_cpu(cpu); + } else { + rcu_spawn_cpu_nocb_kthread(cpu); + rcu_nocb_cpu_offload(cpu); + } + } else if (!isolated && offloaded) { + /* Transition to CB */ + pr_info("rcu: CPU %d transitioning to CB mode\n", cpu); + if (cpu_online(cpu)) { + remove_cpu(cpu); + rcu_nocb_cpu_deoffload(cpu); + add_cpu(cpu); + } else { + rcu_nocb_cpu_deoffload(cpu); + } + } + } + + t = READ_ONCE(rcu_state.gp_kthread); + if (t) + housekeeping_affine(t, HK_TYPE_RCU); + +#ifdef CONFIG_TASKS_RCU + t = get_rcu_tasks_gp_kthread(); + if (t) + housekeeping_affine(t, HK_TYPE_RCU); +#endif + +#ifdef CONFIG_TASKS_RUDE_RCU + t = get_rcu_tasks_rude_gp_kthread(); + if (t) + housekeeping_affine(t, HK_TYPE_RCU); +#endif + + return NOTIFY_OK; +} + +static struct notifier_block rcu_housekeeping_nb = { + .notifier_call = rcu_housekeeping_reconfigure, +}; + +static int __init rcu_init_housekeeping_notifier(void) +{ + housekeeping_register_notifier(&rcu_housekeeping_nb); + return 0; +} +late_initcall(rcu_init_housekeeping_notifier); +#endif + #include "tree_plugin.h" diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 7dfc57e9adb18..f3d31918ea322 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -517,7 
+517,7 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
 						  unsigned long flags);
 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
 #ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(void);
+static void rcu_organize_nocb_kthreads(void);
 
 /*
  * Disable IRQs before checking offloaded state so that local
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index b3337c7231ccb..36f6c9be937aa 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1259,6 +1259,29 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 }
 #endif // #ifdef CONFIG_RCU_LAZY
 
+/*
+ * Prepare the NOCB machinery for runtime (de-)offloading: allocate
+ * rcu_nocb_mask and organize the rcuog/rcuo kthread hierarchy if the
+ * system was not configured for callback offloading at boot.
+ * On allocation failure rcu_state.nocb_is_setup stays false, which
+ * callers must treat as "dynamic offloading unavailable" and must not
+ * touch rcu_nocb_mask.
+ */
+void rcu_init_nocb_dynamic(void)
+{
+	if (rcu_state.nocb_is_setup)
+		return;
+
+	if (!cpumask_available(rcu_nocb_mask)) {
+		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+			pr_info("rcu_nocb_mask allocation failed, dynamic offloading disabled.\n");
+			return;
+		}
+	}
+
+	rcu_state.nocb_is_setup = true;
+	rcu_organize_nocb_kthreads();
+}
+
 void __init rcu_init_nohz(void)
 {
 	int cpu;
@@ -1276,15 +1299,10 @@ void __init rcu_init_nohz(void)
 	cpumask = cpu_possible_mask;
 	if (cpumask) {
-		if (!cpumask_available(rcu_nocb_mask)) {
-			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
-				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
-				return;
-			}
-		}
-
-		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
-		rcu_state.nocb_is_setup = true;
+		rcu_init_nocb_dynamic();
+		/* Mask allocation inside rcu_init_nocb_dynamic() may fail. */
+		if (rcu_state.nocb_is_setup)
+			cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
 	}
 
 	if (!rcu_state.nocb_is_setup)
@@ -1344,7 +1362,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
  * for this CPU's group has not yet been created, spawn it as well.
  */
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
+void rcu_spawn_cpu_nocb_kthread(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_data *rdp_gp;
@@ -1416,7 +1434,7 @@ module_param(rcu_nocb_gp_stride, int, 0444);
 /*
  * Initialize GP-CB relationships for all no-CBs CPU.
  */
-static void __init rcu_organize_nocb_kthreads(void)
+static void rcu_organize_nocb_kthreads(void)
 {
 	int cpu;
 	bool firsttime = true;
@@ -1668,7 +1686,7 @@ static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
 	return false;
 }
 
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
+void rcu_spawn_cpu_nocb_kthread(int cpu)
 {
 }
-- 
2.43.0

