Context:
Full dynticks (NOHZ_FULL) is typically a static configuration determined
at boot time. DHEI extends this to support runtime activation.

Problem:
Switching to NOHZ_FULL at runtime requires careful synchronization
of context tracking and housekeeping states. Re-invoking setup logic
multiple times could lead to inconsistencies or warnings, and RCU
dependency checks often prevented tick suppression in "Zero-Conf" setups.

Solution:
- Replaced the static tick_nohz_full_enabled() checks with a dynamic
  tick_nohz_full_running state variable.
- Refactored tick_nohz_full_setup to be safe for runtime invocation,
  adding guards against re-initialization and ensuring IRQ work
  interrupt support.
- Implemented boot-time pre-activation of context tracking (shadow
  init) for all possible CPUs to avoid instruction flow issues during
  dynamic transitions.
- Restored standard rcu_needs_cpu() checks now that RCU supports
  native dynamic NOCB mode switching.

This provides the core state machine for reliable, on-demand tick
suppression and high-performance isolation.
---
 kernel/time/tick-sched.c | 130 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 105 insertions(+), 25 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2f8a7923fa279..dee42cea259a9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -27,6 +27,7 @@
 #include <linux/posix-timers.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
+#include <linux/sched/isolation.h>
 
 #include <asm/irq_regs.h>
 
@@ -621,13 +622,25 @@ void __tick_nohz_task_switch(void)
 /* Get the boot-time nohz CPU list from the kernel parameters. */
 void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 {
-       alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+       if (!tick_nohz_full_mask) {
+               if (!slab_is_available())
+                       alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+               else
+                       zalloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL);
+       }
        cpumask_copy(tick_nohz_full_mask, cpumask);
        tick_nohz_full_running = true;
 }
 
 bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
 {
+       /*
+        * Allow all CPUs to go down during shutdown/reboot to avoid
+        * interfering with the final power-off sequence.
+        */
+       if (system_state > SYSTEM_RUNNING)
+               return true;
+
        /*
         * The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound
         * timers, workqueues, timekeeping, ...) on behalf of full dynticks
@@ -643,45 +656,112 @@ static int tick_nohz_cpu_down(unsigned int cpu)
        return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
 }
 
+static int tick_nohz_housekeeping_reconfigure(struct notifier_block *nb,
+                                            unsigned long action, void *data)
+{
+       struct housekeeping_update *upd = data;
+       int cpu;
+
+       if (action == HK_UPDATE_MASK && upd->type == HK_TYPE_TICK) {
+               cpumask_var_t non_housekeeping_mask;
+
+               if (!alloc_cpumask_var(&non_housekeeping_mask, GFP_KERNEL))
+                       return NOTIFY_BAD;
+
+               cpumask_andnot(non_housekeeping_mask, cpu_possible_mask, upd->new_mask);
+
+               if (!tick_nohz_full_mask) {
+                       if (!zalloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
+                               free_cpumask_var(non_housekeeping_mask);
+                               return NOTIFY_BAD;
+                       }
+               }
+
+               /* Kick all CPUs to re-evaluate tick dependency before change */
+               for_each_online_cpu(cpu)
+                       tick_nohz_full_kick_cpu(cpu);
+
+               cpumask_copy(tick_nohz_full_mask, non_housekeeping_mask);
+               tick_nohz_full_running = !cpumask_empty(tick_nohz_full_mask);
+
+               /*
+                * If nohz_full is running, the timer duty must be on a housekeeper.
+                * If the current timer CPU is not a housekeeper, or no duty is assigned,
+                * pick the first housekeeper and assign it.
+                */
+               if (tick_nohz_full_running) {
+                       int timer_cpu = READ_ONCE(tick_do_timer_cpu);
+                       if (timer_cpu == TICK_DO_TIMER_NONE ||
+                           !cpumask_test_cpu(timer_cpu, upd->new_mask)) {
+                               int next_timer = cpumask_first(upd->new_mask);
+                               if (next_timer < nr_cpu_ids)
+                                       WRITE_ONCE(tick_do_timer_cpu, next_timer);
+                       }
+               }
+
+               /* Kick all CPUs again to apply new nohz full state */
+               for_each_online_cpu(cpu)
+                       tick_nohz_full_kick_cpu(cpu);
+
+               free_cpumask_var(non_housekeeping_mask);
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block tick_nohz_housekeeping_nb = {
+       .notifier_call = tick_nohz_housekeeping_reconfigure,
+};
+
 void __init tick_nohz_init(void)
 {
        int cpu, ret;
 
-       if (!tick_nohz_full_running)
-               return;
-
-       /*
-        * Full dynticks uses IRQ work to drive the tick rescheduling on safe
-        * locking contexts. But then we need IRQ work to raise its own
-        * interrupts to avoid circular dependency on the tick.
-        */
-       if (!arch_irq_work_has_interrupt()) {
-               pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
-               cpumask_clear(tick_nohz_full_mask);
-               tick_nohz_full_running = false;
-               return;
+       if (!tick_nohz_full_mask) {
+               if (!slab_is_available())
+                       alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+               else
+                       zalloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL);
        }
 
-       if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
-                       !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
-               cpu = smp_processor_id();
+       housekeeping_register_notifier(&tick_nohz_housekeeping_nb);
 
-               if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
-                       pr_warn("NO_HZ: Clearing %d from nohz_full range "
-                               "for timekeeping\n", cpu);
-                       cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+       if (tick_nohz_full_running) {
+               /*
+                * Full dynticks uses IRQ work to drive the tick rescheduling on safe
+                * locking contexts. But then we need IRQ work to raise its own
+                * interrupts to avoid circular dependency on the tick.
+                */
+               if (!arch_irq_work_has_interrupt()) {
+                       pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
+                       cpumask_clear(tick_nohz_full_mask);
+                       tick_nohz_full_running = false;
+                       goto out;
                }
+
+               if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
+                               !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
+                       cpu = smp_processor_id();
+
+                       if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+                               pr_warn("NO_HZ: Clearing %d from nohz_full range "
+                                       "for timekeeping\n", cpu);
+                               cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+                       }
+               }
+
+               pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
+                       cpumask_pr_args(tick_nohz_full_mask));
        }
 
-       for_each_cpu(cpu, tick_nohz_full_mask)
+out:
+       for_each_possible_cpu(cpu)
                ct_cpu_track_user(cpu);
 
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "kernel/nohz:predown", NULL,
                                        tick_nohz_cpu_down);
        WARN_ON(ret < 0);
-       pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
-               cpumask_pr_args(tick_nohz_full_mask));
 }
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 
@@ -1200,7 +1280,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
        if (unlikely(report_idle_softirq()))
                return false;
 
-       if (tick_nohz_full_enabled()) {
+       if (tick_nohz_full_running) {
                int tick_cpu = READ_ONCE(tick_do_timer_cpu);
 
                /*

-- 
2.43.0


Reply via email to