A few comments still mention a context_tracking .dynticks field, but that
has been merged into the .state field as of:

171476775d32 ("context_tracking: Convert state to atomic_t")

Update these stale references.

Signed-off-by: Valentin Schneider <[email protected]>
---
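For reference, a simplified sketch of the consolidated field these comments
now point at (based on include/linux/context_tracking_state.h; the config
guards and user-tracking fields are omitted here, so treat this as
illustrative rather than the verbatim definition):

	struct context_tracking {
		/*
		 * Low bits hold the CT_STATE_* value (kernel/idle/user/guest);
		 * the bits at and above CT_RCU_WATCHING form the RCU watching
		 * counter that ct_state_inc() advances on EQS entry/exit.
		 */
		atomic_t state;
		long nesting;		/* Process-context nesting depth. */
		long nmi_nesting;	/* IRQ/NMI nesting depth. */
	};
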
 kernel/context_tracking.c | 8 ++++----
 kernel/rcu/tree.c         | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e82f19bab9ed9..dfa64f21d900d 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -87,7 +87,7 @@ static noinstr void ct_kernel_exit_state(int offset)
         * critical sections, and we also must force ordering with the
         * next idle sojourn.
         */
-       rcu_task_trace_enter();  // Before ->dynticks update!
+       rcu_task_trace_enter();  // Before CT state update!
        seq = ct_state_inc(offset);
        // RCU is no longer watching.  Better be in extended quiescent state!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_RCU_WATCHING));
@@ -109,7 +109,7 @@ static noinstr void ct_kernel_enter_state(int offset)
         */
        seq = ct_state_inc(offset);
        // RCU is now watching.  Better not be in an extended quiescent state!
-       rcu_task_trace_exit();  // After ->dynticks update!
+       rcu_task_trace_exit();  // After CT state update!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
 }
 
@@ -207,7 +207,7 @@ void noinstr ct_nmi_exit(void)
 
        instrumentation_begin();
        /*
-        * Check for ->nmi_nesting underflow and bad ->dynticks.
+        * Check for ->nmi_nesting underflow and bad CT state.
         * (We are exiting an NMI handler, so RCU better be paying attention
         * to us!)
         */
@@ -264,7 +264,7 @@ void noinstr ct_nmi_enter(void)
        WARN_ON_ONCE(ct_nmi_nesting() < 0);
 
        /*
-        * If idle from RCU viewpoint, atomically increment ->dynticks
+        * If idle from RCU viewpoint, atomically increment CT state
         * to mark non-idle and increment ->nmi_nesting by one.
         * Otherwise, increment ->nmi_nesting by two.  This means
         * if ->nmi_nesting is equal to one, we are guaranteed
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e4783a5d18e0d..11bd55f7ec398 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -286,7 +286,7 @@ void rcu_softirq_qs(void)
  * to the next non-quiescent value.
  *
  * The non-atomic test/increment sequence works because the upper bits
- * of the ->dynticks counter are manipulated only by the corresponding CPU,
+ * of the ->state variable are manipulated only by the corresponding CPU,
  * or when the corresponding CPU is offline.
  */
 static void rcu_watching_eqs_online(void)
@@ -335,10 +335,10 @@ bool rcu_watching_zero_in_eqs(int cpu, int *vp)
 
        // If not quiescent, force back to earlier extended quiescent state.
        snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
-       smp_rmb(); // Order ->dynticks and *vp reads.
+       smp_rmb(); // Order CT state and *vp reads.
        if (READ_ONCE(*vp))
                return false;  // Non-zero, so report failure;
-       smp_rmb(); // Order *vp read and ->dynticks re-read.
+       smp_rmb(); // Order *vp read and CT state re-read.
 
        // If still in the same extended quiescent state, we are good!
        return snap == ct_rcu_watching_cpu(cpu);
-- 
2.43.0

