The context_tracking.state RCU_DYNTICKS subvariable has been renamed to
RCU_WATCHING; reflect that change in the related helpers.

Signed-off-by: Valentin Schneider <[email protected]>
---
 .../Memory-Ordering/Tree-RCU-Memory-Ordering.rst |  2 +-
 kernel/rcu/tree.c                                | 16 ++++++++--------
 kernel/rcu/tree_exp.h                            |  2 +-
 kernel/rcu/tree_stall.h                          |  4 ++--
 4 files changed, 12 insertions(+), 12 deletions(-)
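
Reviewer note, not part of the commit message: the snapshot/compare pattern
these helpers implement can be illustrated with a minimal, self-contained
userspace sketch. It uses C11 atomics and a deliberately simplified counter
encoding (odd = RCU watching, even = extended quiescent state); all names
and the layout below are illustrative stand-ins, not the kernel's actual
context_tracking.state encoding.

/*
 * Standalone illustration (not kernel code) of the pattern behind
 * rcu_watching_snap() / rcu_dynticks_in_eqs_since(): take a fully
 * ordered snapshot of a per-CPU "watching" counter, then compare it
 * with a later snapshot to detect that an extended quiescent state
 * (EQS) was passed through in the meantime.
 *
 * Build: cc -std=c11 -o sketch sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified encoding: odd value = watching, even value = in EQS. */
static atomic_int watching_counter = 1;

/* Fully ordered snapshot; fetch_add(0) stands in for atomic_add_return(0). */
static int watching_snap(void)
{
	return atomic_fetch_add(&watching_counter, 0);
}

/* True if the snapshot was taken while the (pretend) CPU was in an EQS. */
static bool snap_in_eqs(int snap)
{
	return !(snap & 0x1);
}

/* True if the counter changed since @snap, i.e. an EQS was entered/exited. */
static bool in_eqs_since(int snap)
{
	return snap != watching_snap();
}

/* Each idle entry/exit increments the counter, toggling the low bit. */
static void eqs_transition(void)
{
	atomic_fetch_add(&watching_counter, 1);
}

int main(void)
{
	int snap = watching_snap();

	printf("in EQS at snapshot time:     %d\n", snap_in_eqs(snap));
	eqs_transition();	/* pretend the CPU went idle */
	printf("EQS observed since snapshot: %d\n", in_eqs_since(snap));
	return 0;
}

The real helpers differ in detail (smp_mb() plus an acquire read of the
per-CPU counter, and a richer context_tracking.state layout), but the
ordering-plus-compare idea is the same.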

diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index 5750f125361b0..e8ef12ca1e9da 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
@@ -149,7 +149,7 @@ This case is handled by calls to the strongly ordered
 ``atomic_add_return()`` read-modify-write atomic operation that
 is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
 time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
-The grace-period kthread invokes ``rcu_dynticks_snap()`` and
+The grace-period kthread invokes ``rcu_watching_snap()`` and
 ``rcu_dynticks_in_eqs_since()`` (both of which invoke
 an ``atomic_add_return()`` of zero) to detect idle CPUs.
 
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fe2beb7d2e82d..857c2565efeac 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -297,17 +297,17 @@ static void rcu_watching_eqs_online(void)
 }
 
 /*
- * Snapshot the ->dynticks counter with full ordering so as to allow
+ * Snapshot the RCU_WATCHING counter with full ordering so as to allow
  * stable comparison of this counter with past and future snapshots.
  */
-static int rcu_dynticks_snap(int cpu)
+static int rcu_watching_snap(int cpu)
 {
        smp_mb();  // Fundamental RCU ordering guarantee.
        return ct_rcu_watching_cpu_acquire(cpu);
 }
 
 /*
- * Return true if the snapshot returned from rcu_dynticks_snap()
+ * Return true if the snapshot returned from rcu_watching_snap()
  * indicates that RCU is in an extended quiescent state.
  */
 static bool rcu_dynticks_in_eqs(int snap)
@@ -318,11 +318,11 @@ static bool rcu_dynticks_in_eqs(int snap)
 /*
  * Return true if the CPU corresponding to the specified rcu_data
  * structure has spent some time in an extended quiescent state since
- * rcu_dynticks_snap() returned the specified snapshot.
+ * rcu_watching_snap() returned the specified snapshot.
  */
 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
 {
-       return snap != rcu_dynticks_snap(rdp->cpu);
+       return snap != rcu_watching_snap(rdp->cpu);
 }
 
 /*
@@ -770,7 +770,7 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-       rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
+       rdp->dynticks_snap = rcu_watching_snap(rdp->cpu);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
@@ -2185,7 +2185,7 @@ static noinline void rcu_gp_cleanup(void)
 
                // We get here either if there is no need for an
                // additional grace period or if rcu_accelerate_cbs() has
-               // already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
+               // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
                // So all we need to do is to clear all of the other
                // ->gp_flags bits.
 
@@ -4798,7 +4798,7 @@ rcu_boot_init_percpu_data(int cpu)
        rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
        INIT_WORK(&rdp->strict_work, strict_work_handler);
        WARN_ON_ONCE(ct->nesting != 1);
-       WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
+       WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_watching_snap(cpu)));
        rdp->barrier_seq_snap = rcu_state.barrier_sequence;
        rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
        rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8a1d9c8bd9f74..50ec57304c1b7 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -357,7 +357,7 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
-                       snap = rcu_dynticks_snap(cpu);
+                       snap = rcu_watching_snap(cpu);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 6cb346952e3e4..4fa23f9fc207f 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -501,7 +501,7 @@ static void print_cpu_stall_info(int cpu)
        }
        delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
        falsepositive = rcu_is_gp_kthread_starving(NULL) &&
-                       rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
+                       rcu_dynticks_in_eqs(rcu_watching_snap(cpu));
        rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
        if (rcuc_starved)
                // Print signed value, as negative values indicate a probable bug.
@@ -515,7 +515,7 @@ static void print_cpu_stall_info(int cpu)
                        rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                                "!."[!delta],
               ticks_value, ticks_title,
-              rcu_dynticks_snap(cpu) & 0xffff,
+              rcu_watching_snap(cpu) & 0xffff,
               ct_nesting_cpu(cpu), ct_nmi_nesting_cpu(cpu),
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
-- 
2.43.0

