The context_tracking.state RCU_DYNTICKS subvariable has been renamed to
RCU_WATCHING; reflect that change in the related helpers.

Signed-off-by: Valentin Schneider <[email protected]>
Reviewed-by: Frederic Weisbecker <[email protected]>
---
 include/linux/context_tracking_state.h |  2 +-
 kernel/rcu/tree.c                      | 10 +++++-----
 kernel/rcu/tree_stall.h                |  4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index cb90d8c178104..ad5a06a42b4a0 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -61,7 +61,7 @@ static __always_inline int ct_rcu_watching(void)
        return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
 }
 
-static __always_inline int ct_dynticks_cpu(int cpu)
+static __always_inline int ct_rcu_watching_cpu(int cpu)
 {
        struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ae478e13fc837..e60f878444704 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -327,14 +327,14 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        int snap;
 
        // If not quiescent, force back to earlier extended quiescent state.
-       snap = ct_dynticks_cpu(cpu) & ~CT_RCU_WATCHING;
-       smp_rmb(); // Order ->dynticks and *vp reads.
+       snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
+       smp_rmb(); // Order CT state and *vp reads.
        if (READ_ONCE(*vp))
                return false;  // Non-zero, so report failure;
-       smp_rmb(); // Order *vp read and ->dynticks re-read.
+       smp_rmb(); // Order *vp read and CT state re-read.
 
        // If still in the same extended quiescent state, we are good!
-       return snap == ct_dynticks_cpu(cpu);
+       return snap == ct_rcu_watching_cpu(cpu);
 }
 
 /*
@@ -4793,7 +4793,7 @@ rcu_boot_init_percpu_data(int cpu)
        rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
        INIT_WORK(&rdp->strict_work, strict_work_handler);
        WARN_ON_ONCE(ct->dynticks_nesting != 1);
-       WARN_ON_ONCE(rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu)));
+       WARN_ON_ONCE(rcu_dynticks_in_eqs(ct_rcu_watching_cpu(cpu)));
        rdp->barrier_seq_snap = rcu_state.barrier_sequence;
        rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
        rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 4b0e9d7c4c68e..d65974448e813 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -501,7 +501,7 @@ static void print_cpu_stall_info(int cpu)
        }
        delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
        falsepositive = rcu_is_gp_kthread_starving(NULL) &&
-                       rcu_dynticks_in_eqs(ct_dynticks_cpu(cpu));
+                       rcu_dynticks_in_eqs(ct_rcu_watching_cpu(cpu));
        rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
        if (rcuc_starved)
                // Print signed value, as negative values indicate a probable bug.
@@ -515,7 +515,7 @@ static void print_cpu_stall_info(int cpu)
                        rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                                "!."[!delta],
               ticks_value, ticks_title,
-              ct_dynticks_cpu(cpu) & 0xffff,
+              ct_rcu_watching_cpu(cpu) & 0xffff,
               ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
-- 
2.43.0


Reply via email to