Currently, rcu_torture_timer() relies on a lock to guard updates to
n_rcu_torture_timers.  Unfortunately, consolidating code with
rcu_torture_reader() will dispense with this lock.  This commit
therefore converts n_rcu_torture_timers to an atomic_long_t and uses
atomic_long_inc() to carry out the update.

Signed-off-by: Paul E. McKenney <[email protected]>
---
 kernel/rcu/rcutorture.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
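
For reviewers, a minimal userspace analogue of the lockless-counter
pattern this patch adopts, written with C11 atomics (the in-kernel
equivalents are atomic_long_t, atomic_long_inc(), and
atomic_long_read()).  This sketch is illustrative only and not part of
the patch; the function names below are hypothetical stand-ins for
rcu_torture_timer() and rcu_torture_stats_print().

	#include <stdatomic.h>
	#include <stdio.h>

	/* Was: a plain long guarded by a spinlock. */
	static atomic_long n_timers;

	static void timer_handler(void)
	{
		/* Lockless increment; safe with concurrent callers. */
		atomic_fetch_add_explicit(&n_timers, 1,
					  memory_order_relaxed);
	}

	static void stats_print(void)
	{
		/* Atomic read for reporting; no lock needed. */
		printf("nt: %ld\n",
		       atomic_load_explicit(&n_timers,
					    memory_order_relaxed));
	}

	int main(void)
	{
		timer_handler();
		timer_handler();
		stats_print();	/* Prints "nt: 2". */
		return 0;
	}

Because the counter is only ever incremented and read, a relaxed atomic
suffices and the spinlock's ordering guarantees are not needed, which is
what lets the consolidated reader path drop the lock entirely.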

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 971e31ae9bcf..2452e4a29923 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -151,7 +151,7 @@ static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
 static long n_barrier_attempts;
 static long n_barrier_successes;
 static atomic_long_t n_cbfloods;
@@ -1160,6 +1160,7 @@ static void rcu_torture_timer(struct timer_list *unused)
        int pipe_count;
        unsigned long long ts;
 
+       atomic_long_inc(&n_rcu_torture_timers);
        idx = cur_ops->readlock();
        started = cur_ops->get_gp_seq();
        ts = rcu_trace_clock_local();
@@ -1177,7 +1178,6 @@ static void rcu_torture_timer(struct timer_list *unused)
                atomic_inc(&n_rcu_torture_mberror);
        spin_lock(&rand_lock);
        cur_ops->read_delay(&rand);
-       n_rcu_torture_timers++;
        spin_unlock(&rand_lock);
        preempt_disable();
        pipe_count = p->rtort_pipe_count;
@@ -1290,7 +1290,7 @@ rcu_torture_stats_print(void)
        pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
                n_rcu_torture_boost_failure,
                n_rcu_torture_boosts,
-               n_rcu_torture_timers);
+               atomic_long_read(&n_rcu_torture_timers));
        torture_onoff_stats();
        pr_cont("barrier: %ld/%ld:%ld ",
                n_barrier_successes,
-- 
2.17.1
