There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_report_qs_rnp().

Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f0e7e3972fd9..c9f4d7f3de91 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -132,9 +132,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-                 struct rcu_node *rnp, unsigned long gps, unsigned long flags);
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+                             unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -1946,7 +1945,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                mask = rnp->qsmask & ~rnp->qsmaskinitnext;
                rnp->rcu_gp_init_mask = mask;
                if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
-                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+                       rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
                else
                        raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_tasks_rcu_qs();
@@ -2213,13 +2212,13 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * disabled.  This allows propagating quiescent state due to resumed tasks
  * during grace-period initialization.
  */
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-                 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+                             unsigned long gps, unsigned long flags)
        __releases(rnp->lock)
 {
        unsigned long oldmask = 0;
        struct rcu_node *rnp_c;
+       struct rcu_state __maybe_unused *rsp = &rcu_state;
 
        raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -2311,7 +2310,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        mask = rnp->grpmask;
        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
-       rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+       rcu_report_qs_rnp(mask, rnp_p, gps, flags);
 }
 
 /*
@@ -2354,7 +2353,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
                /* ^^^ Released rnp->lock */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
@@ -2622,7 +2621,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                }
                if (mask != 0) {
                        /* Idle/offline CPUs, report (releases rnp->lock). */
-                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+                       rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -3576,7 +3575,7 @@ void rcu_cpu_starting(unsigned int cpu)
                rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
                if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
                        /* Report QS -after- changing ->qsmaskinitnext! */
-                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+                       rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
                } else {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
@@ -3605,7 +3604,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
        rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
        if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
                /* Report quiescent state -before- changing ->qsmaskinitnext! */
-               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
        }
        rnp->qsmaskinitnext &= ~mask;
-- 
2.17.1

Reply via email to