The rnp->gp_seq_needed field is currently read after the rcu_node lock has
already been released, i.e. locklessly. To be safe, let's drop the lock only
after the access.
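
As a rough, self-contained sketch of the general pattern (not the RCU code
itself; struct node, node_read_seq() and the pthread mutex are made-up
stand-ins for rcu_node and raw_spin_lock_rcu_node(), used only for
illustration), the point is to keep the read of the shared field inside the
locked region and unlock only afterwards:

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the shared per-node field. */
    struct node {
            pthread_mutex_t lock;
            unsigned long seq_needed;
    };

    /* Read seq_needed while still holding the lock, then unlock. */
    static unsigned long node_read_seq(struct node *n)
    {
            unsigned long seq;

            pthread_mutex_lock(&n->lock);
            seq = n->seq_needed;            /* access under the lock */
            pthread_mutex_unlock(&n->lock); /* unlock after the access */
            return seq;
    }

    int main(void)
    {
            struct node n = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .seq_needed = 42,
            };

            printf("seq_needed = %lu\n", node_read_seq(&n));
            return 0;
    }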

Signed-off-by: Joel Fernandes <j...@joelfernandes.org>
---
 kernel/rcu/tree.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 879c67a31116..efbd21b2a1a6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1603,13 +1603,13 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
        trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
        ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
-       if (rnp != rnp_start)
-               raw_spin_unlock_rcu_node(rnp);
        /* Push furthest requested GP to leaf node and rcu_data structure. */
        if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req)) {
                rnp_start->gp_seq_needed = gp_seq_req;
                rdp->gp_seq_needed = gp_seq_req;
        }
+       if (rnp != rnp_start)
+               raw_spin_unlock_rcu_node(rnp);
        return ret;
 }
 
-- 
2.17.0.441.gb46fe60e1d-goog
