From: "Paul E. McKenney" <[email protected]>

This commit abstracts funnel locking from synchronize_sched_expedited()
so that it may be used by synchronize_rcu_expedited().

Signed-off-by: Paul E. McKenney <[email protected]>
---
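A note for reviewers who have not seen the funnel-locking idiom before:
exp_funnel_lock() below walks from this CPU's leaf rcu_node toward the
root, taking each level's ->exp_funnel_mutex hand-over-hand and bailing
out as soon as the desired expedited grace period is seen to have
completed.  The fragment that follows is a minimal userspace sketch of
that pattern, assuming made-up node and work_done() definitions; it is
illustrative only and is not part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;		/* NULL at the root */
	pthread_mutex_t lock;		/* stands in for ->exp_funnel_mutex */
};

static unsigned long completed;		/* operations completed so far */

/* Has the operation we snapshotted at "s" already been done? */
static bool work_done(unsigned long s)
{
	return completed >= s;
}

/*
 * Walk from "leaf" (assumed non-NULL) to the root, hand-over-hand.
 * Return NULL if someone else did the work, otherwise return the root
 * with its lock held.
 */
static struct node *funnel_lock(struct node *leaf, unsigned long s)
{
	struct node *np;
	struct node *held = NULL;

	for (np = leaf; np; np = np->parent) {
		if (work_done(s)) {
			if (held)
				pthread_mutex_unlock(&held->lock);
			return NULL;
		}
		pthread_mutex_lock(&np->lock);
		if (held)
			pthread_mutex_unlock(&held->lock);
		held = np;
	}
	if (work_done(s)) {		/* final recheck at the root */
		pthread_mutex_unlock(&held->lock);
		return NULL;
	}
	return held;
}

int main(void)
{
	struct node root = { .parent = NULL, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node leaf = { .parent = &root, .lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned long s = completed + 1;	/* wait for one more completion */
	struct node *np = funnel_lock(&leaf, s);

	if (np) {
		completed++;			/* nobody beat us to it: do the work */
		pthread_mutex_unlock(&np->lock);
	}
	printf("completed = %lu\n", completed);
	return 0;
}

(Compile with "gcc -pthread".  The single-threaded main() only shows the
control flow; a real user would also need proper memory ordering around
the completion counter, which the kernel gets from its sequence-counter
operations.)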
 kernel/rcu/tree.c | 80 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 47 insertions(+), 33 deletions(-)
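Once the helper is in place, a second expedited primitive should be able
to reuse it unchanged.  Roughly, and only as a sketch: rcu_state_p, the
rcu_exp_gp_seq_snap() snapshot helper, and the elided quiescent-state
forcing step are assumptions about code outside this diff, not something
this patch adds.

/* Hypothetical future caller of exp_funnel_lock(); not part of this patch. */
void synchronize_rcu_expedited_sketch(void)
{
	unsigned long s;
	struct rcu_node *rnp;
	struct rcu_state *rsp = rcu_state_p;	/* assumed flavor-specific rcu_state */

	s = rcu_exp_gp_seq_snap(rsp);		/* snapshot the expedited sequence */

	rnp = exp_funnel_lock(rsp, s);
	if (rnp == NULL)
		return;				/* Someone else did our work for us. */

	rcu_exp_gp_seq_start(rsp);
	/* ... force quiescent states for this RCU flavor here ... */
	rcu_exp_gp_seq_end(rsp);

	mutex_unlock(&rnp->exp_funnel_mutex);
}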

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f96500e462fd..ad6980af1c99 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3361,16 +3361,6 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
        return rcu_seq_done(&rsp->expedited_sequence, s);
 }
 
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-       struct rcu_state *rsp = data;
-
-       /* We are here: If we are last, do the wakeup. */
-       if (atomic_dec_and_test(&rsp->expedited_need_qs))
-               wake_up(&rsp->expedited_wq);
-       return 0;
-}
-
 /* Common code for synchronize_sched_expedited() work-done checking. */
 static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
                              atomic_long_t *stat, unsigned long s)
@@ -3387,6 +3377,48 @@ static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
        return false;
 }
 
+/*
+ * Funnel-lock acquisition for expedited grace periods.  Returns a
+ * pointer to the root rcu_node structure, or NULL if some other
+ * task did the expedited grace period for us.
+ */
+static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+{
+       struct rcu_node *rnp0;
+       struct rcu_node *rnp1 = NULL;
+
+       /*
+        * Each pass through the following loop works its way
+        * up the rcu_node tree, returning if others have done the
+        * work or otherwise falls through holding the root rnp's
+        * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
+        * can be inexact, as it is just promoting locality and is not
+        * strictly needed for correctness.
+        */
+       rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
+       for (; rnp0 != NULL; rnp0 = rnp0->parent) {
+               if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone1, s))
+                       return NULL;
+               mutex_lock(&rnp0->exp_funnel_mutex);
+               if (rnp1)
+                       mutex_unlock(&rnp1->exp_funnel_mutex);
+               rnp1 = rnp0;
+       }
+       if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone2, s))
+               return NULL;
+       return rnp1;
+}
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+       struct rcu_state *rsp = data;
+
+       /* We are here: If we are last, do the wakeup. */
+       if (atomic_dec_and_test(&rsp->expedited_need_qs))
+               wake_up(&rsp->expedited_wq);
+       return 0;
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -3407,8 +3439,7 @@ void synchronize_sched_expedited(void)
 {
        int cpu;
        long s;
-       struct rcu_node *rnp0;
-       struct rcu_node *rnp1 = NULL;
+       struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_sched_state;
 
        /* Take a snapshot of the sequence number.  */
@@ -3422,26 +3453,9 @@ void synchronize_sched_expedited(void)
        }
        WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
-       /*
-        * Each pass through the following loop works its way
-        * up the rcu_node tree, returning if others have done the
-        * work or otherwise falls through holding the root rnp's
-        * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
-        * can be inexact, as it is just promoting locality and is not
-        * strictly needed for correctness.
-        */
-       rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
-       for (; rnp0 != NULL; rnp0 = rnp0->parent) {
-               if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone1, s))
-                       return;
-               mutex_lock(&rnp0->exp_funnel_mutex);
-               if (rnp1)
-                       mutex_unlock(&rnp1->exp_funnel_mutex);
-               rnp1 = rnp0;
-       }
-       rnp0 = rnp1;  /* rcu_get_root(rsp), AKA root rcu_node structure. */
-       if (sync_sched_exp_wd(rsp, rnp0, &rsp->expedited_workdone2, s))
-               return;
+       rnp = exp_funnel_lock(rsp, s);
+       if (rnp == NULL)
+               return;  /* Someone else did our work for us. */
 
        rcu_exp_gp_seq_start(rsp);
 
@@ -3467,7 +3481,7 @@ void synchronize_sched_expedited(void)
                           !atomic_read(&rsp->expedited_need_qs));
 
        rcu_exp_gp_seq_end(rsp);
-       mutex_unlock(&rnp0->exp_funnel_mutex);
+       mutex_unlock(&rnp->exp_funnel_mutex);
        smp_mb(); /* ensure subsequent action seen after grace period. */
 
        put_online_cpus();
-- 
1.8.1.5
