This commit removes the obsolete nocb_q_count and nocb_q_count_lazy
fields. It also removes the now-unneeded rcu_get_n_cbs_nocb_cpu(),
adjusts rcu_get_n_cbs_cpu() accordingly, and makes
rcutree_migrate_callbacks() once again disable the ->cblist of
offline CPUs.

Signed-off-by: Paul E. McKenney <[email protected]>
---
 kernel/rcu/tree.c        |  6 +++---
 kernel/rcu/tree.h        |  3 ---
 kernel/rcu/tree_plugin.h | 14 --------------
 3 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 054418d2d960..e5f30b364276 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -210,10 +210,9 @@ static long rcu_get_n_cbs_cpu(int cpu)
 {
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
-       if (rcu_segcblist_is_enabled(&rdp->cblist) &&
-           !rcu_segcblist_is_offloaded(&rdp->cblist)) /* Online normal CPU? */
+       if (rcu_segcblist_is_enabled(&rdp->cblist))
                return rcu_segcblist_n_cbs(&rdp->cblist);
-       return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
+       return 0;
 }
 
 void rcu_softirq_qs(void)
@@ -3181,6 +3180,7 @@ void rcutree_migrate_callbacks(int cpu)
        needwake = rcu_advance_cbs(my_rnp, rdp) ||
                   rcu_advance_cbs(my_rnp, my_rdp);
        rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
+       rcu_segcblist_disable(&rdp->cblist);
        WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
                     !rcu_segcblist_n_cbs(&my_rdp->cblist));
        if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 74e3a4ab8095..d1df192272fb 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -194,8 +194,6 @@ struct rcu_data {
 
        /* 5) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
-       atomic_long_t nocb_q_count;     /* # CBs waiting for nocb */
-       atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
        struct rcu_head *nocb_cb_head;  /* CBs ready to invoke. */
        struct rcu_head **nocb_cb_tail;
        struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
@@ -437,7 +435,6 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(void);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 838e0caaf53a..458838c63a6c 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2040,15 +2040,6 @@ void rcu_bind_current_to_nocb(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
 
-/*
- * Return the number of RCU callbacks still queued from the specified
- * CPU, which must be a nocbs CPU.
- */
-static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
-{
-       return atomic_long_read(&rdp->nocb_q_count);
-}
-
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 /* No ->nocb_lock to acquire.  */
@@ -2108,11 +2099,6 @@ static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
-static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
-{
-       return 0;
-}
-
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /*
-- 
2.17.1
