This commit saves a few lines (a net 24, per the diffstat below) by
consolidating the RCU-bh function definitions at the end of
include/linux/rcupdate.h.  This consolidation also makes it easier to
remove them all when the time comes.
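
For illustration only (not part of this patch): the hypothetical caller
below, with made-up names struct foo, foo_free_cb(), foo_retire(), and
foo_shutdown(), shows that existing RCU-bh users continue to compile and
behave the same, because the consolidated wrappers simply forward to the
corresponding RCU primitives.

#include <linux/rcupdate.h>     /* call_rcu_bh(), rcu_barrier_bh() */
#include <linux/slab.h>         /* kfree() */

struct foo {
        struct rcu_head rh;
        int data;
};

/* Callback invoked after a grace period has elapsed. */
static void foo_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct foo, rh));
}

/* Retire an object; call_rcu_bh() now forwards to call_rcu(). */
static void foo_retire(struct foo *fp)
{
        call_rcu_bh(&fp->rh, foo_free_cb);
}

/* Wait for pending callbacks; rcu_barrier_bh() now forwards to rcu_barrier(). */
static void foo_shutdown(void)
{
        rcu_barrier_bh();
}

Keeping the wrappers as one block of static inlines in a single header
also means their eventual removal will be a one-hunk change.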

Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
---
 include/linux/rcupdate.h | 27 ++++++++++++++++++++++-----
 include/linux/rcutiny.h  | 15 ---------------
 include/linux/rcutree.h  | 17 -----------------
 kernel/rcu/tree.c        |  9 ---------
 4 files changed, 22 insertions(+), 46 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1207c6c9bd8b..e530f5739033 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -58,11 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier_tasks(void);
 void synchronize_rcu(void);
 
-static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
-       call_rcu(head, func);
-}
-
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
@@ -875,4 +870,26 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
 
 
+/* Transitional pre-consolidation compatibility definitions. */
+
+static inline void synchronize_rcu_bh(void)
+{
+       synchronize_rcu();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+       synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+       call_rcu(head, func);
+}
+
+static inline void rcu_barrier_bh(void)
+{
+       rcu_barrier();
+}
+
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e66fb8bc2127..df82bada9b19 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -68,21 +68,6 @@ static inline void rcu_barrier_sched(void)
        rcu_barrier();  /* Only one CPU, so only one list of callbacks! */
 }
 
-static inline void rcu_barrier_bh(void)
-{
-       rcu_barrier();
-}
-
-static inline void synchronize_rcu_bh(void)
-{
-       synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
-       synchronize_sched();
-}
-
 static inline void synchronize_rcu_expedited(void)
 {
        synchronize_sched();
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 6d30a0809300..94820156aa62 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,11 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu)
        rcu_note_context_switch(false);
 }
 
-static inline void synchronize_rcu_bh(void)
-{
-       synchronize_rcu();
-}
-
 void synchronize_rcu_expedited(void);
 
 static inline void synchronize_sched_expedited(void)
@@ -59,19 +54,7 @@ static inline void synchronize_sched_expedited(void)
 
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * This is a transitional API and will soon be removed, with all
- * callers converted to synchronize_rcu_expedited().
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
-       synchronize_rcu_expedited();
-}
-
 void rcu_barrier(void);
-void rcu_barrier_bh(void);
 void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
 unsigned long get_state_synchronize_rcu(void);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bfb6b15bc27c..f1e8a4431ebd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3251,15 +3251,6 @@ static void _rcu_barrier(void)
        mutex_unlock(&rcu_state.barrier_mutex);
 }
 
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
-       _rcu_barrier();
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  *
-- 
2.17.1
