From: Lai Jiangshan <[email protected]>

Add rcu_read_unlock_special_thunk(), which saves and restores the
caller-saved registers around the call to rcu_read_unlock_special(), so
that the inlined rcu_read_unlock() does not need to emit any
register-saving code of its own.

This reduces the slow path of rcu_read_unlock() at the caller site to
just two instructions.
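
For illustration, the code at a caller site is expected to look roughly
like the sketch below. This is an approximation only: the percpu symbol
name, registers, and branch condition are placeholders and depend on the
compiler and on the CONFIG_PCPU_RCU_PREEMPT_COUNT implementation of
pcpu_rcu_preempt_count_dec_and_test().

	/* inlined fast path */
	decl	%gs:__rcu_preempt_count		# hypothetical percpu counter
	jnz	.Lunlock_special		# special handling needed?
.Lcontinue:
	...
.Lunlock_special:
	/* the entire slow path at the caller site: two instructions */
	call	rcu_read_unlock_special_thunk	# thunk saves/restores regs
	jmp	.Lcontinue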

Cc: "Paul E. McKenney" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Signed-off-by: Lai Jiangshan <[email protected]>
---
 arch/x86/entry/thunk.S             | 5 +++++
 arch/x86/include/asm/rcu_preempt.h | 4 +++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/entry/thunk.S b/arch/x86/entry/thunk.S
index 119ebdc3d362..10c60369a67c 100644
--- a/arch/x86/entry/thunk.S
+++ b/arch/x86/entry/thunk.S
@@ -13,3 +13,8 @@ THUNK preempt_schedule_thunk, preempt_schedule
 THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
 EXPORT_SYMBOL(preempt_schedule_thunk)
 EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
+
+#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
+THUNK rcu_read_unlock_special_thunk, rcu_read_unlock_special
+EXPORT_SYMBOL_GPL(rcu_read_unlock_special_thunk)
+#endif /* #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT */
diff --git a/arch/x86/include/asm/rcu_preempt.h b/arch/x86/include/asm/rcu_preempt.h
index cb25ebe038a5..acdd73b74c05 100644
--- a/arch/x86/include/asm/rcu_preempt.h
+++ b/arch/x86/include/asm/rcu_preempt.h
@@ -97,9 +97,11 @@ static __always_inline bool pcpu_rcu_preempt_count_dec_and_test(void)
                               __percpu_arg([var]));
 }
 
+extern asmlinkage void rcu_read_unlock_special_thunk(void);
+
 #define pcpu_rcu_read_unlock_special()					\
 do {									\
-	rcu_read_unlock_special();					\
+	asm volatile ("call rcu_read_unlock_special_thunk" : ASM_CALL_CONSTRAINT);\
 } while (0)
 
 #endif // #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
-- 
2.19.1.6.gb485710b

