From: Lai Jiangshan <[email protected]>

When the per-cpu rcu_preempt_count is used, it also has to be switched
on context switch.  Switching it takes only a few instructions, so the
helper is inlined to avoid call overhead in the scheduler.  For now,
rcu_preempt_switch() is an empty stub hooked into __schedule().
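
As illustration only (not part of this patch), here is a minimal sketch
of what the inlined switch could look like once a per-cpu
rcu_preempt_count exists.  The ->rcu_preempt_count task_struct field
and the rcu_preempt_count per-cpu variable are assumed names for this
sketch, not taken from this series:

	/*
	 * Hypothetical sketch: stash the outgoing task's count and
	 * load the incoming task's count into the per-cpu variable.
	 * Both names below are assumptions for illustration.
	 */
	static __always_inline void
	rcu_preempt_switch(struct task_struct *prev, struct task_struct *next)
	{
		prev->rcu_preempt_count = this_cpu_read(rcu_preempt_count);
		this_cpu_write(rcu_preempt_count, next->rcu_preempt_count);
	}

Because the switch is just a per-cpu read and write, inlining it into
__schedule() keeps the cost negligible, and the empty stub below
compiles away entirely when the per-cpu counter is not used.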

Cc: "Paul E. McKenney" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Signed-off-by: Lai Jiangshan <[email protected]>
---
 kernel/rcu/rcu.h    | 5 +++++
 kernel/sched/core.c | 2 ++
 2 files changed, 7 insertions(+)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index b17b2ed657fc..ea5ae957c687 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -668,4 +668,9 @@ int rcu_stall_notifier_call_chain(unsigned long val, void *v);
 static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
 #endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
+static inline void
+rcu_preempt_switch(struct task_struct *prev, struct task_struct *next)
+{
+}
+
 #endif /* __KERNEL_RCU_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7019a40457a6..1d9e3c51c913 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -95,6 +95,7 @@
 #include "../workqueue_internal.h"
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
+#include "../rcu/rcu.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
@@ -6737,6 +6738,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
                 */
                ++*switch_count;
 
+               rcu_preempt_switch(prev, next);
                migrate_disable_switch(rq, prev);
                psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 
-- 
2.19.1.6.gb485710b

