rcu_read_lock_notrace() follows preempt_disable_notrace(). The idea is
to provide a rcu_read_lock() version so that preempt_disable_notrace()
users (which use RCU-sched under the hood) can migrate to preemptible
RCU.
The first user should be tracing (tracepoints), which currently uses
the _notrace variants.

rcu_read_lock_notrace() is a slim version of rcu_read_lock(). It simply
increments/decrements the nesting counter. It does not emit any
warnings if RCU_NEST_PMAX is exceeded, nor does it participate in
CONFIG_RCU_STRICT_GRACE_PERIOD. It also does not participate in
rcu_read_unlock_special() as it would if invoked from NMI.
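The rcu_notrace guard added below allows the same in scope-based form,
e.g. (again with made-up names):

	static notrace void trace_handler(void *data)
	{
		guard(rcu_notrace)();		/* unlocked at scope exit */
		handle_event(data);
	}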

Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
---
 include/linux/rcupdate.h | 41 ++++++++++++++++++++++++++++++++++++++++
 kernel/rcu/tree_plugin.h | 14 ++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 120536f4c6eb1..0de7e68a2411a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -71,6 +71,8 @@ static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
+void __rcu_read_lock_notrace(void);
+void __rcu_read_unlock_notrace(void);
 
 /*
  * Defined as a macro as it is a very low level header included from
@@ -93,6 +95,11 @@ static inline void __rcu_read_lock(void)
        preempt_disable();
 }
 
+static inline void __rcu_read_lock_notrace(void)
+{
+       preempt_disable_notrace();
+}
+
 static inline void __rcu_read_unlock(void)
 {
        if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
@@ -100,6 +107,11 @@ static inline void __rcu_read_unlock(void)
        preempt_enable();
 }
 
+static inline void __rcu_read_unlock_notrace(void)
+{
+       preempt_enable_notrace();
+}
+
 static inline int rcu_preempt_depth(void)
 {
        return 0;
@@ -843,6 +855,16 @@ static __always_inline void rcu_read_lock(void)
                         "rcu_read_lock() used illegally while idle");
 }
 
+/*
+ * Used by tracing: cannot be traced, NMI safe, usable from the scheduler,
+ * usable to trace the RCU implementation.
+ */
+static __always_inline void rcu_read_lock_notrace(void)
+{
+       __rcu_read_lock_notrace();
+       __acquire(RCU);
+}
+
 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no
  * way for writers to lock out RCU readers.  This is a feature, not
@@ -873,6 +895,11 @@ static inline void rcu_read_unlock(void)
        __rcu_read_unlock();
 }
 
+static __always_inline void rcu_read_unlock_notrace(void)
+{
+       __release(RCU);
+       __rcu_read_unlock_notrace();
+}
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
@@ -1166,4 +1193,18 @@ DEFINE_LOCK_GUARD_0(rcu,
        } while (0),
        rcu_read_unlock())
 
+DEFINE_LOCK_GUARD_0(rcu_notrace,
+       do {
+               rcu_read_lock_notrace();
+               /*
+                * sparse doesn't call the cleanup function,
+                * so just release immediately and don't track
+                * the context. We don't need to anyway, since
+                * the whole point of the guard is to not need
+                * the explicit unlock.
+                */
+               __release(RCU);
+       } while (0),
+       rcu_read_unlock_notrace())
+
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0b0f56f6abc85..02cccca917a22 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -420,6 +420,13 @@ void __rcu_read_lock(void)
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
 
+notrace void __rcu_read_lock_notrace(void)
+{
+       rcu_preempt_read_enter();
+       barrier();  /* critical section after entry code. */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock_notrace);
+
 /*
  * Preemptible RCU implementation for rcu_read_unlock().
  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
@@ -445,6 +452,13 @@ void __rcu_read_unlock(void)
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
 
+notrace void __rcu_read_unlock_notrace(void)
+{
+       barrier();  // critical section before exit code.
+       rcu_preempt_read_exit();
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock_notrace);
+
 /*
  * Advance a ->blkd_tasks-list pointer to the next entry, instead
  * returning NULL if at the end of the list.
-- 
2.49.0

