On 8/7/25 14:30, Zhao Liu wrote:
Make rcu_read_lock() and rcu_read_unlock() not inline, so that bindgen
can generate bindings for them.
Suggested-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Zhao Liu <zhao1....@intel.com>
Either this, or keep them inline and add wrappers rust_rcu_read_lock() and
rust_rcu_read_unlock().
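Something along these lines, as a minimal untested sketch (the rust_ names
come from the suggestion above; the bodies just forward to the existing
inline functions):

  /* include/qemu/rcu.h: keep the inline fast paths for C callers and
   * declare out-of-line wrappers that bindgen can pick up.
   */
  void rust_rcu_read_lock(void);
  void rust_rcu_read_unlock(void);

  /* util/rcu.c: trivial exported wrappers around the inlines. */
  void rust_rcu_read_lock(void)
  {
      rcu_read_lock();
  }

  void rust_rcu_read_unlock(void)
  {
      rcu_read_unlock();
  }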
Paolo
---
include/qemu/rcu.h | 45 ++-------------------------------------------
util/rcu.c         | 43 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 45 insertions(+), 43 deletions(-)
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 020dbe4d8b77..34d955204b81 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -75,49 +75,8 @@ struct rcu_reader_data {
QEMU_DECLARE_CO_TLS(struct rcu_reader_data, rcu_reader)
-static inline void rcu_read_lock(void)
-{
-    struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
-    unsigned ctr;
-
-    if (p_rcu_reader->depth++ > 0) {
-        return;
-    }
-
-    ctr = qatomic_read(&rcu_gp_ctr);
-    qatomic_set(&p_rcu_reader->ctr, ctr);
-
-    /*
-     * Read rcu_gp_ptr and write p_rcu_reader->ctr before reading
-     * RCU-protected pointers.
-     */
-    smp_mb_placeholder();
-}
-
-static inline void rcu_read_unlock(void)
-{
-    struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
-
-    assert(p_rcu_reader->depth != 0);
-    if (--p_rcu_reader->depth > 0) {
-        return;
-    }
-
-    /* Ensure that the critical section is seen to precede the
-     * store to p_rcu_reader->ctr. Together with the following
-     * smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
-     * are sequentially consistent.
-     */
-    qatomic_store_release(&p_rcu_reader->ctr, 0);
-
-    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
-    smp_mb_placeholder();
-    if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
-        qatomic_set(&p_rcu_reader->waiting, false);
-        qemu_event_set(&rcu_gp_event);
-    }
-}
-
+void rcu_read_lock(void);
+void rcu_read_unlock(void);
void synchronize_rcu(void);
/*
diff --git a/util/rcu.c b/util/rcu.c
index b703c86f15a3..2dfd82796e1e 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -141,6 +141,49 @@ static void wait_for_readers(void)
QLIST_SWAP(®istry, &qsreaders, node);
}
+void rcu_read_lock(void)
+{
+    struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
+    unsigned ctr;
+
+    if (p_rcu_reader->depth++ > 0) {
+        return;
+    }
+
+    ctr = qatomic_read(&rcu_gp_ctr);
+    qatomic_set(&p_rcu_reader->ctr, ctr);
+
+    /*
+     * Read rcu_gp_ptr and write p_rcu_reader->ctr before reading
+     * RCU-protected pointers.
+     */
+    smp_mb_placeholder();
+}
+
+void rcu_read_unlock(void)
+{
+    struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
+
+    assert(p_rcu_reader->depth != 0);
+    if (--p_rcu_reader->depth > 0) {
+        return;
+    }
+
+    /* Ensure that the critical section is seen to precede the
+     * store to p_rcu_reader->ctr. Together with the following
+     * smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr
+     * are sequentially consistent.
+     */
+    qatomic_store_release(&p_rcu_reader->ctr, 0);
+
+    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */
+    smp_mb_placeholder();
+    if (unlikely(qatomic_read(&p_rcu_reader->waiting))) {
+        qatomic_set(&p_rcu_reader->waiting, false);
+        qemu_event_set(&rcu_gp_event);
+    }
+}
+
void synchronize_rcu(void)
{
    QEMU_LOCK_GUARD(&rcu_sync_lock);
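
Either way, the reader-side pattern for C callers stays the same. For
illustration, a hypothetical reader (struct my_config, global_config and
the timeout field are made-up names; qatomic_rcu_read() is the existing
accessor for RCU-protected pointers):

  /* global_config is assumed to be published elsewhere with
   * qatomic_rcu_set() after the new struct is fully initialized.
   */
  static int get_timeout(void)
  {
      struct my_config *cfg;
      int timeout;

      rcu_read_lock();
      cfg = qatomic_rcu_read(&global_config);
      /* cfg cannot be freed until rcu_read_unlock() */
      timeout = cfg->timeout;
      rcu_read_unlock();

      return timeout;
  }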