When __atomic is not available, we use full memory barriers instead of smp_rmb/smp_wmb, since acquire and release semantics must order the load/store against all other memory operations, whereas rmb and wmb only order loads against loads and stores against stores, respectively.
Signed-off-by: Emilio G. Cota <c...@braap.org> --- include/qemu/atomic.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index 6061a46..1766c22 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -56,6 +56,21 @@ __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \ } while(0) +/* atomic read/set with acquire/release barrier */ +#define atomic_read_acquire(ptr) \ + ({ \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ + typeof(*ptr) _val; \ + __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE); \ + _val; \ + }) + +#define atomic_set_release(ptr, i) do { \ + QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \ + typeof(*ptr) _val = (i); \ + __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \ +} while(0) + /* Atomic RCU operations imply weak memory barriers */ #define atomic_rcu_read(ptr) \ @@ -243,6 +258,18 @@ #define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr)) #define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i)) +/* atomic read/set with acquire/release barrier */ +#define atomic_read_acquire(ptr) ({ \ + typeof(*ptr) _val = atomic_read(ptr); \ + smp_mb(); \ + _val; \ +}) + +#define atomic_set_release(ptr, i) do { \ + smp_mb(); \ + atomic_set(ptr, i); \ +} while (0) + /** * atomic_rcu_read - reads a RCU-protected pointer to a local variable * into a RCU read-side critical section. The pointer can later be safely -- 2.5.0