The barrier names conflict with identifiers defined by other projects
when liburing.h is included directly. Avoid using popular global names.

Signed-off-by: Julia Suvorova <[email protected]>
---
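As an illustration of the clash this avoids (hypothetical consumer code, not
part of this patch): a project that already defines its own barrier() or
smp_mb() macros could not include liburing.h directly without redefinition
warnings; with the io_uring_ prefix both sets of names coexist:

/* Hypothetical consumer translation unit with its own barrier macros. */
#define barrier()  __asm__ __volatile__("" ::: "memory")   /* project-local */
#define smp_mb()   __sync_synchronize()                    /* project-local */

#include <liburing.h>   /* previously redefined barrier()/smp_mb(); now it
                           only defines io_uring_barrier()/io_uring_smp_mb() */

int main(void)
{
        struct io_uring ring;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        smp_mb();               /* the project's own macro, untouched */
        io_uring_smp_mb();      /* liburing's prefixed macro */

        io_uring_queue_exit(&ring);
        return 0;
}
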
 src/include/liburing.h         |  9 +++++---
 src/include/liburing/barrier.h | 42 ++++++++++++++++++----------------
 src/queue.c                    |  2 +-
 test/io_uring_enter.c          |  2 +-
 4 files changed, 30 insertions(+), 25 deletions(-)
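
For reference, a sketch of the release/acquire pairing the renamed helpers
provide on a shared ring index, mirroring how queue.c publishes the SQ tail
and io_uring_for_each_cqe consumes the CQ tail (illustrative only; the struct
below is a stand-in, not liburing's real ring layout):

/* Single-producer/single-consumer hand-off through a shared tail index. */
#include <liburing/barrier.h>

struct demo_ring {              /* made-up type for illustration */
        unsigned *ktail;        /* index shared with the other side */
        unsigned *slots;        /* payload, indexed by (pos & mask) */
        unsigned mask;
};

static void publish(struct demo_ring *r, unsigned tail, unsigned value)
{
        r->slots[tail & r->mask] = value;
        /* Make the slot write visible before the new tail can be observed. */
        io_uring_smp_store_release(r->ktail, tail + 1);
}

static int poll_one(struct demo_ring *r, unsigned head, unsigned *out)
{
        /* Pairs with the store-release above: seeing the new tail
         * guarantees the slot written before it is visible as well. */
        unsigned tail = io_uring_smp_load_acquire(r->ktail);

        if (head == tail)
                return 0;       /* nothing new to consume */
        *out = r->slots[head & r->mask];
        return 1;
}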

diff --git a/src/include/liburing.h b/src/include/liburing.h
index fb78cd3..7d7c9df 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -88,9 +88,12 @@ extern int io_uring_register_eventfd(struct io_uring *ring, int fd);
 extern int io_uring_unregister_eventfd(struct io_uring *ring);
 
 #define io_uring_for_each_cqe(ring, head, cqe)                                 \
-       /* smp_load_acquire() enforces the order of tail and CQE reads. */      \
+       /*                                                                      \
+        * io_uring_smp_load_acquire() enforces the order of tail               \
+        * and CQE reads.                                                       \
+        */                                                                     \
        for (head = *(ring)->cq.khead;                                          \
-            (cqe = (head != smp_load_acquire((ring)->cq.ktail) ?               \
+            (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ?      \
                &(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL));     \
             head++)                                                            \
 
@@ -108,7 +111,7 @@ static inline void io_uring_cq_advance(struct io_uring *ring,
                 * Ensure that the kernel only sees the new value of the head
                 * index after the CQEs have been read.
                 */
-               smp_store_release(cq->khead, *cq->khead + nr);
+               io_uring_smp_store_release(cq->khead, *cq->khead + nr);
        }
 }
 
diff --git a/src/include/liburing/barrier.h b/src/include/liburing/barrier.h
index 98be9e5..4f3d1d7 100644
--- a/src/include/liburing/barrier.h
+++ b/src/include/liburing/barrier.h
@@ -23,7 +23,7 @@ after the acquire operation executes. This is implemented using
 /* From tools/include/linux/compiler.h */
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
+#define io_uring_barrier()     __asm__ __volatile__("": : :"memory")
 
 /* From tools/virtio/linux/compiler.h */
 #define WRITE_ONCE(var, val) \
@@ -33,27 +33,29 @@ after the acquire operation executes. This is implemented using
 
 #if defined(__x86_64__) || defined(__i386__)
 /* Adapted from arch/x86/include/asm/barrier.h */
-#define mb()   asm volatile("mfence" ::: "memory")
-#define rmb()  asm volatile("lfence" ::: "memory")
-#define wmb()  asm volatile("sfence" ::: "memory")
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
+#define io_uring_mb()          asm volatile("mfence" ::: "memory")
+#define io_uring_rmb()         asm volatile("lfence" ::: "memory")
+#define io_uring_wmb()         asm volatile("sfence" ::: "memory")
+#define io_uring_smp_rmb()     io_uring_barrier()
+#define io_uring_smp_wmb()     io_uring_barrier()
 #if defined(__i386__)
-#define smp_mb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#define io_uring_smp_mb()      asm volatile("lock; addl $0,0(%%esp)" \
+                                            ::: "memory", "cc")
 #else
-#define smp_mb()  asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
+#define io_uring_smp_mb()      asm volatile("lock; addl $0,-132(%%rsp)" \
+                                            ::: "memory", "cc")
 #endif
 
-#define smp_store_release(p, v)                        \
+#define io_uring_smp_store_release(p, v)       \
 do {                                           \
-       barrier();                              \
+       io_uring_barrier();                     \
        WRITE_ONCE(*(p), (v));                  \
 } while (0)
 
-#define smp_load_acquire(p)                    \
+#define io_uring_smp_load_acquire(p)           \
 ({                                             \
        __typeof(*p) ___p1 = READ_ONCE(*(p));   \
-       barrier();                              \
+       io_uring_barrier();                     \
        ___p1;                                  \
 })
 #else /* defined(__x86_64__) || defined(__i386__) */
@@ -61,25 +63,25 @@ do {                                                \
  * Add arch appropriate definitions. Be safe and use full barriers for
  * archs we don't have support for.
  */
-#define smp_rmb()      __sync_synchronize()
-#define smp_wmb()      __sync_synchronize()
+#define io_uring_smp_rmb()     __sync_synchronize()
+#define io_uring_smp_wmb()     __sync_synchronize()
 #endif /* defined(__x86_64__) || defined(__i386__) */
 
 /* From tools/include/asm/barrier.h */
 
-#ifndef smp_store_release
-# define smp_store_release(p, v)               \
+#ifndef io_uring_smp_store_release
+# define io_uring_smp_store_release(p, v)      \
 do {                                           \
-       smp_mb();                               \
+       io_uring_smp_mb();                      \
        WRITE_ONCE(*p, v);                      \
 } while (0)
 #endif
 
-#ifndef smp_load_acquire
-# define smp_load_acquire(p)                   \
+#ifndef io_uring_smp_load_acquire
+# define io_uring_smp_load_acquire(p)          \
 ({                                             \
        __typeof(*p) ___p1 = READ_ONCE(*p);     \
-       smp_mb();                               \
+       io_uring_smp_mb();                      \
        ___p1;                                  \
 })
 #endif
diff --git a/src/queue.c b/src/queue.c
index 74a077f..007733c 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -104,7 +104,7 @@ static int __io_uring_submit(struct io_uring *ring, unsigned wait_nr)
         * Ensure that the kernel sees the SQE updates before it sees the tail
         * update.
         */
-       smp_store_release(sq->ktail, ktail);
+       io_uring_smp_store_release(sq->ktail, ktail);
 
        flags = 0;
        if (wait_nr || sq_ring_needs_enter(ring, &flags)) {
diff --git a/test/io_uring_enter.c b/test/io_uring_enter.c
index c2030c1..8190178 100644
--- a/test/io_uring_enter.c
+++ b/test/io_uring_enter.c
@@ -266,7 +266,7 @@ main(int argc, char **argv)
         * Ensure that the kernel sees the SQE update before it sees the tail
         * update.
         */
-       smp_store_release(sq->ktail, ktail);
+       io_uring_smp_store_release(sq->ktail, ktail);
 
        ret = io_uring_enter(ring.ring_fd, 1, 0, 0, NULL);
        /* now check to see if our sqe was dropped */
-- 
2.21.0
