* David Goulet ([email protected]) wrote: > In order to not pollute the userspace namespace for application > using liburcu or any lib/apps linked with urcu, this patch if the > first of three major refactor for naming convention. > > The cmm_ prefix is a short name for Concurrent Memory Model and was > suggested by Mathieu Desnoyers and Paul E. Mckenney. Every memory > primitives such as mb, wmb, rmb, and so on are renamed.
Thanks!

Mathieu

> Signed-off-by: David Goulet <[email protected]>
> ---
>  compat_futex.c             |    4 +-
>  tests/api_gcc.h            |    8 ++--
>  tests/api_ppc.h            |    8 ++--
>  tests/api_x86.h            |    8 ++--
>  tests/rcutorture.h         |   18 ++++----
>  tests/test_mutex.c         |    6 +-
>  tests/test_perthreadlock.c |    6 +-
>  tests/test_qsbr.c          |    6 +-
>  tests/test_qsbr_gc.c       |    6 +-
>  tests/test_rwlock.c        |    6 +-
>  tests/test_urcu.c          |    6 +-
>  tests/test_urcu_assign.c   |    6 +-
>  tests/test_urcu_bp.c       |    6 +-
>  tests/test_urcu_defer.c    |    6 +-
>  tests/test_urcu_gc.c       |    6 +-
>  tests/test_urcu_lfq.c      |    6 +-
>  tests/test_urcu_lfs.c      |    6 +-
>  tests/test_urcu_wfq.c      |    6 +-
>  tests/test_urcu_wfs.c      |    6 +-
>  urcu-bp-static.h           |    8 ++--
>  urcu-bp.c                  |   16 ++++----
>  urcu-defer.c               |   14 +++---
>  urcu-pointer-static.h      |    8 ++--
>  urcu-pointer.c             |    6 +-
>  urcu-qsbr-static.h         |   16 ++++----
>  urcu-qsbr.c                |   28 ++++++------
>  urcu-static.h              |   12 +++---
>  urcu.c                     |   36 ++++++++--------
>  urcu/arch_alpha.h          |    6 +-
>  urcu/arch_armv7l.h         |    2 +-
>  urcu/arch_generic.h        |   98 ++++++++++++++++++++++----------------------
>  urcu/arch_ppc.h            |    6 +-
>  urcu/arch_s390.h           |    2 +-
>  urcu/arch_sparc64.h        |    6 +-
>  urcu/arch_x86.h            |   14 +++---
>  urcu/compiler.h            |    2 +-
>  urcu/rcuhlist.h            |    2 +-
>  urcu/rculist.h             |    2 +-
>  urcu/system.h              |    8 ++--
>  39 files changed, 211 insertions(+), 211 deletions(-)
> 
> diff --git a/compat_futex.c b/compat_futex.c
> index 61bc6c3..9dfcfeb 100644
> --- a/compat_futex.c
> +++ b/compat_futex.c
> @@ -55,7 +55,7 @@ int compat_futex_noasync(int *uaddr, int op, int val,
>  	/*
>  	 * memory barriers to serialize with the previous uaddr modification.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	ret = pthread_mutex_lock(&compat_futex_lock);
>  	assert(!ret);
> @@ -100,7 +100,7 @@ int compat_futex_async(int *uaddr, int op, int val,
>  	/*
>  	 * Ensure previous memory operations on uaddr have completed.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	switch (op) {
>  	case FUTEX_WAIT:
> diff --git a/tests/api_gcc.h b/tests/api_gcc.h
> index be437cc..b23110d 100644
> --- a/tests/api_gcc.h
> +++ b/tests/api_gcc.h
> @@ -251,10 +251,10 @@ cmpxchg(volatile long *ptr, long oldval, long newval)
>  #define atomic_dec_return(v)	(atomic_sub_return(1,v))
>  
>  /* Atomic operations are already serializing on x86 */
> -#define smp_mb__before_atomic_dec()	barrier()
> -#define smp_mb__after_atomic_dec()	barrier()
> -#define smp_mb__before_atomic_inc()	barrier()
> -#define smp_mb__after_atomic_inc()	barrier()
> +#define smp_mb__before_atomic_dec()	cmm_barrier()
> +#define smp_mb__after_atomic_dec()	cmm_barrier()
> +#define smp_mb__before_atomic_inc()	cmm_barrier()
> +#define smp_mb__after_atomic_inc()	cmm_barrier()
>  
>  #endif //0 /* duplicate with arch_atomic.h */
> 
> diff --git a/tests/api_ppc.h b/tests/api_ppc.h
> index 6548077..9773500 100644
> --- a/tests/api_ppc.h
> +++ b/tests/api_ppc.h
> @@ -616,10 +616,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
>  #define atomic_dec_return(v)	(atomic_sub_return(1,v))
>  
>  /* Atomic operations are already serializing on x86 */
> -#define smp_mb__before_atomic_dec()	smp_mb()
> -#define smp_mb__after_atomic_dec()	smp_mb()
> -#define smp_mb__before_atomic_inc()	smp_mb()
> -#define smp_mb__after_atomic_inc()	smp_mb()
> +#define smp_mb__before_atomic_dec()	cmm_smp_mb()
> +#define smp_mb__after_atomic_dec()	cmm_smp_mb()
> +#define smp_mb__before_atomic_inc()	cmm_smp_mb()
> +#define smp_mb__after_atomic_inc()	cmm_smp_mb()
>  
>  #endif //0 /* duplicate with arch_atomic.h */
> 
> diff --git a/tests/api_x86.h b/tests/api_x86.h
> index cdd4667..fe00a35 100644
> --- a/tests/api_x86.h
> +++ b/tests/api_x86.h
> @@ -312,10 +312,10 @@ __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
>  			: : "r" (mask),"m" (*(addr)) : "memory")
>  
>  /* Atomic operations are already serializing on x86 */
> -#define smp_mb__before_atomic_dec()	barrier()
> -#define smp_mb__after_atomic_dec()	barrier()
> -#define smp_mb__before_atomic_inc()	barrier()
> -#define smp_mb__after_atomic_inc()	barrier()
> +#define smp_mb__before_atomic_dec()	cmm_barrier()
> +#define smp_mb__after_atomic_dec()	cmm_barrier()
> +#define smp_mb__before_atomic_inc()	cmm_barrier()
> +#define smp_mb__after_atomic_inc()	cmm_barrier()
>  
>  #endif //0
> 
> diff --git a/tests/rcutorture.h b/tests/rcutorture.h
> index 181547a..4192bd0 100644
> --- a/tests/rcutorture.h
> +++ b/tests/rcutorture.h
> @@ -170,15 +170,15 @@ void perftestrun(int nthreads, int nreaders, int nupdaters)
>  	int t;
>  	int duration = 1;
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  	while (uatomic_read(&nthreadsrunning) < nthreads)
>  		poll(NULL, 0, 1);
>  	goflag = GOFLAG_RUN;
> -	smp_mb();
> +	cmm_smp_mb();
>  	sleep(duration);
> -	smp_mb();
> +	cmm_smp_mb();
>  	goflag = GOFLAG_STOP;
> -	smp_mb();
> +	cmm_smp_mb();
>  	wait_all_threads();
>  	for_each_thread(t) {
>  		n_reads += per_thread(n_reads_pt, t);
> @@ -309,7 +309,7 @@ void *rcu_update_stress_test(void *arg)
>  			i = 0;
>  		p = &rcu_stress_array[i];
>  		p->mbtest = 0;
> -		smp_mb();
> +		cmm_smp_mb();
>  		p->pipe_count = 0;
>  		p->mbtest = 1;
>  		rcu_assign_pointer(rcu_stress_current, p);
> @@ -355,13 +355,13 @@ void stresstest(int nreaders)
>  	create_thread(rcu_update_stress_test, NULL);
>  	for (i = 0; i < 5; i++)
>  		create_thread(rcu_fake_update_stress_test, NULL);
> -	smp_mb();
> +	cmm_smp_mb();
>  	goflag = GOFLAG_RUN;
> -	smp_mb();
> +	cmm_smp_mb();
>  	sleep(10);
> -	smp_mb();
> +	cmm_smp_mb();
>  	goflag = GOFLAG_STOP;
> -	smp_mb();
> +	cmm_smp_mb();
>  	wait_all_threads();
>  	for_each_thread(t)
>  		n_reads += per_thread(n_reads_pt, t);
> diff --git a/tests/test_mutex.c b/tests/test_mutex.c
> index 7b2d1a5..7dd1e31 100644
> --- a/tests/test_mutex.c
> +++ b/tests/test_mutex.c
> @@ -229,7 +229,7 @@ void *thr_writer(void *data)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		pthread_mutex_lock(&lock);
> @@ -278,7 +278,7 @@ int main(int argc, char **argv)
>  		show_usage(argc, argv);
>  		return -1;
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	err = sscanf(argv[1], "%u", &nr_readers);
>  	if (err != 1) {
> @@ -376,7 +376,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_perthreadlock.c b/tests/test_perthreadlock.c
> index 7862f62..9de8ced 100644
> --- a/tests/test_perthreadlock.c
> +++ b/tests/test_perthreadlock.c
> @@ -234,7 +234,7 @@ void *thr_writer(void *data)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		for (tidx = 0; tidx < nr_readers; tidx++) {
> @@ -287,7 +287,7 @@ int main(int argc, char **argv)
>  		show_usage(argc, argv);
>  		return -1;
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	err = sscanf(argv[1], "%u", &nr_readers);
>  	if (err != 1) {
> @@ -386,7 +386,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_qsbr.c b/tests/test_qsbr.c
> index 0101907..4f19c72 100644
> --- a/tests/test_qsbr.c
> +++ b/tests/test_qsbr.c
> @@ -230,7 +230,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		rcu_read_lock();
> @@ -275,7 +275,7 @@ void *thr_writer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		new = test_array_alloc();
> @@ -424,7 +424,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_qsbr_gc.c b/tests/test_qsbr_gc.c
> index 2b4fa7a..875fd36 100644
> --- a/tests/test_qsbr_gc.c
> +++ b/tests/test_qsbr_gc.c
> @@ -203,7 +203,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		_rcu_read_lock();
> @@ -279,7 +279,7 @@ void *thr_writer(void *data)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  #ifndef TEST_LOCAL_GC
> @@ -444,7 +444,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_rwlock.c b/tests/test_rwlock.c
> index 1c65fca..445ce95 100644
> --- a/tests/test_rwlock.c
> +++ b/tests/test_rwlock.c
> @@ -225,7 +225,7 @@ void *thr_writer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		pthread_rwlock_wrlock(&lock);
> @@ -274,7 +274,7 @@ int main(int argc, char **argv)
>  		show_usage(argc, argv);
>  		return -1;
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	err = sscanf(argv[1], "%u", &nr_readers);
>  	if (err != 1) {
> @@ -370,7 +370,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu.c b/tests/test_urcu.c
> index 5096415..eeea7f5 100644
> --- a/tests/test_urcu.c
> +++ b/tests/test_urcu.c
> @@ -231,7 +231,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		rcu_read_lock();
> @@ -273,7 +273,7 @@ void *thr_writer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		new = test_array_alloc();
> @@ -421,7 +421,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_assign.c b/tests/test_urcu_assign.c
> index b1298a4..481cdd6 100644
> --- a/tests/test_urcu_assign.c
> +++ b/tests/test_urcu_assign.c
> @@ -231,7 +231,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		rcu_read_lock();
> @@ -269,7 +269,7 @@ void *thr_writer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		new = test_array_alloc();
> @@ -420,7 +420,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c
> index 239e843..e831824 100644
> --- a/tests/test_urcu_bp.c
> +++ b/tests/test_urcu_bp.c
> @@ -231,7 +231,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		rcu_read_lock();
> @@ -269,7 +269,7 @@ void *thr_writer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		new = test_array_alloc();
> @@ -417,7 +417,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_defer.c b/tests/test_urcu_defer.c
> index 6fae313..2cbb041 100644
> --- a/tests/test_urcu_defer.c
> +++ b/tests/test_urcu_defer.c
> @@ -199,7 +199,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		rcu_read_lock();
> @@ -247,7 +247,7 @@ void *thr_writer(void *data)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		new = malloc(sizeof(*new));
> @@ -400,7 +400,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_gc.c b/tests/test_urcu_gc.c
> index 2c944a5..ddafb87 100644
> --- a/tests/test_urcu_gc.c
> +++ b/tests/test_urcu_gc.c
> @@ -207,7 +207,7 @@ void *thr_reader(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		rcu_read_lock();
> @@ -280,7 +280,7 @@ void *thr_writer(void *data)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  #ifndef TEST_LOCAL_GC
> @@ -444,7 +444,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_lfq.c b/tests/test_urcu_lfq.c
> index 87c53b6..901bcae 100644
> --- a/tests/test_urcu_lfq.c
> +++ b/tests/test_urcu_lfq.c
> @@ -170,7 +170,7 @@ void *thr_enqueuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct rcu_lfq_node *node = malloc(sizeof(*node));
> @@ -223,7 +223,7 @@ void *thr_dequeuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct rcu_lfq_node *node = rcu_lfq_dequeue(&q,
> @@ -380,7 +380,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_lfs.c b/tests/test_urcu_lfs.c
> index 8f881e7..8249eba 100644
> --- a/tests/test_urcu_lfs.c
> +++ b/tests/test_urcu_lfs.c
> @@ -170,7 +170,7 @@ void *thr_enqueuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct rcu_lfs_node *node = malloc(sizeof(*node));
> @@ -215,7 +215,7 @@ void *thr_dequeuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct rcu_lfs_node *node = rcu_lfs_pop(&s);
> @@ -365,7 +365,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_wfq.c b/tests/test_urcu_wfq.c
> index b0629d2..d446e47 100644
> --- a/tests/test_urcu_wfq.c
> +++ b/tests/test_urcu_wfq.c
> @@ -167,7 +167,7 @@ void *thr_enqueuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct wfq_node *node = malloc(sizeof(*node));
> @@ -207,7 +207,7 @@ void *thr_dequeuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct wfq_node *node = wfq_dequeue_blocking(&q);
> @@ -354,7 +354,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/tests/test_urcu_wfs.c b/tests/test_urcu_wfs.c
> index f50b140..294e955 100644
> --- a/tests/test_urcu_wfs.c
> +++ b/tests/test_urcu_wfs.c
> @@ -167,7 +167,7 @@ void *thr_enqueuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct wfs_node *node = malloc(sizeof(*node));
> @@ -207,7 +207,7 @@ void *thr_dequeuer(void *_count)
>  	while (!test_go)
>  	{
>  	}
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	for (;;) {
>  		struct wfs_node *node = wfs_pop_blocking(&s);
> @@ -354,7 +354,7 @@ int main(int argc, char **argv)
>  		exit(1);
>  	}
>  
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	test_go = 1;
>  
> diff --git a/urcu-bp-static.h b/urcu-bp-static.h
> index 6d7e3c8..394476b 100644
> --- a/urcu-bp-static.h
> +++ b/urcu-bp-static.h
> @@ -175,7 +175,7 @@ static inline void _rcu_read_lock(void)
>  	if (unlikely(!rcu_reader))
>  		rcu_bp_register();
>  
> -	barrier();	/* Ensure the compiler does not reorder us with mutex */
> +	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
>  	tmp = rcu_reader->ctr;
>  	/*
>  	 * rcu_gp_ctr is
> @@ -187,7 +187,7 @@ static inline void _rcu_read_lock(void)
>  		 * Set active readers count for outermost nesting level before
>  		 * accessing the pointer.
>  		 */
> -		smp_mb();
> +		cmm_smp_mb();
>  	} else {
>  		_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
>  	}
> @@ -198,9 +198,9 @@ static inline void _rcu_read_unlock(void)
>  	/*
>  	 * Finish using rcu before decrementing the pointer.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  	_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
> -	barrier();	/* Ensure the compiler does not reorder us with mutex */
> +	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
>  }
>  
>  #ifdef __cplusplus
> diff --git a/urcu-bp.c b/urcu-bp.c
> index 61d42fa..33352c2 100644
> --- a/urcu-bp.c
> +++ b/urcu-bp.c
> @@ -96,9 +96,9 @@ static void mutex_lock(pthread_mutex_t *mutex)
>  			exit(-1);
>  		}
>  		if (rcu_reader.need_mb) {
> -			smp_mb();
> +			cmm_smp_mb();
>  			rcu_reader.need_mb = 0;
> -			smp_mb();
> +			cmm_smp_mb();
>  		}
>  		poll(NULL,0,10);
>  	}
> @@ -133,11 +133,11 @@ void update_counter_and_wait(void)
>  	 */
>  
>  	/*
> -	 * Adding a smp_mb() which is _not_ formally required, but makes the
> +	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
>  	 * model easier to understand. It does not have a big performance impact
>  	 * anyway, given this is the write-side.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Wait for each thread rcu_reader.ctr count to become 0.
> @@ -180,7 +180,7 @@ void synchronize_rcu(void)
>  	/* All threads should read qparity before accessing data structure
>  	 * where new ptr points to.
>  	 */
>  	/* Write new ptr before changing the qparity */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/* Remove old registry elements */
>  	rcu_gc_registry();
> @@ -191,11 +191,11 @@ void synchronize_rcu(void)
>  	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */
>  
>  	/*
> -	 * Adding a smp_mb() which is _not_ formally required, but makes the
> +	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
>  	 * model easier to understand. It does not have a big performance impact
>  	 * anyway, given this is the write-side.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Wait for previous parity to be empty of readers.
> @@ -206,7 +206,7 @@ void synchronize_rcu(void)
>  	 * Finish waiting for reader threads before letting the old ptr being
>  	 * freed.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  out:
>  	mutex_unlock(&rcu_gp_lock);
>  	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
> diff --git a/urcu-defer.c b/urcu-defer.c
> index e241302..6dc08a3 100644
> --- a/urcu-defer.c
> +++ b/urcu-defer.c
> @@ -123,13 +123,13 @@ static unsigned long rcu_defer_num_callbacks(void)
>  static void wait_defer(void)
>  {
>  	uatomic_dec(&defer_thread_futex);
> -	smp_mb();	/* Write futex before read queue */
> +	cmm_smp_mb();	/* Write futex before read queue */
>  	if (rcu_defer_num_callbacks()) {
> -		smp_mb();	/* Read queue before write futex */
> +		cmm_smp_mb();	/* Read queue before write futex */
>  		/* Callbacks are queued, don't wait. */
>  		uatomic_set(&defer_thread_futex, 0);
>  	} else {
> -		smp_rmb();	/* Read queue before read futex */
> +		cmm_smp_rmb();	/* Read queue before read futex */
>  		if (uatomic_read(&defer_thread_futex) == -1)
>  			futex_noasync(&defer_thread_futex, FUTEX_WAIT, -1,
>  				      NULL, NULL, 0);
> @@ -152,7 +152,7 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
>  	 */
>  
>  	for (i = queue->tail; i != head;) {
> -		smp_rmb();	/* read head before q[]. */
> +		cmm_smp_rmb();	/* read head before q[]. */
>  		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
>  		if (unlikely(DQ_IS_FCT_BIT(p))) {
>  			DQ_CLEAR_FCT_BIT(p);
> @@ -166,7 +166,7 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
>  			fct = queue->last_fct_out;
>  			fct(p);
>  		}
> -		smp_mb();	/* push tail after having used q[] */
> +		cmm_smp_mb();	/* push tail after having used q[] */
>  		STORE_SHARED(queue->tail, i);
>  	}
>  
> @@ -283,10 +283,10 @@ void _defer_rcu(void (*fct)(void *p), void *p)
>  		}
>  	}
>  	_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
> -	smp_wmb();	/* Publish new pointer before head */
> +	cmm_smp_wmb();	/* Publish new pointer before head */
>  			/* Write q[] before head. */
>  	STORE_SHARED(defer_queue.head, head);
> -	smp_mb();	/* Write queue head before read futex */
> +	cmm_smp_mb();	/* Write queue head before read futex */
>  	/*
>  	 * Wake-up any waiting defer thread.
>  	 */
> diff --git a/urcu-pointer-static.h b/urcu-pointer-static.h
> index 983d0d5..c8ac7f0 100644
> --- a/urcu-pointer-static.h
> +++ b/urcu-pointer-static.h
> @@ -63,7 +63,7 @@ extern "C" {
>  
>  #define _rcu_dereference(p)     ({					\
>  				typeof(p) _________p1 = LOAD_SHARED(p); \
> -				smp_read_barrier_depends();		\
> +				cmm_smp_read_barrier_depends();		\
>  				(_________p1);				\
>  				})
>  
> @@ -81,7 +81,7 @@ extern "C" {
>  		typeof(*p) _________pnew = (_new);			\
>  		if (!__builtin_constant_p(_new) ||			\
>  		    ((_new) != NULL))					\
> -			wmb();						\
> +			cmm_wmb();					\
>  		uatomic_cmpxchg(p, _________pold, _________pnew);	\
>  	})
>  
> @@ -96,7 +96,7 @@ extern "C" {
>  		typeof(*p) _________pv = (v);		\
>  		if (!__builtin_constant_p(v) ||		\
>  		    ((v) != NULL))			\
> -			wmb();				\
> +			cmm_wmb();			\
>  		uatomic_xchg(p, _________pv);		\
>  	})
>  
> @@ -106,7 +106,7 @@ extern "C" {
>  		typeof(*p) _________pv = (v);		\
>  		if (!__builtin_constant_p(v) ||		\
>  		    ((v) != NULL))			\
> -			wmb();				\
> +			cmm_wmb();			\
>  		uatomic_set(p, _________pv);		\
>  	})
>  
> diff --git a/urcu-pointer.c b/urcu-pointer.c
> index f5c9811..180c834 100644
> --- a/urcu-pointer.c
> +++ b/urcu-pointer.c
> @@ -39,18 +39,18 @@ void *rcu_dereference_sym(void *p)
>  
>  void *rcu_set_pointer_sym(void **p, void *v)
>  {
> -	wmb();
> +	cmm_wmb();
>  	return uatomic_set(p, v);
>  }
>  
>  void *rcu_xchg_pointer_sym(void **p, void *v)
>  {
> -	wmb();
> +	cmm_wmb();
>  	return uatomic_xchg(p, v);
>  }
>  
>  void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
>  {
> -	wmb();
> +	cmm_wmb();
>  	return uatomic_cmpxchg(p, old, _new);
>  }
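The cmm_wmb() in these pointer primitives is what makes the usual RCU
publish/subscribe pattern work. A minimal sketch of that pattern
(hypothetical struct and global, for illustration only):

#include <urcu.h>

struct foo {
	int a;
};

static struct foo *gp;	/* hypothetical RCU-protected pointer */

static void publisher(struct foo *f)
{
	f->a = 42;
	/* rcu_assign_pointer() issues cmm_wmb() before the store, so a
	 * reader that sees the new gp also sees f->a == 42. */
	rcu_assign_pointer(gp, f);
}

static int reader(void)
{
	struct foo *f;
	int v = -1;

	rcu_read_lock();
	/* rcu_dereference() pairs with the cmm_wmb() above; its
	 * cmm_smp_read_barrier_depends() is a nop everywhere but alpha. */
	f = rcu_dereference(gp);
	if (f)
		v = f->a;
	rcu_read_unlock();
	return v;
}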
> diff --git a/urcu-qsbr-static.h b/urcu-qsbr-static.h
> index 147eb18..108ef6e 100644
> --- a/urcu-qsbr-static.h
> +++ b/urcu-qsbr-static.h
> @@ -174,27 +174,27 @@ static inline void _rcu_read_unlock(void)
>  
>  static inline void _rcu_quiescent_state(void)
>  {
> -	smp_mb();
> +	cmm_smp_mb();
>  	_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
> -	smp_mb();	/* write rcu_reader.ctr before read futex */
> +	cmm_smp_mb();	/* write rcu_reader.ctr before read futex */
>  	wake_up_gp();
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  
>  static inline void _rcu_thread_offline(void)
>  {
> -	smp_mb();
> +	cmm_smp_mb();
>  	STORE_SHARED(rcu_reader.ctr, 0);
> -	smp_mb();	/* write rcu_reader.ctr before read futex */
> +	cmm_smp_mb();	/* write rcu_reader.ctr before read futex */
>  	wake_up_gp();
> -	barrier();	/* Ensure the compiler does not reorder us with mutex */
> +	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
>  }
>  
>  static inline void _rcu_thread_online(void)
>  {
> -	barrier();	/* Ensure the compiler does not reorder us with mutex */
> +	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
>  	_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  
>  #ifdef __cplusplus
> diff --git a/urcu-qsbr.c b/urcu-qsbr.c
> index 5e8b960..2cf73d5 100644
> --- a/urcu-qsbr.c
> +++ b/urcu-qsbr.c
> @@ -100,7 +100,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
>  static void wait_gp(void)
>  {
>  	/* Read reader_gp before read futex */
> -	smp_rmb();
> +	cmm_smp_rmb();
>  	if (uatomic_read(&gp_futex) == -1)
>  		futex_noasync(&gp_futex, FUTEX_WAIT, -1,
>  			      NULL, NULL, 0);
> @@ -126,14 +126,14 @@ static void update_counter_and_wait(void)
>  	 * while new readers are always accessing data (no progress). Enforce
>  	 * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
>  	 */
> -	barrier();
> +	cmm_barrier();
>  
>  	/*
> -	 * Adding a smp_mb() which is _not_ formally required, but makes the
> +	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
>  	 * model easier to understand. It does not have a big performance impact
>  	 * anyway, given this is the write-side.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Wait for each thread rcu_reader_qs_gp count to become 0.
> @@ -143,7 +143,7 @@ static void update_counter_and_wait(void)
>  		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
>  			uatomic_dec(&gp_futex);
>  			/* Write futex before read reader_gp */
> -			smp_mb();
> +			cmm_smp_mb();
>  		}
>  
>  		list_for_each_entry_safe(index, tmp, &registry, node) {
> @@ -154,7 +154,7 @@ static void update_counter_and_wait(void)
>  		if (list_empty(&registry)) {
>  			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
>  				/* Read reader_gp before write futex */
> -				smp_mb();
> +				cmm_smp_mb();
>  				uatomic_set(&gp_futex, 0);
>  			}
>  			break;
> @@ -165,7 +165,7 @@ static void update_counter_and_wait(void)
>  #ifndef HAS_INCOHERENT_CACHES
>  			cpu_relax();
>  #else /* #ifndef HAS_INCOHERENT_CACHES */
> -			smp_mb();
> +			cmm_smp_mb();
>  #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
>  		}
>  	}
> @@ -190,7 +190,7 @@ void synchronize_rcu(void)
>  	 * where new ptr points to.
>  	 */
>  	/* Write new ptr before changing the qparity */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Mark the writer thread offline to make sure we don't wait for
> @@ -217,14 +217,14 @@ void synchronize_rcu(void)
>  	 * accessing data (no progress). Enforce compiler-order of load
>  	 * rcu_reader ctr before store to rcu_gp_ctr.
>  	 */
> -	barrier();
> +	cmm_barrier();
>  
>  	/*
> -	 * Adding a smp_mb() which is _not_ formally required, but makes the
> +	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
>  	 * model easier to understand. It does not have a big performance impact
>  	 * anyway, given this is the write-side.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Wait for previous parity to be empty of readers.
> @@ -239,7 +239,7 @@ out:
>  	 */
>  	if (was_online)
>  		_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  #else /* !(BITS_PER_LONG < 64) */
>  void synchronize_rcu(void)
> @@ -253,7 +253,7 @@ void synchronize_rcu(void)
>  	 * our own quiescent state. This allows using synchronize_rcu() in
>  	 * threads registered as readers.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  	if (was_online)
>  		STORE_SHARED(rcu_reader.ctr, 0);
>  
> @@ -266,7 +266,7 @@ out:
>  
>  	if (was_online)
>  		_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  #endif  /* !(BITS_PER_LONG < 64) */
>  
> diff --git a/urcu-static.h b/urcu-static.h
> index eea39bb..ad415ac 100644
> --- a/urcu-static.h
> +++ b/urcu-static.h
> @@ -181,23 +181,23 @@ extern int has_sys_membarrier;
>  static inline void smp_mb_slave(int group)
>  {
>  	if (likely(has_sys_membarrier))
> -		barrier();
> +		cmm_barrier();
>  	else
> -		smp_mb();
> +		cmm_smp_mb();
>  }
>  #endif
>  
>  #ifdef RCU_MB
>  static inline void smp_mb_slave(int group)
>  {
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  #endif
>  
>  #ifdef RCU_SIGNAL
>  static inline void smp_mb_slave(int group)
>  {
> -	barrier();
> +	cmm_barrier();
>  }
>  #endif
>  
> @@ -259,7 +259,7 @@ static inline void _rcu_read_lock(void)
>  {
>  	unsigned long tmp;
>  
> -	barrier();	/* Ensure the compiler does not reorder us with mutex */
> +	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
>  	tmp = rcu_reader.ctr;
>  	/*
>  	 * rcu_gp_ctr is
> @@ -295,7 +295,7 @@ static inline void _rcu_read_unlock(void)
>  	} else {
>  		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
>  	}
> -	barrier();	/* Ensure the compiler does not reorder us with mutex */
> +	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
>  }
>  
>  #ifdef __cplusplus
> diff --git a/urcu.c b/urcu.c
> index 918d274..9c556aa 100644
> --- a/urcu.c
> +++ b/urcu.c
> @@ -100,9 +100,9 @@ static void mutex_lock(pthread_mutex_t *mutex)
>  			exit(-1);
>  		}
>  		if (LOAD_SHARED(rcu_reader.need_mb)) {
> -			smp_mb();
> +			cmm_smp_mb();
>  			_STORE_SHARED(rcu_reader.need_mb, 0);
> -			smp_mb();
> +			cmm_smp_mb();
>  		}
>  		poll(NULL,0,10);
>  	}
> @@ -126,14 +126,14 @@ static void smp_mb_master(int group)
>  	if (likely(has_sys_membarrier))
>  		membarrier(MEMBARRIER_EXPEDITED);
>  	else
> -		smp_mb();
> +		cmm_smp_mb();
>  }
>  #endif
>  
>  #ifdef RCU_MB
>  static void smp_mb_master(int group)
>  {
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  #endif
>  
> @@ -143,15 +143,15 @@ static void force_mb_all_readers(void)
>  	struct rcu_reader *index;
>  
>  	/*
> -	 * Ask for each threads to execute a smp_mb() so we can consider the
> +	 * Ask for each threads to execute a cmm_smp_mb() so we can consider the
>  	 * compiler barriers around rcu read lock as real memory barriers.
>  	 */
>  	if (list_empty(&registry))
>  		return;
>  	/*
> -	 * pthread_kill has a smp_mb(). But beware, we assume it performs
> +	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
>  	 * a cache flush on architectures with non-coherent cache. Let's play
> -	 * safe and don't assume anything : we use smp_mc() to make sure the
> +	 * safe and don't assume anything : we use cmm_smp_mc() to make sure the
>  	 * cache flush is enforced.
>  	 */
>  	list_for_each_entry(index, &registry, node) {
> @@ -177,7 +177,7 @@ static void force_mb_all_readers(void)
>  			poll(NULL, 0, 1);
>  		}
>  	}
> -	smp_mb();	/* read ->need_mb before ending the barrier */
> +	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
>  }
>  
>  static void smp_mb_master(int group)
> @@ -213,15 +213,15 @@ void update_counter_and_wait(void)
>  	 * while new readers are always accessing data (no progress). Enforce
>  	 * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
>  	 */
> -	barrier();
> +	cmm_barrier();
>  
>  	/*
>  	 *
> -	 * Adding a smp_mb() which is _not_ formally required, but makes the
> +	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
>  	 * model easier to understand. It does not have a big performance impact
>  	 * anyway, given this is the write-side.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Wait for each thread rcu_reader.ctr count to become 0.
> @@ -309,14 +309,14 @@ void synchronize_rcu(void)
>  	 * accessing data (no progress). Enforce compiler-order of load
>  	 * rcu_reader ctr before store to rcu_gp_ctr.
>  	 */
> -	barrier();
> +	cmm_barrier();
>  
>  	/*
> -	 * Adding a smp_mb() which is _not_ formally required, but makes the
> +	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
>  	 * model easier to understand. It does not have a big performance impact
>  	 * anyway, given this is the write-side.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  
>  	/*
>  	 * Wait for previous parity to be empty of readers.
> @@ -379,13 +379,13 @@ void rcu_init(void)
>  static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
>  {
>  	/*
> -	 * Executing this smp_mb() is the only purpose of this signal handler.
> -	 * It punctually promotes barrier() into smp_mb() on every thread it is
> +	 * Executing this cmm_smp_mb() is the only purpose of this signal handler.
> +	 * It punctually promotes cmm_barrier() into cmm_smp_mb() on every thread it is
>  	 * executed on.
>  	 */
> -	smp_mb();
> +	cmm_smp_mb();
>  	_STORE_SHARED(rcu_reader.need_mb, 0);
> -	smp_mb();
> +	cmm_smp_mb();
>  }
>  
>  /*
> diff --git a/urcu/arch_alpha.h b/urcu/arch_alpha.h
> index 0596939..51a1fff 100644
> --- a/urcu/arch_alpha.h
> +++ b/urcu/arch_alpha.h
> @@ -28,9 +28,9 @@
>  extern "C" {
>  #endif
>  
> -#define mb()			asm volatile("mb":::"memory")
> -#define wmb()			asm volatile("wmb":::"memory")
> -#define read_barrier_depends()	asm volatile("mb":::"memory")
> +#define cmm_mb()			asm volatile("mb":::"memory")
> +#define cmm_wmb()			asm volatile("wmb":::"memory")
> +#define cmm_read_barrier_depends()	asm volatile("mb":::"memory")
>  
>  typedef unsigned long long cycles_t;
>  
> diff --git a/urcu/arch_armv7l.h b/urcu/arch_armv7l.h
> index e63ad3c..6d58bb7 100644
> --- a/urcu/arch_armv7l.h
> +++ b/urcu/arch_armv7l.h
> @@ -29,7 +29,7 @@
>  extern "C" {
>  #endif
>  
> -#define mb()	asm volatile("dmb":::"memory")
> +#define cmm_mb()	asm volatile("dmb":::"memory")
>  
>  #include <stdlib.h>
>  #include <sys/time.h>
> diff --git a/urcu/arch_generic.h b/urcu/arch_generic.h
> index 36a0003..25a9f7a 100644
> --- a/urcu/arch_generic.h
> +++ b/urcu/arch_generic.h
> @@ -32,93 +32,93 @@ extern "C" {
>  #define CACHE_LINE_SIZE	64
>  #endif
>  
> -#if !defined(mc) && !defined(rmc) && !defined(wmc)
> +#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
>  #define CONFIG_HAVE_MEM_COHERENCY
>  /*
> - * Architectures with cache coherency must _not_ define mc/rmc/wmc.
> + * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
>   *
> - * For them, mc/rmc/wmc are implemented with a
> - * simple compiler barrier;
> - * in addition, we provide defaults for mb (using GCC builtins) as well as
> - * rmb and wmb (defaulting to mb).
> + * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a
> + * simple compiler barrier;
> + * in addition, we provide defaults for cmm_mb (using GCC builtins) as well as
> + * cmm_rmb and cmm_wmb (defaulting to cmm_mb).
>   */
>  
> -#ifndef mb
> -#define mb()    __sync_synchronize()
> +#ifndef cmm_mb
> +#define cmm_mb()    __sync_synchronize()
>  #endif
>  
> -#ifndef rmb
> -#define rmb()	mb()
> +#ifndef cmm_rmb
> +#define cmm_rmb()	cmm_mb()
>  #endif
>  
> -#ifndef wmb
> -#define wmb()	mb()
> +#ifndef cmm_wmb
> +#define cmm_wmb()	cmm_mb()
>  #endif
>  
> -#define mc()	barrier()
> -#define rmc()	barrier()
> -#define wmc()	barrier()
> +#define cmm_mc()	cmm_barrier()
> +#define cmm_rmc()	cmm_barrier()
> +#define cmm_wmc()	cmm_barrier()
>  #else
>  /*
>   * Architectures without cache coherency need something like the following:
>   *
> - * #define mc()		arch_cache_flush()
> - * #define rmc()	arch_cache_flush_read()
> - * #define wmc()	arch_cache_flush_write()
> + * #define cmm_mc()	arch_cache_flush()
> + * #define cmm_rmc()	arch_cache_flush_read()
> + * #define cmm_wmc()	arch_cache_flush_write()
>   *
> - * Of these, only mc is mandatory. rmc and wmc default to mc. mb/rmb/wmb
> - * use these definitions by default:
> + * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to cmm_mc.
> + * cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
>   *
> - * #define mb()		mc()
> - * #define rmb()	rmc()
> - * #define wmb()	wmc()
> + * #define cmm_mb()	cmm_mc()
> + * #define cmm_rmb()	cmm_rmc()
> + * #define cmm_wmb()	cmm_wmc()
>   */
>  
> -#ifndef mb
> -#define mb()	mc()
> +#ifndef cmm_mb
> +#define cmm_mb()	cmm_mc()
>  #endif
>  
> -#ifndef rmb
> -#define rmb()	rmc()
> +#ifndef cmm_rmb
> +#define cmm_rmb()	cmm_rmc()
>  #endif
>  
> -#ifndef wmb
> -#define wmb()	wmc()
> +#ifndef cmm_wmb
> +#define cmm_wmb()	cmm_wmc()
>  #endif
>  
> -#ifndef rmc
> -#define rmc()	mc()
> +#ifndef cmm_rmc
> +#define cmm_rmc()	cmm_mc()
>  #endif
>  
> -#ifndef wmc
> -#define wmc()	mc()
> +#ifndef cmm_wmc
> +#define cmm_wmc()	cmm_mc()
>  #endif
>  #endif
>  
>  /* Nop everywhere except on alpha. */
> -#ifndef read_barrier_depends
> -#define read_barrier_depends()
> +#ifndef cmm_read_barrier_depends
> +#define cmm_read_barrier_depends()
>  #endif
>  
>  #ifdef CONFIG_RCU_SMP
> -#define smp_mb()	mb()
> -#define smp_rmb()	rmb()
> -#define smp_wmb()	wmb()
> -#define smp_mc()	mc()
> -#define smp_rmc()	rmc()
> -#define smp_wmc()	wmc()
> -#define smp_read_barrier_depends()	read_barrier_depends()
> +#define cmm_smp_mb()	cmm_mb()
> +#define cmm_smp_rmb()	cmm_rmb()
> +#define cmm_smp_wmb()	cmm_wmb()
> +#define cmm_smp_mc()	cmm_mc()
> +#define cmm_smp_rmc()	cmm_rmc()
> +#define cmm_smp_wmc()	cmm_wmc()
> +#define cmm_smp_read_barrier_depends()	cmm_read_barrier_depends()
>  #else
> -#define smp_mb()	barrier()
> -#define smp_rmb()	barrier()
> -#define smp_wmb()	barrier()
> -#define smp_mc()	barrier()
> -#define smp_rmc()	barrier()
> -#define smp_wmc()	barrier()
> -#define smp_read_barrier_depends()
> +#define cmm_smp_mb()	cmm_barrier()
> +#define cmm_smp_rmb()	cmm_barrier()
> +#define cmm_smp_wmb()	cmm_barrier()
> +#define cmm_smp_mc()	cmm_barrier()
> +#define cmm_smp_rmc()	cmm_barrier()
> +#define cmm_smp_wmc()	cmm_barrier()
> +#define cmm_smp_read_barrier_depends()
>  #endif
>  
>  #ifndef cpu_relax
> -#define cpu_relax()	barrier()
> +#define cpu_relax()	cmm_barrier()
>  #endif
>  
>  #ifdef __cplusplus
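Worth pointing out how little an architecture must provide after this
cleanup: on a cache-coherent target only cmm_mb() is mandatory, and
arch_generic.h derives everything else. A hypothetical new port
(illustrative only; the fence instruction is made up, and the layout
assumes the pattern of the existing arch headers, which include
arch_generic.h last):

/* urcu/arch_myarch.h -- hypothetical, for illustration */
#ifndef _URCU_ARCH_MYARCH_H
#define _URCU_ARCH_MYARCH_H

#include <urcu/compiler.h>	/* cmm_barrier() */

/* Full fence. cmm_rmb()/cmm_wmb() then default to cmm_mb(), and the
 * cmm_smp_*() and cmm_smp_*c() families are derived in arch_generic.h. */
#define cmm_mb()	asm volatile("my-fence-insn":::"memory")

#include <urcu/arch_generic.h>

#endif /* _URCU_ARCH_MYARCH_H */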
> diff --git a/urcu/arch_ppc.h b/urcu/arch_ppc.h
> index 7a217c8..93aed2a 100644
> --- a/urcu/arch_ppc.h
> +++ b/urcu/arch_ppc.h
> @@ -32,7 +32,7 @@ extern "C" {
>  /* Include size of POWER5+ L3 cache lines: 256 bytes */
>  #define CACHE_LINE_SIZE	256
>  
> -#define mb()    asm volatile("sync":::"memory")
> +#define cmm_mb()    asm volatile("sync":::"memory")
>  
>  #define mftbl()						\
>  	({ 						\
> @@ -56,9 +56,9 @@ static inline cycles_t get_cycles (void)
>  
>  	for (;;) {
>  		h = mftbu();
> -		barrier();
> +		cmm_barrier();
>  		l = mftbl();
> -		barrier();
> +		cmm_barrier();
>  		if (mftbu() == h)
>  			return (((cycles_t) h) << 32) + l;
>  	}
> diff --git a/urcu/arch_s390.h b/urcu/arch_s390.h
> index 1a55e0d..8a33e20 100644
> --- a/urcu/arch_s390.h
> +++ b/urcu/arch_s390.h
> @@ -37,7 +37,7 @@ extern "C" {
>  
>  #define CACHE_LINE_SIZE	128
>  
> -#define mb()    __asm__ __volatile__("bcr 15,0" : : : "memory")
> +#define cmm_mb()    __asm__ __volatile__("bcr 15,0" : : : "memory")
>  
>  typedef unsigned long long cycles_t;
>  
> diff --git a/urcu/arch_sparc64.h b/urcu/arch_sparc64.h
> index 8e991b1..39f27c7 100644
> --- a/urcu/arch_sparc64.h
> +++ b/urcu/arch_sparc64.h
> @@ -40,9 +40,9 @@ __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
>  		     "1:\n" \
>  		     : : : "memory")
>  
> -#define mb()	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
> -#define rmb()	membar_safe("#LoadLoad")
> -#define wmb()	membar_safe("#StoreStore")
> +#define cmm_mb()	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
> +#define cmm_rmb()	membar_safe("#LoadLoad")
> +#define cmm_wmb()	membar_safe("#StoreStore")
>  
>  typedef unsigned long long cycles_t;
>  
> diff --git a/urcu/arch_x86.h b/urcu/arch_x86.h
> index aad541e..d0a58e8 100644
> --- a/urcu/arch_x86.h
> +++ b/urcu/arch_x86.h
> @@ -32,17 +32,17 @@ extern "C" {
>  #define CACHE_LINE_SIZE	128
>  
>  #ifdef CONFIG_RCU_HAVE_FENCE
> -#define mb()    asm volatile("mfence":::"memory")
> -#define rmb()   asm volatile("lfence":::"memory")
> -#define wmb()   asm volatile("sfence"::: "memory")
> +#define cmm_mb()    asm volatile("mfence":::"memory")
> +#define cmm_rmb()   asm volatile("lfence":::"memory")
> +#define cmm_wmb()   asm volatile("sfence"::: "memory")
>  #else
>  /*
> - * Some non-Intel clones support out of order store. wmb() ceases to be a
> + * Some non-Intel clones support out of order store. cmm_wmb() ceases to be a
>   * nop for these.
>   */
> -#define mb()    asm volatile("lock; addl $0,0(%%esp)":::"memory")
> -#define rmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
> -#define wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
> +#define cmm_mb()    asm volatile("lock; addl $0,0(%%esp)":::"memory")
> +#define cmm_rmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
> +#define cmm_wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
>  #endif
>  
>  #define cpu_relax()	asm volatile("rep; nop" : : : "memory");
> diff --git a/urcu/compiler.h b/urcu/compiler.h
> index fb8b829..d67e6c5 100644
> --- a/urcu/compiler.h
> +++ b/urcu/compiler.h
> @@ -23,7 +23,7 @@
>  #define likely(x)	__builtin_expect(!!(x), 1)
>  #define unlikely(x)	__builtin_expect(!!(x), 0)
>  
> -#define barrier()	asm volatile("" : : : "memory")
> +#define cmm_barrier()	asm volatile("" : : : "memory")
>  
>  /*
>   * Instruct the compiler to perform only a single access to a variable
> diff --git a/urcu/rcuhlist.h b/urcu/rcuhlist.h
> index 89f7679..cce80aa 100644
> --- a/urcu/rcuhlist.h
> +++ b/urcu/rcuhlist.h
> @@ -35,7 +35,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *newp,
>  {
>  	newp->next = head->next;
>  	newp->prev = (struct hlist_node *)head;
> -	smp_wmb();
> +	cmm_smp_wmb();
>  	if (head->next)
>  		head->next->prev = newp;
>  	head->next = newp;
> diff --git a/urcu/rculist.h b/urcu/rculist.h
> index b0c4f1a..3a4e335 100644
> --- a/urcu/rculist.h
> +++ b/urcu/rculist.h
> @@ -34,7 +34,7 @@ static inline void list_add_rcu(list_t *newp, list_t *head)
>  {
>  	newp->next = head->next;
>  	newp->prev = head;
> -	smp_wmb();
> +	cmm_smp_wmb();
>  	head->next->prev = newp;
>  	head->next = newp;
>  }
> diff --git a/urcu/system.h b/urcu/system.h
> index 0de7dd2..11a499e 100644
> --- a/urcu/system.h
> +++ b/urcu/system.h
> @@ -22,7 +22,7 @@
>  #include <urcu/arch.h>
>  
>  /*
> - * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
> + * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come before the load.
>   */
>  #define _LOAD_SHARED(p)	       ACCESS_ONCE(p)
>  
> @@ -31,12 +31,12 @@
>   */
>  #define LOAD_SHARED(p)			\
>  	({				\
> -		smp_rmc();		\
> +		cmm_smp_rmc();		\
>  		_LOAD_SHARED(p);	\
>  	})
>  
>  /*
> - * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
> + * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should follow the store.
>   */
>  #define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })
>  
> @@ -47,7 +47,7 @@
>  #define STORE_SHARED(x, v)		\
>  	({				\
>  		typeof(x) _v = _STORE_SHARED(x, v);	\
> -		smp_wmc();		\
> +		cmm_smp_wmc();		\
>  		_v;			\
>  	})
>  
> -- 
> 1.7.3.2
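Since urcu/system.h is the main consumer of the new cmm_smp_rmc() and
cmm_smp_wmc() names, here is the intended pairing in one place (a minimal
sketch with a hypothetical flag variable):

#include <urcu/system.h>

static int flag;

static void writer_side(void)
{
	/* STORE_SHARED() is _STORE_SHARED() followed by cmm_smp_wmc(),
	 * making the store visible to the other threads. */
	STORE_SHARED(flag, 1);
}

static int reader_side(void)
{
	/* LOAD_SHARED() is cmm_smp_rmc() followed by _LOAD_SHARED(),
	 * reading a fresh value rather than a stale cached one. */
	return LOAD_SHARED(flag);
}

On cache-coherent architectures both cmm_smp_wmc() and cmm_smp_rmc()
collapse to a compiler barrier, so the pairing costs nothing beyond
ACCESS_ONCE() semantics.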
-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com