* David Goulet ([email protected]) wrote:
> This commit makes UST compatible with liburcu 0.5.2
> 
> Signed-off-by: David Goulet <[email protected]>

Committed and pushed, thanks!

Mathieu
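
[For reference, for anyone doing the same port out of tree: the change is a mechanical rename to the prefixed liburcu >= 0.5 API (struct list_head -> struct cds_list_head, LIST_HEAD()/INIT_LIST_HEAD() -> CDS_LIST_HEAD()/CDS_INIT_LIST_HEAD(), list_add()/list_del()/list_for_each_entry*() -> cds_list_*(), LOAD_SHARED()/STORE_SHARED() -> CMM_LOAD_SHARED()/CMM_STORE_SHARED(), barrier()/smp_rmb()/smp_wmb() -> cmm_barrier()/cmm_smp_rmb()/cmm_smp_wmb(), CACHE_LINE_SIZE -> CAA_CACHE_LINE_SIZE). Below is a minimal sketch of caller code against the new names; the renames are taken from the diff that follows, but the exact header split (urcu/list.h, urcu/system.h, urcu/arch.h) is an assumption about liburcu's layout, not something stated in the patch.]

/*
 * Minimal sketch (not part of the patch): user code written against the
 * prefixed liburcu >= 0.5 API.  Header names are assumptions about the
 * liburcu layout; adjust to your installation.
 */
#include <stdio.h>
#include <urcu/list.h>		/* struct cds_list_head, CDS_LIST_HEAD(), cds_list_*() */
#include <urcu/system.h>	/* CMM_LOAD_SHARED(), CMM_STORE_SHARED() */
#include <urcu/arch.h>		/* cmm_smp_wmb(), CAA_CACHE_LINE_SIZE */

struct channel {
	const char *name;
	struct cds_list_head list;		/* was: struct list_head */
};

static CDS_LIST_HEAD(channels);			/* was: LIST_HEAD(channels) */
static int ready;				/* shared flag, volatile access */

int main(void)
{
	struct channel metadata = { .name = "metadata" };
	struct channel *iter;

	cds_list_add(&metadata.list, &channels);	/* was: list_add() */

	/* Publish the list contents before flipping the flag
	 * (was: smp_wmb() + STORE_SHARED()). */
	cmm_smp_wmb();
	CMM_STORE_SHARED(ready, 1);

	if (CMM_LOAD_SHARED(ready))			/* was: LOAD_SHARED() */
		cds_list_for_each_entry(iter, &channels, list)
			printf("channel %s registered\n", iter->name);
	return 0;
}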

> ---
>  include/ust/marker.h     |    2 +-
>  include/ust/probe.h      |    2 +-
>  include/ust/processor.h  |    2 +-
>  include/ust/tracepoint.h |    4 +-
>  include/ust/ustd.h       |    2 +-
>  libust/buffers.c         |   38 ++++++++++++------------
>  libust/buffers.h         |   10 +++---
>  libust/channels.c        |   18 +++++-----
>  libust/channels.h        |    4 +-
>  libust/marker-control.c  |   28 +++++++++---------
>  libust/marker.c          |   74 +++++++++++++++++++++++-----------------------
>  libust/serialize.c       |   12 ++++----
>  libust/trace_event.c     |   10 +++---
>  libust/tracectl.c        |   32 ++++++++++----------
>  libust/tracepoint.c      |   32 ++++++++++----------
>  libust/tracer.c          |   28 +++++++++---------
>  libust/tracer.h          |    6 ++--
>  libust/tracercore.c      |    4 +-
>  libust/tracercore.h      |    4 +-
>  libust/type-serializer.c |   10 +++---
>  libustcomm/ustcomm.c     |    8 ++--
>  libustcomm/ustcomm.h     |    4 +-
>  libustd/libustd.c        |    2 +-
>  23 files changed, 168 insertions(+), 168 deletions(-)
> 
> diff --git a/include/ust/marker.h b/include/ust/marker.h
> index 6103629..0f42d5b 100644
> --- a/include/ust/marker.h
> +++ b/include/ust/marker.h
> @@ -330,7 +330,7 @@ struct lib {
>       struct marker_addr *markers_addr_start;
>  #endif
>       int markers_count;
> -     struct list_head list;
> +     struct cds_list_head list;
>  };
>  
>  extern int marker_register_lib(struct marker *markers_start, int markers_count);
> diff --git a/include/ust/probe.h b/include/ust/probe.h
> index 7285a2d..5002721 100644
> --- a/include/ust/probe.h
> +++ b/include/ust/probe.h
> @@ -38,7 +38,7 @@ struct ltt_available_probe {
>          const char *format;
>          marker_probe_func *probe_func;
>          ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
> -        struct list_head node;          /* registered probes list */
> +        struct cds_list_head node;          /* registered probes list */
>  };
>  
>  extern int ltt_probe_register(struct ltt_available_probe *pdata); 
> diff --git a/include/ust/processor.h b/include/ust/processor.h
> index 68ac258..098e6b3 100644
> --- a/include/ust/processor.h
> +++ b/include/ust/processor.h
> @@ -25,7 +25,7 @@
>  extern __thread long ust_reg_stack[500];
>  extern volatile __thread long *ust_reg_stack_ptr;
>  
> -#define ____cacheline_aligned __attribute__((aligned(CACHE_LINE_SIZE)))
> +#define ____cacheline_aligned __attribute__((aligned(CAA_CACHE_LINE_SIZE)))
>  
>  #ifdef __i386
>  
> diff --git a/include/ust/tracepoint.h b/include/ust/tracepoint.h
> index 1c4a384..1d286a0 100644
> --- a/include/ust/tracepoint.h
> +++ b/include/ust/tracepoint.h
> @@ -214,7 +214,7 @@ static inline void tracepoint_synchronize_unregister(void)
>  struct tracepoint_lib {
>       struct tracepoint *tracepoints_start;
>       int tracepoints_count;
> -     struct list_head list;
> +     struct cds_list_head list;
>  };
>  
>  extern int tracepoint_register_lib(struct tracepoint *tracepoints_start,
> @@ -346,7 +346,7 @@ struct trace_event {
>  struct trace_event_lib {
>       struct trace_event *trace_events_start;
>       int trace_events_count;
> -     struct list_head list;
> +     struct cds_list_head list;
>  };
>  
>  struct trace_event_iter {
> diff --git a/include/ust/ustd.h b/include/ust/ustd.h
> index 0757acb..6336e69 100644
> --- a/include/ust/ustd.h
> +++ b/include/ust/ustd.h
> @@ -79,7 +79,7 @@ struct libustd_instance {
>       struct libustd_callbacks *callbacks;
>       int quit_program;
>       int is_init;
> -     struct list_head connections;
> +     struct cds_list_head connections;
>       int epoll_fd;
>       struct ustcomm_sock *listen_sock;
>       char *sock_path;
> diff --git a/libust/buffers.c b/libust/buffers.c
> index bb5d8c5..2e4bb66 100644
> --- a/libust/buffers.c
> +++ b/libust/buffers.c
> @@ -43,7 +43,7 @@ struct ltt_reserve_switch_offsets {
>  
>  
>  static DEFINE_MUTEX(ust_buffers_channels_mutex);
> -static LIST_HEAD(ust_buffers_channels);
> +static CDS_LIST_HEAD(ust_buffers_channels);
>  
>  static int get_n_cpus(void)
>  {
> @@ -288,7 +288,7 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
>               if (result == -1)
>                       goto error;
>       }
> -     list_add(&chan->list, &ust_buffers_channels);
> +     cds_list_add(&chan->list, &ust_buffers_channels);
>       pthread_mutex_unlock(&ust_buffers_channels_mutex);
>  
>       return 0;
> @@ -320,7 +320,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
>                       ust_buffers_close_buf(chan->buf[i]);
>       }
>  
> -     list_del(&chan->list);
> +     cds_list_del(&chan->list);
>       kref_put(&chan->kref, ust_buffers_destroy_channel);
>       pthread_mutex_unlock(&ust_buffers_channels_mutex);
>  }
> @@ -349,7 +349,7 @@ static void ltt_buffer_begin(struct ust_buffer *buf,
>       header->cycle_count_begin = tsc;
>       header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
>       header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
> -     /* FIXME: add memory barrier? */
> +     /* FIXME: add memory cmm_barrier? */
>       ltt_write_trace_header(channel->trace, header);
>  }
>  
> @@ -386,7 +386,7 @@ static notrace void ltt_buf_unfull(struct ust_buffer *buf,
>  }
>  
>  /*
> - * Promote compiler barrier to a smp_mb().
> + * Promote compiler cmm_barrier to a smp_mb().
>   * For the specific LTTng case, this IPI call should be removed if the
>   * architecture does not reorder writes.  This should eventually be provided by
>   * a separate architecture-specific infrastructure.
> @@ -414,7 +414,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
>        * this is OK because then there is no wmb to execute there.
>        * If our thread is executing on the same CPU as the on the buffers
>        * belongs to, we don't have to synchronize it at all. If we are
> -      * migrated, the scheduler will take care of the memory barriers.
> +      * migrated, the scheduler will take care of the memory cmm_barriers.
>        * Normally, smp_call_function_single() should ensure program order when
>        * executing the remote function, which implies that it surrounds the
>        * function execution with :
> @@ -429,7 +429,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
>        * smp_mb()
>        *
>        * However, smp_call_function_single() does not seem to clearly execute
> -      * such barriers. It depends on spinlock semantic to provide the barrier
> +      * such cmm_barriers. It depends on spinlock semantic to provide the cmm_barrier
>        * before executing the IPI and, when busy-looping, csd_lock_wait only
>        * executes smp_mb() when it has to wait for the other CPU.
>        *
> @@ -437,9 +437,9 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
>        * required ourself, even if duplicated. It has no performance impact
>        * anyway.
>        *
> -      * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
> +      * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs
>        * read and write vs write. They do not ensure core synchronization. We
> -      * really have to ensure total order between the 3 barriers running on
> +      * really have to ensure total order between the 3 cmm_barriers running on
>        * the 2 CPUs.
>        */
>  //ust// #ifdef LTT_NO_IPI_BARRIER
> @@ -447,7 +447,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
>        * Local rmb to match the remote wmb to read the commit count before the
>        * buffer data and the write offset.
>        */
> -     smp_rmb();
> +     cmm_smp_rmb();
>  //ust// #else
>  //ust//      if (raw_smp_processor_id() != buf->cpu) {
>  //ust//              smp_mb();       /* Total order with IPI handler smp_mb() */
> @@ -895,10 +895,10 @@ static void ltt_reserve_switch_old_subbuf(
>  
>       /*
>        * Must write slot data before incrementing commit count.
> -      * This compiler barrier is upgraded into a smp_wmb() by the IPI
> -      * sent by get_subbuf() when it does its smp_rmb().
> +      * This compiler cmm_barrier is upgraded into a cmm_smp_wmb() by the IPI
> +      * sent by get_subbuf() when it does its cmm_smp_rmb().
>        */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
>       commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
>       ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
> @@ -924,10 +924,10 @@ static void ltt_reserve_switch_new_subbuf(
>  
>       /*
>        * Must write slot data before incrementing commit count.
> -      * This compiler barrier is upgraded into a smp_wmb() by the IPI
> -      * sent by get_subbuf() when it does its smp_rmb().
> +      * This compiler cmm_barrier is upgraded into a cmm_smp_wmb() by the IPI
> +      * sent by get_subbuf() when it does its cmm_smp_rmb().
>        */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       uatomic_add(&buf->commit_count[beginidx].cc, 
> ltt_subbuffer_header_size());
>       commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
>       /* Check if the written buffer has to be delivered */
> @@ -969,10 +969,10 @@ static void ltt_reserve_end_switch_current(
>  
>       /*
>        * Must write slot data before incrementing commit count.
> -      * This compiler barrier is upgraded into a smp_wmb() by the IPI
> -      * sent by get_subbuf() when it does its smp_rmb().
> +      * This compiler cmm_barrier is upgraded into a cmm_smp_wmb() by the IPI
> +      * sent by get_subbuf() when it does its cmm_smp_rmb().
>        */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       uatomic_add(&buf->commit_count[endidx].cc, padding_size);
>       commit_count = uatomic_read(&buf->commit_count[endidx].cc);
>       ltt_check_deliver(chan, buf,
> diff --git a/libust/buffers.h b/libust/buffers.h
> index a2ad83e..e7630bb 100644
> --- a/libust/buffers.h
> +++ b/libust/buffers.h
> @@ -86,7 +86,7 @@ struct ust_buffer {
>        * List of buffers with an open pipe, used for fork and forced subbuffer
>        * switch.
>        */
> -     struct list_head open_buffers_list;
> +     struct cds_list_head open_buffers_list;
>  
>       unsigned int finalized;
>  //ust//      struct timer_list switch_timer; /* timer for periodical switch */
> @@ -276,7 +276,7 @@ static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buff
>       consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
>       commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
>       /*
> -      * No memory barrier here, since we are only interested
> +      * No memory cmm_barrier here, since we are only interested
>        * in a statistically correct polling result. The next poll will
>        * get the data is we are racing. The mb() that ensures correct
>        * memory order is in get_subbuf.
> @@ -375,7 +375,7 @@ static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
>        * Perform retryable operations.
>        */
>       /* FIXME: make this really per cpu? */
> -     if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
> +     if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
>               DBG("Dropping event because nesting is too deep.");
>               uatomic_inc(&buf->events_lost);
>               return -EPERM;
> @@ -488,7 +488,7 @@ static __inline__ void ltt_commit_slot(
>       long endidx = SUBBUF_INDEX(offset_end - 1, chan);
>       long commit_count;
>  
> -     smp_wmb();
> +     cmm_smp_wmb();
>  
>       uatomic_add(&buf->commit_count[endidx].cc, slot_size);
>       /*
> @@ -574,7 +574,7 @@ size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
>        * don't have constants, so gcc generally uses a function call.
>        */
>       for (; len > 0; len--) {
> -             *(u8 *)dest = LOAD_SHARED(*(const u8 *)src);
> +             *(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
>               /* Check with dest, because src may be modified concurrently */
>               if (*(const u8 *)dest == '\0') {
>                       len--;
> diff --git a/libust/channels.c b/libust/channels.c
> index 57e9801..6716b5d 100644
> --- a/libust/channels.c
> +++ b/libust/channels.c
> @@ -33,7 +33,7 @@
>   * ltt_channel_mutex mutex may be nested inside markers mutex.
>   */
>  static DEFINE_MUTEX(ltt_channel_mutex);
> -static LIST_HEAD(ltt_channels);
> +static CDS_LIST_HEAD(ltt_channels);
>  /*
>   * Index of next channel in array. Makes sure that as long as a trace channel is
>   * allocated, no array index will be re-used when a channel is freed and then
> @@ -51,7 +51,7 @@ static struct ltt_channel_setting *lookup_channel(const char *name)
>  {
>       struct ltt_channel_setting *iter;
>  
> -     list_for_each_entry(iter, &ltt_channels, list)
> +     cds_list_for_each_entry(iter, &ltt_channels, list)
>               if (strcmp(name, iter->name) == 0)
>                       return iter;
>       return NULL;
> @@ -72,11 +72,11 @@ static void release_channel_setting(struct kref *kref)
>  
>       if (uatomic_read(&index_kref.refcount) == 0
>           && uatomic_read(&setting->kref.refcount) == 0) {
> -             list_del(&setting->list);
> +             cds_list_del(&setting->list);
>               free(setting);
>  
>               free_index = 0;
> -             list_for_each_entry(iter, &ltt_channels, list) {
> +             cds_list_for_each_entry(iter, &ltt_channels, list) {
>                       iter->index = free_index++;
>                       iter->free_event_id = 0;
>               }
> @@ -94,7 +94,7 @@ static void release_trace_channel(struct kref *kref)
>  {
>       struct ltt_channel_setting *iter, *n;
>  
> -     list_for_each_entry_safe(iter, n, &ltt_channels, list)
> +     cds_list_for_each_entry_safe(iter, n, &ltt_channels, list)
>               release_channel_setting(&iter->kref);
>  }
>  
> @@ -124,7 +124,7 @@ int ltt_channels_register(const char *name)
>               ret = -ENOMEM;
>               goto end;
>       }
> -     list_add(&setting->list, &ltt_channels);
> +     cds_list_add(&setting->list, &ltt_channels);
>       strncpy(setting->name, name, PATH_MAX-1);
>       setting->index = free_index++;
>  init_kref:
> @@ -197,7 +197,7 @@ const char *ltt_channels_get_name_from_index(unsigned int index)
>  {
>       struct ltt_channel_setting *iter;
>  
> -     list_for_each_entry(iter, &ltt_channels, list)
> +     cds_list_for_each_entry(iter, &ltt_channels, list)
>               if (iter->index == index && uatomic_read(&iter->kref.refcount))
>                       return iter->name;
>       return NULL;
> @@ -209,7 +209,7 @@ ltt_channels_get_setting_from_name(const char *name)
>  {
>       struct ltt_channel_setting *iter;
>  
> -     list_for_each_entry(iter, &ltt_channels, list)
> +     cds_list_for_each_entry(iter, &ltt_channels, list)
>               if (!strcmp(iter->name, name)
>                   && uatomic_read(&iter->kref.refcount))
>                       return iter;
> @@ -269,7 +269,7 @@ struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
>               WARN("ltt_channel_struct: channel null after alloc");
>               goto end;
>       }
> -     list_for_each_entry(iter, &ltt_channels, list) {
> +     cds_list_for_each_entry(iter, &ltt_channels, list) {
>               if (!uatomic_read(&iter->kref.refcount))
>                       continue;
>               channel[iter->index].subbuf_size = iter->subbuf_size;
> diff --git a/libust/channels.h b/libust/channels.h
> index 2000430..6db8e63 100644
> --- a/libust/channels.h
> +++ b/libust/channels.h
> @@ -61,14 +61,14 @@ struct ust_channel {
>  
>       u32 version;
>       size_t alloc_size;
> -     struct list_head list;
> +     struct cds_list_head list;
>  } ____cacheline_aligned;
>  
>  struct ltt_channel_setting {
>       unsigned int subbuf_size;
>       unsigned int subbuf_cnt;
>       struct kref kref;       /* Number of references to structure content */
> -     struct list_head list;
> +     struct cds_list_head list;
>       unsigned int index;     /* index of channel in trace channel array */
>       u16 free_event_id;      /* Next event ID to allocate */
>       char name[PATH_MAX];
> diff --git a/libust/marker-control.c b/libust/marker-control.c
> index db7311a..3ad2e6a 100644
> --- a/libust/marker-control.c
> +++ b/libust/marker-control.c
> @@ -31,7 +31,7 @@
>  #define DEFAULT_CHANNEL "cpu"
>  #define DEFAULT_PROBE "default"
>  
> -LIST_HEAD(probes_list);
> +CDS_LIST_HEAD(probes_list);
>  
>  /*
>   * Mutex protecting the probe slab cache.
> @@ -47,11 +47,11 @@ struct ltt_available_probe default_probe = {
>  };
>  
>  //ust//static struct kmem_cache *markers_loaded_cachep;
> -static LIST_HEAD(markers_loaded_list);
> +static CDS_LIST_HEAD(markers_loaded_list);
>  /*
>   * List sorted by name strcmp order.
>   */
> -static LIST_HEAD(probes_registered_list);
> +static CDS_LIST_HEAD(probes_registered_list);
>  
>  //ust// static struct proc_dir_entry *pentry;
>  
> @@ -64,7 +64,7 @@ static struct ltt_available_probe *get_probe_from_name(const char *pname)
>  
>       if (!pname)
>               pname = DEFAULT_PROBE;
> -     list_for_each_entry(iter, &probes_registered_list, node) {
> +     cds_list_for_each_entry(iter, &probes_registered_list, node) {
>               comparison = strcmp(pname, iter->name);
>               if (!comparison)
>                       found = 1;
> @@ -108,19 +108,19 @@ int ltt_probe_register(struct ltt_available_probe *pdata)
>       struct ltt_available_probe *iter;
>  
>       pthread_mutex_lock(&probes_mutex);
> -     list_for_each_entry_reverse(iter, &probes_registered_list, node) {
> +     cds_list_for_each_entry_reverse(iter, &probes_registered_list, node) {
>               comparison = strcmp(pdata->name, iter->name);
>               if (!comparison) {
>                       ret = -EBUSY;
>                       goto end;
>               } else if (comparison > 0) {
>                       /* We belong to the location right after iter. */
> -                     list_add(&pdata->node, &iter->node);
> +                     cds_list_add(&pdata->node, &iter->node);
>                       goto end;
>               }
>       }
>       /* Should be added at the head of the list */
> -     list_add(&pdata->node, &probes_registered_list);
> +     cds_list_add(&pdata->node, &probes_registered_list);
>  end:
>       pthread_mutex_unlock(&probes_mutex);
>       return ret;
> @@ -135,17 +135,17 @@ int ltt_probe_unregister(struct ltt_available_probe *pdata)
>       struct ltt_active_marker *amark, *tmp;
>  
>       pthread_mutex_lock(&probes_mutex);
> -     list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
> +     cds_list_for_each_entry_safe(amark, tmp, &markers_loaded_list, node) {
>               if (amark->probe == pdata) {
>                       ret = marker_probe_unregister_private_data(
>                               pdata->probe_func, amark);
>                       if (ret)
>                               goto end;
> -                     list_del(&amark->node);
> +                     cds_list_del(&amark->node);
>                       free(amark);
>               }
>       }
> -     list_del(&pdata->node);
> +     cds_list_del(&pdata->node);
>  end:
>       pthread_mutex_unlock(&probes_mutex);
>       return ret;
> @@ -189,7 +189,7 @@ int ltt_marker_connect(const char *channel, const char *mname,
>       if (ret)
>               free(pdata);
>       else
> -             list_add(&pdata->node, &markers_loaded_list);
> +             cds_list_add(&pdata->node, &markers_loaded_list);
>  end:
>       pthread_mutex_unlock(&probes_mutex);
>       ltt_unlock_traces();
> @@ -227,7 +227,7 @@ int ltt_marker_disconnect(const char *channel, const char *mname,
>       if (ret)
>               goto end;
>       else {
> -             list_del(&pdata->node);
> +             cds_list_del(&pdata->node);
>               free(pdata);
>       }
>  end:
> @@ -391,10 +391,10 @@ static void disconnect_all_markers(void)
>  {
>       struct ltt_active_marker *pdata, *tmp;
>  
> -     list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) {
> +     cds_list_for_each_entry_safe(pdata, tmp, &markers_loaded_list, node) {
>               marker_probe_unregister_private_data(pdata->probe->probe_func,
>                       pdata);
> -             list_del(&pdata->node);
> +             cds_list_del(&pdata->node);
>               free(pdata);
>       }
>  }
> diff --git a/libust/marker.c b/libust/marker.c
> index cb8ffc1..39e12bb 100644
> --- a/libust/marker.c
> +++ b/libust/marker.c
> @@ -45,7 +45,7 @@ static const int marker_debug;
>   */
>  static DEFINE_MUTEX(markers_mutex);
>  
> -static LIST_HEAD(libs);
> +static CDS_LIST_HEAD(libs);
>  
>  
>  void lock_markers(void)
> @@ -127,7 +127,7 @@ notrace void __mark_empty_function(const struct marker *mdata,
>   * @...:  Variable argument list.
>   *
>   * Since we do not use "typical" pointer based RCU in the 1 argument case, we
> - * need to put a full smp_rmb() in this branch. This is why we do not use
> + * need to put a full cmm_smp_rmb() in this branch. This is why we do not use
>   * rcu_dereference() for the pointer read.
>   */
>  notrace void marker_probe_cb(const struct marker *mdata,
> @@ -146,12 +146,12 @@ notrace void marker_probe_cb(const struct marker *mdata,
>       if (likely(!ptype)) {
>               marker_probe_func *func;
>               /* Must read the ptype before ptr. They are not data dependant,
> -              * so we put an explicit smp_rmb() here. */
> -             smp_rmb();
> +              * so we put an explicit cmm_smp_rmb() here. */
> +             cmm_smp_rmb();
>               func = mdata->single.func;
>               /* Must read the ptr before private data. They are not data
> -              * dependant, so we put an explicit smp_rmb() here. */
> -             smp_rmb();
> +              * dependant, so we put an explicit cmm_smp_rmb() here. */
> +             cmm_smp_rmb();
>               va_start(args, regs);
>               func(mdata, mdata->single.probe_private, regs, call_private,
>                       mdata->format, &args);
> @@ -162,16 +162,16 @@ notrace void marker_probe_cb(const struct marker *mdata,
>               /*
>                * Read mdata->ptype before mdata->multi.
>                */
> -             smp_rmb();
> +             cmm_smp_rmb();
>               multi = mdata->multi;
>               /*
>                * multi points to an array, therefore accessing the array
>                * depends on reading multi. However, even in this case,
>                * we must insure that the pointer is read _before_ the array
> -              * data. Same as rcu_dereference, but we need a full smp_rmb()
> -              * in the fast path, so put the explicit barrier here.
> +              * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
> +              * in the fast path, so put the explicit cmm_barrier here.
>                */
> -             smp_read_barrier_depends();
> +             cmm_smp_read_barrier_depends();
>               for (i = 0; multi[i].func; i++) {
>                       va_start(args, regs);
>                       multi[i].func(mdata, multi[i].probe_private,
> @@ -202,12 +202,12 @@ static notrace void marker_probe_cb_noarg(const struct marker *mdata,
>       if (likely(!ptype)) {
>               marker_probe_func *func;
>               /* Must read the ptype before ptr. They are not data dependant,
> -              * so we put an explicit smp_rmb() here. */
> -             smp_rmb();
> +              * so we put an explicit cmm_smp_rmb() here. */
> +             cmm_smp_rmb();
>               func = mdata->single.func;
>               /* Must read the ptr before private data. They are not data
> -              * dependant, so we put an explicit smp_rmb() here. */
> -             smp_rmb();
> +              * dependant, so we put an explicit cmm_smp_rmb() here. */
> +             cmm_smp_rmb();
>               func(mdata, mdata->single.probe_private, regs, call_private,
>                       mdata->format, &args);
>       } else {
> @@ -216,16 +216,16 @@ static notrace void marker_probe_cb_noarg(const struct marker *mdata,
>               /*
>                * Read mdata->ptype before mdata->multi.
>                */
> -             smp_rmb();
> +             cmm_smp_rmb();
>               multi = mdata->multi;
>               /*
>                * multi points to an array, therefore accessing the array
>                * depends on reading multi. However, even in this case,
>                * we must insure that the pointer is read _before_ the array
> -              * data. Same as rcu_dereference, but we need a full smp_rmb()
> -              * in the fast path, so put the explicit barrier here.
> +              * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
> +              * in the fast path, so put the explicit cmm_barrier here.
>                */
> -             smp_read_barrier_depends();
> +             cmm_smp_read_barrier_depends();
>               for (i = 0; multi[i].func; i++)
>                       multi[i].func(mdata, multi[i].probe_private, regs,
>                               call_private, mdata->format, &args);
> @@ -239,7 +239,7 @@ static void free_old_closure(struct rcu_head *head)
>               struct marker_entry, rcu);
>       free(entry->oldptr);
>       /* Make sure we free the data before setting the pending flag to 0 */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       entry->rcu_pending = 0;
>  }
>  
> @@ -497,7 +497,7 @@ static int remove_marker(const char *channel, const char *name)
>       WARN_ON(ret);
>       /* Make sure the call_rcu has been executed */
>  //ust//      if (e->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       free(e);
>       return 0;
>  }
> @@ -563,7 +563,7 @@ static int set_marker(struct marker_entry *entry, struct marker *elem,
>        * Make sure the private data is valid when we update the
>        * single probe ptr.
>        */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       elem->single.func = entry->single.func;
>       /*
>        * We also make sure that the new probe callbacks array is consistent
> @@ -574,7 +574,7 @@ static int set_marker(struct marker_entry *entry, struct marker *elem,
>        * Update the function or multi probe array pointer before setting the
>        * ptype.
>        */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       elem->ptype = entry->ptype;
>  
>       if (elem->tp_name && (active ^ _imv_read(elem->state))) {
> @@ -641,7 +641,7 @@ static void disable_marker(struct marker *elem)
>       elem->state__imv = 0;
>       elem->single.func = __mark_empty_function;
>       /* Update the function before setting the ptype */
> -     smp_wmb();
> +     cmm_smp_wmb();
>       elem->ptype = 0;        /* single probe */
>       /*
>        * Leave the private data and channel_id/event_id there, because removal
> @@ -716,7 +716,7 @@ static void lib_update_markers(void)
>  
>       /* FIXME: we should probably take a mutex here on libs */
>  //ust//      pthread_mutex_lock(&module_mutex);
> -     list_for_each_entry(lib, &libs, list)
> +     cds_list_for_each_entry(lib, &libs, list)
>               marker_update_probe_range(lib->markers_start,
>                               lib->markers_start + lib->markers_count);
>  //ust//      pthread_mutex_unlock(&module_mutex);
> @@ -816,7 +816,7 @@ int marker_probe_register(const char *channel, const char *name,
>        * make sure it's executed now.
>        */
>  //ust//      if (entry->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       old = marker_entry_add_probe(entry, probe, probe_private);
>       if (IS_ERR(old)) {
>               ret = PTR_ERR(old);
> @@ -835,11 +835,11 @@ int marker_probe_register(const char *channel, const char *name,
>       if (!entry)
>               goto end;
>  //ust//      if (entry->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       entry->oldptr = old;
>       entry->rcu_pending = 1;
>       /* write rcu_pending before calling the RCU callback */
> -     smp_wmb();
> +     cmm_smp_wmb();
>  //ust//      call_rcu_sched(&entry->rcu, free_old_closure);
>       synchronize_rcu(); free_old_closure(&entry->rcu);
>       goto end;
> @@ -881,7 +881,7 @@ int marker_probe_unregister(const char *channel, const char *name,
>       if (!entry)
>               goto end;
>  //ust//      if (entry->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       old = marker_entry_remove_probe(entry, probe, probe_private);
>       pthread_mutex_unlock(&markers_mutex);
>  
> @@ -892,11 +892,11 @@ int marker_probe_unregister(const char *channel, const char *name,
>       if (!entry)
>               goto end;
>  //ust//      if (entry->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       entry->oldptr = old;
>       entry->rcu_pending = 1;
>       /* write rcu_pending before calling the RCU callback */
> -     smp_wmb();
> +     cmm_smp_wmb();
>  //ust//      call_rcu_sched(&entry->rcu, free_old_closure);
>       synchronize_rcu(); free_old_closure(&entry->rcu);
>       remove_marker(channel, name);   /* Ignore busy error message */
> @@ -966,7 +966,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
>               goto end;
>       }
>  //ust//      if (entry->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       old = marker_entry_remove_probe(entry, NULL, probe_private);
>       channel = strdup(entry->channel);
>       name = strdup(entry->name);
> @@ -979,11 +979,11 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
>       if (!entry)
>               goto end;
>  //ust//      if (entry->rcu_pending)
> -//ust//              rcu_barrier_sched();
> +//ust//              rcu_cmm_barrier_sched();
>       entry->oldptr = old;
>       entry->rcu_pending = 1;
>       /* write rcu_pending before calling the RCU callback */
> -     smp_wmb();
> +     cmm_smp_wmb();
>  //ust//      call_rcu_sched(&entry->rcu, free_old_closure);
>       synchronize_rcu(); free_old_closure(&entry->rcu);
>       /* Ignore busy error message */
> @@ -1086,7 +1086,7 @@ int lib_get_iter_markers(struct marker_iter *iter)
>       int found = 0;
>  
>  //ust//      pthread_mutex_lock(&module_mutex);
> -     list_for_each_entry(iter_lib, &libs, list) {
> +     cds_list_for_each_entry(iter_lib, &libs, list) {
>               if (iter_lib < iter->lib)
>                       continue;
>               else if (iter_lib > iter->lib)
> @@ -1370,7 +1370,7 @@ int marker_register_lib(struct marker *markers_start, int markers_count)
>  
>       /* FIXME: maybe protect this with its own mutex? */
>       lock_markers();
> -     list_add(&pl->list, &libs);
> +     cds_list_add(&pl->list, &libs);
>       unlock_markers();
>  
>       new_markers(markers_start, markers_start + markers_count);
> @@ -1394,10 +1394,10 @@ int marker_unregister_lib(struct marker *markers_start)
>  
>       /* FIXME: we should probably take a mutex here on libs */
>  //ust//      pthread_mutex_lock(&module_mutex);
> -     list_for_each_entry(lib, &libs, list) {
> +     cds_list_for_each_entry(lib, &libs, list) {
>               if(lib->markers_start == markers_start) {
>                       struct lib *lib2free = lib;
> -                     list_del(&lib->list);
> +                     cds_list_del(&lib->list);
>                       free(lib2free);
>                       break;
>               }
> diff --git a/libust/serialize.c b/libust/serialize.c
> index bd947ab..8aa3f4b 100644
> --- a/libust/serialize.c
> +++ b/libust/serialize.c
> @@ -429,7 +429,7 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
>                        */
>                       tracer_stack_pos++;
>                       assert(tracer_stack_pos <= TRACER_STACK_LEN);
> -                     barrier();
> +                     cmm_barrier();
>                       tracer_stack[*stack_pos_ctx] =
>                                       strlen(tmp.v_string.s) + 1;
>               }
> @@ -657,9 +657,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
>       cpu = ust_get_cpu();
>  
>       /* Force volatile access. */
> -     STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
> +     CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
>       stack_pos_ctx = tracer_stack_pos;
> -     barrier();
> +     cmm_barrier();
>  
>       pdata = (struct ltt_active_marker *)probe_data;
>       eID = mdata->event_id;
> @@ -685,7 +685,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
>       va_end(args_copy);
>  
>       /* Iterate on each trace */
> -     list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
> +     cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
>               /*
>                * Expect the filter to filter out events. If we get here,
>                * we went through tracepoint activation as a first step.
> @@ -745,9 +745,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
>               DBG("just commited event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
>       }
>  
> -     barrier();
> +     cmm_barrier();
>       tracer_stack_pos = stack_pos_ctx;
> -     STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
> +     CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
>  
>       rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
>  }
> diff --git a/libust/trace_event.c b/libust/trace_event.c
> index af1e3fb..76628c5 100644
> --- a/libust/trace_event.c
> +++ b/libust/trace_event.c
> @@ -27,7 +27,7 @@
>  #include <urcu-bp.h>
>  
>  /* libraries that contain trace_events (struct trace_event_lib) */
> -static LIST_HEAD(libs);
> +static CDS_LIST_HEAD(libs);
>  
>  static DEFINE_MUTEX(trace_events_mutex);
>  
> @@ -47,7 +47,7 @@ int lib_get_iter_trace_events(struct trace_event_iter *iter)
>       struct trace_event_lib *iter_lib;
>       int found = 0;
>  
> -     list_for_each_entry(iter_lib, &libs, list) {
> +     cds_list_for_each_entry(iter_lib, &libs, list) {
>               if (iter_lib < iter->lib)
>                       continue;
>               else if (iter_lib > iter->lib)
> @@ -128,7 +128,7 @@ int trace_event_register_lib(struct trace_event *trace_events_start,
>  
>       /* FIXME: maybe protect this with its own mutex? */
>       pthread_mutex_lock(&trace_events_mutex);
> -     list_add(&pl->list, &libs);
> +     cds_list_add(&pl->list, &libs);
>       pthread_mutex_unlock(&trace_events_mutex);
>  
>       DBG("just registered a trace_events section from %p and having %d trace_events", trace_events_start, trace_events_count);
> @@ -142,10 +142,10 @@ int trace_event_unregister_lib(struct trace_event *trace_events_start)
>  
>       pthread_mutex_lock(&trace_events_mutex);
>  
> -     list_for_each_entry(lib, &libs, list) {
> +     cds_list_for_each_entry(lib, &libs, list) {
>               if(lib->trace_events_start == trace_events_start) {
>                       struct trace_event_lib *lib2free = lib;
> -                     list_del(&lib->list);
> +                     cds_list_del(&lib->list);
>                       free(lib2free);
>                       break;
>               }
> diff --git a/libust/tracectl.c b/libust/tracectl.c
> index d5ca7d2..7c05467 100644
> --- a/libust/tracectl.c
> +++ b/libust/tracectl.c
> @@ -66,9 +66,9 @@ static struct ustcomm_sock *listen_sock;
>  
>  extern struct chan_info_struct chan_infos[];
>  
> -static struct list_head open_buffers_list = LIST_HEAD_INIT(open_buffers_list);
> +static struct cds_list_head open_buffers_list = CDS_LIST_HEAD_INIT(open_buffers_list);
>  
> -static struct list_head ust_socks = LIST_HEAD_INIT(ust_socks);
> +static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
>  
>  /* volatile because shared between the listener and the main thread */
>  int buffers_to_export = 0;
> @@ -216,8 +216,8 @@ static void inform_consumer_daemon(const char *trace_name)
>                               ch_name = trace->channels[i].channel_name;
>                               request_buffer_consumer(sock, trace_name,
>                                                       ch_name, j);
> -                             STORE_SHARED(buffers_to_export,
> -                                          LOAD_SHARED(buffers_to_export)+1);
> +                             CMM_STORE_SHARED(buffers_to_export,
> +                                          CMM_LOAD_SHARED(buffers_to_export)+1);
>                       }
>               }
>       }
> @@ -473,13 +473,13 @@ static int notify_buffer_mapped(const char *trace_name,
>        */
>       if (uatomic_read(&buf->consumed) == 0) {
>               DBG("decrementing buffers_to_export");
> -             STORE_SHARED(buffers_to_export, LOAD_SHARED(buffers_to_export)-1);
> +             CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
>       }
>  
>       /* The buffer has been exported, ergo, we can add it to the
>        * list of open buffers
>        */
> -     list_add(&buf->open_buffers_list, &open_buffers_list);
> +     cds_list_add(&buf->open_buffers_list, &open_buffers_list);
>  
>  unlock_traces:
>       ltt_unlock_traces();
> @@ -539,7 +539,7 @@ static void force_subbuf_switch()
>  {
>       struct ust_buffer *buf;
>  
> -     list_for_each_entry(buf, &open_buffers_list,
> +     cds_list_for_each_entry(buf, &open_buffers_list,
>                           open_buffers_list) {
>               ltt_force_switch(buf, FORCE_FLUSH);
>       }
> @@ -1312,7 +1312,7 @@ static void __attribute__((constructor)) init()
>       if (getenv("UST_OVERWRITE")) {
>               int val = atoi(getenv("UST_OVERWRITE"));
>               if (val == 0 || val == 1) {
> -                     STORE_SHARED(ust_channels_overwrite_by_default, val);
> +                     CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
>               } else {
>                       WARN("invalid value for UST_OVERWRITE");
>               }
> @@ -1321,7 +1321,7 @@ static void __attribute__((constructor)) init()
>       if (getenv("UST_AUTOCOLLECT")) {
>               int val = atoi(getenv("UST_AUTOCOLLECT"));
>               if (val == 0 || val == 1) {
> -                     STORE_SHARED(ust_channels_request_collection_by_default, val);
> +                     CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
>               } else {
>                       WARN("invalid value for UST_AUTOCOLLECT");
>               }
> @@ -1453,7 +1453,7 @@ static int trace_recording(void)
>  
>       ltt_lock_traces();
>  
> -     list_for_each_entry(trace, &ltt_traces.head, list) {
> +     cds_list_for_each_entry(trace, &ltt_traces.head, list) {
>               if (trace->active) {
>                       retval = 1;
>                       break;
> @@ -1513,10 +1513,10 @@ static void __attribute__((destructor)) keepalive()
>               return;
>       }
>  
> -     if (trace_recording() && LOAD_SHARED(buffers_to_export)) {
> +     if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
>               int total = 0;
>               DBG("Keeping process alive for consumer daemon...");
> -             while (LOAD_SHARED(buffers_to_export)) {
> +             while (CMM_LOAD_SHARED(buffers_to_export)) {
>                       const int interv = 200000;
>                       restarting_usleep(interv);
>                       total += interv;
> @@ -1572,12 +1572,12 @@ static void ust_fork(void)
>       ltt_trace_stop("auto");
>       ltt_trace_destroy("auto", 1);
>       /* Delete all active connections, but leave them in the epoll set */
> -     list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
> +     cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
>               ustcomm_del_sock(sock, 1);
>       }
>  
>       /* Delete all blocked consumers */
> -     list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
> +     cds_list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
>                                open_buffers_list) {
>               result = close(buf->data_ready_fd_read);
>               if (result == -1) {
> @@ -1587,7 +1587,7 @@ static void ust_fork(void)
>               if (result == -1) {
>                       PERROR("close");
>               }
> -             list_del(&buf->open_buffers_list);
> +             cds_list_del(&buf->open_buffers_list);
>       }
>  
>       /* Clean up the listener socket and epoll, keeping the scoket file */
> @@ -1595,7 +1595,7 @@ static void ust_fork(void)
>       close(epoll_fd);
>  
>       /* Re-start the launch sequence */
> -     STORE_SHARED(buffers_to_export, 0);
> +     CMM_STORE_SHARED(buffers_to_export, 0);
>       have_listener = 0;
>  
>       /* Set up epoll */
> diff --git a/libust/tracepoint.c b/libust/tracepoint.c
> index 6fe9cd7..dbaca6f 100644
> --- a/libust/tracepoint.c
> +++ b/libust/tracepoint.c
> @@ -35,7 +35,7 @@
>  static const int tracepoint_debug;
>  
>  /* libraries that contain tracepoints (struct tracepoint_lib) */
> -static LIST_HEAD(libs);
> +static CDS_LIST_HEAD(libs);
>  
>  /*
>   * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
> @@ -67,7 +67,7 @@ struct tracepoint_entry {
>  struct tp_probes {
>       union {
>  //ust//              struct rcu_head rcu;
> -             struct list_head list;
> +             struct cds_list_head list;
>       } u;
>       struct probe probes[0];
>  };
> @@ -257,10 +257,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
>       WARN_ON(strcmp((*entry)->name, elem->name) != 0);
>  
>       /*
> -      * rcu_assign_pointer has a smp_wmb() which makes sure that the new
> +      * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
>        * probe callbacks array is consistent before setting a pointer to it.
>        * This array is referenced by __DO_TRACE from
> -      * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
> +      * include/linux/tracepoints.h. A matching cmm_smp_read_barrier_depends()
>        * is used.
>        */
>       rcu_assign_pointer(elem->probes, (*entry)->probes);
> @@ -314,7 +314,7 @@ static void lib_update_tracepoints(void)
>       struct tracepoint_lib *lib;
>  
>  //ust//      pthread_mutex_lock(&module_mutex);
> -     list_for_each_entry(lib, &libs, list)
> +     cds_list_for_each_entry(lib, &libs, list)
>               tracepoint_update_probe_range(lib->tracepoints_start,
>                               lib->tracepoints_start + lib->tracepoints_count);
>  //ust//      pthread_mutex_unlock(&module_mutex);
> @@ -420,7 +420,7 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data)
>  }
>  //ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
>  
> -static LIST_HEAD(old_probes);
> +static CDS_LIST_HEAD(old_probes);
>  static int need_update;
>  
>  static void tracepoint_add_old_probes(void *old)
> @@ -429,7 +429,7 @@ static void tracepoint_add_old_probes(void *old)
>       if (old) {
>               struct tp_probes *tp_probes = _ust_container_of(old,
>                       struct tp_probes, probes[0]);
> -             list_add(&tp_probes->u.list, &old_probes);
> +             cds_list_add(&tp_probes->u.list, &old_probes);
>       }
>  }
>  
> @@ -486,7 +486,7 @@ int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
>   */
>  void tracepoint_probe_update_all(void)
>  {
> -     LIST_HEAD(release_probes);
> +     CDS_LIST_HEAD(release_probes);
>       struct tp_probes *pos, *next;
>  
>       pthread_mutex_lock(&tracepoints_mutex);
> @@ -494,14 +494,14 @@ void tracepoint_probe_update_all(void)
>               pthread_mutex_unlock(&tracepoints_mutex);
>               return;
>       }
> -     if (!list_empty(&old_probes))
> -             list_replace_init(&old_probes, &release_probes);
> +     if (!cds_list_empty(&old_probes))
> +             cds_list_replace_init(&old_probes, &release_probes);
>       need_update = 0;
>       pthread_mutex_unlock(&tracepoints_mutex);
>  
>       tracepoint_update_probes();
> -     list_for_each_entry_safe(pos, next, &release_probes, u.list) {
> -             list_del(&pos->u.list);
> +     cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
> +             cds_list_del(&pos->u.list);
>  //ust//              call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
>               synchronize_rcu();
>               free(pos);
> @@ -519,7 +519,7 @@ int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
>       int found = 0;
>  
>  //ust//      pthread_mutex_lock(&module_mutex);
> -     list_for_each_entry(iter_lib, &libs, list) {
> +     cds_list_for_each_entry(iter_lib, &libs, list) {
>               if (iter_lib < iter->lib)
>                       continue;
>               else if (iter_lib > iter->lib)
> @@ -668,7 +668,7 @@ int tracepoint_register_lib(struct tracepoint *tracepoints_start, int tracepoint
>  
>       /* FIXME: maybe protect this with its own mutex? */
>       pthread_mutex_lock(&tracepoints_mutex);
> -     list_add(&pl->list, &libs);
> +     cds_list_add(&pl->list, &libs);
>       pthread_mutex_unlock(&tracepoints_mutex);
>  
>       new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
> @@ -687,10 +687,10 @@ int tracepoint_unregister_lib(struct tracepoint *tracepoints_start)
>  
>       pthread_mutex_lock(&tracepoints_mutex);
>  
> -     list_for_each_entry(lib, &libs, list) {
> +     cds_list_for_each_entry(lib, &libs, list) {
>               if(lib->tracepoints_start == tracepoints_start) {
>                       struct tracepoint_lib *lib2free = lib;
> -                     list_del(&lib->list);
> +                     cds_list_del(&lib->list);
>                       free(lib2free);
>                       break;
>               }
> diff --git a/libust/tracer.c b/libust/tracer.c
> index c9422c8..ecf403a 100644
> --- a/libust/tracer.c
> +++ b/libust/tracer.c
> @@ -181,7 +181,7 @@ static enum ltt_channels get_channel_type_from_name(const char *name)
>  //ust// 
>  //ust// }
>  
> -static LIST_HEAD(ltt_transport_list);
> +static CDS_LIST_HEAD(ltt_transport_list);
>  
>  /**
>   * ltt_transport_register - LTT transport registration
> @@ -205,7 +205,7 @@ void ltt_transport_register(struct ltt_transport *transport)
>  //ust//      vmalloc_sync_all();
>  
>       ltt_lock_traces();
> -     list_add_tail(&transport->node, &ltt_transport_list);
> +     cds_list_add_tail(&transport->node, &ltt_transport_list);
>       ltt_unlock_traces();
>  }
>  
> @@ -216,7 +216,7 @@ void ltt_transport_register(struct ltt_transport *transport)
>  void ltt_transport_unregister(struct ltt_transport *transport)
>  {
>       ltt_lock_traces();
> -     list_del(&transport->node);
> +     cds_list_del(&transport->node);
>       ltt_unlock_traces();
>  }
>  
> @@ -275,7 +275,7 @@ static void trace_async_wakeup(struct ust_trace *trace)
>  //ust// #else
>  //ust//      ltt_lock_traces();
>  //ust// #endif
> -//ust//      list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
> +//ust//      cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
>  //ust//              trace_async_wakeup(trace);
>  //ust//      }
>  //ust// #ifndef CONFIG_PREEMPT_RT
> @@ -297,7 +297,7 @@ struct ust_trace *_ltt_trace_find(const char *trace_name)
>  {
>       struct ust_trace *trace;
>  
> -     list_for_each_entry(trace, &ltt_traces.head, list)
> +     cds_list_for_each_entry(trace, &ltt_traces.head, list)
>               if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
>                       return trace;
>  
> @@ -313,7 +313,7 @@ struct ust_trace *_ltt_trace_find_setup(const char *trace_name)
>  {
>       struct ust_trace *trace;
>  
> -     list_for_each_entry(trace, &ltt_traces.setup_head, list)
> +     cds_list_for_each_entry(trace, &ltt_traces.setup_head, list)
>               if (!strncmp(trace->trace_name, trace_name, NAME_MAX))
>                       return trace;
>  
> @@ -417,7 +417,7 @@ int _ltt_trace_setup(const char *trace_name)
>                       chan_infos[chantype].def_subbufcount;
>       }
>  
> -     list_add(&new_trace->list, &ltt_traces.setup_head);
> +     cds_list_add(&new_trace->list, &ltt_traces.setup_head);
>       return 0;
>  
>  trace_free:
> @@ -439,7 +439,7 @@ int ltt_trace_setup(const char *trace_name)
>  /* must be called from within a traces lock. */
>  static void _ltt_trace_free(struct ust_trace *trace)
>  {
> -     list_del(&trace->list);
> +     cds_list_del(&trace->list);
>       free(trace);
>  }
>  
> @@ -458,7 +458,7 @@ int ltt_trace_set_type(const char *trace_name, const char *trace_type)
>               goto traces_error;
>       }
>  
> -     list_for_each_entry(tran_iter, &ltt_transport_list, node) {
> +     cds_list_for_each_entry(tran_iter, &ltt_transport_list, node) {
>               if (!strcmp(tran_iter->name, trace_type)) {
>                       transport = tran_iter;
>                       break;
> @@ -692,13 +692,13 @@ int ltt_trace_alloc(const char *trace_name)
>               }
>       }
>  
> -     list_del(&trace->list);
> -//ust//      if (list_empty(&ltt_traces.head)) {
> +     cds_list_del(&trace->list);
> +//ust//      if (cds_list_empty(&ltt_traces.head)) {
>  //ust//              mod_timer(&ltt_async_wakeup_timer,
>  //ust//                              jiffies + LTT_PERCPU_TIMER_INTERVAL);
>  //ust//              set_kernel_trace_flag_all_tasks();
>  //ust//      }
> -     list_add_rcu(&trace->list, &ltt_traces.head);
> +     cds_list_add_rcu(&trace->list, &ltt_traces.head);
>  //ust//      synchronize_sched();
>  
>       ltt_unlock_traces();
> @@ -762,9 +762,9 @@ static int _ltt_trace_destroy(struct ust_trace *trace)
>               goto active_error;
>       }
>       /* Everything went fine */
> -     list_del_rcu(&trace->list);
> +     cds_list_del_rcu(&trace->list);
>       synchronize_rcu();
> -     if (list_empty(&ltt_traces.head)) {
> +     if (cds_list_empty(&ltt_traces.head)) {
>  //ust//              clear_kernel_trace_flag_all_tasks();
>               /*
>                * We stop the asynchronous delivery of reader wakeup, but
> diff --git a/libust/tracer.h b/libust/tracer.h
> index c5df6ec..c316c9a 100644
> --- a/libust/tracer.h
> +++ b/libust/tracer.h
> @@ -94,7 +94,7 @@ struct chan_info_struct {
>  };
>  
>  struct ltt_active_marker {
> -     struct list_head node;          /* active markers list */
> +     struct cds_list_head node;              /* active markers list */
>       const char *channel;
>       const char *name;
>       const char *format;
> @@ -158,7 +158,7 @@ struct ltt_trace_ops {
>  struct ltt_transport {
>       char *name;
>       struct module *owner;
> -     struct list_head node;
> +     struct cds_list_head node;
>       struct ltt_trace_ops ops;
>  };
>  
> @@ -170,7 +170,7 @@ enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };
>  /* Per-trace information - each trace/flight recorder represented by one */
>  struct ust_trace {
>       /* First 32 bytes cache-hot cacheline */
> -     struct list_head list;
> +     struct cds_list_head list;
>       struct ltt_trace_ops *ops;
>       int active;
>       /* Second 32 bytes cache-hot cacheline */
> diff --git a/libust/tracercore.c b/libust/tracercore.c
> index bbc8691..1e418b6 100644
> --- a/libust/tracercore.c
> +++ b/libust/tracercore.c
> @@ -22,8 +22,8 @@
>  
>  /* Traces structures */
>  struct ltt_traces ltt_traces = {
> -     .setup_head = LIST_HEAD_INIT(ltt_traces.setup_head),
> -     .head = LIST_HEAD_INIT(ltt_traces.head),
> +     .setup_head = CDS_LIST_HEAD_INIT(ltt_traces.setup_head),
> +     .head = CDS_LIST_HEAD_INIT(ltt_traces.head),
>  };
>  
>  /* Traces list writer locking */
> diff --git a/libust/tracercore.h b/libust/tracercore.h
> index 5c396f4..9673cca 100644
> --- a/libust/tracercore.h
> +++ b/libust/tracercore.h
> @@ -32,8 +32,8 @@
>   * list.
>   */
>  struct ltt_traces {
> -     struct list_head setup_head;    /* Pre-allocated traces list */
> -     struct list_head head;          /* Allocated Traces list */
> +     struct cds_list_head setup_head;        /* Pre-allocated traces list */
> +     struct cds_list_head head;              /* Allocated Traces list */
>       unsigned int num_active_traces; /* Number of active traces */
>  } ____cacheline_aligned;
>  
> diff --git a/libust/type-serializer.c b/libust/type-serializer.c
> index bf1c496..2c278df 100644
> --- a/libust/type-serializer.c
> +++ b/libust/type-serializer.c
> @@ -43,7 +43,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
>       cpu = ust_get_cpu();
>  
>       /* Force volatile access. */
> -     STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
> +     CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
>  
>       /*
>        * asm volatile and "memory" clobber prevent the compiler from moving
> @@ -52,7 +52,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
>        * traps, divisions by 0, ...) are triggered within the incremented
>        * nesting count section.
>        */
> -     barrier();
> +     cmm_barrier();
>       eID = mdata->event_id;
>       chan_index = mdata->channel_id;
>  
> @@ -60,7 +60,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
>        * Iterate on each trace, typically small number of active traces,
>        * list iteration with prefetch is usually slower.
>        */
> -     list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
> +     cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
>               if (unlikely(!trace->active))
>                       continue;
>  //ust//              if (unlikely(!ltt_run_filter(trace, eID)))
> @@ -109,7 +109,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
>        * traps, divisions by 0, ...) are triggered within the incremented
>        * nesting count section.
>        */
> -     barrier();
> -     STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
> +     cmm_barrier();
> +     CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
>       rcu_read_unlock();
>  }
> diff --git a/libustcomm/ustcomm.c b/libustcomm/ustcomm.c
> index bbdbd7e..63eed5f 100644
> --- a/libustcomm/ustcomm.c
> +++ b/libustcomm/ustcomm.c
> @@ -113,7 +113,7 @@ static struct sockaddr_un * create_sock_addr(const char *name,
>  }
>  
>  struct ustcomm_sock * ustcomm_init_sock(int fd, int epoll_fd,
> -                                     struct list_head *list)
> +                                     struct cds_list_head *list)
>  {
>       struct epoll_event ev;
>       struct ustcomm_sock *sock;
> @@ -136,9 +136,9 @@ struct ustcomm_sock * ustcomm_init_sock(int fd, int epoll_fd,
>  
>       sock->epoll_fd = epoll_fd;
>       if (list) {
> -             list_add(&sock->list, list);
> +             cds_list_add(&sock->list, list);
>       } else {
> -             INIT_LIST_HEAD(&sock->list);
> +             CDS_INIT_LIST_HEAD(&sock->list);
>       }
>  
>       return sock;
> @@ -146,7 +146,7 @@ struct ustcomm_sock * ustcomm_init_sock(int fd, int epoll_fd,
>  
>  void ustcomm_del_sock(struct ustcomm_sock *sock, int keep_in_epoll)
>  {
> -     list_del(&sock->list);
> +     cds_list_del(&sock->list);
>       if (!keep_in_epoll) {
>               if (epoll_ctl(sock->epoll_fd, EPOLL_CTL_DEL, sock->fd, NULL) == -1) {
>                       PERROR("epoll_ctl: failed to delete socket");
> diff --git a/libustcomm/ustcomm.h b/libustcomm/ustcomm.h
> index ad4848a..689c151 100644
> --- a/libustcomm/ustcomm.h
> +++ b/libustcomm/ustcomm.h
> @@ -27,7 +27,7 @@
>  #define SOCK_DIR "/tmp/ust-app-socks"
>  
>  struct ustcomm_sock {
> -     struct list_head list;
> +     struct cds_list_head list;
>       int fd;
>       int epoll_fd;
>  };
> @@ -127,7 +127,7 @@ extern int ensure_dir_exists(const char *dir);
>  
>  /* Create and delete sockets */
>  extern struct ustcomm_sock * ustcomm_init_sock(int fd, int epoll_fd,
> -                                            struct list_head *list);
> +                                            struct cds_list_head *list);
>  extern void ustcomm_del_sock(struct ustcomm_sock *sock, int keep_in_epoll);
>  
>  /* Create and delete named sockets */
> diff --git a/libustd/libustd.c b/libustd/libustd.c
> index 1581f83..0dc6940 100644
> --- a/libustd/libustd.c
> +++ b/libustd/libustd.c
> @@ -863,7 +863,7 @@ static int init_ustd_socket(struct libustd_instance *instance)
>               goto close_epoll;
>       }
>  
> -     INIT_LIST_HEAD(&instance->connections);
> +     CDS_INIT_LIST_HEAD(&instance->connections);
>  
>       free(name);
>  
> -- 
> 1.7.3.2
> 
> 

-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

_______________________________________________
ltt-dev mailing list
[email protected]
http://lists.casi.polymtl.ca/cgi-bin/mailman/listinfo/ltt-dev
