totempg still needs locks to protect the top level APIs for those that 
want to punish themselves by using totem directly in a threaded 
application.  There are people doing this today and I don't want to 
disrupt their applications.  Ideally there would be two libraries built, 
libtotem_pg and libtotem_pg_mt (where corosync itself would link against 
libtotem_pg), to avoid the locking overhead introduced by libtotem_pg_mt.

Unfortunately we can't rely on lazy binding to solve the problem for us 
(i.e. letting the pthread lock calls resolve to no-op stubs in 
single-threaded programs), because logging pulls in the pthreads library.

Regards
-steve

On 11/08/2010 03:58 PM, Angus Salkeld wrote:
> Signed-off-by: Angus Salkeld <[email protected]>
> ---
>   exec/main.c                 |   17 -----------------
>   exec/objdb.c                |    9 ---------
>   exec/totempg.c              |   40 ----------------------------------------
>   include/corosync/cs_queue.h |   30 ------------------------------
>   4 files changed, 0 insertions(+), 96 deletions(-)
>
> diff --git a/exec/main.c b/exec/main.c
> index 30d8d0f..2c1a97d 100644
> --- a/exec/main.c
> +++ b/exec/main.c
> @@ -246,12 +246,10 @@ static struct totempg_group corosync_group = {
>
>   static void serialize_lock (void)
>   {
> -     qb_thread_lock (serialize_lock_f);
>   }
>
>   static void serialize_unlock (void)
>   {
> -     qb_thread_unlock (serialize_lock_f);
>   }
>
>   static void corosync_sync_completed (void)
> @@ -915,24 +913,9 @@ int corosync_sending_allowed (
>                       sending_allowed = QB_TRUE;
>               } else if (pd->reserved_msgs&&  sync_in_process == 0) {
>                       sending_allowed = QB_TRUE;
> -             } else {
> -                     log_printf(LOGSYS_LEVEL_NOTICE,
> -                                "no tx: (have quorum) (FC req) reserved:%d 
> sync:%d",
> -                                pd->reserved_msgs, sync_in_process);
>               }
> -     } else {
> -                     log_printf(LOGSYS_LEVEL_NOTICE, "no tx: not quorate!");
>       }
>
> -/*
> -     sending_allowed =
> -             (corosync_quorum_is_quorate() == 1 ||
> -             ais_service[service]->allow_inquorate == 
> CS_LIB_ALLOW_INQUORATE)&&
> -             ((ais_service[service]->lib_engine[id].flow_control == 
> CS_LIB_FLOW_CONTROL_NOT_REQUIRED) ||
> -             ((ais_service[service]->lib_engine[id].flow_control == 
> CS_LIB_FLOW_CONTROL_REQUIRED)&&
> -             (pd->reserved_msgs)&&
> -             (sync_in_process == 0)));
> -*/
>       return (sending_allowed);
>   }
>
> diff --git a/exec/objdb.c b/exec/objdb.c
> index d77c8d9..50ef566 100644
> --- a/exec/objdb.c
> +++ b/exec/objdb.c
> @@ -102,8 +102,6 @@ struct object_find_instance {
>
>   struct objdb_iface_ver0 objdb_iface;
>   struct list_head objdb_trackers_head;
> -static pthread_mutex_t objdb_mutex;
> -static pthread_mutexattr_t objdb_mutex_attr;
>
>   DECLARE_HDB_DATABASE (object_instance_database,NULL);
>
> @@ -111,12 +109,10 @@ DECLARE_HDB_DATABASE 
> (object_find_instance_database,NULL);
>
>   static void objdb_lock(void)
>   {
> -     pthread_mutex_lock(&objdb_mutex);
>   }
>
>   static void objdb_unlock(void)
>   {
> -     pthread_mutex_unlock(&objdb_mutex);
>   }
>
>   static int objdb_init (void)
> @@ -149,11 +145,6 @@ static int objdb_init (void)
>       list_init (&instance->track_head);
>       list_init (&objdb_trackers_head);
>
> -     pthread_mutexattr_init(&objdb_mutex_attr);
> -
> -     pthread_mutexattr_settype(&objdb_mutex_attr, PTHREAD_MUTEX_RECURSIVE);
> -     pthread_mutex_init(&objdb_mutex,&objdb_mutex_attr);
> -
>       hdb_handle_put (&object_instance_database, handle);
>       return (0);
>
> diff --git a/exec/totempg.c b/exec/totempg.c
> index cffb192..e83db9e 100644
> --- a/exec/totempg.c
> +++ b/exec/totempg.c
> @@ -247,12 +247,6 @@ DECLARE_HDB_DATABASE 
> (totempg_groups_instance_database,NULL);
>
>   static unsigned char next_fragment = 1;
>
> -static pthread_mutex_t totempg_mutex = PTHREAD_MUTEX_INITIALIZER;
> -
> -static pthread_mutex_t callback_token_mutex = PTHREAD_MUTEX_INITIALIZER;
> -
> -static pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER;
> -
>   #define log_printf(level, format, args...)                          \
>   do {                                                                        
> \
>           totempg_log_printf (                                                
> \
> @@ -668,13 +662,10 @@ int callback_token_received_fn (enum 
> totem_callback_token_type type,
>       struct iovec iovecs[3];
>       int res;
>
> -     pthread_mutex_lock (&mcast_msg_mutex);
>       if (mcast_packed_msg_count == 0) {
> -             pthread_mutex_unlock (&mcast_msg_mutex);
>               return (0);
>       }
>       if (totemmrp_avail() == 0) {
> -             pthread_mutex_unlock (&mcast_msg_mutex);
>               return (0);
>       }
>       mcast.header.version = 0;
> @@ -700,7 +691,6 @@ int callback_token_received_fn (enum 
> totem_callback_token_type type,
>       mcast_packed_msg_count = 0;
>       fragment_size = 0;
>
> -     pthread_mutex_unlock (&mcast_msg_mutex);
>       return (0);
>   }
>
> @@ -752,9 +742,7 @@ int totempg_initialize (
>
>   void totempg_finalize (void)
>   {
> -     pthread_mutex_lock (&totempg_mutex);
>       totemmrp_finalize ();
> -     pthread_mutex_unlock (&totempg_mutex);
>   }
>
>   /*
> @@ -776,7 +764,6 @@ static int mcast_msg (
>       int copy_base = 0;
>       int total_size = 0;
>
> -     pthread_mutex_lock (&mcast_msg_mutex);
>       totemmrp_event_signal (TOTEM_EVENT_NEW_MSG, 1);
>
>       /*
> @@ -806,7 +793,6 @@ static int mcast_msg (
>       if (byte_count_send_ok (total_size + sizeof(unsigned short) *
>               (mcast_packed_msg_count)) == 0) {
>
> -             pthread_mutex_unlock (&mcast_msg_mutex);
>               return(-1);
>       }
>
> @@ -924,7 +910,6 @@ static int mcast_msg (
>       }
>
>   error_exit:
> -     pthread_mutex_unlock (&mcast_msg_mutex);
>       return (res);
>   }
>
> @@ -979,19 +964,15 @@ int totempg_callback_token_create (
>       const void *data)
>   {
>       unsigned int res;
> -     pthread_mutex_lock (&callback_token_mutex);
>       res = totemmrp_callback_token_create (handle_out, type, delete,
>               callback_fn, data);
> -     pthread_mutex_unlock (&callback_token_mutex);
>       return (res);
>   }
>
>   void totempg_callback_token_destroy (
>       void *handle_out)
>   {
> -     pthread_mutex_lock (&callback_token_mutex);
>       totemmrp_callback_token_destroy (handle_out);
> -     pthread_mutex_unlock (&callback_token_mutex);
>   }
>
>   /*
> @@ -1017,7 +998,6 @@ int totempg_groups_initialize (
>       struct totempg_group_instance *instance;
>       unsigned int res;
>
> -     pthread_mutex_lock (&totempg_mutex);
>       res = hdb_handle_create (&totempg_groups_instance_database,
>               sizeof (struct totempg_group_instance), handle);
>       if (res != 0) {
> @@ -1043,13 +1023,11 @@ int totempg_groups_initialize (
>
>       hdb_handle_put (&totempg_groups_instance_database, *handle);
>
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (0);
>   error_destroy:
>       hdb_handle_destroy (&totempg_groups_instance_database, *handle);
>
>   error_exit:
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (-1);
>   }
>
> @@ -1062,7 +1040,6 @@ int totempg_groups_join (
>       struct totempg_group *new_groups;
>       unsigned int res;
>
> -     pthread_mutex_lock (&totempg_mutex);
>       res = hdb_handle_get (&totempg_groups_instance_database, handle,
>               (void *)&instance);
>       if (res != 0) {
> @@ -1084,7 +1061,6 @@ int totempg_groups_join (
>       hdb_handle_put (&totempg_groups_instance_database, handle);
>
>   error_exit:
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (res);
>   }
>
> @@ -1096,7 +1072,6 @@ int totempg_groups_leave (
>       struct totempg_group_instance *instance;
>       unsigned int res;
>
> -     pthread_mutex_lock (&totempg_mutex);
>       res = hdb_handle_get (&totempg_groups_instance_database, handle,
>               (void *)&instance);
>       if (res != 0) {
> @@ -1106,7 +1081,6 @@ int totempg_groups_leave (
>       hdb_handle_put (&totempg_groups_instance_database, handle);
>
>   error_exit:
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (res);
>   }
>
> @@ -1125,7 +1099,6 @@ int totempg_groups_mcast_joined (
>       int i;
>       unsigned int res;
>
> -     pthread_mutex_lock (&totempg_mutex);
>       res = hdb_handle_get (&totempg_groups_instance_database, handle,
>               (void *)&instance);
>       if (res != 0) {
> @@ -1152,7 +1125,6 @@ int totempg_groups_mcast_joined (
>       hdb_handle_put (&totempg_groups_instance_database, handle);
>
>   error_exit:
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (res);
>   }
>
> @@ -1203,8 +1175,6 @@ int totempg_groups_joined_reserve (
>       unsigned int res;
>       unsigned int reserved = 0;
>
> -     pthread_mutex_lock (&totempg_mutex);
> -     pthread_mutex_lock (&mcast_msg_mutex);
>       res = hdb_handle_get (&totempg_groups_instance_database, handle,
>               (void *)&instance);
>       if (res != 0) {
> @@ -1234,19 +1204,13 @@ error_put:
>       hdb_handle_put (&totempg_groups_instance_database, handle);
>
>   error_exit:
> -     pthread_mutex_unlock (&mcast_msg_mutex);
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (reserved);
>   }
>
>
>   int totempg_groups_joined_release (int msg_count)
>   {
> -     pthread_mutex_lock (&totempg_mutex);
> -     pthread_mutex_lock (&mcast_msg_mutex);
>       send_release (msg_count);
> -     pthread_mutex_unlock (&mcast_msg_mutex);
> -     pthread_mutex_unlock (&totempg_mutex);
>       return 0;
>   }
>
> @@ -1264,7 +1228,6 @@ int totempg_groups_mcast_groups (
>       int i;
>       unsigned int res;
>
> -     pthread_mutex_lock (&totempg_mutex);
>       res = hdb_handle_get (&totempg_groups_instance_database, handle,
>               (void *)&instance);
>       if (res != 0) {
> @@ -1292,7 +1255,6 @@ int totempg_groups_mcast_groups (
>       hdb_handle_put (&totempg_groups_instance_database, handle);
>
>   error_exit:
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (res);
>   }
>
> @@ -1311,7 +1273,6 @@ int totempg_groups_send_ok_groups (
>       unsigned int i;
>       unsigned int res;
>
> -     pthread_mutex_lock (&totempg_mutex);
>       res = hdb_handle_get (&totempg_groups_instance_database, handle,
>               (void *)&instance);
>       if (res != 0) {
> @@ -1329,7 +1290,6 @@ int totempg_groups_send_ok_groups (
>
>       hdb_handle_put (&totempg_groups_instance_database, handle);
>   error_exit:
> -     pthread_mutex_unlock (&totempg_mutex);
>       return (res);
>   }
>
> diff --git a/include/corosync/cs_queue.h b/include/corosync/cs_queue.h
> index 8fbe973..d13c0ea 100644
> --- a/include/corosync/cs_queue.h
> +++ b/include/corosync/cs_queue.h
> @@ -47,7 +47,6 @@ struct cs_queue {
>       void *items;
>       int size_per_item;
>       int iterator;
> -     pthread_mutex_t mutex;
>   };
>
>   static inline int cs_queue_init (struct cs_queue *cs_queue, int 
> cs_queue_items, int size_per_item) {
> @@ -63,43 +62,35 @@ static inline int cs_queue_init (struct cs_queue 
> *cs_queue, int cs_queue_items,
>               return (-ENOMEM);
>       }
>       memset (cs_queue->items, 0, cs_queue_items * size_per_item);
> -     pthread_mutex_init (&cs_queue->mutex, NULL);
>       return (0);
>   }
>
>   static inline int cs_queue_reinit (struct cs_queue *cs_queue)
>   {
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue->head = 0;
>       cs_queue->tail = cs_queue->size - 1;
>       cs_queue->used = 0;
>       cs_queue->usedhw = 0;
>
>       memset (cs_queue->items, 0, cs_queue->size * cs_queue->size_per_item);
> -     pthread_mutex_unlock (&cs_queue->mutex);
>       return (0);
>   }
>
>   static inline void cs_queue_free (struct cs_queue *cs_queue) {
> -     pthread_mutex_destroy (&cs_queue->mutex);
>       free (cs_queue->items);
>   }
>
>   static inline int cs_queue_is_full (struct cs_queue *cs_queue) {
>       int full;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       full = ((cs_queue->size - 1) == cs_queue->used);
> -     pthread_mutex_unlock (&cs_queue->mutex);
>       return (full);
>   }
>
>   static inline int cs_queue_is_empty (struct cs_queue *cs_queue) {
>       int empty;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       empty = (cs_queue->used == 0);
> -     pthread_mutex_unlock (&cs_queue->mutex);
>       return (empty);
>   }
>
> @@ -108,7 +99,6 @@ static inline void cs_queue_item_add (struct cs_queue 
> *cs_queue, void *item)
>       char *cs_queue_item;
>       int cs_queue_position;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue_position = cs_queue->head;
>       cs_queue_item = cs_queue->items;
>       cs_queue_item += cs_queue_position * cs_queue->size_per_item;
> @@ -121,7 +111,6 @@ static inline void cs_queue_item_add (struct cs_queue 
> *cs_queue, void *item)
>       if (cs_queue->used>  cs_queue->usedhw) {
>               cs_queue->usedhw = cs_queue->used;
>       }
> -     pthread_mutex_unlock (&cs_queue->mutex);
>   }
>
>   static inline void *cs_queue_item_get (struct cs_queue *cs_queue)
> @@ -129,42 +118,34 @@ static inline void *cs_queue_item_get (struct cs_queue 
> *cs_queue)
>       char *cs_queue_item;
>       int cs_queue_position;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue_position = (cs_queue->tail + 1) % cs_queue->size;
>       cs_queue_item = cs_queue->items;
>       cs_queue_item += cs_queue_position * cs_queue->size_per_item;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>       return ((void *)cs_queue_item);
>   }
>
>   static inline void cs_queue_item_remove (struct cs_queue *cs_queue) {
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue->tail = (cs_queue->tail + 1) % cs_queue->size;
>
>       assert (cs_queue->tail != cs_queue->head);
>
>       cs_queue->used--;
>       assert (cs_queue->used>= 0);
> -     pthread_mutex_unlock (&cs_queue->mutex);
>   }
>
>   static inline void cs_queue_items_remove (struct cs_queue *cs_queue, int 
> rel_count)
>   {
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue->tail = (cs_queue->tail + rel_count) % cs_queue->size;
>
>       assert (cs_queue->tail != cs_queue->head);
>
>       cs_queue->used -= rel_count;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>   }
>
>
>   static inline void cs_queue_item_iterator_init (struct cs_queue *cs_queue)
>   {
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue->iterator = (cs_queue->tail + 1) % cs_queue->size;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>   }
>
>   static inline void *cs_queue_item_iterator_get (struct cs_queue *cs_queue)
> @@ -172,15 +153,12 @@ static inline void *cs_queue_item_iterator_get (struct 
> cs_queue *cs_queue)
>       char *cs_queue_item;
>       int cs_queue_position;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue_position = (cs_queue->iterator) % cs_queue->size;
>       if (cs_queue->iterator == cs_queue->head) {
> -             pthread_mutex_unlock (&cs_queue->mutex);
>               return (0);
>       }
>       cs_queue_item = cs_queue->items;
>       cs_queue_item += cs_queue_position * cs_queue->size_per_item;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>       return ((void *)cs_queue_item);
>   }
>
> @@ -188,28 +166,22 @@ static inline int cs_queue_item_iterator_next (struct 
> cs_queue *cs_queue)
>   {
>       int next_res;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       cs_queue->iterator = (cs_queue->iterator + 1) % cs_queue->size;
>
>       next_res = cs_queue->iterator == cs_queue->head;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>       return (next_res);
>   }
>
>   static inline void cs_queue_avail (struct cs_queue *cs_queue, int *avail)
>   {
> -     pthread_mutex_lock (&cs_queue->mutex);
>       *avail = cs_queue->size - cs_queue->used - 2;
>       assert (*avail>= 0);
> -     pthread_mutex_unlock (&cs_queue->mutex);
>   }
>
>   static inline int cs_queue_used (struct cs_queue *cs_queue) {
>       int used;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       used = cs_queue->used;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>
>       return (used);
>   }
> @@ -217,9 +189,7 @@ static inline int cs_queue_used (struct cs_queue 
> *cs_queue) {
>   static inline int cs_queue_usedhw (struct cs_queue *cs_queue) {
>       int usedhw;
>
> -     pthread_mutex_lock (&cs_queue->mutex);
>       usedhw = cs_queue->usedhw;
> -     pthread_mutex_unlock (&cs_queue->mutex);
>
>       return (usedhw);
>   }

_______________________________________________
Openais mailing list
[email protected]
https://lists.linux-foundation.org/mailman/listinfo/openais

Reply via email to