From: Ripduman Sohan <[email protected]>

Spinlocking keeps the thread active, spinning until it acquires the
lock.  This gives slightly better performance in high-load situations
because the waiting thread is never automatically de-scheduled.
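
The behaviour is opt-in at build time via the new configure switch added
below:

    ./configure --enable-spinlock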

TODO: timeout-based lock acquisition would probably be more efficient.
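
A possible shape for that, sketched below (not part of this patch; the
helper name, spin count and 1 ms budget are illustrative, and it assumes
pthread_mutex_timedlock() from POSIX.1-2001 is available):

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    /* Hypothetical sketch: spin for a bounded number of attempts, then
     * fall back to a timed (and finally a plain blocking) acquisition. */
    static inline int mutex_lock_timed(pthread_mutex_t *mutex)
    {
        struct timespec deadline;
        int i, rc;

        /* Bounded spin phase: cheap when the holder releases quickly. */
        for (i = 0; i < 1000; i++) {
            if (pthread_mutex_trylock(mutex) == 0)
                return 0;
        }

        /* Still contended: block, but give up the CPU for at most ~1 ms. */
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 1000000;
        if (deadline.tv_nsec >= 1000000000L) {
            deadline.tv_sec += 1;
            deadline.tv_nsec -= 1000000000L;
        }
        rc = pthread_mutex_timedlock(mutex, &deadline);
        if (rc == ETIMEDOUT)
            rc = pthread_mutex_lock(mutex);   /* deadline passed; block normally */
        return rc;
    }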

Signed-off-by: Ripduman Sohan <[email protected]>
---
 config_static.h                         |    1 +
 configure.ac                            |   12 ++++
 daemon/cache.c                          |   13 ++--
 daemon/isasl.c                          |   16 +++---
 daemon/memcached.c                      |   96 +++++++++++++++---------------
 daemon/memcached.h                      |    4 +-
 daemon/thread.c                         |   44 +++++++-------
 daemon/topkeys.c                        |    5 +-
 daemon/topkeys.h                        |    4 +-
 engines/default_engine/assoc.c          |   14 ++--
 engines/default_engine/default_engine.c |   24 ++++----
 engines/default_engine/items.c          |   72 ++++++++++++------------
 engines/default_engine/slabs.c          |   16 +++---
 include/memcached/locking.h             |   37 ++++++++++++
 14 files changed, 205 insertions(+), 153 deletions(-)
 create mode 100644 include/memcached/locking.h

diff --git a/config_static.h b/config_static.h
index b568c9c..c784417 100644
--- a/config_static.h
+++ b/config_static.h
@@ -129,4 +129,5 @@ extern void initialize_sockets(void);
 #define SASL_ENABLED
 #endif
 
+#include "memcached/locking.h"
 #endif
diff --git a/configure.ac b/configure.ac
index 4dca521..5d7d2f1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -521,6 +521,18 @@ else
     AC_MSG_RESULT([yes])
 fi
 
+dnl spinning on lock acquisition
+AC_MSG_CHECKING([for spinlock configuration parameter])
+AC_ARG_ENABLE([spinlock],
+    [AS_HELP_STRING([--enable-spinlock],
+            [Continually spin on lock acquisition])])
+if test x"$enable_spinlock" = x"yes"; then
+    AC_DEFINE([SPIN_LOCK],1,[Set to zero if you want to disable spinning on lock acquisition])
+    AC_MSG_RESULT([yes])
+else
+    AC_MSG_RESULT([no])
+fi
+
 dnl Generate output files
 CPPFLAGS="-I\${top_srcdir}/include ${CPPFLAGS}"
 AC_CONFIG_FILES(Makefile config/Doxyfile config/Doxyfile-api)
diff --git a/daemon/cache.c b/daemon/cache.c
index 7a8dd2a..0f0e3c7 100644
--- a/daemon/cache.c
+++ b/daemon/cache.c
@@ -9,6 +9,7 @@
 #include <signal.h>
 #endif
 
+#include "config.h"
 #include "cache.h"
 
 #ifndef NDEBUG
@@ -82,7 +83,7 @@ void cache_destroy(cache_t *cache) {
 void* cache_alloc(cache_t *cache) {
     void *ret;
     void *object;
-    pthread_mutex_lock(&cache->mutex);
+    mutex_lock(&cache->mutex);
     if (cache->freecurr > 0) {
         ret = cache->ptr[--cache->freecurr];
         object = get_object(ret);
@@ -99,7 +100,7 @@ void* cache_alloc(cache_t *cache) {
             }
         }
     }
-    pthread_mutex_unlock(&cache->mutex);
+    mutex_unlock(&cache->mutex);
 
 #ifndef NDEBUG
     if (object != NULL) {
@@ -117,7 +118,7 @@ void* cache_alloc(cache_t *cache) {
 
 void cache_free(cache_t *cache, void *object) {
     void *ptr = object;
-    pthread_mutex_lock(&cache->mutex);
+    mutex_lock(&cache->mutex);
 
 #ifndef NDEBUG
     /* validate redzone... */
@@ -125,7 +126,7 @@ void cache_free(cache_t *cache, void *object) {
                &redzone_pattern, sizeof(redzone_pattern)) != 0) {
         raise(SIGABRT);
         cache_error = 1;
-        pthread_mutex_unlock(&cache->mutex);
+        mutex_unlock(&cache->mutex);
         return;
     }
     uint64_t *pre = ptr;
@@ -133,7 +134,7 @@ void cache_free(cache_t *cache, void *object) {
     if (*pre != redzone_pattern) {
         raise(SIGABRT);
         cache_error = -1;
-        pthread_mutex_unlock(&cache->mutex);
+        mutex_unlock(&cache->mutex);
         return;
     }
     ptr = pre;
@@ -159,6 +160,6 @@ void cache_free(cache_t *cache, void *object) {
             assert(!inFreeList(cache, ptr));
         }
     }
-    pthread_mutex_unlock(&cache->mutex);
+    mutex_unlock(&cache->mutex);
 }
 
diff --git a/daemon/isasl.c b/daemon/isasl.c
index bbc66d4..e8bd5f7 100644
--- a/daemon/isasl.c
+++ b/daemon/isasl.c
@@ -114,10 +114,10 @@ static int load_user_db(void)
         return SASL_NOMEM;
     }
 
-    pthread_mutex_lock(&uhash_lock);
+    mutex_lock(&uhash_lock);
     free_user_ht();
     user_ht = new_ut;
-    pthread_mutex_unlock(&uhash_lock);
+    mutex_unlock(&uhash_lock);
 
     const char *filename = get_isasl_filename();
     if (!filename) {
@@ -236,11 +236,11 @@ static void* check_isasl_db_thread(void* arg)
             load_user_db();
         }
 
-        pthread_mutex_lock(&sasl_db_thread_lock);
+        mutex_lock(&sasl_db_thread_lock);
         if (!run_sasl_db_thread) {
            run = false;
         }
-        pthread_mutex_unlock(&sasl_db_thread_lock);
+        mutex_unlock(&sasl_db_thread_lock);
     }
 
     return NULL;
@@ -248,9 +248,9 @@ static void* check_isasl_db_thread(void* arg)
 
 void shutdown_sasl(void)
 {
-   pthread_mutex_lock(&sasl_db_thread_lock);
+   mutex_lock(&sasl_db_thread_lock);
    run_sasl_db_thread = false;
-   pthread_mutex_unlock(&sasl_db_thread_lock);
+   mutex_unlock(&sasl_db_thread_lock);
    pthread_join(sasl_db_thread_tid, NULL);
 }
 
@@ -323,10 +323,10 @@ int sasl_listmech(sasl_conn_t *conn,
 
 static bool check_up(const char *username, const char *password, char **cfg)
 {
-    pthread_mutex_lock(&uhash_lock);
+    mutex_lock(&uhash_lock);
     char *pw = find_pw(username, cfg);
     bool rv = pw && (strcmp(password, pw) == 0);
-    pthread_mutex_unlock(&uhash_lock);
+    mutex_unlock(&uhash_lock);
     return rv;
 }
 
diff --git a/daemon/memcached.c b/daemon/memcached.c
index a2b1519..e19e324 100644
--- a/daemon/memcached.c
+++ b/daemon/memcached.c
@@ -56,9 +56,9 @@ static inline void item_set_cas(const void *cookie, item *it, uint64_t cas) {
     struct thread_stats *thread_stats = \
         &independent_stats->thread_stats[conn->thread->index]; \
     topkeys_t *topkeys = independent_stats->topkeys; \
-    pthread_mutex_lock(&thread_stats->mutex); \
+    mutex_lock(&thread_stats->mutex); \
     GUTS(conn, thread_stats, slab_op, thread_op); \
-    pthread_mutex_unlock(&thread_stats->mutex); \
+    mutex_unlock(&thread_stats->mutex); \
     TK(topkeys, slab_op, key, nkey, current_time); \
 }
 
@@ -83,26 +83,26 @@ static inline void item_set_cas(const void *cookie, item *it, uint64_t cas) {
 #define STATS_NOKEY(conn, op) { \
     struct thread_stats *thread_stats = \
         get_thread_stats(conn); \
-    pthread_mutex_lock(&thread_stats->mutex); \
+    mutex_lock(&thread_stats->mutex); \
     thread_stats->op++; \
-    pthread_mutex_unlock(&thread_stats->mutex); \
+    mutex_unlock(&thread_stats->mutex); \
 }
 
 #define STATS_NOKEY2(conn, op1, op2) { \
     struct thread_stats *thread_stats = \
         get_thread_stats(conn); \
-    pthread_mutex_lock(&thread_stats->mutex); \
+    mutex_lock(&thread_stats->mutex); \
     thread_stats->op1++; \
     thread_stats->op2++; \
-    pthread_mutex_unlock(&thread_stats->mutex); \
+    mutex_unlock(&thread_stats->mutex); \
 }
 
 #define STATS_ADD(conn, op, amt) { \
     struct thread_stats *thread_stats = \
         get_thread_stats(conn); \
-    pthread_mutex_lock(&thread_stats->mutex); \
+    mutex_lock(&thread_stats->mutex); \
     thread_stats->op += amt; \
-    pthread_mutex_unlock(&thread_stats->mutex); \
+    mutex_unlock(&thread_stats->mutex); \
 }
 
 volatile sig_atomic_t memcached_shutdown;
@@ -345,26 +345,26 @@ struct {
 
 static bool is_listen_disabled(void) {
     bool ret;
-    pthread_mutex_lock(&listen_state.mutex);
+    mutex_lock(&listen_state.mutex);
     ret = listen_state.disabled;
-    pthread_mutex_unlock(&listen_state.mutex);
+    mutex_unlock(&listen_state.mutex);
     return ret;
 }
 
 static uint64_t get_listen_disabled_num(void) {
     uint64_t ret;
-    pthread_mutex_lock(&listen_state.mutex);
+    mutex_lock(&listen_state.mutex);
     ret = listen_state.num_disable;
-    pthread_mutex_unlock(&listen_state.mutex);
+    mutex_unlock(&listen_state.mutex);
     return ret;
 }
 
 static void disable_listen(void) {
-    pthread_mutex_lock(&listen_state.mutex);
+    mutex_lock(&listen_state.mutex);
     listen_state.disabled = true;
     listen_state.count = 10;
     ++listen_state.num_disable;
-    pthread_mutex_unlock(&listen_state.mutex);
+    mutex_unlock(&listen_state.mutex);
 
     conn *next;
     for (next = listen_conn; next; next = next->next) {
@@ -2423,20 +2423,20 @@ static void ship_tap_log(conn *c) {
             if (event == TAP_CHECKPOINT_START) {
                 msg.mutation.message.header.request.opcode =
                     PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START;
-                pthread_mutex_lock(&tap_stats.mutex);
+                mutex_lock(&tap_stats.mutex);
                 tap_stats.sent.checkpoint_start++;
-                pthread_mutex_unlock(&tap_stats.mutex);
+                mutex_unlock(&tap_stats.mutex);
             } else if (event == TAP_CHECKPOINT_END) {
                 msg.mutation.message.header.request.opcode =
                     PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END;
-                pthread_mutex_lock(&tap_stats.mutex);
+                mutex_lock(&tap_stats.mutex);
                 tap_stats.sent.checkpoint_end++;
-                pthread_mutex_unlock(&tap_stats.mutex);
+                mutex_unlock(&tap_stats.mutex);
             } else if (event == TAP_MUTATION) {
                 msg.mutation.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_MUTATION;
-                pthread_mutex_lock(&tap_stats.mutex);
+                mutex_lock(&tap_stats.mutex);
                 tap_stats.sent.mutation++;
-                pthread_mutex_unlock(&tap_stats.mutex);
+                mutex_unlock(&tap_stats.mutex);
             }
 
             msg.mutation.message.header.request.cas = htonll(info.cas);
@@ -2509,9 +2509,9 @@ static void ship_tap_log(conn *c) {
                 add_iov(c, info.value[0].iov_base, info.value[0].iov_len);
             }
 
-            pthread_mutex_lock(&tap_stats.mutex);
+            mutex_lock(&tap_stats.mutex);
             tap_stats.sent.delete++;
-            pthread_mutex_unlock(&tap_stats.mutex);
+            mutex_unlock(&tap_stats.mutex);
             break;
 
         case TAP_DISCONNECT:
@@ -2525,21 +2525,21 @@ static void ship_tap_log(conn *c) {
 
             if (event == TAP_OPAQUE) {
                 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_OPAQUE;
-                pthread_mutex_lock(&tap_stats.mutex);
+                mutex_lock(&tap_stats.mutex);
                 tap_stats.sent.opaque++;
-                pthread_mutex_unlock(&tap_stats.mutex);
+                mutex_unlock(&tap_stats.mutex);
 
             } else if (event == TAP_FLUSH) {
                 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_FLUSH;
-                pthread_mutex_lock(&tap_stats.mutex);
+                mutex_lock(&tap_stats.mutex);
                 tap_stats.sent.flush++;
-                pthread_mutex_unlock(&tap_stats.mutex);
+                mutex_unlock(&tap_stats.mutex);
             } else if (event == TAP_VBUCKET_SET) {
                 msg.flush.message.header.request.opcode = PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET;
                 msg.flush.message.body.tap.flags = htons(tap_flags);
-                pthread_mutex_lock(&tap_stats.mutex);
+                mutex_lock(&tap_stats.mutex);
                 tap_stats.sent.vbucket_set++;
-                pthread_mutex_unlock(&tap_stats.mutex);
+                mutex_unlock(&tap_stats.mutex);
             }
 
             msg.flush.message.header.request.bodylen = htonl(8 + nengine);
@@ -2818,51 +2818,51 @@ static void process_bin_packet(conn *c) {
     /* @todo this should be an array of funciton pointers and call through */
     switch (c->binary_header.request.opcode) {
     case PROTOCOL_BINARY_CMD_TAP_CONNECT:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.connect++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         conn_set_state(c, conn_add_tap_client);
         break;
     case PROTOCOL_BINARY_CMD_TAP_MUTATION:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.mutation++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_MUTATION, c);
         break;
     case PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_START:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.checkpoint_start++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_CHECKPOINT_START, c);
         break;
     case PROTOCOL_BINARY_CMD_TAP_CHECKPOINT_END:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.checkpoint_end++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_CHECKPOINT_END, c);
         break;
     case PROTOCOL_BINARY_CMD_TAP_DELETE:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.delete++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_DELETION, c);
         break;
     case PROTOCOL_BINARY_CMD_TAP_FLUSH:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.flush++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_FLUSH, c);
         break;
     case PROTOCOL_BINARY_CMD_TAP_OPAQUE:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.opaque++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_OPAQUE, c);
         break;
     case PROTOCOL_BINARY_CMD_TAP_VBUCKET_SET:
-        pthread_mutex_lock(&tap_stats.mutex);
+        mutex_lock(&tap_stats.mutex);
         tap_stats.received.vbucket_set++;
-        pthread_mutex_unlock(&tap_stats.mutex);
+        mutex_unlock(&tap_stats.mutex);
         process_bin_tap_packet(TAP_VBUCKET_SET, c);
         break;
     case PROTOCOL_BINARY_CMD_VERBOSITY:
@@ -3756,9 +3756,9 @@ static void server_stats(ADD_STAT add_stats, conn *c, bool aggregate) {
      * Add tap stats (only if non-zero)
      */
     struct tap_stats ts;
-    pthread_mutex_lock(&tap_stats.mutex);
+    mutex_lock(&tap_stats.mutex);
     ts = tap_stats;
-    pthread_mutex_unlock(&tap_stats.mutex);
+    mutex_unlock(&tap_stats.mutex);
 
     if (ts.sent.connect) {
         APPEND_STAT("tap_connect_sent", "%"PRIu64, ts.sent.connect);
@@ -5653,13 +5653,13 @@ static void dispatch_event_handler(int fd, short which, void *arg) {
 
     if (nr != -1 && is_listen_disabled()) {
         bool enable = false;
-        pthread_mutex_lock(&listen_state.mutex);
+        mutex_lock(&listen_state.mutex);
         listen_state.count -= nr;
         if (listen_state.count <= 0) {
             enable = true;
             listen_state.disabled = false;
         }
-        pthread_mutex_unlock(&listen_state.mutex);
+        mutex_unlock(&listen_state.mutex);
         if (enable) {
             conn *next;
             for (next = listen_conn; next; next = next->next) {
diff --git a/daemon/memcached.h b/daemon/memcached.h
index b592b7b..c638864 100644
--- a/daemon/memcached.h
+++ b/daemon/memcached.h
@@ -246,7 +246,7 @@ typedef struct {
 } LIBEVENT_THREAD;
 
 #define LOCK_THREAD(t)                          \
-    if (pthread_mutex_lock(&t->mutex) != 0) {   \
+    if (mutex_lock(&t->mutex) != 0) {   \
         abort();                                \
     }                                           \
     assert(t->is_locked == false);              \
@@ -255,7 +255,7 @@ typedef struct {
 #define UNLOCK_THREAD(t)                         \
     assert(t->is_locked == true);                \
     t->is_locked = false;                        \
-    if (pthread_mutex_unlock(&t->mutex) != 0) {  \
+    if (mutex_unlock(&t->mutex) != 0) {  \
         abort();                                 \
     }
 
diff --git a/daemon/thread.c b/daemon/thread.c
index 3c30520..7fc3a33 100644
--- a/daemon/thread.c
+++ b/daemon/thread.c
@@ -90,14 +90,14 @@ static void cq_init(CQ *cq) {
 static CQ_ITEM *cq_pop(CQ *cq) {
     CQ_ITEM *item;
 
-    pthread_mutex_lock(&cq->lock);
+    mutex_lock(&cq->lock);
     item = cq->head;
     if (NULL != item) {
         cq->head = item->next;
         if (NULL == cq->head)
             cq->tail = NULL;
     }
-    pthread_mutex_unlock(&cq->lock);
+    mutex_unlock(&cq->lock);
 
     return item;
 }
@@ -108,14 +108,14 @@ static CQ_ITEM *cq_pop(CQ *cq) {
 static void cq_push(CQ *cq, CQ_ITEM *item) {
     item->next = NULL;
 
-    pthread_mutex_lock(&cq->lock);
+    mutex_lock(&cq->lock);
     if (NULL == cq->tail)
         cq->head = item;
     else
         cq->tail->next = item;
     cq->tail = item;
     pthread_cond_signal(&cq->cond);
-    pthread_mutex_unlock(&cq->lock);
+    mutex_unlock(&cq->lock);
 }
 
 /*
@@ -123,12 +123,12 @@ static void cq_push(CQ *cq, CQ_ITEM *item) {
  */
 static CQ_ITEM *cqi_new(void) {
     CQ_ITEM *item = NULL;
-    pthread_mutex_lock(&cqi_freelist_lock);
+    mutex_lock(&cqi_freelist_lock);
     if (cqi_freelist) {
         item = cqi_freelist;
         cqi_freelist = item->next;
     }
-    pthread_mutex_unlock(&cqi_freelist_lock);
+    mutex_unlock(&cqi_freelist_lock);
 
     if (NULL == item) {
         int i;
@@ -146,10 +146,10 @@ static CQ_ITEM *cqi_new(void) {
         for (i = 2; i < ITEMS_PER_ALLOC; i++)
             item[i - 1].next = &item[i];
 
-        pthread_mutex_lock(&cqi_freelist_lock);
+        mutex_lock(&cqi_freelist_lock);
         item[ITEMS_PER_ALLOC - 1].next = cqi_freelist;
         cqi_freelist = &item[1];
-        pthread_mutex_unlock(&cqi_freelist_lock);
+        mutex_unlock(&cqi_freelist_lock);
     }
 
     return item;
@@ -160,10 +160,10 @@ static CQ_ITEM *cqi_new(void) {
  * Frees a connection queue item (adds it to the freelist.)
  */
 static void cqi_free(CQ_ITEM *item) {
-    pthread_mutex_lock(&cqi_freelist_lock);
+    mutex_lock(&cqi_freelist_lock);
     item->next = cqi_freelist;
     cqi_freelist = item;
-    pthread_mutex_unlock(&cqi_freelist_lock);
+    mutex_unlock(&cqi_freelist_lock);
 }
 
 
@@ -296,10 +296,10 @@ static void *worker_libevent(void *arg) {
      * all threads have finished initializing.
      */
 
-    pthread_mutex_lock(&init_lock);
+    mutex_lock(&init_lock);
     init_count++;
     pthread_cond_signal(&init_cond);
-    pthread_mutex_unlock(&init_lock);
+    mutex_unlock(&init_lock);
 
     event_base_loop(me->base, 0);
     return NULL;
@@ -361,10 +361,10 @@ static void thread_libevent_process(int fd, short which, void *arg) {
         cqi_free(item);
     }
 
-    pthread_mutex_lock(&me->mutex);
+    mutex_lock(&me->mutex);
     conn* pending = me->pending_io;
     me->pending_io = NULL;
-    pthread_mutex_unlock(&me->mutex);
+    mutex_unlock(&me->mutex);
     while (pending != NULL) {
         conn *c = pending;
         assert(me == c->thread);
@@ -729,11 +729,11 @@ void notify_dispatcher(void) {
 /******************************* GLOBAL STATS ******************************/
 
 void STATS_LOCK() {
-    pthread_mutex_lock(&stats_lock);
+    mutex_lock(&stats_lock);
 }
 
 void STATS_UNLOCK() {
-    pthread_mutex_unlock(&stats_lock);
+    mutex_unlock(&stats_lock);
 }
 
 void threadlocal_stats_clear(struct thread_stats *stats) {
@@ -759,16 +759,16 @@ void threadlocal_stats_clear(struct thread_stats *stats) {
 void threadlocal_stats_reset(struct thread_stats *thread_stats) {
     int ii;
     for (ii = 0; ii < settings.num_threads; ++ii) {
-        pthread_mutex_lock(&thread_stats[ii].mutex);
+        mutex_lock(&thread_stats[ii].mutex);
         threadlocal_stats_clear(&thread_stats[ii]);
-        pthread_mutex_unlock(&thread_stats[ii].mutex);
+        mutex_unlock(&thread_stats[ii].mutex);
     }
 }
 
 void threadlocal_stats_aggregate(struct thread_stats *thread_stats, struct thread_stats *stats) {
     int ii, sid;
     for (ii = 0; ii < settings.num_threads; ++ii) {
-        pthread_mutex_lock(&thread_stats[ii].mutex);
+        mutex_lock(&thread_stats[ii].mutex);
 
         stats->cmd_get += thread_stats[ii].cmd_get;
         stats->get_misses += thread_stats[ii].get_misses;
@@ -798,7 +798,7 @@ void threadlocal_stats_aggregate(struct thread_stats *thread_stats, struct threa
                 thread_stats[ii].slab_stats[sid].cas_badval;
         }
 
-        pthread_mutex_unlock(&thread_stats[ii].mutex);
+        mutex_unlock(&thread_stats[ii].mutex);
     }
 }
 
@@ -871,11 +871,11 @@ void thread_init(int nthr, struct event_base *main_base,
     tap_thread = &threads[nthreads - 1];
 
     /* Wait for all the threads to set themselves up before returning. */
-    pthread_mutex_lock(&init_lock);
+    mutex_lock(&init_lock);
     while (init_count < nthreads) {
         pthread_cond_wait(&init_cond, &init_lock);
     }
-    pthread_mutex_unlock(&init_lock);
+    mutex_unlock(&init_lock);
 }
 
 void threads_shutdown(void)
diff --git a/daemon/topkeys.c b/daemon/topkeys.c
index 3dc3e1d..b988ca3 100644
--- a/daemon/topkeys.c
+++ b/daemon/topkeys.c
@@ -6,6 +6,7 @@
 #include <pthread.h>
 #include <memcached/genhash.h>
 #include "topkeys.h"
+#include "config.h"
 
 static topkey_item_t *topkey_item_init(const void *key, int nkey, rel_time_t ctime) {
     topkey_item_t *item = calloc(sizeof(topkey_item_t) + nkey, 1);
@@ -173,8 +174,8 @@ ENGINE_ERROR_CODE topkeys_stats(topkeys_t *tk,
     context.add_stat = add_stat;
     context.current_time = current_time;
     assert(tk);
-    pthread_mutex_lock(&tk->mutex);
+    mutex_lock(&tk->mutex);
     dlist_iter(&tk->list, tk_iterfunc, &context);
-    pthread_mutex_unlock(&tk->mutex);
+    mutex_unlock(&tk->mutex);
     return ENGINE_SUCCESS;
 }
diff --git a/daemon/topkeys.h b/daemon/topkeys.h
index 35102ca..1e4eb64 100644
--- a/daemon/topkeys.h
+++ b/daemon/topkeys.h
@@ -17,11 +17,11 @@
     if (tk) { \
         assert(key); \
         assert(nkey > 0); \
-        pthread_mutex_lock(&tk->mutex); \
+        mutex_lock(&tk->mutex); \
         topkey_item_t *tmp = topkeys_item_get_or_create( \
             (tk), (key), (nkey), (ctime)); \
         tmp->op++; \
-        pthread_mutex_unlock(&tk->mutex); \
+        mutex_unlock(&tk->mutex); \
     } \
 }
 
diff --git a/engines/default_engine/assoc.c b/engines/default_engine/assoc.c
index f1cf9f5..a6a93b3 100644
--- a/engines/default_engine/assoc.c
+++ b/engines/default_engine/assoc.c
@@ -64,7 +64,7 @@ static inline struct assoc_bucket *get_assoc_bucket_ref(struct default_engine *e
     unsigned int oldbucket;
     struct assoc_bucket *it;
 
-    pthread_rwlock_rdlock(&engine->cache_lock);
+    rwlock_rdlock(&engine->cache_lock);
 
     if (engine->assoc.expanding &&
         (oldbucket = (hash & hashmask(engine->assoc.hashpower - 1))) >= engine->assoc.expand_bucket)
@@ -75,17 +75,17 @@ static inline struct assoc_bucket *get_assoc_bucket_ref(struct default_engine *e
     }
 
     if (wr_lock)
-        pthread_rwlock_wrlock(&it->lock);
+        rwlock_wrlock(&it->lock);
     else
-        pthread_rwlock_rdlock(&it->lock);
+        rwlock_rdlock(&it->lock);
 
     return it;
 }
 
 static inline void put_assoc_bucket_ref(struct default_engine *engine, struct assoc_bucket *bucket)
 {
-    pthread_rwlock_unlock(&bucket->lock);
-    pthread_rwlock_unlock(&engine->cache_lock);
+    rwlock_unlock(&bucket->lock);
+    rwlock_unlock(&engine->cache_lock);
 }
 
 hash_item *assoc_find(struct default_engine *engine, uint32_t hash, const char *key, const size_t nkey) {
@@ -185,7 +185,7 @@ int assoc_insert(struct default_engine *engine, hash_item *it) {
 
     if (! engine->assoc.no_expansion && ! engine->assoc.expanding &&
         engine->assoc.hash_items > (hashsize(engine->assoc.hashpower) * 3) / 2) {
-        pthread_rwlock_wrlock(&engine->cache_lock);
+        rwlock_wrlock(&engine->cache_lock);
         if (! engine->assoc.no_expansion && ! engine->assoc.expanding) /* re-check expansion condition after lock acquisition */
             assoc_expand(engine);
         pthread_rwlock_unlock(&engine->cache_lock);
@@ -224,7 +224,7 @@ static void *assoc_maintenance_thread(void *arg) {
     bool done = false;
     do {
         int ii;
-        pthread_rwlock_wrlock(&engine->cache_lock);
+        rwlock_wrlock(&engine->cache_lock);
 
         for (ii = 0; ii < hash_bulk_move && engine->assoc.expanding; ++ii) {
             hash_item *it, *next;
diff --git a/engines/default_engine/default_engine.c b/engines/default_engine/default_engine.c
index 3588e3b..b6e9c68 100644
--- a/engines/default_engine/default_engine.c
+++ b/engines/default_engine/default_engine.c
@@ -405,7 +405,7 @@ static ENGINE_ERROR_CODE default_get_stats(ENGINE_HANDLE* handle,
       char val[128];
       int len;
 
-      pthread_mutex_lock(&engine->stats.lock);
+      mutex_lock(&engine->stats.lock);
       len = sprintf(val, "%"PRIu64, (uint64_t)engine->stats.evictions);
       add_stat("evictions", 9, val, len, cookie);
       len = sprintf(val, "%"PRIu64, (uint64_t)engine->stats.curr_items);
@@ -418,7 +418,7 @@ static ENGINE_ERROR_CODE default_get_stats(ENGINE_HANDLE* handle,
       add_stat("reclaimed", 9, val, len, cookie);
       len = sprintf(val, "%"PRIu64, (uint64_t)engine->config.maxbytes);
       add_stat("engine_maxbytes", 15, val, len, cookie);
-      pthread_mutex_unlock(&engine->stats.lock);
+      mutex_unlock(&engine->stats.lock);
    } else if (strncmp(stat_key, "slabs", 5) == 0) {
       slabs_stats(engine, add_stat, cookie);
    } else if (strncmp(stat_key, "items", 5) == 0) {
@@ -431,7 +431,7 @@ static ENGINE_ERROR_CODE default_get_stats(ENGINE_HANDLE* handle,
       char val[128];
       int len;
 
-      pthread_mutex_lock(&engine->scrubber.lock);
+      mutex_lock(&engine->scrubber.lock);
       if (engine->scrubber.running) {
          add_stat("scrubber:status", 15, "running", 7, cookie);
       } else {
@@ -450,7 +450,7 @@ static ENGINE_ERROR_CODE default_get_stats(ENGINE_HANDLE* handle,
          len = sprintf(val, "%"PRIu64, engine->scrubber.cleaned);
          add_stat("scrubber:cleaned", 16, val, len, cookie);
       }
-      pthread_mutex_unlock(&engine->scrubber.lock);
+      mutex_unlock(&engine->scrubber.lock);
    } else {
       ret = ENGINE_KEY_ENOENT;
    }
@@ -501,11 +501,11 @@ static void default_reset_stats(ENGINE_HANDLE* handle, const void *cookie) {
    struct default_engine *engine = get_handle(handle);
    item_stats_reset(engine);
 
-   pthread_mutex_lock(&engine->stats.lock);
+   mutex_lock(&engine->stats.lock);
    engine->stats.evictions = 0;
    engine->stats.reclaimed = 0;
    engine->stats.total_items = 0;
-   pthread_mutex_unlock(&engine->stats.lock);
+   mutex_unlock(&engine->stats.lock);
 }
 
 static ENGINE_ERROR_CODE initalize_configuration(struct default_engine *se,
@@ -861,7 +861,7 @@ static TAP_ITERATOR default_get_tap_iterator(ENGINE_HANDLE* handle,
         return NULL;
     }
 
-    pthread_mutex_lock(&engine->tap_connections.lock);
+    mutex_lock(&engine->tap_connections.lock);
     int ii;
     for (ii = 0; ii < engine->tap_connections.size; ++ii) {
         if (engine->tap_connections.clients[ii] == NULL) {
@@ -869,7 +869,7 @@ static TAP_ITERATOR default_get_tap_iterator(ENGINE_HANDLE* handle,
             break;
         }
     }
-    pthread_mutex_unlock(&engine->tap_connections.lock);
+    mutex_unlock(&engine->tap_connections.lock);
     if (ii == engine->tap_connections.size) {
         // @todo allow more connections :)
         return NULL;
@@ -877,9 +877,9 @@ static TAP_ITERATOR default_get_tap_iterator(ENGINE_HANDLE* handle,
 
     if (!initialize_item_tap_walker(engine, cookie)) {
         /* Failed to create */
-        pthread_mutex_lock(&engine->tap_connections.lock);
+        mutex_lock(&engine->tap_connections.lock);
         engine->tap_connections.clients[ii] = NULL;
-        pthread_mutex_unlock(&engine->tap_connections.lock);
+        mutex_unlock(&engine->tap_connections.lock);
         return NULL;
     }
 
@@ -891,7 +891,7 @@ static void default_handle_disconnect(const void *cookie,
                                       const void *event_data,
                                       const void *cb_data) {
     struct default_engine *engine = (struct default_engine*)cb_data;
-    pthread_mutex_lock(&engine->tap_connections.lock);
+    mutex_lock(&engine->tap_connections.lock);
     int ii;
     for (ii = 0; ii < engine->tap_connections.size; ++ii) {
         if (engine->tap_connections.clients[ii] == cookie) {
@@ -899,5 +899,5 @@ static void default_handle_disconnect(const void *cookie,
             break;
         }
     }
-    pthread_mutex_unlock(&engine->tap_connections.lock);
+    mutex_unlock(&engine->tap_connections.lock);
 }
diff --git a/engines/default_engine/items.c b/engines/default_engine/items.c
index befcc43..0aacf4a 100644
--- a/engines/default_engine/items.c
+++ b/engines/default_engine/items.c
@@ -32,8 +32,8 @@ static void acquire_key_operation_mutex(struct default_engine *engine, uint32_t
 static void release_key_operation_mutex(struct default_engine *engine, uint32_t hash);
 
 /* Macros to clarify the items datastructure LRU list acquisition and release  */
-#define lock_lru_list(engine, listid) pthread_mutex_lock(&engine->items.lock[listid])
-#define unlock_lru_list(engine, listid) pthread_mutex_unlock(&engine->items.lock[listid])
+#define lock_lru_list(engine, listid) mutex_lock(&engine->items.lock[listid])
+#define unlock_lru_list(engine, listid) mutex_unlock(&engine->items.lock[listid])
 /*
  * We only reposition items in the LRU queue if they haven't been repositioned
  * in this many seconds. That saves us from churning on frequently-accessed
@@ -47,9 +47,9 @@ static void release_key_operation_mutex(struct default_engine *engine, uint32_t
 static const int search_items = 50;
 
 void item_stats_reset(struct default_engine *engine) {
-    pthread_mutex_lock(&engine->items.itemstats_lock);
+    mutex_lock(&engine->items.itemstats_lock);
     memset(engine->items.itemstats, 0, sizeof(engine->items.itemstats));
-    pthread_mutex_unlock(&engine->items.itemstats_lock);
+    mutex_unlock(&engine->items.itemstats_lock);
 }
 
 
@@ -118,13 +118,13 @@ hash_item *do_item_alloc(struct default_engine *engine,
             /* I don't want to actually free the object, just steal
              * the item to avoid to grab the slab mutex twice ;-)
              */
-            pthread_mutex_lock(&engine->stats.lock);
+            mutex_lock(&engine->stats.lock);
             engine->stats.reclaimed++;
-            pthread_mutex_unlock(&engine->stats.lock);
+            mutex_unlock(&engine->stats.lock);
 
-            pthread_mutex_lock(&engine->items.itemstats_lock);
+            mutex_lock(&engine->items.itemstats_lock);
             engine->items.itemstats[id].reclaimed++;
-            pthread_mutex_unlock(&engine->items.itemstats_lock);
+            mutex_unlock(&engine->items.itemstats_lock);
 
             it->refcount = 1;
             slabs_adjust_mem_requested(engine, it->slabs_clsid, ITEM_ntotal(engine, it), ntotal);
@@ -149,9 +149,9 @@ hash_item *do_item_alloc(struct default_engine *engine,
          */
 
         if (engine->config.evict_to_free == 0) {
-            pthread_mutex_lock(&engine->items.itemstats_lock);
+            mutex_lock(&engine->items.itemstats_lock);
             engine->items.itemstats[id].outofmemory++;
-            pthread_mutex_unlock(&engine->items.itemstats_lock);
+            mutex_unlock(&engine->items.itemstats_lock);
             return NULL;
         }
 
@@ -163,9 +163,9 @@ hash_item *do_item_alloc(struct default_engine *engine,
          */
 
         if (engine->items.tails[id] == 0) {
-            pthread_mutex_lock(&engine->items.itemstats_lock);
+            mutex_lock(&engine->items.itemstats_lock);
             engine->items.itemstats[id].outofmemory++;
-            pthread_mutex_unlock(&engine->items.itemstats_lock);
+            mutex_unlock(&engine->items.itemstats_lock);
             return NULL;
         }
 
@@ -173,27 +173,27 @@ hash_item *do_item_alloc(struct default_engine *engine,
         for (search = engine->items.tails[id]; tries > 0 && search != NULL; tries--, search=search->prev) {
             if (search->refcount == 0) {
                 if (search->exptime == 0 || search->exptime > current_time) {
-                    pthread_mutex_lock(&engine->items.itemstats_lock);
+                    mutex_lock(&engine->items.itemstats_lock);
                     engine->items.itemstats[id].evicted++;
                     engine->items.itemstats[id].evicted_time = current_time - search->time;
                     if (search->exptime != 0) {
                         engine->items.itemstats[id].evicted_nonzero++;
                     }
-                    pthread_mutex_unlock(&engine->items.itemstats_lock);
-                    pthread_mutex_lock(&engine->stats.lock);
+                    mutex_unlock(&engine->items.itemstats_lock);
+                    mutex_lock(&engine->stats.lock);
                     engine->stats.evictions++;
-                    pthread_mutex_unlock(&engine->stats.lock);
+                    mutex_unlock(&engine->stats.lock);
                     engine->server.stat->evicting(cookie,
                                                   item_get_key(search),
                                                   search->nkey);
                 } else {
-                    pthread_mutex_lock(&engine->items.itemstats_lock);
+                    mutex_lock(&engine->items.itemstats_lock);
                     engine->items.itemstats[id].reclaimed++;
-                    pthread_mutex_unlock(&engine->items.itemstats_lock);
+                    mutex_unlock(&engine->items.itemstats_lock);
 
-                    pthread_mutex_lock(&engine->stats.lock);
+                    mutex_lock(&engine->stats.lock);
                     engine->stats.reclaimed++;
-                    pthread_mutex_unlock(&engine->stats.lock);
+                    mutex_unlock(&engine->stats.lock);
                 }
                 do_item_unlink(engine, search);
                 break;
@@ -203,9 +203,9 @@ hash_item *do_item_alloc(struct default_engine *engine,
 
         it = slabs_alloc(engine, ntotal, id);
         if (it == 0) {
-            pthread_mutex_lock(&engine->items.itemstats_lock);
+            mutex_lock(&engine->items.itemstats_lock);
             engine->items.itemstats[id].outofmemory++;
-            pthread_mutex_unlock(&engine->items.itemstats_lock);
+            mutex_unlock(&engine->items.itemstats_lock);
             /* Last ditch effort. There is a very rare bug which causes
              * refcount leaks. We've fixed most of them, but it still happens,
              * and it may happen in the future.
@@ -217,9 +217,9 @@ hash_item *do_item_alloc(struct default_engine *engine,
             lock_lru_list(engine, id);
             for (search = engine->items.tails[id]; tries > 0 && search != NULL; tries--, search=search->prev) {
                 if (search->refcount != 0 && search->time + TAIL_REPAIR_TIME < current_time) {
-                    pthread_mutex_lock(&engine->items.itemstats_lock);
+                    mutex_lock(&engine->items.itemstats_lock);
                     engine->items.itemstats[id].tailrepairs++;
-                    pthread_mutex_unlock(&engine->items.itemstats_lock);
+                    mutex_unlock(&engine->items.itemstats_lock);
                     search->refcount = 0;
                     do_item_unlink(engine, search);
                     break;
@@ -317,11 +317,11 @@ int do_item_link(struct default_engine *engine, hash_item *it) {
     it->time = engine->server.core->get_current_time();
     assoc_insert(engine, it);
 
-    pthread_mutex_lock(&engine->stats.lock);
+    mutex_lock(&engine->stats.lock);
     engine->stats.curr_bytes += ITEM_ntotal(engine, it);
     engine->stats.curr_items += 1;
     engine->stats.total_items += 1;
-    pthread_mutex_unlock(&engine->stats.lock);
+    mutex_unlock(&engine->stats.lock);
 
     /* Allocate a new CAS ID on link. */
     item_set_cas(NULL, NULL, it, get_cas_id());
@@ -335,10 +335,10 @@ void do_item_unlink(struct default_engine *engine, hash_item *it) {
     MEMCACHED_ITEM_UNLINK(item_get_key(it), it->nkey, it->nbytes);
     if ((it->iflag & ITEM_LINKED) != 0) {
         it->iflag &= ~ITEM_LINKED;
-        pthread_mutex_lock(&engine->stats.lock);
+        mutex_lock(&engine->stats.lock);
         engine->stats.curr_bytes -= ITEM_ntotal(engine, it);
         engine->stats.curr_items -= 1;
-        pthread_mutex_unlock(&engine->stats.lock);
+        mutex_unlock(&engine->stats.lock);
         assoc_delete(engine, it);
         item_unlink_q(engine, it);
         if (it->refcount == 0) {
@@ -1125,10 +1125,10 @@ static void *item_scubber_main(void *arg)
         unlock_lru_list(engine, ii);
     }
 
-    pthread_mutex_lock(&engine->scrubber.lock);
+    mutex_lock(&engine->scrubber.lock);
     engine->scrubber.stopped = time(NULL);
     engine->scrubber.running = false;
-    pthread_mutex_unlock(&engine->scrubber.lock);
+    mutex_unlock(&engine->scrubber.lock);
 
     return NULL;
 }
@@ -1136,7 +1136,7 @@ static void *item_scubber_main(void *arg)
 bool item_start_scrub(struct default_engine *engine)
 {
     bool ret = false;
-    pthread_mutex_lock(&engine->scrubber.lock);
+    mutex_lock(&engine->scrubber.lock);
     if (!engine->scrubber.running) {
         engine->scrubber.started = time(NULL);
         engine->scrubber.stopped = 0;
@@ -1156,7 +1156,7 @@ bool item_start_scrub(struct default_engine *engine)
             ret = true;
         }
     }
-    pthread_mutex_unlock(&engine->scrubber.lock);
+    mutex_unlock(&engine->scrubber.lock);
 
     return ret;
 }
@@ -1291,7 +1291,7 @@ static void acquire_key_operation_mutex(struct default_engine *engine, uint32_t
     int zero_index;
     struct cur_op_recs *cur_op_recs = &engine->cur_op_recs;
 
-    pthread_mutex_lock(&cur_op_recs->lock);
+    mutex_lock(&cur_op_recs->lock);
 
 start:
     zero_index = cur_op_recs->num_recs;
@@ -1311,7 +1311,7 @@ start:
     assert(zero_index != cur_op_recs->num_recs);
 
     cur_op_recs->keys[zero_index].hash = hash;
-    pthread_mutex_unlock(&cur_op_recs->lock);
+    mutex_unlock(&cur_op_recs->lock);
 }
 
 static void release_key_operation_mutex(struct default_engine *engine, uint32_t hash)
@@ -1319,7 +1319,7 @@ static void release_key_operation_mutex(struct default_engine *engine, uint32_t
     int i;
     struct cur_op_recs *cur_op_recs = &engine->cur_op_recs;
 
-    pthread_mutex_lock(&cur_op_recs->lock);
+    mutex_lock(&cur_op_recs->lock);
 
     for (i = 0; i < cur_op_recs->num_recs; i++)
     {
@@ -1333,7 +1333,7 @@ static void release_key_operation_mutex(struct default_engine *engine, uint32_t
     assert(i != cur_op_recs->num_recs);
 
     pthread_cond_broadcast(&cur_op_recs->key_wait_cv);
-    pthread_mutex_unlock(&cur_op_recs->lock);
+    mutex_unlock(&cur_op_recs->lock);
 }
 
 void items_destroy(struct default_engine *engine)
diff --git a/engines/default_engine/slabs.c b/engines/default_engine/slabs.c
index 7dc0795..f7a4156 100644
--- a/engines/default_engine/slabs.c
+++ b/engines/default_engine/slabs.c
@@ -388,27 +388,27 @@ static void *memory_allocate(struct default_engine *engine, size_t size) {
 void *slabs_alloc(struct default_engine *engine, size_t size, unsigned int id) {
     void *ret;
 
-    pthread_mutex_lock(&engine->slabs.lock);
+    mutex_lock(&engine->slabs.lock);
     ret = do_slabs_alloc(engine, size, id);
-    pthread_mutex_unlock(&engine->slabs.lock);
+    mutex_unlock(&engine->slabs.lock);
     return ret;
 }
 
 void slabs_free(struct default_engine *engine, void *ptr, size_t size, unsigned int id) {
-    pthread_mutex_lock(&engine->slabs.lock);
+    mutex_lock(&engine->slabs.lock);
     do_slabs_free(engine, ptr, size, id);
-    pthread_mutex_unlock(&engine->slabs.lock);
+    mutex_unlock(&engine->slabs.lock);
 }
 
 void slabs_stats(struct default_engine *engine, ADD_STAT add_stats, const void *c) {
-    pthread_mutex_lock(&engine->slabs.lock);
+    mutex_lock(&engine->slabs.lock);
     do_slabs_stats(engine, add_stats, c);
-    pthread_mutex_unlock(&engine->slabs.lock);
+    mutex_unlock(&engine->slabs.lock);
 }
 
 void slabs_adjust_mem_requested(struct default_engine *engine, unsigned int id, size_t old, size_t ntotal)
 {
-    pthread_mutex_lock(&engine->slabs.lock);
+    mutex_lock(&engine->slabs.lock);
     slabclass_t *p;
     if (id < POWER_SMALLEST || id > engine->slabs.power_largest) {
         EXTENSION_LOGGER_DESCRIPTOR *logger;
@@ -420,5 +420,5 @@ void slabs_adjust_mem_requested(struct default_engine *engine, unsigned int id,
 
     p = &engine->slabs.slabclass[id];
     p->requested = p->requested - old + ntotal;
-    pthread_mutex_unlock(&engine->slabs.lock);
+    mutex_unlock(&engine->slabs.lock);
 }
diff --git a/include/memcached/locking.h b/include/memcached/locking.h
new file mode 100644
index 0000000..e923e5f
--- /dev/null
+++ b/include/memcached/locking.h
@@ -0,0 +1,37 @@
+#include <pthread.h>
+
+#ifndef LOCKING_H
+#define LOCKING_H
+
+#if defined(SPIN_LOCK)
+
+static inline int mutex_lock(pthread_mutex_t *mutex)
+{
+    while (pthread_mutex_trylock(mutex));
+    return 0;
+}
+
+static inline int rwlock_rdlock(pthread_rwlock_t *rwlock)
+{
+    while (pthread_rwlock_tryrdlock(rwlock));
+    return 0;
+}
+
+static inline int rwlock_wrlock(pthread_rwlock_t *rwlock)
+{
+    while (pthread_rwlock_trywrlock(rwlock));
+    return 0;
+}
+
+#else
+
+#define mutex_lock(x) pthread_mutex_lock(x)
+#define rwlock_rdlock(x) pthread_rwlock_rdlock(x)
+#define rwlock_wrlock(x) pthread_rwlock_wrlock(x)
+
+#endif
+
+#define mutex_unlock(x) pthread_mutex_unlock(x)
+#define rwlock_unlock(x) pthread_rwlock_unlock(x)
+
+#endif
-- 
1.7.1
