Author: stefan2
Date: Mon Jun 4 19:56:58 2012
New Revision: 1346122
URL: http://svn.apache.org/viewvc?rev=1346122&view=rev
Log:
Improve membuffer cache scalability by replacing the per-segment
mutexes with r/w locks.
* subversion/libsvn_subr/cache-membuffer.c
(svn_membuffer_t): replace the lock type
(read_lock_cache, write_lock_cache, unlock_cache,
WITH_READ_LOCK, WITH_WRITE_LOCK): new locking utilities
(svn_cache__membuffer_cache_create): adapt lock init code
(membuffer_cache_set, membuffer_cache_get,
membuffer_cache_get_partial, membuffer_cache_set_partial,
svn_membuffer_cache_get_info): replace lock macros
Modified:
subversion/trunk/subversion/libsvn_subr/cache-membuffer.c
Modified: subversion/trunk/subversion/libsvn_subr/cache-membuffer.c
URL:
http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_subr/cache-membuffer.c?rev=1346122&r1=1346121&r2=1346122&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/trunk/subversion/libsvn_subr/cache-membuffer.c Mon Jun 4
19:56:58 2012
@@ -23,6 +23,8 @@
#include <assert.h>
#include <apr_md5.h>
+#include <apr_thread_rwlock.h>
+
#include "svn_pools.h"
#include "svn_checksum.h"
#include "md5.h"
@@ -436,11 +438,13 @@ struct svn_membuffer_t
*/
apr_uint64_t total_hits;
+#if APR_HAS_THREADS
/* A lock for intra-process synchronization to the cache, or NULL if
* the cache's creator doesn't feel the cache needs to be
* thread-safe.
*/
- svn_mutex__t *mutex;
+ apr_thread_rwlock_t *lock;
+#endif
};
/* Align integer VALUE to the next ITEM_ALIGNMENT boundary.
@@ -451,6 +455,76 @@ struct svn_membuffer_t
*/
#define ALIGN_POINTER(pointer)
((void*)ALIGN_VALUE((apr_size_t)(char*)(pointer)))
+/* If locking is supported for CACHE, acquire a read lock for it.
+ */
+static svn_error_t *
+read_lock_cache(svn_membuffer_t *cache)
+{
+#if APR_HAS_THREADS
+ if (cache->lock)
+ {
+ apr_status_t status = apr_thread_rwlock_rdlock(cache->lock);
+ if (status)
+ return svn_error_wrap_apr(status, _("Can't lock cache mutex"));
+ }
+#endif
+ return SVN_NO_ERROR;
+}
+
+/* If locking is supported for CACHE, acquire a write lock for it.
+ */
+static svn_error_t *
+write_lock_cache(svn_membuffer_t *cache)
+{
+#if APR_HAS_THREADS
+ if (cache->lock)
+ {
+ apr_status_t status = apr_thread_rwlock_wrlock(cache->lock);
+ if (status)
+ return svn_error_wrap_apr(status, _("Can't write-lock cache mutex"));
+ }
+#endif
+ return SVN_NO_ERROR;
+}
+
+/* If locking is supported for CACHE, release the current lock
+ * (read or write).
+ */
+static svn_error_t *
+unlock_cache(svn_membuffer_t *cache, svn_error_t *err)
+{
+#if APR_HAS_THREADS
+ if (cache->lock)
+ {
+ apr_status_t status = apr_thread_rwlock_unlock(cache->lock);
+ if (err)
+ return err;
+
+ if (status)
+ return svn_error_wrap_apr(status, _("Can't unlock cache mutex"));
+ }
+#endif
+ return err;
+}
+
+/* If supported, guard the execution of EXPR with a read lock on CACHE.
+ * Macro has been modelled after SVN_MUTEX__WITH_LOCK.
+ */
+#define WITH_READ_LOCK(cache, expr) \
+do { \
+ SVN_ERR(read_lock_cache(cache)); \
+ SVN_ERR(unlock_cache(cache, (expr))); \
+} while (0)
+
+/* If supported, guard the execution of EXPR with a write lock on CACHE.
+ * Macro has been modelled after SVN_MUTEX__WITH_LOCK.
+ */
+#define WITH_WRITE_LOCK(cache, expr) \
+do { \
+ SVN_ERR(write_lock_cache(cache)); \
+ SVN_ERR(unlock_cache(cache, (expr))); \
+} while (0)
+
/* Resolve a dictionary entry reference, i.e. return the entry
* for the given IDX.
*/
@@ -1048,10 +1122,20 @@ svn_cache__membuffer_cache_create(svn_me
return svn_error_wrap_apr(APR_ENOMEM, _("OOM"));
}
+#if APR_HAS_THREADS
/* A lock for intra-process synchronization to the cache, or NULL if
* the cache's creator doesn't feel the cache needs to be
- * thread-safe. */
- SVN_ERR(svn_mutex__init(&c[seg].mutex, thread_safe, pool));
+ * thread-safe.
+ */
+ c[seg].lock = NULL;
+ if (thread_safe)
+ {
+ apr_status_t status =
+ apr_thread_rwlock_create(&(c[seg].lock), pool);
+ if (status)
+ return svn_error_wrap_apr(status, _("Can't create cache mutex"));
+ }
+#endif
}
/* done here
@@ -1156,14 +1240,14 @@ membuffer_cache_set(svn_membuffer_t *cac
/* The actual cache data access needs to sync'ed
*/
- SVN_MUTEX__WITH_LOCK(cache->mutex,
- membuffer_cache_set_internal(cache,
- key,
- group_index,
- buffer,
- size,
- DEBUG_CACHE_MEMBUFFER_TAG
- scratch_pool));
+ WITH_WRITE_LOCK(cache,
+ membuffer_cache_set_internal(cache,
+ key,
+ group_index,
+ buffer,
+ size,
+ DEBUG_CACHE_MEMBUFFER_TAG
+ scratch_pool));
return SVN_NO_ERROR;
}
@@ -1252,14 +1336,14 @@ membuffer_cache_get(svn_membuffer_t *cac
/* find the entry group that will hold the key.
*/
group_index = get_group_index(&cache, key);
- SVN_MUTEX__WITH_LOCK(cache->mutex,
- membuffer_cache_get_internal(cache,
- group_index,
- key,
- &buffer,
- &size,
- DEBUG_CACHE_MEMBUFFER_TAG
- result_pool));
+ WITH_READ_LOCK(cache,
+ membuffer_cache_get_internal(cache,
+ group_index,
+ key,
+ &buffer,
+ &size,
+ DEBUG_CACHE_MEMBUFFER_TAG
+ result_pool));
/* re-construct the original data object from its serialized form.
*/
@@ -1355,11 +1439,11 @@ membuffer_cache_get_partial(svn_membuffe
{
apr_uint32_t group_index = get_group_index(&cache, key);
- SVN_MUTEX__WITH_LOCK(cache->mutex,
- membuffer_cache_get_partial_internal
- (cache, group_index, key, item, found,
- deserializer, baton, DEBUG_CACHE_MEMBUFFER_TAG
- result_pool));
+ WITH_READ_LOCK(cache,
+ membuffer_cache_get_partial_internal
+ (cache, group_index, key, item, found,
+ deserializer, baton, DEBUG_CACHE_MEMBUFFER_TAG
+ result_pool));
return SVN_NO_ERROR;
}
@@ -1486,11 +1570,11 @@ membuffer_cache_set_partial(svn_membuffe
/* cache item lookup
*/
apr_uint32_t group_index = get_group_index(&cache, key);
- SVN_MUTEX__WITH_LOCK(cache->mutex,
- membuffer_cache_set_partial_internal
- (cache, group_index, key, func, baton,
- DEBUG_CACHE_MEMBUFFER_TAG_ARG
- scratch_pool));
+ WITH_WRITE_LOCK(cache,
+ membuffer_cache_set_partial_internal
+ (cache, group_index, key, func, baton,
+ DEBUG_CACHE_MEMBUFFER_TAG_ARG
+ scratch_pool));
/* done here -> unlock the cache
*/
@@ -1825,8 +1909,8 @@ svn_membuffer_cache_get_info(void *cache
for (i = 0; i < cache->membuffer->segment_count; ++i)
{
svn_membuffer_t *segment = cache->membuffer + i;
- SVN_MUTEX__WITH_LOCK(segment->mutex,
- svn_membuffer_get_segment_info(segment, info));
+ WITH_READ_LOCK(segment,
+ svn_membuffer_get_segment_info(segment, info));
}
return SVN_NO_ERROR;