Author: stefan2
Date: Sun Apr 20 18:35:43 2014
New Revision: 1588815
URL: http://svn.apache.org/r1588815
Log:
Enable FSFS to take out more than one file lock at once through a single
call. Use that functionality to take out the new pack lock for upgrade,
hotcopy and recovery. Also, disallow new TXNs during upgrade and recovery.

The core is the introduction of a new data type describing a lock to take
out, which can be nested / chained. Switch all existing lock functions to
use that infrastructure.
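
For illustration, the chained batons compose as follows (a condensed sketch
of the code added to fs_fs.c below, using the new helper names; not a
literal excerpt):

   /* Innermost element: the write lock, wrapping the user-provided BODY. */
   with_lock_baton_t *b = create_lock_baton(fs, write_lock, body, baton, pool);

   /* Each outer element uses with_lock() as its BODY and the next inner
      baton as its BATON. */
   b = chain_lock_baton(pack_lock, b);  /* only on formats with a pack lock */
   b = chain_lock_baton(txn_lock, b);   /* only if new TXNs are disallowed  */

   /* Acquire the txn-current, pack and write locks (outermost first), run
      BODY, then destroy the shared lock pool, releasing all file locks at
      once. */
   SVN_ERR(with_lock(b, pool));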

* subversion/libsvn_fs_fs/fs_fs.h
  (svn_fs_fs__with_all_locks): Declare the new internal lock API.

* subversion/libsvn_fs_fs/fs_fs.c
  (with_lock_baton_t): New data type. It allows us to describe a whole
   chain of locks to take out with minimal extra overhead.
  (with_some_lock_file): Take all parameters and the lock pool from the
   new data struct. Also, update the FS members only once before calling
   the user-provided BODY and release all file locks at once in the
   outermost nesting level. Finally, make the function not hold / leak
   file locks when updating FS members fails.
  (with_lock): New wrapper around the above that ensures in-process
   serialization as well.
  (lock_id_t): New enum that enables us to abstractly specify an
   FS-internal lock.
  (init_lock_baton,
   create_lock_baton,
   chain_lock_baton): New functions to construct single lock batons as
   well as whole chains of them.
  (svn_fs_fs__with_write_lock,
   svn_fs_fs__with_pack_lock,
   svn_fs_fs__with_txn_current_lock): Switch to the new infrastructure.
  (svn_fs_fs__upgrade): Use the new lock API for global serialization.

* subversion/libsvn_fs_fs/hotcopy.c
  (svn_fs_fs__hotcopy): Same.

* subversion/libsvn_fs_fs/recovery.c
  (svn_fs_fs__recover): Same.
Modified:
subversion/trunk/subversion/libsvn_fs_fs/fs_fs.c
subversion/trunk/subversion/libsvn_fs_fs/fs_fs.h
subversion/trunk/subversion/libsvn_fs_fs/hotcopy.c
subversion/trunk/subversion/libsvn_fs_fs/recovery.c
Modified: subversion/trunk/subversion/libsvn_fs_fs/fs_fs.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_fs_fs/fs_fs.c?rev=1588815&r1=1588814&r2=1588815&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_fs_fs/fs_fs.c (original)
+++ subversion/trunk/subversion/libsvn_fs_fs/fs_fs.c Sun Apr 20 18:35:43 2014
@@ -151,31 +151,77 @@ reset_lock_flag(void *baton_void)
return APR_SUCCESS;
}
-/* Obtain a write lock on the file LOCK_FILENAME (protecting with
- LOCK_MUTEX if APR is threaded) in a subpool of POOL, call BODY with
- BATON and that subpool, destroy the subpool (releasing the write
- lock) and return what BODY returned. If IS_GLOBAL_LOCK is set,
- set the HAS_WRITE_LOCK flag while we keep the write lock. */
+/* Structure defining a file system lock to be acquired and the function
+ to be executed while the lock is held.
+
+ Instances of this structure may be nested to allow for multiple locks to
+ be taken out before executing the user-provided body. In that case, BODY
+ and BATON of the outer instances will be with_lock and a with_lock_baton_t
+ instance (transparently, no special treatment is required). It is
+ illegal to attempt to acquire the same lock twice within the same lock
+ chain or via nesting calls using separate lock chains.
+
+ All instances along the chain share the same LOCK_POOL such that only one
+ pool needs to be created and cleared for all locks. We also allocate as
+ much data from that lock pool as possible to minimize memory usage in
+ caller pools. */
+typedef struct with_lock_baton_t
+{
+ /* The filesystem we operate on. Same for all instances along the chain. */
+ svn_fs_t *fs;
+
+ /* Mutex to complement the lock file in an APR threaded process.
+ No-op object for non-threaded processes but never NULL. */
+ svn_mutex__t *mutex;
+
+ /* Path to the file to lock. */
+ const char *lock_path;
+
+ /* If true, set FS->HAS_WRITE_LOCK after we acquired the lock. */
+ svn_boolean_t is_global_lock;
+
+ /* Function body to execute after we acquired the lock.
+ This may be user-provided or a nested call to with_lock(). */
+ svn_error_t *(*body)(void *baton,
+ apr_pool_t *pool);
+
+ /* Baton to pass to BODY; possibly NULL.
+ This may be user-provided or a nested lock baton instance. */
+ void *baton;
+
+ /* Pool for all allocations along the lock chain and BODY. Will hold the
+ file locks and gets destroyed after the outermost BODY returned,
+ releasing all file locks.
+ Same for all instances along the chain. */
+ apr_pool_t *lock_pool;
+
+ /* TRUE, iff BODY is the user-provided body. */
+ svn_boolean_t is_inner_most_lock;
+
+ /* TRUE, iff this is not a nested lock.
+ Then responsible for destroying LOCK_POOL. */
+ svn_boolean_t is_outer_most_lock;
+} with_lock_baton_t;
+
+/* Obtain a write lock on the file BATON->LOCK_PATH and call BATON->BODY
+ with BATON->BATON. If this is the outermost lock call, release all file
+ locks after the body returned. If BATON->IS_GLOBAL_LOCK is set, set the
+ HAS_WRITE_LOCK flag while we keep the write lock. */
static svn_error_t *
-with_some_lock_file(svn_fs_t *fs,
- svn_error_t *(*body)(void *baton,
- apr_pool_t *pool),
- void *baton,
- const char *lock_filename,
- svn_boolean_t is_global_lock,
- apr_pool_t *pool)
+with_some_lock_file(with_lock_baton_t *baton)
{
- apr_pool_t *subpool = svn_pool_create(pool);
- svn_error_t *err = get_lock_on_filesystem(lock_filename, subpool);
+ apr_pool_t *pool = baton->lock_pool;
+ svn_error_t *err = get_lock_on_filesystem(baton->lock_path, pool);
if (!err)
{
+ svn_fs_t *fs = baton->fs;
fs_fs_data_t *ffd = fs->fsap_data;
- if (is_global_lock)
+ if (baton->is_global_lock)
{
/* set the "got the lock" flag and register reset function */
- apr_pool_cleanup_register(subpool,
+ apr_pool_cleanup_register(pool,
ffd,
reset_lock_flag,
apr_pool_cleanup_null);
@@ -184,18 +230,127 @@ with_some_lock_file(svn_fs_t *fs,
/* nobody else will modify the repo state
=> read HEAD & pack info once */
- if (ffd->format >= SVN_FS_FS__MIN_PACKED_FORMAT)
- SVN_ERR(svn_fs_fs__update_min_unpacked_rev(fs, pool));
- SVN_ERR(get_youngest(&ffd->youngest_rev_cache, fs->path,
- pool));
- err = body(baton, subpool);
+ if (baton->is_inner_most_lock)
+ {
+ if (ffd->format >= SVN_FS_FS__MIN_PACKED_FORMAT)
+ err = svn_fs_fs__update_min_unpacked_rev(fs, pool);
+ if (!err)
+ err = get_youngest(&ffd->youngest_rev_cache, fs->path, pool);
+ }
+
+ if (!err)
+ err = baton->body(baton->baton, pool);
}
- svn_pool_destroy(subpool);
+ if (baton->is_outer_most_lock)
+ svn_pool_destroy(pool);
return svn_error_trace(err);
}
+/* Wraps with_some_lock_file, protecting it with BATON->MUTEX.
+
+ POOL is unused here and only provided for signature compatibility with
+ WITH_LOCK_BATON_T.BODY. */
+static svn_error_t *
+with_lock(void *baton,
+ apr_pool_t *pool)
+{
+ with_lock_baton_t *lock_baton = baton;
+ SVN_MUTEX__WITH_LOCK(lock_baton->mutex, with_some_lock_file(lock_baton));
+
+ return SVN_NO_ERROR;
+}
+
+/* Enum identifying a filesystem lock. */
+typedef enum lock_id_t
+{
+ write_lock,
+ txn_lock,
+ pack_lock
+} lock_id_t;
+
+/* Initialize BATON->MUTEX, BATON->LOCK_PATH and BATON->IS_GLOBAL_LOCK
+ according to the LOCK_ID. All other members of BATON must already be
+ valid. */
+static void
+init_lock_baton(with_lock_baton_t *baton,
+ lock_id_t lock_id)
+{
+ fs_fs_data_t *ffd = baton->fs->fsap_data;
+ fs_fs_shared_data_t *ffsd = ffd->shared;
+
+ switch (lock_id)
+ {
+ case write_lock: baton->mutex = ffsd->fs_write_lock;
+ baton->lock_path = path_lock(baton->fs,
+ baton->lock_pool);
+ baton->is_global_lock = TRUE;
+ break;
+
+ case txn_lock: baton->mutex = ffsd->txn_current_lock;
+ baton->lock_path = svn_fs_fs__path_txn_current_lock
+ (baton->fs, baton->lock_pool);
+ baton->is_global_lock = FALSE;
+ break;
+
+ case pack_lock: baton->mutex = ffsd->fs_pack_lock;
+ baton->lock_path = path_pack_lock(baton->fs,
+ baton->lock_pool);
+ baton->is_global_lock = FALSE;
+ break;
+ }
+}
+
+/* Return the baton for the innermost lock of a (potential) lock chain.
+ The baton shall take out LOCK_ID from FS and execute BODY with BATON
+ while the lock is being held. Allocate the result in a sub-pool of POOL.
+ */
+static with_lock_baton_t *
+create_lock_baton(svn_fs_t *fs,
+ lock_id_t lock_id,
+ svn_error_t *(*body)(void *baton,
+ apr_pool_t *pool),
+ void *baton,
+ apr_pool_t *pool)
+{
+ apr_pool_t *lock_pool = svn_pool_create(pool);
+ with_lock_baton_t *result = apr_pcalloc(lock_pool, sizeof(*result));
+
+ result->fs = fs;
+ result->body = body;
+ result->baton = baton;
+ result->lock_pool = lock_pool;
+ result->is_inner_most_lock = TRUE;
+ result->is_outer_most_lock = TRUE;
+
+ init_lock_baton(result, lock_id);
+
+ return result;
+}
+
+/* Return a baton that wraps NESTED and requests LOCK_ID as additional lock.
+ */
+static with_lock_baton_t *
+chain_lock_baton(lock_id_t lock_id,
+ with_lock_baton_t *nested)
+{
+ apr_pool_t *lock_pool = nested->lock_pool;
+ with_lock_baton_t *result = apr_pcalloc(lock_pool, sizeof(*result));
+
+ result->fs = nested->fs;
+ result->body = with_lock;
+ result->baton = nested;
+ result->lock_pool = lock_pool;
+ result->is_inner_most_lock = FALSE;
+ result->is_outer_most_lock = TRUE;
+ nested->is_outer_most_lock = FALSE;
+
+ init_lock_baton(result, lock_id);
+
+ return result;
+}
+
svn_error_t *
svn_fs_fs__with_write_lock(svn_fs_t *fs,
svn_error_t *(*body)(void *baton,
@@ -203,16 +358,9 @@ svn_fs_fs__with_write_lock(svn_fs_t *fs,
void *baton,
apr_pool_t *pool)
{
- fs_fs_data_t *ffd = fs->fsap_data;
- fs_fs_shared_data_t *ffsd = ffd->shared;
-
- SVN_MUTEX__WITH_LOCK(ffsd->fs_write_lock,
- with_some_lock_file(fs, body, baton,
- path_lock(fs, pool),
- TRUE,
- pool));
-
- return SVN_NO_ERROR;
+ return svn_error_trace(
+ with_lock(create_lock_baton(fs, write_lock, body, baton, pool),
+ pool));
}
svn_error_t *
@@ -222,20 +370,11 @@ svn_fs_fs__with_pack_lock(svn_fs_t *fs,
void *baton,
apr_pool_t *pool)
{
- fs_fs_data_t *ffd = fs->fsap_data;
- fs_fs_shared_data_t *ffsd = ffd->shared;
-
- SVN_MUTEX__WITH_LOCK(ffsd->fs_pack_lock,
- with_some_lock_file(fs, body, baton,
- path_pack_lock(fs, pool),
- FALSE,
- pool));
-
- return SVN_NO_ERROR;
+ return svn_error_trace(
+ with_lock(create_lock_baton(fs, pack_lock, body, baton, pool),
+ pool));
}
-/* Run BODY (with BATON and POOL) while the txn-current file
- of FS is locked. */
svn_error_t *
svn_fs_fs__with_txn_current_lock(svn_fs_t *fs,
svn_error_t *(*body)(void *baton,
@@ -243,18 +382,34 @@ svn_fs_fs__with_txn_current_lock(svn_fs_
void *baton,
apr_pool_t *pool)
{
+ return svn_error_trace(
+ with_lock(create_lock_baton(fs, txn_lock, body, baton, pool),
+ pool));
+}
+
+svn_error_t *
+svn_fs_fs__with_all_locks(svn_fs_t *fs,
+ svn_boolean_t allow_new_txns,
+ svn_error_t *(*body)(void *baton,
+ apr_pool_t *pool),
+ void *baton,
+ apr_pool_t *pool)
+{
fs_fs_data_t *ffd = fs->fsap_data;
- fs_fs_shared_data_t *ffsd = ffd->shared;
- SVN_MUTEX__WITH_LOCK(ffsd->txn_current_lock,
- with_some_lock_file(fs, body, baton,
- svn_fs_fs__path_txn_current_lock(fs, pool),
- FALSE,
- pool));
+ with_lock_baton_t *lock_baton
+ = create_lock_baton(fs, write_lock, body, baton, pool);
- return SVN_NO_ERROR;
+ if (ffd->format >= SVN_FS_FS__MIN_PACK_LOCK_FORMAT)
+ lock_baton = chain_lock_baton(pack_lock, lock_baton);
+
+ if (!allow_new_txns)
+ lock_baton = chain_lock_baton(txn_lock, lock_baton);
+
+ return svn_error_trace(with_lock(lock_baton, pool));
}
+
@@ -1010,7 +1165,7 @@ svn_fs_fs__upgrade(svn_fs_t *fs,
baton.cancel_func = cancel_func;
baton.cancel_baton = cancel_baton;
- return svn_fs_fs__with_write_lock(fs, upgrade_body, (void *)&baton, pool);
+ return svn_fs_fs__with_all_locks(fs, FALSE, upgrade_body, (void *)&baton,
pool);
}
/* Find the youngest revision in a repository at path FS_PATH and
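
To follow the nesting at run time, here is a rough trace for a two-element
chain (a pack-lock baton wrapping a write-lock baton), as built by
chain_lock_baton above. Indentation shows nesting; only the outermost level
destroys the shared lock pool:

   with_lock(pack_baton)
     SVN_MUTEX__WITH_LOCK(pack mutex)
       with_some_lock_file(pack_baton)     /* locks the pack-lock file */
         pack_baton->body == with_lock
           with_lock(write_baton)
             SVN_MUTEX__WITH_LOCK(write mutex)
               with_some_lock_file(write_baton)
                 /* locks the write-lock file, sets HAS_WRITE_LOCK and
                    refreshes the cached FS members */
                 write_baton->body == user-provided BODY
         svn_pool_destroy(lock_pool)       /* outermost only: releases both
                                              file locks and clears
                                              HAS_WRITE_LOCK in one step */
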
Modified: subversion/trunk/subversion/libsvn_fs_fs/fs_fs.h
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_fs_fs/fs_fs.h?rev=1588815&r1=1588814&r2=1588815&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_fs_fs/fs_fs.h (original)
+++ subversion/trunk/subversion/libsvn_fs_fs/fs_fs.h Sun Apr 20 18:35:43 2014
@@ -165,6 +165,22 @@ svn_fs_fs__with_txn_current_lock(svn_fs_
void *baton,
apr_pool_t *pool);
+/* Obtain all locks on the filesystem FS in a subpool of POOL, call BODY
+ with BATON and that subpool, destroy the subpool (releasing the locks)
+ and return what BODY returned. If ALLOW_NEW_TXNS is TRUE, other svn_fs_t
+ instances may start new transactions while we hold the locks.
+
+ This combines svn_fs_fs__with_write_lock, svn_fs_fs__with_pack_lock,
+ and optionally svn_fs_fs__with_txn_current_lock, ensuring correct lock
+ ordering. */
+svn_error_t *
+svn_fs_fs__with_all_locks(svn_fs_t *fs,
+ svn_boolean_t allow_new_txns,
+ svn_error_t *(*body)(void *baton,
+ apr_pool_t *pool),
+ void *baton,
+ apr_pool_t *pool);
+
/* Find the value of the property named PROPNAME in transaction TXN.
Return the contents in *VALUE_P. The contents will be allocated
from POOL. */
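
A minimal usage sketch for the svn_fs_fs__with_all_locks declaration above;
my_maintenance_body and my_baton are placeholder names for this example
only:

   /* Runs while the write lock, the pack lock (on formats that have one)
      and, with ALLOW_NEW_TXNS == FALSE, the txn-current lock are held. */
   static svn_error_t *
   my_maintenance_body(void *baton, apr_pool_t *pool)
   {
     /* ... modify the repository here ... */
     return SVN_NO_ERROR;
   }

   /* At the call site: */
   SVN_ERR(svn_fs_fs__with_all_locks(fs, FALSE /* allow_new_txns */,
                                     my_maintenance_body, &my_baton, pool));
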
Modified: subversion/trunk/subversion/libsvn_fs_fs/hotcopy.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_fs_fs/hotcopy.c?rev=1588815&r1=1588814&r2=1588815&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_fs_fs/hotcopy.c (original)
+++ subversion/trunk/subversion/libsvn_fs_fs/hotcopy.c Sun Apr 20 18:35:43 2014
@@ -1105,7 +1105,7 @@ svn_fs_fs__hotcopy(svn_fs_t *src_fs,
hbb.incremental = incremental;
hbb.cancel_func = cancel_func;
hbb.cancel_baton = cancel_baton;
- SVN_ERR(svn_fs_fs__with_write_lock(dst_fs, hotcopy_body, &hbb, pool));
+ SVN_ERR(svn_fs_fs__with_all_locks(dst_fs, TRUE, hotcopy_body, &hbb, pool));
return SVN_NO_ERROR;
}
Modified: subversion/trunk/subversion/libsvn_fs_fs/recovery.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_fs_fs/recovery.c?rev=1588815&r1=1588814&r2=1588815&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_fs_fs/recovery.c (original)
+++ subversion/trunk/subversion/libsvn_fs_fs/recovery.c Sun Apr 20 18:35:43 2014
@@ -507,5 +507,5 @@ svn_fs_fs__recover(svn_fs_t *fs,
b.fs = fs;
b.cancel_func = cancel_func;
b.cancel_baton = cancel_baton;
- return svn_fs_fs__with_write_lock(fs, recover_body, &b, pool);
+ return svn_fs_fs__with_all_locks(fs, FALSE, recover_body, &b, pool);
}