Author: stefan2
Date: Wed Jun 17 10:09:12 2015
New Revision: 1685985

URL: http://svn.apache.org/r1685985
Log:
On the fsx-1.10 branch:
Introduce a new infrastructure to FSX that allows us to do efficient fsyncs.

It basically uses a thread pool to execute multiple fsyncs concurrently.
Interestingly, this generic implementation is faster on Linux than even the
POSIX-provided aio_fsync functionality on the same system. As a centralized
mechanism for scheduling fsyncs it also takes care of preventing redundant
flushes.

With this commit, FSX does not actually use the new capabilities. That will
be in the following commits.

* subversion/include/private/svn_mutex.h
  (svn_mutex__get): Declare new utility function.

* subversion/libsvn_subr/mutex.c
  (svn_mutex__get): Implement.

* subversion/libsvn_fs_x/batch_fsync.h
  (): New file with new internal API.

* subversion/libsvn_fs_x/batch_fsync.c
  (): New file implementing the new internal API.

* subversion/libsvn_fs_x/fs.c
  (svn_fs_x__init): Trigger the initialization of the new infrastructure. 

* subversion/tests/libsvn_fs_x/fs-x-pack-test.c
  (test_batch_fsync): New test covering some basic state of the new code.
  (test_funcs): Register new test.

Added:
    subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.c   (with 
props)
    subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.h   (with 
props)
Modified:
    subversion/branches/fsx-1.10/subversion/include/private/svn_mutex.h
    subversion/branches/fsx-1.10/subversion/libsvn_fs_x/fs.c
    subversion/branches/fsx-1.10/subversion/libsvn_subr/mutex.c
    subversion/branches/fsx-1.10/subversion/tests/libsvn_fs_x/fs-x-pack-test.c

Modified: subversion/branches/fsx-1.10/subversion/include/private/svn_mutex.h
URL: 
http://svn.apache.org/viewvc/subversion/branches/fsx-1.10/subversion/include/private/svn_mutex.h?rev=1685985&r1=1685984&r2=1685985&view=diff
==============================================================================
--- subversion/branches/fsx-1.10/subversion/include/private/svn_mutex.h 
(original)
+++ subversion/branches/fsx-1.10/subversion/include/private/svn_mutex.h Wed Jun 
17 10:09:12 2015
@@ -104,6 +104,17 @@ do {
   SVN_ERR(svn_mutex__unlock(svn_mutex__m, (expr)));     \
 } while (0)
 
+#if APR_HAS_THREADS
+
+/** Return the APR mutex encapsulated in @a mutex.
+ *
+ * @note This function should only be called by APR wrapper code.
+ */
+apr_thread_mutex_t *
+svn_mutex__get(svn_mutex__t *mutex);
+
+#endif
+
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */

Added: subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.c
URL: 
http://svn.apache.org/viewvc/subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.c?rev=1685985&view=auto
==============================================================================
--- subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.c (added)
+++ subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.c Wed Jun 
17 10:09:12 2015
@@ -0,0 +1,480 @@
+/* batch_fsync.c --- efficiently fsync multiple targets
+ *
+ * ====================================================================
+ *    Licensed to the Apache Software Foundation (ASF) under one
+ *    or more contributor license agreements.  See the NOTICE file
+ *    distributed with this work for additional information
+ *    regarding copyright ownership.  The ASF licenses this file
+ *    to you under the Apache License, Version 2.0 (the
+ *    "License"); you may not use this file except in compliance
+ *    with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing,
+ *    software distributed under the License is distributed on an
+ *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *    KIND, either express or implied.  See the License for the
+ *    specific language governing permissions and limitations
+ *    under the License.
+ * ====================================================================
+ */
+
+#include <apr_thread_pool.h>
+#include <apr_thread_cond.h>
+
+#include "batch_fsync.h"
+#include "svn_pools.h"
+#include "svn_hash.h"
+#include "svn_dirent_uri.h"
+#include "svn_private_config.h"
+
+#include "private/svn_dep_compat.h"
+#include "private/svn_mutex.h"
+#include "private/svn_subr_private.h"
+
+/* Handy macro to check APR function results and turn them into
+ * svn_error_t upon failure. */
+#define WRAP_APR_ERR(x,msg)                     \
+  {                                             \
+    apr_status_t status_ = (x);                 \
+    if (status_)                                \
+      return svn_error_wrap_apr(status_, msg);  \
+  }
+
+
/* A simple SVN-wrapper around the apr_thread_cond_* API.
 *
 * Note: APR_HAS_THREADS is always defined by apr.h (to 0 or 1), so it
 * must be tested with #if, not #ifdef -- with #ifdef the non-threaded
 * fallback below would be dead code.  This also matches the existing
 * #if APR_HAS_THREADS usage in svn_mutex.h. */
#if APR_HAS_THREADS
typedef apr_thread_cond_t svn_thread_cond__t;
#else
typedef int svn_thread_cond__t;
#endif
+
+static svn_error_t *
+svn_thread_cond__create(svn_thread_cond__t **cond,
+                        apr_pool_t *result_pool)
+{
+#ifdef APR_HAS_THREADS
+
+  WRAP_APR_ERR(apr_thread_cond_create(cond, result_pool),
+               _("Can't create condition variable"));
+
+#else
+
+  *cond = apr_pcalloc(result_pool, sizeof(**cond));
+
+#endif
+
+  return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+svn_thread_cond__broadcast(svn_thread_cond__t *cond)
+{
+#ifdef APR_HAS_THREADS
+
+  WRAP_APR_ERR(apr_thread_cond_broadcast(cond),
+               _("Can't broadcast condition variable"));
+
+#endif
+
+  return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+svn_thread_cond__wait(svn_thread_cond__t *cond,
+                      svn_mutex__t *mutex)
+{
+#ifdef APR_HAS_THREADS
+
+  WRAP_APR_ERR(apr_thread_cond_wait(cond, svn_mutex__get(mutex)),
+               _("Can't broadcast condition variable"));
+
+#endif
+
+  return SVN_NO_ERROR;
+}
+
+/* Utility construct:  Clients can efficiently wait for the encapsulated
+ * counter to reach a certain value.  Currently, only increments have been
+ * implemented.  This whole structure can be opaque to the API users.
+ */
+typedef struct waitable_counter_t
+{
+  /* Current value, initialized to 0. */
+  int value;
+
+  /* Synchronization objects. */
+  svn_thread_cond__t *cond;
+  svn_mutex__t *mutex;
+} waitable_counter_t;
+
/* Set *COUNTER_P to a new waitable_counter_t instance allocated in
 * RESULT_POOL.  The initial counter value is 0. */
static svn_error_t *
waitable_counter__create(waitable_counter_t **counter_p,
                         apr_pool_t *result_pool)
{
  waitable_counter_t *counter = apr_pcalloc(result_pool, sizeof(*counter));
  counter->value = 0;

  /* The counter is shared between the worker threads and the waiting
   * thread.  NOTE(review): the TRUE argument presumably requests an
   * actual (non-dummy) mutex -- confirm against svn_mutex__init docs. */
  SVN_ERR(svn_thread_cond__create(&counter->cond, result_pool));
  SVN_ERR(svn_mutex__init(&counter->mutex, TRUE, result_pool));

  *counter_p = counter;

  return SVN_NO_ERROR;
}
+
/* Increment the value in COUNTER by 1. */
static svn_error_t *
waitable_counter__increment(waitable_counter_t *counter)
{
  /* Update the value under mutex protection ... */
  SVN_ERR(svn_mutex__lock(counter->mutex));
  counter->value++;
  SVN_ERR(svn_mutex__unlock(counter->mutex, SVN_NO_ERROR));

  /* ... and wake up everybody waiting for the counter to change. */
  SVN_ERR(svn_thread_cond__broadcast(counter->cond));

  return SVN_NO_ERROR;
}
+
/* Efficiently wait for COUNTER to assume VALUE. */
static svn_error_t *
waitable_counter__wait_for(waitable_counter_t *counter,
                           int value)
{
  svn_boolean_t done = FALSE;

  /* This loop implicitly handles spurious wake-ups. */
  do
    {
      SVN_ERR(svn_mutex__lock(counter->mutex));

      if (counter->value == value)
        done = TRUE;
      else
        /* Releases the mutex while blocked and re-acquires it upon
         * wake-up (see svn_thread_cond__wait), so increments can
         * proceed while we wait. */
        SVN_ERR(svn_thread_cond__wait(counter->cond, counter->mutex));

      SVN_ERR(svn_mutex__unlock(counter->mutex, SVN_NO_ERROR));
    }
  while (!done);

  return SVN_NO_ERROR;
}
+
/* Set the value in COUNTER to 0. */
static svn_error_t *
waitable_counter__reset(waitable_counter_t *counter)
{
  SVN_ERR(svn_mutex__lock(counter->mutex));
  counter->value = 0;
  SVN_ERR(svn_mutex__unlock(counter->mutex, SVN_NO_ERROR));

  /* Wake up any thread that may be waiting for the value 0. */
  SVN_ERR(svn_thread_cond__broadcast(counter->cond));

  return SVN_NO_ERROR;
}
+
+/* Entry type for the svn_fs_x__batch_fsync_t collection.  There is one
+ * instance per file handle.
+ */
+typedef struct to_sync_t
+{
+  /* Open handle of the file / directory to fsync. */
+  apr_file_t *file;
+
+  /* Pool to use with FILE.  It is private to FILE such that it can be
+   * used safely together with FILE in a separate thread. */
+  apr_pool_t *pool;
+
+  /* Result of the file operations. */
+  svn_error_t *result;
+
+  /* Counter to increment when we completed the task. */
+  waitable_counter_t *counter;
+} to_sync_t;
+
+/* The actual collection object. */
+struct svn_fs_x__batch_fsync_t
+{
+  /* Maps open file handles: C-string path to to_sync_t *. */
+  apr_hash_t *files;
+
+  /* Counts the number of completed fsync tasks. */
+  waitable_counter_t *counter;
+};
+
/* Data structures for concurrent fsync execution are only available if
 * we have threading support.
 *
 * #if, not #ifdef: APR_HAS_THREADS is always defined (to 0 or 1) by
 * apr.h -- with #ifdef this section would be compiled unconditionally.
 */
#if APR_HAS_THREADS

/* Number of microseconds that an unused thread remains in the pool before
 * being terminated.
 *
 * Higher values are useful if clients frequently send small requests and
 * you want to minimize the latency for those.
 */
#define THREADPOOL_THREAD_IDLE_LIMIT 1000000

/* Maximum number of threads in THREAD_POOL, i.e. number of paths we can
 * fsync concurrently throughout the process. */
#define MAX_THREADS 16

/* Thread pool to execute the fsync tasks. */
static apr_thread_pool_t *thread_pool = NULL;

#endif
+
+/* We open non-directory files with these flags. */
+#define FILE_FLAGS (APR_READ | APR_WRITE | APR_BUFFERED | APR_CREATE)
+
+svn_error_t *
+svn_fs_x__batch_fsync_init(apr_pool_t *global_pool)
+{
+#ifdef APR_HAS_THREADS
+
+  /* This thread pool will get cleaned up automatically when GLOBAL_POOL
+     gets cleared.  No additional cleanup callback is needed. */
+  WRAP_APR_ERR(apr_thread_pool_create(&thread_pool, 0, MAX_THREADS,
+                                      global_pool),
+               _("Can't create fsync thread pool in FSX"));
+
+  /* let idle threads linger for a while in case more requests are
+     coming in */
+  apr_thread_pool_idle_wait_set(thread_pool, THREADPOOL_THREAD_IDLE_LIMIT);
+
+  /* don't queue requests unless we reached the worker thread limit */
+  apr_thread_pool_threshold_set(thread_pool, 0);
+
+#endif
+
+  return SVN_NO_ERROR;
+}
+
/* Destructor for svn_fs_x__batch_fsync_t.  Releases all global pool memory
 * and closes all open file handles. */
static apr_status_t
fsync_batch_cleanup(void *data)
{
  svn_fs_x__batch_fsync_t *batch = data;
  apr_hash_index_t *hi;

  /* Close all files (implicitly) and release memory. */
  for (hi = apr_hash_first(apr_hash_pool_get(batch->files), batch->files);
       hi;
       hi = apr_hash_next(hi))
    {
      to_sync_t *to_sync = apr_hash_this_val(hi);

      /* Destroying TO_SYNC->POOL implicitly closes TO_SYNC->FILE because
       * the file was opened from that pool (see internal_open_file). */
      svn_pool_destroy(to_sync->pool);
    }

  return APR_SUCCESS;
}
+
/* See batch_fsync.h: create an empty batch in RESULT_POOL. */
svn_error_t *
svn_fs_x__batch_fsync_create(svn_fs_x__batch_fsync_t **result_p,
                             apr_pool_t *result_pool)
{
  svn_fs_x__batch_fsync_t *result = apr_pcalloc(result_pool, sizeof(*result));
  result->files = svn_hash__make(result_pool);

  SVN_ERR(waitable_counter__create(&result->counter, result_pool));

  /* The entries in RESULT->FILES own private pools and OS file handles
   * (see internal_open_file); make sure they get released when
   * RESULT_POOL is cleaned up, even if the batch is never run. */
  apr_pool_cleanup_register(result_pool, result, fsync_batch_cleanup,
                            apr_pool_cleanup_null);

  *result_p = result;

  return SVN_NO_ERROR;
}
+
/* If BATCH does not contain a handle for PATH, yet, create one with FLAGS
 * and add it to BATCH.  Set *FILE to the open file handle.
 * Use SCRATCH_POOL for temporaries.
 */
static svn_error_t *
internal_open_file(apr_file_t **file,
                   svn_fs_x__batch_fsync_t *batch,
                   const char *path,
                   apr_int32_t flags,
                   apr_pool_t *scratch_pool)
{
  svn_error_t *err;
  apr_pool_t *pool;
  to_sync_t *to_sync;

  /* If we already have a handle for PATH, return that. */
  to_sync = svn_hash_gets(batch->files, path);
  if (to_sync)
    {
      *file = to_sync->file;
      return SVN_NO_ERROR;
    }

  /* To be able to process each file in a separate thread, they must use
   * separate, thread-safe pools.  Allocating a sub-pool from the standard
   * thread-pool achieves exactly that.
   * NOTE(review): svn_pool_create(NULL) creates an independent root pool;
   * it is released either in svn_fs_x__batch_fsync_run or by the
   * fsync_batch_cleanup pool destructor -- verify no code path leaks it. */
  pool = svn_pool_create(NULL);
  err = svn_io_file_open(file, path, flags, APR_OS_DEFAULT, pool);
  if (err)
    {
      /* Don't leak the private pool if the file could not be opened. */
      svn_pool_destroy(pool);
      return svn_error_trace(err);
    }

  to_sync = apr_pcalloc(pool, sizeof(*to_sync));
  to_sync->file = *file;
  to_sync->pool = pool;
  to_sync->result = SVN_NO_ERROR;
  to_sync->counter = batch->counter;

  /* The key must live at least as long as BATCH->FILES, hence the copy
   * into the hash's own pool. */
  svn_hash_sets(batch->files,
                apr_pstrdup(apr_hash_pool_get(batch->files), path),
                to_sync);

  return SVN_NO_ERROR;
}
+
/* See batch_fsync.h: open FILENAME for read/write and schedule it for
 * fsync in BATCH. */
svn_error_t *
svn_fs_x__batch_fsync_open_file(apr_file_t **file,
                                svn_fs_x__batch_fsync_t *batch,
                                const char *filename,
                                apr_pool_t *scratch_pool)
{
  apr_off_t offset = 0;

  SVN_ERR(internal_open_file(file, batch, filename, FILE_FLAGS,
                             scratch_pool));

  /* The handle may be reused from an earlier request on the same path;
   * rewind it so the caller always starts at the beginning of the file. */
  SVN_ERR(svn_io_file_seek(*file, APR_SET, &offset, scratch_pool));

  return SVN_NO_ERROR;
}
+
/* See batch_fsync.h: schedule the path affected by creating / renaming
 * PATH for fsync.  The FILE handle is only registered with BATCH here;
 * it is deliberately not returned to the caller. */
svn_error_t *
svn_fs_x__batch_fsync_new_path(svn_fs_x__batch_fsync_t *batch,
                               const char *path,
                               apr_pool_t *scratch_pool)
{
  apr_file_t *file;

#ifdef SVN_ON_POSIX

  /* On POSIX, we need to sync the parent directory because it contains
   * the name for the file / folder given by PATH. */
  path = svn_dirent_dirname(path, scratch_pool);
  SVN_ERR(internal_open_file(&file, batch, path, APR_READ, scratch_pool));

#else

  svn_node_kind_t kind;

  /* On non-POSIX systems, we assume that sync'ing the given PATH is the
   * right thing to do.  Also, we assume that only files may be sync'ed. */
  SVN_ERR(svn_io_check_path(path, &kind, scratch_pool));
  if (kind == svn_node_file)
    SVN_ERR(internal_open_file(&file, batch, path, FILE_FLAGS,
                               scratch_pool));

#endif

  return SVN_NO_ERROR;
}
+
/* Thread-pool task:  Flush the to_sync_t instance given by DATA.
 * TID is unused. */
static void * APR_THREAD_FUNC
flush_task(apr_thread_t *tid,
           void *data)
{
  svn_error_t *err;
  to_sync_t *to_sync = data;

  err = svn_error_trace(svn_io_file_flush_to_disk(to_sync->file,
                                                  to_sync->pool));

  /* Increment the completion counter even if the flush failed; otherwise
   * the scheduling thread would wait forever in
   * waitable_counter__wait_for. */
  to_sync->result = svn_error_compose_create
                       (err, waitable_counter__increment(to_sync->counter));

  return NULL;
}
+
/* See batch_fsync.h: flush all scheduled files to disk (concurrently,
 * if threads are available), close their handles and leave BATCH empty
 * and ready for reuse. */
svn_error_t *
svn_fs_x__batch_fsync_run(svn_fs_x__batch_fsync_t *batch,
                          apr_pool_t *scratch_pool)
{
  apr_hash_index_t *hi;

  /* Number of tasks sent to the thread pool. */
  int tasks = 0;

  /* Because we allocated the open files from our global pool, don't bail
   * out on the first error.  Instead, process all files but accumulate
   * the errors in this chain.
   */
  svn_error_t *chain = SVN_NO_ERROR;

  /* First, flush APR-internal buffers. This should minimize / prevent the
   * introduction of additional meta-data changes during the next phase.
   * We might otherwise issue redundant fsyncs.
   */
  for (hi = apr_hash_first(scratch_pool, batch->files);
       hi;
       hi = apr_hash_next(hi))
    {
      to_sync_t *to_sync = apr_hash_this_val(hi);

      /* NOTE(review): an error stored here is overwritten below by
       * flush_task (or the non-threaded branch) -- consider composing
       * the two results instead of assigning. */
      to_sync->result = svn_error_trace(svn_io_file_flush
                                           (to_sync->file, to_sync->pool));
    }

  /* Make sure the task completion counter is set to 0. */
  chain = svn_error_compose_create(chain,
                                   waitable_counter__reset(batch->counter));

  /* Start the actual fsyncing process. */
  for (hi = apr_hash_first(scratch_pool, batch->files);
       hi;
       hi = apr_hash_next(hi))
    {
      to_sync_t *to_sync = apr_hash_this_val(hi);

#if APR_HAS_THREADS

      /* Tasks that could not be pushed don't increment the counter,
       * so TASKS below only counts successfully scheduled flushes. */
      apr_status_t status = APR_SUCCESS;
      status = apr_thread_pool_push(thread_pool, flush_task, to_sync,
                                    0, NULL);
      if (status)
        to_sync->result = svn_error_wrap_apr(status, _("Can't push task"));
      else
        tasks++;

#else

      /* No threads: flush synchronously right here. */
      to_sync->result = svn_error_trace(svn_io_file_flush_to_disk
                                           (to_sync->file, to_sync->pool));

#endif
    }

  /* Wait for all outstanding flush operations to complete. */
  chain = svn_error_compose_create(chain,
                                   waitable_counter__wait_for(batch->counter,
                                                              tasks));

  /* Collect the results, close all files and release memory. */
  for (hi = apr_hash_first(scratch_pool, batch->files);
       hi;
       hi = apr_hash_next(hi))
    {
      to_sync_t *to_sync = apr_hash_this_val(hi);
      chain = svn_error_compose_create(chain, to_sync->result);
      chain = svn_error_compose_create(chain,
                                       svn_io_file_close(to_sync->file,
                                                         scratch_pool));
      svn_pool_destroy(to_sync->pool);
    }

  /* Don't process any file / folder twice. */
  apr_hash_clear(batch->files);

  /* Report the errors that we encountered. */
  return svn_error_trace(chain);
}

Propchange: subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.c
------------------------------------------------------------------------------
    svn:eol-style = native

Added: subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.h
URL: 
http://svn.apache.org/viewvc/subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.h?rev=1685985&view=auto
==============================================================================
--- subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.h (added)
+++ subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.h Wed Jun 
17 10:09:12 2015
@@ -0,0 +1,89 @@
+/* batch_fsync.h --- efficiently fsync multiple targets
+ *
+ * ====================================================================
+ *    Licensed to the Apache Software Foundation (ASF) under one
+ *    or more contributor license agreements.  See the NOTICE file
+ *    distributed with this work for additional information
+ *    regarding copyright ownership.  The ASF licenses this file
+ *    to you under the Apache License, Version 2.0 (the
+ *    "License"); you may not use this file except in compliance
+ *    with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing,
+ *    software distributed under the License is distributed on an
+ *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *    KIND, either express or implied.  See the License for the
+ *    specific language governing permissions and limitations
+ *    under the License.
+ * ====================================================================
+ */
+
+#ifndef SVN_LIBSVN_FS_X__BATCH_FSYNC_H
+#define SVN_LIBSVN_FS_X__BATCH_FSYNC_H
+
+#include "svn_error.h"
+
+/* Infrastructure for efficiently calling fsync on files and directories.
+ *
+ * The idea is to have a container of open file handles (including
+ * directory handles on POSIX), at most one per file.  During the course
+ * of an FS operation that needs to be fsync'ed, all touched files and
+ * folders accumulate in the container.
+ *
+ * At the end of the FS operation, all file changes will be written to the
+ * physical disk, once per file and folder.  Afterwards, all handles will
+ * be closed and the container is ready for reuse.
+ *
+ * To minimize the delay caused by the batch flush, run all fsync calls
+ * concurrently - if the OS supports multi-threading.
+ */
+
+/* Opaque container type.
+ */
+typedef struct svn_fs_x__batch_fsync_t svn_fs_x__batch_fsync_t;
+
+/* Initialize the concurrent fsync infrastructure.  It will automatically
+ * be cleaned up when GLOBAL_POOL is being cleaned up.
+ *
+ * This function must be called before using any of the other functions
+ * in this module.  It should only be called once.
+ */
+svn_error_t *
+svn_fs_x__batch_fsync_init(apr_pool_t *global_pool);
+
+/* Set *RESULT_P to a new batch fsync structure, allocated in RESULT_POOL. */
+svn_error_t *
+svn_fs_x__batch_fsync_create(svn_fs_x__batch_fsync_t **result_p,
+                             apr_pool_t *result_pool);
+
+/* Open the file at FILENAME for read and write access.  Return it in *FILE
+ * and schedule it for fsync in BATCH.  If BATCH already contains an open
+ * file for FILENAME, return that instead of creating a new instance.
+ *
+ * Use SCRATCH_POOL for temporaries. */
+svn_error_t *
+svn_fs_x__batch_fsync_open_file(apr_file_t **file,
+                                svn_fs_x__batch_fsync_t *batch,
+                                const char *filename,
+                                apr_pool_t *scratch_pool);
+
+/* Inform the BATCH that a file or directory has been created at PATH.
+ * "Created" means either newly created to renamed to PATH - even if another
+ * item with the same name existed before.  Depending on the OS, the correct
+ * path will be scheduled for fsync.
+ *
+ * Use SCRATCH_POOL for temporaries. */
+svn_error_t *
+svn_fs_x__batch_fsync_new_path(svn_fs_x__batch_fsync_t *batch,
+                               const char *path,
+                               apr_pool_t *scratch_pool);
+
+/* For all files and directories in BATCH, flush all changes to disk and
+ * close the file handles.  Use SCRATCH_POOL for temporaries. */
+svn_error_t *
+svn_fs_x__batch_fsync_run(svn_fs_x__batch_fsync_t *batch,
+                          apr_pool_t *scratch_pool);
+
+#endif

Propchange: subversion/branches/fsx-1.10/subversion/libsvn_fs_x/batch_fsync.h
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: subversion/branches/fsx-1.10/subversion/libsvn_fs_x/fs.c
URL: 
http://svn.apache.org/viewvc/subversion/branches/fsx-1.10/subversion/libsvn_fs_x/fs.c?rev=1685985&r1=1685984&r2=1685985&view=diff
==============================================================================
--- subversion/branches/fsx-1.10/subversion/libsvn_fs_x/fs.c (original)
+++ subversion/branches/fsx-1.10/subversion/libsvn_fs_x/fs.c Wed Jun 17 
10:09:12 2015
@@ -32,6 +32,7 @@
 #include "svn_delta.h"
 #include "svn_version.h"
 #include "svn_pools.h"
+#include "batch_fsync.h"
 #include "fs.h"
 #include "fs_x.h"
 #include "pack.h"
@@ -663,6 +664,8 @@ svn_fs_x__init(const svn_version_t *load
                              loader_version->major);
   SVN_ERR(svn_ver_check_list2(x_version(), checklist, svn_ver_equal));
 
+  SVN_ERR(svn_fs_x__batch_fsync_init(common_pool));
+
   *vtable = &library_vtable;
   return SVN_NO_ERROR;
 }

Modified: subversion/branches/fsx-1.10/subversion/libsvn_subr/mutex.c
URL: 
http://svn.apache.org/viewvc/subversion/branches/fsx-1.10/subversion/libsvn_subr/mutex.c?rev=1685985&r1=1685984&r2=1685985&view=diff
==============================================================================
--- subversion/branches/fsx-1.10/subversion/libsvn_subr/mutex.c (original)
+++ subversion/branches/fsx-1.10/subversion/libsvn_subr/mutex.c Wed Jun 17 
10:09:12 2015
@@ -105,3 +105,13 @@ svn_mutex__unlock(svn_mutex__t *mutex,
 
   return err;
 }
+
#if APR_HAS_THREADS

/* Return the APR mutex encapsulated in MUTEX.  As documented in
 * svn_mutex.h, this is intended only for APR wrapper code, e.g.
 * condition-variable support. */
apr_thread_mutex_t *
svn_mutex__get(svn_mutex__t *mutex)
{
  return mutex->mutex;
}

#endif

Modified: 
subversion/branches/fsx-1.10/subversion/tests/libsvn_fs_x/fs-x-pack-test.c
URL: 
http://svn.apache.org/viewvc/subversion/branches/fsx-1.10/subversion/tests/libsvn_fs_x/fs-x-pack-test.c?rev=1685985&r1=1685984&r2=1685985&view=diff
==============================================================================
--- subversion/branches/fsx-1.10/subversion/tests/libsvn_fs_x/fs-x-pack-test.c 
(original)
+++ subversion/branches/fsx-1.10/subversion/tests/libsvn_fs_x/fs-x-pack-test.c 
Wed Jun 17 10:09:12 2015
@@ -25,6 +25,7 @@
 #include <apr_pools.h>
 
 #include "../svn_test.h"
+#include "../../libsvn_fs_x/batch_fsync.h"
 #include "../../libsvn_fs_x/fs.h"
 #include "../../libsvn_fs_x/reps.h"
 
@@ -844,6 +845,87 @@ pack_shard_size_one(const svn_test_opts_
 #undef SHARD_SIZE
 #undef MAX_REV
 /* ------------------------------------------------------------------------ */
+#define REPO_NAME "test-repo-fsx-batch-fsync"
+static svn_error_t *
+test_batch_fsync(const svn_test_opts_t *opts,
+                 apr_pool_t *pool)
+{
+  const char *abspath;
+  svn_fs_x__batch_fsync_t *batch;
+  int i;
+
+  /* Create an empty working directory and let it be cleaned up by the test
+   * harness. */
+  SVN_ERR(svn_dirent_get_absolute(&abspath, REPO_NAME, pool));
+
+  SVN_ERR(svn_io_remove_dir2(abspath, TRUE, NULL, NULL, pool));
+  SVN_ERR(svn_io_make_dir_recursively(abspath, pool));
+  svn_test_add_dir_cleanup(abspath);
+
+  /* Initialize infrastructure with a pool that lives as long as this
+   * application. */
+  SVN_ERR(svn_fs_x__batch_fsync_init(svn_pool_create(NULL)));
+
+  /* We use and re-use the same batch object throughout this test. */
+  SVN_ERR(svn_fs_x__batch_fsync_create(&batch, pool));
+
+  /* The working directory is new. */
+  SVN_ERR(svn_fs_x__batch_fsync_new_path(batch, abspath, pool));
+
+  /* 1st run: Has to fire up worker threads etc. */
+  for (i = 0; i < 10; ++i)
+    {
+      apr_file_t *file;
+      const char *path = svn_dirent_join(abspath,
+                                         apr_psprintf(pool, "file%i", i),
+                                         pool);
+      apr_size_t len = strlen(path);
+
+      SVN_ERR(svn_fs_x__batch_fsync_open_file(&file, batch, path, pool));
+      SVN_ERR(svn_fs_x__batch_fsync_new_path(batch, path, pool));
+
+      SVN_ERR(svn_io_file_write(file, path, &len, pool));
+    }
+
+  SVN_ERR(svn_fs_x__batch_fsync_run(batch, pool));
+
+  /* 2nd run: Running a batch must leave the container in an empty,
+   * re-usable state. Hence, try to re-use it. */
+  for (i = 0; i < 10; ++i)
+    {
+      apr_file_t *file;
+      const char *path = svn_dirent_join(abspath,
+                                         apr_psprintf(pool, "new%i", i),
+                                         pool);
+      apr_size_t len = strlen(path);
+
+      SVN_ERR(svn_fs_x__batch_fsync_open_file(&file, batch, path, pool));
+      SVN_ERR(svn_fs_x__batch_fsync_new_path(batch, path, pool));
+
+      SVN_ERR(svn_io_file_write(file, path, &len, pool));
+    }
+
+  SVN_ERR(svn_fs_x__batch_fsync_run(batch, pool));
+
+  /* 3rd run: Schedule but don't execute. POOL cleanup shall not fail. */
+  for (i = 0; i < 10; ++i)
+    {
+      apr_file_t *file;
+      const char *path = svn_dirent_join(abspath,
+                                         apr_psprintf(pool, "another%i", i),
+                                         pool);
+      apr_size_t len = strlen(path);
+
+      SVN_ERR(svn_fs_x__batch_fsync_open_file(&file, batch, path, pool));
+      SVN_ERR(svn_fs_x__batch_fsync_new_path(batch, path, pool));
+
+      SVN_ERR(svn_io_file_write(file, path, &len, pool));
+    }
+
+  return SVN_NO_ERROR;
+}
+#undef REPO_NAME
+/* ------------------------------------------------------------------------ */
 
 /* The test table.  */
 
@@ -876,6 +958,8 @@ static struct svn_test_descriptor_t test
                        "test representations container"),
     SVN_TEST_OPTS_PASS(pack_shard_size_one,
                        "test packing with shard size = 1"),
+    SVN_TEST_OPTS_PASS(test_batch_fsync,
+                       "test batch fsync"),
     SVN_TEST_NULL
   };
 


Reply via email to