Hello community,

here is the log from the commit of package s3backer for openSUSE:Factory 
checked in at 2020-10-02 17:40:24
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/s3backer (Old)
 and      /work/SRC/openSUSE:Factory/.s3backer.new.4249 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "s3backer"

Fri Oct  2 17:40:24 2020 rev:18 rq:839054 version:1.5.5

Changes:
--------
--- /work/SRC/openSUSE:Factory/s3backer/s3backer.changes        2019-10-09 
15:19:43.540456811 +0200
+++ /work/SRC/openSUSE:Factory/.s3backer.new.4249/s3backer.changes      
2020-10-02 17:41:28.978888166 +0200
@@ -1,0 +2,9 @@
+Sat Aug 22 17:43:05 UTC 2020 - Archie Cobbs <archie.co...@gmail.com>
+
+- Upgrade to release 1.5.5
+  + Added `--no-vhost' flag (issue #117)
+  + Added `--blockCacheNumProtected' flag (pr #119)
+  + Added `--test-errors', `--test-delays', and `--test-discard'
+  + Disallow stream encryption ciphers (issue #123)
+
+-------------------------------------------------------------------

Old:
----
  s3backer-1.5.4.tar.gz

New:
----
  s3backer-1.5.5.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ s3backer.spec ++++++
--- /var/tmp/diff_new_pack.klDWci/_old  2020-10-02 17:41:30.814889261 +0200
+++ /var/tmp/diff_new_pack.klDWci/_new  2020-10-02 17:41:30.818889264 +0200
@@ -1,7 +1,7 @@
 #
 # spec file for package s3backer
 #
-# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2020 SUSE LLC
 # Copyright 2008 Archie L. Cobbs.
 #
 # All modifications and additions to the file contributed by third parties
@@ -13,18 +13,18 @@
 # license that conforms to the Open Source Definition (Version 1.9)
 # published by the Open Source Initiative.
 
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
 #
 
 
 Name:           s3backer
-Version:        1.5.4
+Version:        1.5.5
 Release:        0
 Summary:        FUSE-based single file backing store via Amazon S3
 License:        GPL-2.0-or-later
 Group:          System/Filesystems
 Source:         
https://s3.amazonaws.com/archie-public/%{name}/%{name}-%{version}.tar.gz
-Url:            https://github.com/archiecobbs/%{name}
+URL:            https://github.com/archiecobbs/%{name}
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 %if 0%{?suse_version} >= 1100
 BuildRequires:  libcurl-devel >= 7.16.2

++++++ s3backer-1.5.4.tar.gz -> s3backer-1.5.5.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/CHANGES new/s3backer-1.5.5/CHANGES
--- old/s3backer-1.5.4/CHANGES  2019-10-08 19:18:16.000000000 +0200
+++ new/s3backer-1.5.5/CHANGES  2020-08-22 19:38:04.000000000 +0200
@@ -1,3 +1,10 @@
+Version 1.5.5 released August 22, 2020
+
+    - Added `--no-vhost' flag (issue #117)
+    - Added `--blockCacheNumProtected' flag (pr #119)
+    - Added `--test-errors', `--test-delays', and `--test-discard'
+    - Disallow stream encryption ciphers (issue #123)
+
 Version 1.5.4 released October 8, 2019
 
     - Only set "x-amz-server-side-encryption" header with PUT requests (issue 
#116)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/block_cache.c 
new/s3backer-1.5.5/block_cache.c
--- old/s3backer-1.5.4/block_cache.c    2019-07-31 19:16:00.000000000 +0200
+++ new/s3backer-1.5.5/block_cache.c    2020-07-29 16:39:02.000000000 +0200
@@ -57,6 +57,9 @@
  * used to most recently used (where 'used' means either read or written). 
CLEAN2 is the
  * same as CLEAN except that the data must be MD5 verified before being used.
  *
+ * The linked list for CLEAN/CLEAN2 blocks is actually two lists, hi_cleans 
and lo_cleans.
+ * This allows us to evict "low priority" blocks before "high priority" blocks.
+ *
  * Blocks in the DIRTY state are linked in a list in the order they should be 
written.
  * A pool of worker threads picks them off and writes them through to the 
underlying
  * s3backer_store; while being written they are in state WRITING, or WRITING2 
if another
@@ -130,7 +133,7 @@
 #define ENTRY_IN_LIST(entry)                ((entry)->link.tqe_prev != NULL)
 #define ENTRY_RESET_LINK(entry)             do { (entry)->link.tqe_prev = 
NULL; } while (0)
 #define ENTRY_GET_STATE(entry)              (ENTRY_IN_LIST(entry) ?            
                 \
-                                                ((entry)->dirty ? DIRTY :      
          \
+                                                ((entry)->dirty ? DIRTY :      
                 \
                                                   ((entry)->verify ? CLEAN2 : 
CLEAN)) :         \
                                                 ((entry)->timeout == 
READING_TIMEOUT ?          \
                                                   ((entry)->verify ? READING2 
: READING) :      \
@@ -145,16 +148,20 @@
 /* Special timeout value for entries in state READING and READING2 */
 #define READING_TIMEOUT             ((uint32_t)0x3fffffff)
 
+/* Declare the list "head" struct */
+TAILQ_HEAD(list_head, cache_entry);
+
 /* Private data */
 struct block_cache_private {
     struct block_cache_conf         *config;        // configuration
     struct s3backer_store           *inner;         // underlying s3backer 
store
     struct block_cache_stats        stats;          // statistics
-    TAILQ_HEAD(, cache_entry)       cleans;         // list of clean blocks 
(LRU order)
-    TAILQ_HEAD(, cache_entry)       dirties;        // list of dirty blocks 
(write order)
+    struct list_head                lo_cleans;      // list of low priority 
clean blocks (LRU order)
+    struct list_head                hi_cleans;      // list of high priority 
clean blocks (LRU order)
+    struct list_head                dirties;        // list of dirty blocks 
(write order)
     struct s3b_hash                 *hashtable;     // hashtable of all cached 
blocks
     struct s3b_dcache               *dcache;        // on-disk persistent cache
-    u_int                           num_cleans;     // length of the 'cleans' 
list
+    u_int                           num_cleans;     // combined lengths of 
'lo_cleans' and 'hi_cleans'
     u_int                           num_dirties;    // # blocks that are 
DIRTY, WRITING, or WRITING2
     u_int64_t                       start_time;     // when we started
     u_int32_t                       clean_timeout;  // timeout for clean 
entries in time units
@@ -208,6 +215,8 @@
 static void block_cache_dirty_callback(void *arg, void *value);
 static double block_cache_dirty_ratio(struct block_cache_private *priv);
 static void block_cache_worker_wait(struct block_cache_private *priv, struct 
cache_entry *entry);
+static struct list_head *block_cache_cleans_list(struct block_cache_private 
*priv, s3b_block_t block_num);
+static int block_cache_high_prio(struct block_cache_conf *conf, s3b_block_t 
block_num);
 static uint32_t block_cache_get_time(struct block_cache_private *priv);
 static uint64_t block_cache_get_time_millis(void);
 static int block_cache_read_data(struct block_cache_private *priv, struct 
cache_entry *entry, void *dest, u_int off, u_int len);
@@ -277,7 +286,8 @@
         goto fail6;
     if ((r = pthread_cond_init(&priv->write_complete, NULL)) != 0)
         goto fail7;
-    TAILQ_INIT(&priv->cleans);
+    TAILQ_INIT(&priv->lo_cleans);
+    TAILQ_INIT(&priv->hi_cleans);
     TAILQ_INIT(&priv->dirties);
     if ((r = s3b_hash_create(&priv->hashtable, config->cache_size)) != 0)
         goto fail8;
@@ -308,11 +318,16 @@
 
 fail9:
     if (config->cache_file != NULL) {
-        while ((entry = TAILQ_FIRST(&priv->cleans)) != NULL) {
-            TAILQ_REMOVE(&priv->cleans, entry, link);
+        while ((entry = TAILQ_FIRST(&priv->lo_cleans)) != NULL) {
+            TAILQ_REMOVE(&priv->lo_cleans, entry, link);
             free(entry);
         }
-        s3b_dcache_close(priv->dcache);
+        while ((entry = TAILQ_FIRST(&priv->hi_cleans)) != NULL) {
+            TAILQ_REMOVE(&priv->hi_cleans, entry, link);
+            free(entry);
+        }
+        if (priv->dcache != NULL)
+            s3b_dcache_close(priv->dcache);
     }
     s3b_hash_destroy(priv->hashtable);
 fail8:
@@ -346,6 +361,7 @@
     const u_int dirty = md5 == NULL;
     struct block_cache_private *const priv = arg;
     struct block_cache_conf *const config = priv->config;
+    struct list_head *const cleans_list = block_cache_cleans_list(priv, 
block_num);
     struct cache_entry *entry;
     int r;
 
@@ -382,7 +398,7 @@
         entry->verify = !config->no_verify;
         if (entry->verify)
             memcpy(&entry->md5, md5, MD5_DIGEST_LENGTH);
-        TAILQ_INSERT_TAIL(&priv->cleans, entry, link);
+        TAILQ_INSERT_TAIL(cleans_list, entry, link);
         priv->num_cleans++;
         assert(ENTRY_GET_STATE(entry) == (config->no_verify ? CLEAN : CLEAN2));
     }
@@ -610,6 +626,7 @@
 block_cache_do_read(struct block_cache_private *const priv, s3b_block_t 
block_num, u_int off, u_int len, void *dest, int stats)
 {
     struct block_cache_conf *const config = priv->config;
+    struct list_head *const cleans_list = block_cache_cleans_list(priv, 
block_num);
     struct cache_entry *entry;
     u_char md5[MD5_DIGEST_LENGTH];
     int verified_but_not_read = 0;
@@ -647,7 +664,7 @@
                 if ((r = s3b_dcache_erase_block(priv->dcache, entry->u.dslot)) 
!= 0)
                     (*config->log)(LOG_ERR, "can't erase cached block! %s", 
strerror(r));
             }
-            TAILQ_REMOVE(&priv->cleans, entry, link);
+            TAILQ_REMOVE(cleans_list, entry, link);
             ENTRY_RESET_LINK(entry);
             priv->num_cleans--;
             entry->timeout = READING_TIMEOUT;
@@ -657,8 +674,8 @@
             /* Now go read/verify the data */
             goto read;
         case CLEAN:         /* Update timestamp and move to the end of the 
list to maintain LRU ordering */
-            TAILQ_REMOVE(&priv->cleans, entry, link);
-            TAILQ_INSERT_TAIL(&priv->cleans, entry, link);
+            TAILQ_REMOVE(cleans_list, entry, link);
+            TAILQ_INSERT_TAIL(cleans_list, entry, link);
             entry->timeout = block_cache_get_time(priv) + priv->clean_timeout;
             // FALLTHROUGH
         case DIRTY:         /* Copy the cached data */
@@ -757,7 +774,7 @@
             (*config->log)(LOG_ERR, "can't record cached block! %s", 
strerror(r));
     }
     entry->timeout = block_cache_get_time(priv) + priv->clean_timeout;
-    TAILQ_INSERT_TAIL(&priv->cleans, entry, link);
+    TAILQ_INSERT_TAIL(cleans_list, entry, link);
     priv->num_cleans++;
     assert(ENTRY_GET_STATE(entry) == CLEAN);
 
@@ -805,6 +822,7 @@
 block_cache_write(struct block_cache_private *const priv, s3b_block_t 
block_num, u_int off, u_int len, const void *src)
 {
     struct block_cache_conf *const config = priv->config;
+    struct list_head *const cleans_list = block_cache_cleans_list(priv, 
block_num);
     struct cache_entry *entry;
     int partial_miss = 0;
     int r;
@@ -853,7 +871,7 @@
             }
 
             /* Change from CLEAN to DIRTY */
-            TAILQ_REMOVE(&priv->cleans, entry, link);
+            TAILQ_REMOVE(cleans_list, entry, link);
             priv->num_cleans--;
             TAILQ_INSERT_TAIL(&priv->dirties, entry, link);
             priv->num_dirties++;
@@ -988,7 +1006,8 @@
      * and the data separately in hopes that the malloc() implementation will
      * put the data into its own page of virtual memory.
      *
-     * If the cache is full, try to evict a clean entry.
+     * If the cache is full, try to evict a clean entry. Evict low priority
+     * blocks before high priority blocks.
      */
     if (s3b_hash_size(priv->hashtable) < config->cache_size) {
         if ((entry = calloc(1, sizeof(*entry))) == NULL) {
@@ -997,7 +1016,10 @@
             priv->stats.out_of_memory_errors++;
             return r;
         }
-    } else if ((entry = TAILQ_FIRST(&priv->cleans)) != NULL) {
+    } else if ((entry = TAILQ_FIRST(&priv->lo_cleans)) != NULL) {
+        block_cache_free_entry(priv, &entry);
+        goto again;
+    } else if ((entry = TAILQ_FIRST(&priv->hi_cleans)) != NULL) {
         block_cache_free_entry(priv, &entry);
         goto again;
     } else
@@ -1042,6 +1064,7 @@
 {
     struct block_cache_conf *const config = priv->config;
     struct cache_entry *const entry = *entryp;
+    struct list_head *const cleans_list = block_cache_cleans_list(priv, 
entry->block_num);
     int r;
 
     /* Sanity check */
@@ -1060,7 +1083,7 @@
         free(entry->u.data);
 
     /* Remove entry from the clean list */
-    TAILQ_REMOVE(&priv->cleans, entry, link);
+    TAILQ_REMOVE(cleans_list, entry, link);
     s3b_hash_remove(priv->hashtable, entry->block_num);
     priv->num_cleans--;
 
@@ -1078,6 +1101,7 @@
     struct block_cache_conf *const config = priv->config;
     struct cache_entry *entry;
     struct cache_entry *clean_entry = NULL;
+    struct list_head *cleans_list;
     u_char md5[MD5_DIGEST_LENGTH];
     uint32_t adjusted_now;
     uint32_t now;
@@ -1109,9 +1133,13 @@
         /* Get current time */
         now = block_cache_get_time(priv);
 
-        /* Evict any CLEAN[2] blocks that have timed out (if enabled) */
+        /* Evict any CLEAN[2] blocks that have timed out (if enabled), low priority blocks first */
         if (priv->clean_timeout != 0) {
-            while ((clean_entry = TAILQ_FIRST(&priv->cleans)) != NULL && now 
>= clean_entry->timeout) {
+            while ((clean_entry = TAILQ_FIRST(&priv->lo_cleans)) != NULL && 
now >= clean_entry->timeout) {
+                block_cache_free_entry(priv, &clean_entry);
+                pthread_cond_signal(&priv->space_avail);
+            }
+            while ((clean_entry = TAILQ_FIRST(&priv->hi_cleans)) != NULL && 
now >= clean_entry->timeout) {
                 block_cache_free_entry(priv, &clean_entry);
                 pthread_cond_signal(&priv->space_avail);
             }
@@ -1165,7 +1193,8 @@
                         (*config->log)(LOG_ERR, "can't record cached block! 
%s", strerror(r));
                 }
                 priv->num_dirties--;
-                TAILQ_INSERT_TAIL(&priv->cleans, entry, link);
+                cleans_list = block_cache_cleans_list(priv, entry->block_num);
+                TAILQ_INSERT_TAIL(cleans_list, entry, link);
                 entry->verify = 0;
                 entry->timeout = block_cache_get_time(priv) + 
priv->clean_timeout;
                 priv->num_cleans++;
@@ -1286,6 +1315,26 @@
 }
 
 /*
+ * Get the head of the appropriate clean list, based on whether the block is 
low or high priority.
+ */
+static struct list_head *
+block_cache_cleans_list(struct block_cache_private *const priv, s3b_block_t 
block_num)
+{
+    return block_cache_high_prio(priv->config, block_num) ? &priv->hi_cleans : 
&priv->lo_cleans;
+}
+
+/*
+ * Classify a block as either low or high priority.
+ *
+ * NOTE: this function must always return the same value for any given block 
number.
+ */
+static int
+block_cache_high_prio(struct block_cache_conf *const config, s3b_block_t 
block_num)
+{
+    return block_num < config->num_protected;
+}
+
+/*
  * Return current time in milliseconds.
  */
 static uint64_t
@@ -1315,6 +1364,7 @@
 static struct cache_entry *
 block_cache_verified(struct block_cache_private *priv, struct cache_entry 
*entry)
 {
+    struct list_head *const cleans_list = block_cache_cleans_list(priv, 
entry->block_num);
     struct cache_entry *new_entry;
 
     /* Sanity check */
@@ -1329,8 +1379,8 @@
     /* Update all references that point to the entry */
     s3b_hash_put(priv->hashtable, new_entry);
     if (ENTRY_IN_LIST(entry)) {
-        TAILQ_REMOVE(&priv->cleans, entry, link);
-        TAILQ_INSERT_TAIL(&priv->cleans, new_entry, link);
+        TAILQ_REMOVE(cleans_list, entry, link);
+        TAILQ_INSERT_TAIL(cleans_list, new_entry, link);
     }
     free(entry);
     entry = new_entry;
@@ -1446,9 +1496,16 @@
     int dirty_len = 0;
 
     /* Check CLEANs and CLEAN2s */
-    for (entry = TAILQ_FIRST(&priv->cleans); entry != NULL; entry = 
TAILQ_NEXT(entry, link)) {
+    for (entry = TAILQ_FIRST(&priv->lo_cleans); entry != NULL; entry = 
TAILQ_NEXT(entry, link)) {
+        assert(ENTRY_GET_STATE(entry) == CLEAN || ENTRY_GET_STATE(entry) == 
CLEAN2);
+        assert(s3b_hash_get(priv->hashtable, entry->block_num) == entry);
+        assert(!block_cache_high_prio(config, entry->block_num));
+        clean_len++;
+    }
+    for (entry = TAILQ_FIRST(&priv->hi_cleans); entry != NULL; entry = 
TAILQ_NEXT(entry, link)) {
         assert(ENTRY_GET_STATE(entry) == CLEAN || ENTRY_GET_STATE(entry) == 
CLEAN2);
         assert(s3b_hash_get(priv->hashtable, entry->block_num) == entry);
+        assert(block_cache_high_prio(config, entry->block_num));
         clean_len++;
     }
     assert(clean_len == priv->num_cleans);
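
The block_cache.c changes above implement `--blockCacheNumProtected' by splitting the
clean-block LRU into two TAILQ lists, with low priority blocks always evicted before
protected ("high priority") ones. The following is a minimal standalone sketch of that
selection logic; it uses simplified stand-in types rather than s3backer's real
struct cache_entry and struct block_cache_private:

/* Sketch only: simplified stand-ins for the real s3backer cache structures. */
#include <stddef.h>
#include <sys/queue.h>

typedef unsigned int s3b_block_t;

struct entry {
    s3b_block_t block_num;
    TAILQ_ENTRY(entry) link;
};
TAILQ_HEAD(list_head, entry);

struct cache {
    struct list_head lo_cleans;     /* evicted first */
    struct list_head hi_cleans;     /* evicted only when lo_cleans is empty */
    unsigned int num_protected;     /* value of --blockCacheNumProtected */
};

/* Blocks below the protected threshold count as high priority
 * (the same rule as block_cache_high_prio() in the diff above). */
static int
high_prio(const struct cache *c, s3b_block_t block_num)
{
    return block_num < c->num_protected;
}

/* Append a clean entry to the list matching its priority. */
static void
insert_clean(struct cache *c, struct entry *e)
{
    TAILQ_INSERT_TAIL(high_prio(c, e->block_num) ? &c->hi_cleans : &c->lo_cleans, e, link);
}

/* Choose an eviction victim: drain low priority blocks before protected ones. */
static struct entry *
eviction_victim(struct cache *c)
{
    struct entry *e = TAILQ_FIRST(&c->lo_cleans);
    return e != NULL ? e : TAILQ_FIRST(&c->hi_cleans);
}

int
main(void)
{
    struct cache c = { .num_protected = 1000 };
    struct entry low = { .block_num = 5000 };
    struct entry high = { .block_num = 3 };

    TAILQ_INIT(&c.lo_cleans);
    TAILQ_INIT(&c.hi_cleans);
    insert_clean(&c, &low);
    insert_clean(&c, &high);
    return eviction_victim(&c) == &low ? 0 : 1;     /* low priority block goes first */
}
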
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/block_cache.h 
new/s3backer-1.5.5/block_cache.h
--- old/s3backer-1.5.4/block_cache.h    2019-03-07 02:43:38.000000000 +0100
+++ new/s3backer-1.5.5/block_cache.h    2019-12-11 23:02:42.000000000 +0100
@@ -48,6 +48,7 @@
     u_int               no_verify;
     u_int               recover_dirty_blocks;
     u_int               perform_flush;
+    u_int               num_protected;
     const char          *cache_file;
     log_func_t          *log;
 };
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/configure new/s3backer-1.5.5/configure
--- old/s3backer-1.5.4/configure        2019-10-08 19:19:47.000000000 +0200
+++ new/s3backer-1.5.5/configure        2020-08-22 19:39:04.000000000 +0200
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for s3backer FUSE filesystem backed by Amazon 
S3 1.5.4.
+# Generated by GNU Autoconf 2.69 for s3backer FUSE filesystem backed by Amazon 
S3 1.5.5.
 #
 # Report bugs to <https://github.com/archiecobbs/s3backer>.
 #
@@ -580,8 +580,8 @@
 # Identity of this package.
 PACKAGE_NAME='s3backer FUSE filesystem backed by Amazon S3'
 PACKAGE_TARNAME='s3backer'
-PACKAGE_VERSION='1.5.4'
-PACKAGE_STRING='s3backer FUSE filesystem backed by Amazon S3 1.5.4'
+PACKAGE_VERSION='1.5.5'
+PACKAGE_STRING='s3backer FUSE filesystem backed by Amazon S3 1.5.5'
 PACKAGE_BUGREPORT='https://github.com/archiecobbs/s3backer'
 PACKAGE_URL=''
 
@@ -1280,7 +1280,7 @@
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures s3backer FUSE filesystem backed by Amazon S3 1.5.4 to 
adapt to many kinds of systems.
+\`configure' configures s3backer FUSE filesystem backed by Amazon S3 1.5.5 to 
adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1346,7 +1346,7 @@
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of s3backer FUSE filesystem 
backed by Amazon S3 1.5.4:";;
+     short | recursive ) echo "Configuration of s3backer FUSE filesystem 
backed by Amazon S3 1.5.5:";;
    esac
   cat <<\_ACEOF
 
@@ -1449,7 +1449,7 @@
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-s3backer FUSE filesystem backed by Amazon S3 configure 1.5.4
+s3backer FUSE filesystem backed by Amazon S3 configure 1.5.5
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1797,7 +1797,7 @@
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by s3backer FUSE filesystem backed by Amazon S3 $as_me 1.5.4, 
which was
+It was created by s3backer FUSE filesystem backed by Amazon S3 $as_me 1.5.5, 
which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -2660,7 +2660,7 @@
 
 # Define the identity of the package.
  PACKAGE='s3backer'
- VERSION='1.5.4'
+ VERSION='1.5.5'
 
 
 cat >>confdefs.h <<_ACEOF
@@ -5498,7 +5498,7 @@
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by s3backer FUSE filesystem backed by Amazon S3 $as_me 
1.5.4, which was
+This file was extended by s3backer FUSE filesystem backed by Amazon S3 $as_me 
1.5.5, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -5564,7 +5564,7 @@
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; 
s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-s3backer FUSE filesystem backed by Amazon S3 config.status 1.5.4
+s3backer FUSE filesystem backed by Amazon S3 config.status 1.5.5
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/configure.ac 
new/s3backer-1.5.5/configure.ac
--- old/s3backer-1.5.4/configure.ac     2019-10-08 19:18:57.000000000 +0200
+++ new/s3backer-1.5.5/configure.ac     2020-08-22 19:38:04.000000000 +0200
@@ -32,7 +32,7 @@
 # this exception statement from all source files in the program, then
 # also delete it here.
 
-AC_INIT([s3backer FUSE filesystem backed by Amazon S3], [1.5.4], 
[https://github.com/archiecobbs/s3backer], [s3backer])
+AC_INIT([s3backer FUSE filesystem backed by Amazon S3], [1.5.5], 
[https://github.com/archiecobbs/s3backer], [s3backer])
 AC_CONFIG_AUX_DIR(scripts)
 AM_INIT_AUTOMAKE(foreign)
 dnl AM_MAINTAINER_MODE
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/erase.c new/s3backer-1.5.5/erase.c
--- old/s3backer-1.5.4/erase.c  2018-05-31 00:09:31.000000000 +0200
+++ new/s3backer-1.5.5/erase.c  2020-04-19 21:08:47.000000000 +0200
@@ -117,7 +117,7 @@
     }
 
     /* Create temporary lower layer */
-    if ((priv->s3b = config->test ? test_io_create(&config->http_io) : 
http_io_create(&config->http_io)) == NULL) {
+    if ((priv->s3b = config->test ? test_io_create(&config->test_io) : 
http_io_create(&config->http_io)) == NULL) {
         warnx(config->test ? "test_io_create" : "http_io_create");
         goto fail3;
     }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/fuse_ops.c 
new/s3backer-1.5.5/fuse_ops.c
--- old/s3backer-1.5.4/fuse_ops.c       2019-07-09 20:49:24.000000000 +0200
+++ new/s3backer-1.5.5/fuse_ops.c       2020-04-19 21:08:47.000000000 +0200
@@ -39,6 +39,7 @@
 #include "ec_protect.h"
 #include "fuse_ops.h"
 #include "http_io.h"
+#include "test_io.h"
 #include "s3b_config.h"
 
 /****************************************************************************
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/gitrev.c new/s3backer-1.5.5/gitrev.c
--- old/s3backer-1.5.4/gitrev.c 2019-10-08 19:19:50.000000000 +0200
+++ new/s3backer-1.5.5/gitrev.c 2020-08-22 19:39:07.000000000 +0200
@@ -1 +1 @@
-const char *const s3backer_version = "1.5.4";
+const char *const s3backer_version = "1.5.5";
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/http_io.c new/s3backer-1.5.5/http_io.c
--- old/s3backer-1.5.4/http_io.c        2019-10-01 17:00:44.000000000 +0200
+++ new/s3backer-1.5.5/http_io.c        2020-04-19 21:08:47.000000000 +0200
@@ -276,7 +276,8 @@
 static void http_io_openssl_locker(int mode, int i, const char *file, int 
line);
 static u_long http_io_openssl_ider(void);
 static void http_io_base64_encode(char *buf, size_t bufsiz, const void *data, 
size_t len);
-static u_int http_io_crypt(struct http_io_private *priv, s3b_block_t 
block_num, int enc, const u_char *src, u_int len, u_char *dst);
+static u_int http_io_crypt(struct http_io_private *priv,
+    s3b_block_t block_num, int enc, const u_char *src, u_int len, u_char *dst, 
u_int dmax);
 static void http_io_authsig(struct http_io_private *priv, s3b_block_t 
block_num, const u_char *src, u_int len, u_char *hmac);
 static void update_hmac_from_header(HMAC_CTX *ctx, struct http_io *io,
   const char *name, int value_only, char *sigbuf, size_t sigbuflen);
@@ -362,6 +363,8 @@
     if (config->encryption != NULL) {
         char saltbuf[strlen(config->bucket) + 1 + strlen(config->prefix) + 1];
         u_int cipher_key_len;
+        u_int cipher_block_size;
+        u_int cipher_iv_length;
 
         /* Sanity checks */
         assert(config->password != NULL);
@@ -382,6 +385,16 @@
             goto fail4;
         }
 
+        /* Sanity check cipher is a block cipher */
+        cipher_block_size = EVP_CIPHER_block_size(priv->cipher);
+        cipher_iv_length = EVP_CIPHER_iv_length(priv->cipher);
+        if (cipher_block_size <= 1 || cipher_block_size != cipher_iv_length) {
+            (*config->log)(LOG_ERR, "invalid cipher `%s' (block size %u, IV 
length %u); only block ciphers are supported",
+              config->encryption, cipher_block_size, cipher_iv_length);
+            r = EINVAL;
+            goto fail4;
+        }
+
         /* Hash password to get bulk data encryption key */
         snprintf(saltbuf, sizeof(saltbuf), "%s/%s", config->bucket, 
config->prefix);
         if ((r = PKCS5_PBKDF2_HMAC_SHA1(config->password, 
strlen(config->password),
@@ -581,7 +594,7 @@
         if (io.list_truncated) {
             char block_hash_buf[S3B_BLOCK_NUM_DIGITS + 2];
 
-            http_io_format_block_hash(config, block_hash_buf, 
sizeof(block_hash_buf), io.last_block);
+            http_io_format_block_hash(config->blockHashPrefix, block_hash_buf, 
sizeof(block_hash_buf), io.last_block);
             snprintf(urlbuf + strlen(urlbuf), sizeof(urlbuf) - strlen(urlbuf), 
"%s=%s%s%0*jx&",
               LIST_PARAM_MARKER, url_encoded_prefix, block_hash_buf, 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)io.last_block);
         }
@@ -719,7 +732,7 @@
 #endif
 
         /* Attempt to parse key as a block's object name */
-        if (http_io_parse_block(config, io->xml_text, &block_num) == 0) {
+        if (http_io_parse_block(config->prefix, config->num_blocks, 
config->blockHashPrefix, io->xml_text, &block_num) == 0) {
 #if DEBUG_BLOCK_LIST
             (*config->log)(LOG_DEBUG, "list: parsed key=\"%s\" -> block=%0*jx",
               io->xml_text, S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
@@ -777,19 +790,19 @@
  * Parse a block's item name (including prefix and block hash prefix if any) 
and returns the result in *block_nump.
  */
 int
-http_io_parse_block(struct http_io_conf *config, const char *name, s3b_block_t 
*block_nump)
+http_io_parse_block(const char *prefix, off_t num_blocks, int blockHashPrefix, 
const char *name, s3b_block_t *block_nump)
 {
-    const size_t plen = strlen(config->prefix);
+    const size_t plen = strlen(prefix);
     s3b_block_t hash_value = 0;
     s3b_block_t block_num = 0;
 
     /* Parse prefix */
-    if (strncmp(name, config->prefix, plen) != 0)
+    if (strncmp(name, prefix, plen) != 0)
         return -1;
     name += plen;
 
     /* Parse block hash prefix followed by dash (if so configured) */
-    if (config->blockHashPrefix) {
+    if (blockHashPrefix) {
         if (http_io_parse_hex_block_num(name, &hash_value) == -1)
             return -1;
         name += S3B_BLOCK_NUM_DIGITS;
@@ -801,11 +814,11 @@
     if (http_io_parse_hex_block_num(name, &block_num) == -1)
         return -1;
     name += S3B_BLOCK_NUM_DIGITS;
-    if (*name != '\0' || block_num >= config->num_blocks)
+    if (*name != '\0' || block_num >= num_blocks)
         return -1;
 
     /* Verify hash matches what's expected */
-    if (config->blockHashPrefix && hash_value != 
http_io_block_hash_prefix(block_num))
+    if (blockHashPrefix && hash_value != http_io_block_hash_prefix(block_num))
         return -1;
 
     /* Done */
@@ -849,10 +862,10 @@
  * Ref: 
https://crypto.stackexchange.com/questions/16219/cryptographic-hash-function-for-32-bit-length-input-keys
  */
 void
-http_io_format_block_hash(const struct http_io_conf *const config, char *buf, 
size_t bufsiz, s3b_block_t block_num)
+http_io_format_block_hash(int blockHashPrefix, char *buf, size_t bufsiz, 
s3b_block_t block_num)
 {
     assert(bufsiz >= S3B_BLOCK_NUM_DIGITS + 2);
-    if (config->blockHashPrefix)
+    if (blockHashPrefix)
         snprintf(buf, bufsiz, "%0*jx-", S3B_BLOCK_NUM_DIGITS, 
(uintmax_t)http_io_block_hash_prefix(block_num));
     else
         *buf = '\0';
@@ -1315,6 +1328,7 @@
         if (strncasecmp(layer, CONTENT_ENCODING_ENCRYPT "-", 
sizeof(CONTENT_ENCODING_ENCRYPT)) == 0) {
             const char *const block_cipher = layer + 
sizeof(CONTENT_ENCODING_ENCRYPT);
             u_char hmac[SHA_DIGEST_LENGTH];
+            u_int decrypt_buflen;
             u_char *buf;
 
             /* Encryption must be enabled */
@@ -1349,7 +1363,8 @@
             }
 
             /* Allocate buffer for the decrypted data */
-            if ((buf = malloc(did_read + EVP_MAX_IV_LENGTH)) == NULL) {
+            decrypt_buflen = did_read + EVP_MAX_IV_LENGTH;
+            if ((buf = malloc(decrypt_buflen)) == NULL) {
                 (*config->log)(LOG_ERR, "malloc: %s", strerror(errno));
                 pthread_mutex_lock(&priv->mutex);
                 priv->stats.out_of_memory_errors++;
@@ -1359,7 +1374,7 @@
             }
 
             /* Decrypt the block */
-            did_read = http_io_crypt(priv, block_num, 0, io.dest, did_read, 
buf);
+            did_read = http_io_crypt(priv, block_num, 0, io.dest, did_read, 
buf, decrypt_buflen);
             memcpy(io.dest, buf, did_read);
             free(buf);
 
@@ -1617,9 +1632,11 @@
     if (src != NULL && config->encryption != NULL) {
         void *encrypt_buf;
         u_int encrypt_len;
+        u_int encrypt_buflen;
 
         /* Allocate buffer */
-        if ((encrypt_buf = malloc(io.buf_size + EVP_MAX_IV_LENGTH)) == NULL) {
+        encrypt_buflen = io.buf_size + EVP_MAX_IV_LENGTH;
+        if ((encrypt_buf = malloc(encrypt_buflen)) == NULL) {
             (*config->log)(LOG_ERR, "malloc: %s", strerror(errno));
             pthread_mutex_lock(&priv->mutex);
             priv->stats.out_of_memory_errors++;
@@ -1629,7 +1646,7 @@
         }
 
         /* Encrypt the block */
-        encrypt_len = http_io_crypt(priv, block_num, 1, io.src, io.buf_size, 
encrypt_buf);
+        encrypt_len = http_io_crypt(priv, block_num, 1, io.src, io.buf_size, 
encrypt_buf, encrypt_buflen);
 
         /* Compute block signature */
         http_io_authsig(priv, block_num, encrypt_buf, encrypt_len, hmac);
@@ -2475,7 +2492,7 @@
     char block_hash_buf[S3B_BLOCK_NUM_DIGITS + 2];
     int len;
 
-    http_io_format_block_hash(config, block_hash_buf, sizeof(block_hash_buf), 
block_num);
+    http_io_format_block_hash(config->blockHashPrefix, block_hash_buf, 
sizeof(block_hash_buf), block_num);
     if (config->vhost) {
         len = snprintf(buf, bufsiz, "%s%s%s%0*jx", config->baseURL,
           config->prefix, block_hash_buf, S3B_BLOCK_NUM_DIGITS, 
(uintmax_t)block_num);
@@ -2757,7 +2774,7 @@
  * Encrypt or decrypt one block
  */
 static u_int
-http_io_crypt(struct http_io_private *priv, s3b_block_t block_num, int enc, 
const u_char *src, u_int len, u_char *dest)
+http_io_crypt(struct http_io_private *priv, s3b_block_t block_num, int enc, 
const u_char *src, u_int len, u_char *dest, u_int dmax)
 {
     u_char ivec[EVP_MAX_IV_LENGTH];
     EVP_CIPHER_CTX* ctx;
@@ -2817,6 +2834,12 @@
 }
 #endif
 
+    /* Sanity check */
+    if (total_len > dmax) {
+        (*priv->config->log)(LOG_ERR, "encryption buffer overflow! %u > %u", 
total_len, dmax);
+        abort();
+    }
+
     /* Done */
     EVP_CIPHER_CTX_free(ctx);
     return total_len;
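
Two of the http_io.c changes above are defensive: the constructor now rejects stream
ciphers (which report an EVP block size of 1), and http_io_crypt() is handed an explicit
output-buffer bound (dmax) that it verifies before returning. Below is a hedged,
standalone sketch of the cipher check only, assuming an OpenSSL version (1.1.0 or later)
that self-initializes its cipher tables; it is not s3backer's actual setup code:

/* Sketch only: rejects stream ciphers the way the new sanity check does.
 * Returns 0 if "name" names a usable block cipher, -1 otherwise. */
#include <openssl/evp.h>
#include <stdio.h>

static int
check_block_cipher(const char *name)
{
    const EVP_CIPHER *const cipher = EVP_get_cipherbyname(name);
    int block_size;
    int iv_length;

    if (cipher == NULL) {
        fprintf(stderr, "unknown cipher `%s'\n", name);
        return -1;
    }
    block_size = EVP_CIPHER_block_size(cipher);
    iv_length = EVP_CIPHER_iv_length(cipher);

    /* Stream ciphers (and stream-like modes such as GCM) have block size 1. */
    if (block_size <= 1 || block_size != iv_length) {
        fprintf(stderr, "invalid cipher `%s' (block size %d, IV length %d);"
            " only block ciphers are supported\n", name, block_size, iv_length);
        return -1;
    }
    return 0;
}

int
main(void)
{
    /* AES-128-CBC passes; RC4 (if present at all) is rejected. */
    return check_block_cipher("aes-128-cbc") == 0
        && check_block_cipher("rc4") != 0 ? 0 : 1;
}
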
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/http_io.h new/s3backer-1.5.5/http_io.h
--- old/s3backer-1.5.4/http_io.h        2019-07-06 22:55:09.000000000 +0200
+++ new/s3backer-1.5.5/http_io.h        2020-04-19 21:08:47.000000000 +0200
@@ -140,6 +140,6 @@
 extern struct s3backer_store *http_io_create(struct http_io_conf *config);
 extern void http_io_get_stats(struct s3backer_store *s3b, struct http_io_stats 
*stats);
 extern void http_io_clear_stats(struct s3backer_store *s3b);
-extern int http_io_parse_block(struct http_io_conf *config, const char *name, 
s3b_block_t *block_num);
-extern void http_io_format_block_hash(const struct http_io_conf *config, char 
*block_hash_buf, size_t bufsiz, s3b_block_t block_num);
+extern int http_io_parse_block(const char *prefix, off_t num_blocks, int 
blockHashPrefix, const char *name, s3b_block_t *block_num);
+extern void http_io_format_block_hash(int blockHashPrefix, char 
*block_hash_buf, size_t bufsiz, s3b_block_t block_num);
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/main.c new/s3backer-1.5.5/main.c
--- old/s3backer-1.5.4/main.c   2019-02-28 16:08:40.000000000 +0100
+++ new/s3backer-1.5.5/main.c   2020-04-19 21:08:47.000000000 +0200
@@ -39,6 +39,7 @@
 #include "ec_protect.h"
 #include "fuse_ops.h"
 #include "http_io.h"
+#include "test_io.h"
 #include "s3b_config.h"
 #include "erase.h"
 #include "reset.h"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/reset.c new/s3backer-1.5.5/reset.c
--- old/s3backer-1.5.4/reset.c  2018-06-09 22:00:21.000000000 +0200
+++ new/s3backer-1.5.5/reset.c  2020-04-19 21:08:47.000000000 +0200
@@ -58,7 +58,7 @@
         warnx("resetting mount token for %s", config->description);
 
     /* Create temporary lower layer */
-    if ((s3b = config->test ? test_io_create(&config->http_io) : 
http_io_create(&config->http_io)) == NULL) {
+    if ((s3b = config->test ? test_io_create(&config->test_io) : 
http_io_create(&config->http_io)) == NULL) {
         warnx(config->test ? "test_io_create" : "http_io_create");
         goto fail;
     }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/s3b_config.c 
new/s3backer-1.5.5/s3b_config.c
--- old/s3backer-1.5.4/s3b_config.c     2019-07-26 16:21:11.000000000 +0200
+++ new/s3backer-1.5.5/s3b_config.c     2020-04-28 23:45:26.000000000 +0200
@@ -150,10 +150,7 @@
         .accessKey=             NULL,
         .baseURL=               NULL,
         .region=                NULL,
-        .bucket=                NULL,
         .sse=                   NULL,
-        .blockHashPrefix=       0,
-        .prefix=                S3BACKER_DEFAULT_PREFIX,
         .accessType=            S3BACKER_DEFAULT_ACCESS_TYPE,
         .authVersion=           S3BACKER_DEFAULT_AUTH_VERSION,
         .user_agent=            user_agent_buf,
@@ -191,6 +188,9 @@
     /* Common stuff */
     .block_size=            0,
     .file_size=             0,
+    .bucket=                NULL,
+    .prefix=                S3BACKER_DEFAULT_PREFIX,
+    .blockHashPrefix=       0,
     .quiet=                 0,
     .erase=                 0,
     .no_auto_detect=        0,
@@ -285,6 +285,10 @@
         .offset=    offsetof(struct s3b_config, 
block_cache.read_ahead_trigger),
     },
     {
+        .templ=     "--blockCacheNumProtected=%u",
+        .offset=    offsetof(struct s3b_config, block_cache.num_protected),
+    },
+    {
         .templ=     "--blockCacheFile=%s",
         .offset=    offsetof(struct s3b_config, block_cache.cache_file),
     },
@@ -344,6 +348,11 @@
         .value=     1
     },
     {
+        .templ=     "--no-vhost",
+        .offset=    offsetof(struct s3b_config, http_io.vhost),
+        .value=     -1
+    },
+    {
         .templ=     "--fileMode=%o",
         .offset=    offsetof(struct s3b_config, fuse_ops.file_mode),
     },
@@ -375,12 +384,12 @@
     },
     {
         .templ=     "--blockHashPrefix",
-        .offset=    offsetof(struct s3b_config, http_io.blockHashPrefix),
+        .offset=    offsetof(struct s3b_config, blockHashPrefix),
         .value=     1
     },
     {
         .templ=     "--prefix=%s",
-        .offset=    offsetof(struct s3b_config, http_io.prefix),
+        .offset=    offsetof(struct s3b_config, prefix),
     },
     {
         .templ=     "--defaultContentEncoding=%s",
@@ -458,6 +467,21 @@
         .value=     1
     },
     {
+        .templ=     "--test-errors",
+        .offset=    offsetof(struct s3b_config, test_io.random_errors),
+        .value=     1
+    },
+    {
+        .templ=     "--test-delays",
+        .offset=    offsetof(struct s3b_config, test_io.random_delays),
+        .value=     1
+    },
+    {
+        .templ=     "--test-discard",
+        .offset=    offsetof(struct s3b_config, test_io.discard_data),
+        .value=     1
+    },
+    {
         .templ=     "--timeout=%u",
         .offset=    offsetof(struct s3b_config, http_io.timeout),
     },
@@ -619,7 +643,7 @@
 
     /* Create HTTP (or test) layer */
     if (conf->test) {
-        if ((test_io_store = test_io_create(&conf->http_io)) == NULL)
+        if ((test_io_store = test_io_create(&conf->test_io)) == NULL)
             return NULL;
         store = test_io_store;
     } else {
@@ -891,8 +915,8 @@
     }
 
     /* Get bucket parameter */
-    if (config.http_io.bucket == NULL) {
-        if ((config.http_io.bucket = strdup(arg)) == NULL)
+    if (config.bucket == NULL) {
+        if ((config.bucket = strdup(arg)) == NULL)
             err(1, "strdup");
         return 0;
     }
@@ -1021,26 +1045,26 @@
 
     /* Check bucket/testdir */
     if (!config.test) {
-        if (config.http_io.bucket == NULL) {
+        if (config.bucket == NULL) {
             warnx("no S3 bucket specified");
             return -1;
         }
-        if (*config.http_io.bucket == '\0' || *config.http_io.bucket == '/' || 
strchr(config.http_io.bucket, '/') != 0) {
-            warnx("invalid S3 bucket `%s'", config.http_io.bucket);
+        if (*config.bucket == '\0' || *config.bucket == '/' || 
strchr(config.bucket, '/') != 0) {
+            warnx("invalid S3 bucket `%s'", config.bucket);
             return -1;
         }
     } else {
-        if (config.http_io.bucket == NULL) {
+        if (config.bucket == NULL) {
             warnx("no test directory specified");
             return -1;
         }
-        if (stat(config.http_io.bucket, &sb) == -1) {
-            warn("%s", config.http_io.bucket);
+        if (stat(config.bucket, &sb) == -1) {
+            warn("%s", config.bucket);
             return -1;
         }
         if (!S_ISDIR(sb.st_mode)) {
             errno = ENOTDIR;
-            warn("%s", config.http_io.bucket);
+            warn("%s", config.bucket);
             return -1;
         }
     }
@@ -1063,9 +1087,13 @@
     /* Set default or custom region */
     if (config.http_io.region == NULL)
         config.http_io.region = S3BACKER_DEFAULT_REGION;
-    if (customRegion)
+    if (customRegion && config.http_io.vhost != -1)
         config.http_io.vhost = 1;
 
+    /* Handle --no-vhost */
+    if (config.http_io.vhost == -1)
+        config.http_io.vhost = 0;
+
     /* Set default base URL */
     if (config.http_io.baseURL == NULL) {
         if (customRegion && strcmp(config.http_io.region, 
S3BACKER_DEFAULT_REGION) != 0)
@@ -1110,11 +1138,10 @@
         char *buf;
 
         schemelen = strchr(config.http_io.baseURL, ':') - 
config.http_io.baseURL + 3;
-        buflen = strlen(config.http_io.bucket) + 1 + 
strlen(config.http_io.baseURL) + 1;
+        buflen = strlen(config.bucket) + 1 + strlen(config.http_io.baseURL) + 
1;
         if ((buf = malloc(buflen)) == NULL)
             err(1, "malloc(%u)", (u_int)buflen);
-        snprintf(buf, buflen, "%.*s%s.%s", schemelen, config.http_io.baseURL,
-          config.http_io.bucket, config.http_io.baseURL + schemelen);
+        snprintf(buf, buflen, "%.*s%s.%s", schemelen, config.http_io.baseURL, 
config.bucket, config.http_io.baseURL + schemelen);
         config.http_io.baseURL = buf;
     }
 
@@ -1303,6 +1330,8 @@
         warnx("`--blockCacheRecoverDirtyBlocks' requires specifying 
`--blockCacheFile'");
         return -1;
     }
+    if (config.block_cache.num_protected > config.block_cache.cache_size)
+        warnx("`--blockCacheNumProtected' is larger than cache size; this may 
cause performance problems");
 
     /* Check mount point */
     if (config.erase || config.reset) {
@@ -1318,15 +1347,12 @@
     }
 
     /* Format descriptive string of what we're mounting */
-    if (config.test) {
-        snprintf(config.description, sizeof(config.description), "%s%s/%s",
-          "file://", config.http_io.bucket, config.http_io.prefix);
-    } else if (config.http_io.vhost)
-        snprintf(config.description, sizeof(config.description), "%s%s", 
config.http_io.baseURL, config.http_io.prefix);
-    else {
-        snprintf(config.description, sizeof(config.description), "%s%s/%s",
-          config.http_io.baseURL, config.http_io.bucket, 
config.http_io.prefix);
-    }
+    if (config.test)
+        snprintf(config.description, sizeof(config.description), "%s%s/%s", 
"file://", config.bucket, config.prefix);
+    else if (config.http_io.vhost)
+        snprintf(config.description, sizeof(config.description), "%s%s", 
config.http_io.baseURL, config.prefix);
+    else
+        snprintf(config.description, sizeof(config.description), "%s%s/%s", 
config.http_io.baseURL, config.bucket, config.prefix);
 
     /*
      * Read the first block (if any) to determine existing file and block size,
@@ -1337,6 +1363,9 @@
     if (config.no_auto_detect)
         r = ENOENT;
     else {
+        config.http_io.prefix = config.prefix;
+        config.http_io.bucket = config.bucket;
+        config.http_io.blockHashPrefix = config.blockHashPrefix;
         config.http_io.debug = config.debug;
         config.http_io.quiet = config.quiet;
         config.http_io.log = config.log;
@@ -1486,6 +1515,9 @@
     /* Copy common stuff into sub-module configs */
     config.block_cache.block_size = config.block_size;
     config.block_cache.log = config.log;
+    config.http_io.prefix = config.prefix;
+    config.http_io.bucket = config.bucket;
+    config.http_io.blockHashPrefix = config.blockHashPrefix;
     config.http_io.debug = config.debug;
     config.http_io.quiet = config.quiet;
     config.http_io.block_size = config.block_size;
@@ -1496,6 +1528,13 @@
     config.fuse_ops.block_size = config.block_size;
     config.fuse_ops.num_blocks = config.num_blocks;
     config.fuse_ops.log = config.log;
+    config.test_io.debug = config.debug;
+    config.test_io.log = config.log;
+    config.test_io.block_size = config.block_size;
+    config.test_io.num_blocks = config.num_blocks;
+    config.test_io.prefix = config.prefix;
+    config.test_io.bucket = config.bucket;
+    config.test_io.blockHashPrefix = config.blockHashPrefix;
 
     /* Check whether already mounted, and if so, compare mount token against 
on-disk cache (if any) */
     if (!config.test && !config.erase && !config.reset) {
@@ -1503,9 +1542,6 @@
         int conflict;
 
         /* Read s3 mount token */
-        config.http_io.debug = config.debug;
-        config.http_io.quiet = config.quiet;
-        config.http_io.log = config.log;
         if ((s3b = http_io_create(&config.http_io)) == NULL)
             err(1, "http_io_create");
         r = (*s3b->set_mount_token)(s3b, &mount_token, -1);
@@ -1595,7 +1631,7 @@
         }
 
         /* Create temporary lower layer */
-        if ((temp_store = config.test ? test_io_create(&config.http_io) : 
http_io_create(&config.http_io)) == NULL)
+        if ((temp_store = config.test ? test_io_create(&config.test_io) : 
http_io_create(&config.http_io)) == NULL)
             err(1, config.test ? "test_io_create" : "http_io_create");
 
         /* Initialize bitmap */
@@ -1657,9 +1693,9 @@
     (*config.log)(LOG_DEBUG, "%24s: %s", "authVersion", 
config.http_io.authVersion);
     (*config.log)(LOG_DEBUG, "%24s: \"%s\"", "baseURL", 
config.http_io.baseURL);
     (*config.log)(LOG_DEBUG, "%24s: \"%s\"", "region", config.http_io.region);
-    (*config.log)(LOG_DEBUG, "%24s: \"%s\"", config.test ? "testdir" : 
"bucket", config.http_io.bucket);
-    (*config.log)(LOG_DEBUG, "%24s: \"%s\"", "prefix", config.http_io.prefix);
-    (*config.log)(LOG_DEBUG, "%24s: %s", "blockHashPrefix", 
config.http_io.blockHashPrefix ? "true" : "false");
+    (*config.log)(LOG_DEBUG, "%24s: \"%s\"", config.test ? "testdir" : 
"bucket", config.bucket);
+    (*config.log)(LOG_DEBUG, "%24s: \"%s\"", "prefix", config.prefix);
+    (*config.log)(LOG_DEBUG, "%24s: %s", "blockHashPrefix", 
config.blockHashPrefix ? "true" : "false");
     (*config.log)(LOG_DEBUG, "%24s: \"%s\"", "defaultContentEncoding",
       config.http_io.default_ce != NULL ? config.http_io.default_ce : 
"(none)");
     (*config.log)(LOG_DEBUG, "%24s: %s", "list_blocks", config.list_blocks ? 
"true" : "false");
@@ -1802,6 +1838,7 @@
     fprintf(stderr, "\t--%-27s %s\n", "blockCacheThreads=NUM", "Block cache 
write-back thread pool size");
     fprintf(stderr, "\t--%-27s %s\n", "blockCacheTimeout=MILLIS", "Block cache 
entry timeout (zero = infinite)");
     fprintf(stderr, "\t--%-27s %s\n", "blockCacheWriteDelay=MILLIS", "Block 
cache maximum write-back delay");
+    fprintf(stderr, "\t--%-27s %s\n", "blockCacheNumProtected=NUM", 
"Preferentially retain NUM blocks in the block cache");
     fprintf(stderr, "\t--%-27s %s\n", "blockSize=SIZE", "Block size (with 
optional suffix 'K', 'M', 'G', etc.)");
     fprintf(stderr, "\t--%-27s %s\n", "blockHashPrefix", "Prepend hash to 
block names for even distribution");
     fprintf(stderr, "\t--%-27s %s\n", "cacert=FILE", "Specify SSL certificate 
authority file");
@@ -1825,6 +1862,7 @@
     fprintf(stderr, "\t--%-27s %s\n", "md5CacheSize=NUM", "Max size of MD5 
cache (zero = disabled)");
     fprintf(stderr, "\t--%-27s %s\n", "md5CacheTime=MILLIS", "Expire time for 
MD5 cache (zero = infinite)");
     fprintf(stderr, "\t--%-27s %s\n", "minWriteDelay=MILLIS", "Minimum time 
between same block writes");
+    fprintf(stderr, "\t--%-27s %s\n", "no-vhost", "Disable virtual hosted 
style requests");
     fprintf(stderr, "\t--%-27s %s\n", "password=PASSWORD", "Encrypt using 
PASSWORD");
     fprintf(stderr, "\t--%-27s %s\n", "passwordFile=FILE", "Encrypt using 
password read from FILE");
     fprintf(stderr, "\t--%-27s %s\n", "prefix=STRING", "Prefix for resource 
names within bucket");
@@ -1842,6 +1880,9 @@
     fprintf(stderr, "\t--%-27s %s\n", "statsFilename=NAME", "Name of 
statistics file in filesystem");
     fprintf(stderr, "\t--%-27s %s\n", "storageClass=TYPE", "Specify storage 
class for written blocks");
     fprintf(stderr, "\t--%-27s %s\n", "test", "Run in local test mode (bucket 
is a directory)");
+    fprintf(stderr, "\t--%-27s %s\n", "test-delays", "In test mode, introduce 
random I/O delays");
+    fprintf(stderr, "\t--%-27s %s\n", "test-discard", "In test mode, discard 
data and perform no I/O operations");
+    fprintf(stderr, "\t--%-27s %s\n", "test-errors", "In test mode, introduce 
random I/O errors");
     fprintf(stderr, "\t--%-27s %s\n", "timeout=SECONDS", "Max time allowed for 
one HTTP operation");
     fprintf(stderr, "\t--%-27s %s\n", "timeout=SECONDS", "Specify HTTP 
operation timeout");
     fprintf(stderr, "\t--%-27s %s\n", "version", "Show version information and 
exit");
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/s3b_config.h 
new/s3backer-1.5.5/s3b_config.h
--- old/s3backer-1.5.4/s3b_config.h     2017-04-01 18:05:52.000000000 +0200
+++ new/s3backer-1.5.5/s3b_config.h     2020-04-19 21:08:47.000000000 +0200
@@ -42,6 +42,7 @@
     struct fuse_ops_conf        fuse_ops;
     struct ec_protect_conf      ec_protect;
     struct http_io_conf         http_io;
+    struct test_io_conf         test_io;
 
     /* Common/global stuff */
     const char                  *accessFile;
@@ -50,6 +51,9 @@
     u_int                       block_size;
     off_t                       file_size;
     off_t                       num_blocks;
+    const char                  *bucket;
+    const char                  *prefix;
+    int                         blockHashPrefix;
     int                         debug;
     int                         erase;
     int                         reset;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/s3backer.1 
new/s3backer-1.5.5/s3backer.1
--- old/s3backer-1.5.4/s3backer.1       2019-09-11 16:12:08.000000000 +0200
+++ new/s3backer-1.5.5/s3backer.1       2020-04-19 21:13:00.000000000 +0200
@@ -183,6 +183,7 @@
 .Fl \-blockCacheFile ,
 .Fl \-blockCacheMaxDirty ,
 .Fl \-blockCacheNoVerify ,
+.Fl \-blockCacheNumProtected ,
 .Fl \-blockCacheSize ,
 .Fl \-blockCacheSync ,
 .Fl \-blockCacheThreads ,
@@ -463,6 +464,16 @@
 This flag requires
 .Fl \-blockCacheFile
 to be set.
+.It Fl \-blockCacheNumProtected=NUM
+Preferentially retain the first
+.Ar NUM
+blocks in the block cache.
+.Pp
+Some upper filesystems store highly active data (e.g., write journal) at the 
beginning of the filesystem.
+This option can be used to improve performance by reducing network reads for 
these regions of the file.
+With this option enabled, blocks after the first
+.Ar NUM
+blocks will be evicted before any protected blocks are.
 .It Fl \-blockHashPrefix
 Prepend random prefixes (generated deterministically from the block number) to 
block object names.
 This spreads requests more evenly across the namespace, and prevents heavy 
access to a narrow range of blocks from all being directed to the same backend 
server.
@@ -548,6 +559,8 @@
 See your OpenSSL documentation for a list of supported ciphers;
 the default if no cipher is specified is AES-128 CBC.
 .Pp
+Currently, only block ciphers are supported.
+.Pp
 The encryption password may be supplied via one of
 .Fl \-password
 or
@@ -759,7 +772,9 @@
 Specify an AWS region.
 This flag changes the default base URL to include the region name and 
automatically sets the
 .Fl \-vhost
-flag.
+flag, unless the
+.Fl \-no-vhost
+flag is used.
 .It Fl \-reset-mounted-flag
 Reset the 'already mounted' flag on the underlying S3 data store.
 .Pp
@@ -830,6 +845,15 @@
 is a relative pathname (and
 .Fl f
 is not given) it will be resolved relative to the root directory.
+.It Fl \-test-delays
+In test mode, introduce random I/O delays.
+.It Fl \-test-discard
+In test mode, discard all data written, return zero for all data blocks read, 
and perform no I/O operations.
+This mode is useful for isolating the FUSE and
+.Nm
+performance overhead.
+.It Fl \-test-errors
+In test mode, introduce random I/O errors.
 .It Fl \-timeout=SECONDS
 Specify a time limit in seconds for one HTTP operation attempt.
 This limits the entire operation including connection time (if not already 
connected) and data transfer time.
@@ -853,7 +877,11 @@
 Put another way, this flag is required for buckets defined outside of the US 
region.
 This flag is automatically set when the
 .Fl \-region
+flag is used, unless the
+.Fl \-no-vhost
 flag is used.
+.It Fl \-no-vhost
+Disable virtual hosted style requests (the default).
 .El
 .Pp
 In addition,
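
The manual page changes above document how `--region' now implies `--vhost' unless
`--no-vhost' is given. As a purely illustrative aside (simplified, and not s3backer's
actual http_io_get_url() code), the practical difference between the two request styles
is where the bucket name appears in each block URL:

/* Sketch only: virtual-hosted vs. path-style block URLs, much simplified
 * relative to http_io's real URL formatting. */
#include <stdio.h>

static void
format_block_url(char *buf, size_t bufsiz, int vhost, const char *base_url,
    const char *bucket, const char *prefix, unsigned int block_num)
{
    if (vhost) {
        /* Virtual hosted style: the bucket is already part of the host name
         * in base_url, e.g. "https://mybucket.s3.amazonaws.com/". */
        snprintf(buf, bufsiz, "%s%s%08x", base_url, prefix, block_num);
    } else {
        /* Path style: the bucket is the first path component. */
        snprintf(buf, bufsiz, "%s%s/%s%08x", base_url, bucket, prefix, block_num);
    }
}

int
main(void)
{
    char url[256];

    format_block_url(url, sizeof(url), 0, "https://s3.amazonaws.com/", "mybucket", "blk-", 7);
    printf("path style:  %s\n", url);
    format_block_url(url, sizeof(url), 1, "https://mybucket.s3.amazonaws.com/", "mybucket", "blk-", 7);
    printf("vhost style: %s\n", url);
    return 0;
}
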
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/s3backer.spec 
new/s3backer-1.5.5/s3backer.spec
--- old/s3backer-1.5.4/s3backer.spec    2019-10-08 19:19:50.000000000 +0200
+++ new/s3backer-1.5.5/s3backer.spec    2020-08-22 19:39:07.000000000 +0200
@@ -29,7 +29,7 @@
 # 
 
 Name:           s3backer
-Version:        1.5.4
+Version:        1.5.5
 Release:        1
 License:        GNU General Public License, Version 2
 Summary:        FUSE-based single file backing store via Amazon S3
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/test_io.c new/s3backer-1.5.5/test_io.c
--- old/s3backer-1.5.4/test_io.c        2019-07-26 16:21:11.000000000 +0200
+++ new/s3backer-1.5.5/test_io.c        2020-04-19 21:13:42.000000000 +0200
@@ -44,7 +44,7 @@
 
 /* Internal state */
 struct test_io_private {
-    struct http_io_conf         *config;
+    struct test_io_conf         *config;
     u_char                      zero_block[0];
 };
 
@@ -68,7 +68,7 @@
  * On error, returns NULL and sets `errno'.
  */
 struct s3backer_store *
-test_io_create(struct http_io_conf *config)
+test_io_create(struct test_io_conf *config)
 {
     struct s3backer_store *s3b;
     struct test_io_private *priv;
@@ -95,7 +95,8 @@
     s3b->data = priv;
 
     /* Random initialization */
-    srandom((u_int)time(NULL));
+    if (config->random_delays || config->random_errors)
+        srandom((u_int)time(NULL));
 
     /* Done */
     return s3b;
@@ -141,7 +142,8 @@
   u_char *actual_md5, const u_char *expect_md5, int strict)
 {
     struct test_io_private *const priv = s3b->data;
-    struct http_io_conf *const config = priv->config;
+    struct test_io_conf *const config = priv->config;
+    char block_hash_buf[S3B_BLOCK_NUM_DIGITS + 2];
     u_char md5[MD5_DIGEST_LENGTH];
     char path[PATH_MAX];
     int zero_block;
@@ -154,44 +156,53 @@
         (*config->log)(LOG_DEBUG, "test_io: read %0*jx started", 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
 
     /* Random delay */
-    usleep((random() % 200) * 1000);
+    if (config->random_delays)
+        usleep((random() % 200) * 1000);
 
     /* Random error */
-    if ((random() % 100) < RANDOM_ERROR_PERCENT) {
+    if (config->random_errors && (random() % 100) < RANDOM_ERROR_PERCENT) {
         (*config->log)(LOG_ERR, "test_io: random failure reading %0*jx", 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
         return EAGAIN;
     }
 
-    /* Generate path */
-    snprintf(path, sizeof(path), "%s/%s%0*jx", config->bucket, config->prefix, 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
-
     /* Read block */
-    if ((fd = open(path, O_RDONLY)) != -1) {
-        int total;
+    if (config->discard_data)
+        r = ENOENT;
+    else {
 
-        /* Read file */
-        for (total = 0; total < config->block_size; total += r) {
-            if ((r = read(fd, (char *)dest + total, config->block_size - 
total)) == -1) {
-                r = errno;
-                (*config->log)(LOG_ERR, "can't read %s: %s", path, 
strerror(r));
-                close(fd);
-                return r;
+        /* Generate path */
+        http_io_format_block_hash(config->blockHashPrefix, block_hash_buf, 
sizeof(block_hash_buf), block_num);
+        snprintf(path, sizeof(path), "%s/%s%s%0*jx",
+          config->bucket, config->prefix, block_hash_buf, 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
+
+        /* Open and read file */
+        if ((fd = open(path, O_RDONLY)) != -1) {
+            int total;
+
+            /* Read file */
+            for (total = 0; total < config->block_size; total += r) {
+                if ((r = read(fd, (char *)dest + total, config->block_size - 
total)) == -1) {
+                    r = errno;
+                    (*config->log)(LOG_ERR, "can't read %s: %s", path, 
strerror(r));
+                    close(fd);
+                    return r;
+                }
+                if (r == 0)
+                    break;
             }
-            if (r == 0)
-                break;
-        }
-        close(fd);
+            close(fd);
 
-        /* Check for short read */
-        if (total != config->block_size) {
-            (*config->log)(LOG_ERR, "%s: file is truncated (only read %d out 
of %u bytes)", path, total, config->block_size);
-            return EIO;
-        }
+            /* Check for short read */
+            if (total != config->block_size) {
+                (*config->log)(LOG_ERR, "%s: file is truncated (only read %d 
out of %u bytes)", path, total, config->block_size);
+                return EIO;
+            }
 
-        /* Done */
-        r = 0;
-    } else
-        r = errno;
+            /* Done */
+            r = 0;
+        } else
+            r = errno;
+    }
 
     /* Convert ENOENT into a read of all zeroes */
     if ((zero_block = (r == ENOENT))) {
@@ -260,7 +271,7 @@
   check_cancel_t *check_cancel, void *check_cancel_arg)
 {
     struct test_io_private *const priv = s3b->data;
-    struct http_io_conf *const config = priv->config;
+    struct test_io_conf *const config = priv->config;
     char block_hash_buf[S3B_BLOCK_NUM_DIGITS + 2];
     u_char md5[MD5_DIGEST_LENGTH];
     char temp[PATH_MAX];
@@ -299,16 +310,24 @@
     }
 
     /* Random delay */
-    usleep((random() % 200) * 1000);
+    if (config->random_delays)
+        usleep((random() % 200) * 1000);
 
     /* Random error */
-    if ((random() % 100) < RANDOM_ERROR_PERCENT) {
+    if (config->random_errors && (random() % 100) < RANDOM_ERROR_PERCENT) {
         (*config->log)(LOG_ERR, "test_io: random failure writing %0*jx", 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
         return EAGAIN;
     }
 
+    /* Discarding data? */
+    if (config->discard_data) {
+        if (config->debug)
+            (*config->log)(LOG_DEBUG, "test_io: discard %0*jx complete", 
S3B_BLOCK_NUM_DIGITS, (uintmax_t)block_num);
+        return 0;
+    }
+
     /* Generate path */
-    http_io_format_block_hash(config, block_hash_buf, sizeof(block_hash_buf), 
block_num);
+    http_io_format_block_hash(config->blockHashPrefix, block_hash_buf, 
sizeof(block_hash_buf), block_num);
     snprintf(path, sizeof(path), "%s/%s%s%0*jx",
       config->bucket, config->prefix, block_hash_buf, S3B_BLOCK_NUM_DIGITS, 
(uintmax_t)block_num);
 
@@ -360,7 +379,7 @@
 test_io_read_block_part(struct s3backer_store *s3b, s3b_block_t block_num, 
u_int off, u_int len, void *dest)
 {
     struct test_io_private *const priv = s3b->data;
-    struct http_io_conf *const config = priv->config;
+    struct test_io_conf *const config = priv->config;
 
     return block_part_read_block_part(s3b, block_num, config->block_size, off, 
len, dest);
 }
@@ -369,7 +388,7 @@
 test_io_write_block_part(struct s3backer_store *s3b, s3b_block_t block_num, 
u_int off, u_int len, const void *src)
 {
     struct test_io_private *const priv = s3b->data;
-    struct http_io_conf *const config = priv->config;
+    struct test_io_conf *const config = priv->config;
 
     return block_part_write_block_part(s3b, block_num, config->block_size, 
off, len, src);
 }
@@ -378,19 +397,23 @@
 test_io_list_blocks(struct s3backer_store *s3b, block_list_func_t *callback, 
void *arg)
 {
     struct test_io_private *const priv = s3b->data;
-    struct http_io_conf *const config = priv->config;
+    struct test_io_conf *const config = priv->config;
     s3b_block_t block_num;
     struct dirent *dent;
     DIR *dir;
     int i;
 
+    /* Discarding data? */
+    if (config->discard_data)
+        return 0;
+
     /* Open directory */
     if ((dir = opendir(config->bucket)) == NULL)
         return errno;
 
     /* Scan directory */
     for (i = 0; (dent = readdir(dir)) != NULL; i++) {
-        if (http_io_parse_block(config, dent->d_name, &block_num) == 0)
+        if (http_io_parse_block(config->prefix, config->num_blocks, 
config->blockHashPrefix, dent->d_name, &block_num) == 0)
             (*callback)(arg, block_num);
     }
 
@@ -400,4 +423,3 @@
     /* Done */
     return 0;
 }
-
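
The test_io.c changes above make the simulated delays and errors opt-in and add a pure
discard mode, each controlled by the new `--test-delays', `--test-errors', and
`--test-discard' flags. The pattern is a simple gate at the top of every I/O path; here
is a minimal sketch with illustrative names (not the real test_io_conf or its callers):

/* Sketch only: the gating pattern used by the new --test-* flags above. */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

#define RANDOM_ERROR_PERCENT 1

struct test_conf {
    int random_delays;      /* --test-delays */
    int random_errors;      /* --test-errors */
    int discard_data;       /* --test-discard */
};

/* Returns 0 on (possibly simulated) success, or EAGAIN on a simulated error. */
static int
simulated_io(const struct test_conf *conf)
{
    if (conf->random_delays)
        usleep((random() % 200) * 1000);    /* random delay of up to ~200ms */
    if (conf->random_errors && (random() % 100) < RANDOM_ERROR_PERCENT)
        return EAGAIN;                      /* simulated transient failure */
    if (conf->discard_data)
        return 0;                           /* pretend the I/O happened */
    /* ...the real file read or write would go here... */
    return 0;
}

int
main(void)
{
    struct test_conf conf = { .random_errors = 1, .discard_data = 1 };

    srandom(1);                 /* deterministic seed, for the example only */
    return simulated_io(&conf); /* 0, or EAGAIN on the (rare) simulated error */
}
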
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/test_io.h new/s3backer-1.5.5/test_io.h
--- old/s3backer-1.5.4/test_io.h        2017-04-01 18:05:52.000000000 +0200
+++ new/s3backer-1.5.5/test_io.h        2020-04-19 21:13:46.000000000 +0200
@@ -34,6 +34,20 @@
  * also delete it here.
  */
 
+/* Configuration info structure for test_io store */
+struct test_io_conf {
+    u_int               block_size;
+    off_t               num_blocks;
+    const char          *bucket;
+    const char          *prefix;
+    log_func_t          *log;
+    int                 blockHashPrefix;
+    int                 random_errors;
+    int                 random_delays;
+    int                 discard_data;
+    int                 debug;
+};
+
 /* test_io.c */
-extern struct s3backer_store *test_io_create(struct http_io_conf *config);
+extern struct s3backer_store *test_io_create(struct test_io_conf *config);
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/s3backer-1.5.4/tester.c new/s3backer-1.5.5/tester.c
--- old/s3backer-1.5.4/tester.c 2017-04-01 18:05:52.000000000 +0200
+++ new/s3backer-1.5.5/tester.c 2020-04-19 21:08:47.000000000 +0200
@@ -39,6 +39,7 @@
 #include "ec_protect.h"
 #include "fuse_ops.h"
 #include "http_io.h"
+#include "test_io.h"
 #include "s3b_config.h"
 
 /* Definitions */

