Re: [PATCH 8/8] aio: support for IO polling

2018-11-22 Thread Jan Kara


On Tue 20-11-18 10:19:53, Jens Axboe wrote:
> +/*
> + * We can't just wait for polled events to come to us, we have to actively
> + * find and complete them.
> + */
> +static void aio_iopoll_reap_events(struct kioctx *ctx)
> +{
> + if (!(ctx->flags & IOCTX_FLAG_IOPOLL))
> + return;
> +
> + while (!list_empty_careful(&ctx->poll_submitted) ||
> +        !list_empty(&ctx->poll_completing)) {
> + unsigned int nr_events = 0;
> +
> + __aio_iopoll_check(ctx, NULL, &nr_events, 1, UINT_MAX);
> + }
> +}
> +
> +static int aio_iopoll_check(struct kioctx *ctx, long min_nr, long nr,
> + struct io_event __user *event)
> +{
> + unsigned int nr_events = 0;
> + int ret = 0;
> +
> + /* Only allow one thread polling at a time */
> + if (test_and_set_bit(0, &ctx->getevents_busy))
> + return -EBUSY;
> +
> + while (!nr_events || !need_resched()) {
> + int tmin = 0;
> +
> + if (nr_events < min_nr)
> + tmin = min_nr - nr_events;
> +
> + ret = __aio_iopoll_check(ctx, event, &nr_events, tmin, nr);
> + if (ret <= 0)
> + break;
> + ret = 0;
> + }
> +
> + clear_bit(0, &ctx->getevents_busy);
> + return nr_events ? nr_events : ret;
> +}

Hum, what if userspace calls io_destroy() while another process is polling
for events on the same kioctx? It seems we'd be reaping events from two
processes in parallel in that case which will result in various
"interesting" effects like ctx->poll_completing list corruption...

Honza
-- 
Jan Kara 
SUSE Labs, CR


[PATCH 2/5] bcache: option to automatically run gc thread after writeback accomplished

2018-11-22 Thread Coly Li
The option gc_after_writeback is disabled by default, because garbage
collection discards clean data from the SSD, which shrinks the cached
data set.

Writing 1 to /sys/fs/bcache//internal/gc_after_writeback enables this
option: the gc thread is woken up once writeback has finished and all
cached data is clean.

This option is helpful for people who care more about write performance.
Under a heavy write workload, all cached data only becomes clean once the
writeback thread has flushed it during I/O idle time. In that situation a
subsequent gc run may help shrink the bcache B+ tree and discard more
clean data, which may be helpful for future write requests.

If you are not sure whether this is helpful for your own workload, please
leave it disabled, which is the default.

Signed-off-by: Coly Li 
---
 drivers/md/bcache/bcache.h| 14 ++
 drivers/md/bcache/sysfs.c |  9 +
 drivers/md/bcache/writeback.c | 27 +++
 drivers/md/bcache/writeback.h |  2 ++
 4 files changed, 52 insertions(+)

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 96d2213f279e..fdf75352e16a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -626,6 +626,20 @@ struct cache_set {
/* Where in the btree gc currently is */
struct bkey gc_done;
 
+   /*
+    * For automatic garbage collection after writeback completed, this
+    * variable is used as bit fields:
+    * -  0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
+    * -  0010b (BCH_DO_AUTO_GC): do gc after writeback
+    * This is an optimization for write requests following writeback,
+    * when the read hit rate drops because clean data on the cache has
+    * been discarded. Unless the user explicitly enables it via sysfs,
+    * it stays disabled.
+    */
+#define BCH_ENABLE_AUTO_GC 1
+#define BCH_DO_AUTO_GC 2
+   uint8_t gc_after_writeback;
+
/*
 * The allocation code needs gc_mark in struct bucket to be correct, but
 * it's not while a gc is in progress. Protected by bucket_lock.
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 7351ee4940f3..a38a74dffda4 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -128,6 +128,7 @@ rw_attribute(expensive_debug_checks);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
 rw_attribute(copy_gc_enabled);
+rw_attribute(gc_after_writeback);
 rw_attribute(size);
 
 static ssize_t bch_snprint_string_list(char *buf,
@@ -676,6 +677,7 @@ SHOW(__bch_cache_set)
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled,   "%i", c->copy_gc_enabled);
+   sysfs_printf(gc_after_writeback,"%i", c->gc_after_writeback);
sysfs_printf(io_disable,"%i",
 test_bit(CACHE_SET_IO_DISABLE, &c->flags));
 
@@ -776,6 +778,12 @@ STORE(__bch_cache_set)
sysfs_strtoul(gc_always_rewrite,c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled,  c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled,  c->copy_gc_enabled);
+   /*
+    * Writing gc_after_writeback here may overwrite an already set
+    * BCH_DO_AUTO_GC; that is fine, the flag will simply be set again
+    * at the next opportunity.
+    */
+   sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);
 
return size;
 }
@@ -856,6 +864,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
&sysfs_copy_gc_enabled,
+   &sysfs_gc_after_writeback,
&sysfs_io_disable,
NULL
 };
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 08c3a9f9676c..74e3f5f3807b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -17,6 +17,15 @@
 #include 
 #include 
 
+static void update_gc_after_writeback(struct cache_set *c)
+{
+   if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
+   c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
+   return;
+
+   c->gc_after_writeback |= BCH_DO_AUTO_GC;
+}
+
 /* Rate limiting */
 static uint64_t __calc_target_rate(struct cached_dev *dc)
 {
@@ -191,6 +200,7 @@ static void update_writeback_rate(struct work_struct *work)
if (!set_at_max_writeback_rate(c, dc)) {
down_read(&dc->writeback_lock);
__update_writeback_rate(dc);
+   update_gc_after_writeback(c);
up_read(&dc->writeback_lock);
}
}
@@ -689,6 +699,23 @@ static int bch_writeback_thread(void *arg)
up_write(&dc->writeback_lock);
break;

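The rest of this hunk is cut off above. For context, the writeback-thread
side presumably does something like the sketch below once all cached data
is clean -- an illustration reusing BCH_ENABLE_AUTO_GC, BCH_DO_AUTO_GC and
force_wake_up_gc() from the other hunks, not the verbatim diff:

	/*
	 * Writeback has finished and all dirty data is clean. If the user
	 * armed gc_after_writeback via sysfs and update_gc_after_writeback()
	 * latched BCH_DO_AUTO_GC, consume the flag and force a gc run so the
	 * btree is shrunk and clean buckets can be discarded.
	 */
	if (c->gc_after_writeback == (BCH_ENABLE_AUTO_GC | BCH_DO_AUTO_GC)) {
		c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
		force_wake_up_gc(c);
	}
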
[PATCH 1/5] bcache: introduce force_wake_up_gc()

2018-11-22 Thread Coly Li
The garbage collection thread only starts working when c->sectors_to_gc is
a negative value; otherwise nothing happens even if the gc thread is woken
up by wake_up_gc().

force_wake_up_gc() sets c->sectors_to_gc to -1 before calling
wake_up_gc(), so the gc thread gets a chance to run, provided nothing else
sets c->sectors_to_gc back to a positive value before gc_should_run() is
checked.

This routine can be called wherever the gc thread needs to be woken up and
forced to run.

Signed-off-by: Coly Li 
---
 drivers/md/bcache/btree.h | 18 ++
 drivers/md/bcache/sysfs.c | 17 ++---
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index a68d6c55783b..d1c72ef64edf 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -266,6 +266,24 @@ static inline void wake_up_gc(struct cache_set *c)
wake_up(&c->gc_wait);
 }
 
+static inline void force_wake_up_gc(struct cache_set *c)
+{
+   /*
+    * The garbage collection thread only works when sectors_to_gc < 0;
+    * calling wake_up_gc() won't start the gc thread if sectors_to_gc
+    * is not a negative value.
+    * Therefore sectors_to_gc is set to -1 here, before waking up the
+    * gc thread via wake_up_gc(), so that gc_should_run() gives the gc
+    * thread a chance to run. "A chance" because, before gc_should_run()
+    * is reached, c->sectors_to_gc may still be set to another positive
+    * value, so this routine does not guarantee that the gc thread will
+    * actually run.
+    */
+   atomic_set(&c->sectors_to_gc, -1);
+   wake_up_gc(c);
+}
+
 #define MAP_DONE   0
 #define MAP_CONTINUE   1
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index d2e5c9892d4d..7351ee4940f3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -725,21 +725,8 @@ STORE(__bch_cache_set)
bch_cache_accounting_clear(&c->accounting);
}
 
-   if (attr == &sysfs_trigger_gc) {
-   /*
-* Garbage collection thread only works when sectors_to_gc < 0,
-* when users write to sysfs entry trigger_gc, most of time
-* they want to forcibly triger gargage collection. Here -1 is
-* set to c->sectors_to_gc, to make gc_should_run() give a
-* chance to permit gc thread to run. "give a chance" means
-* before going into gc_should_run(), there is still chance
-* that c->sectors_to_gc being set to other positive value. So
-* writing sysfs entry trigger_gc won't always make sure gc
-* thread takes effect.
-*/
-   atomic_set(&c->sectors_to_gc, -1);
-   wake_up_gc(c);
-   }
+   if (attr == &sysfs_trigger_gc)
+   force_wake_up_gc(c);
 
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
-- 
2.16.4



[PATCH 0/5] Writeback performance tuning options

2018-11-22 Thread Coly Li
I received a request to provide options that let people do research on
writeback performance tuning for their extremely heavy workloads. These
options are required to be disabled by default, to avoid changing current
code behavior.

This series adds several disabled-by-default options for writeback
performance tuning.

- Auto gc after writeback accomplished
  A sysfs entry gc_after_writeback is added. If it is explicitly enabled,
  the gc thread is forcibly woken up once writeback has completed and all
  cached data is clean. This behavior shrinks the bcache B+ tree and
  discards clean SSD space, which might be helpful for subsequent write
  requests. The following patches cover this option:
  - bcache: introduce force_wake_up_gc()
  - bcache: option to automatically run gc thread after writeback
accomplished

- Tunable cutoff writeback and cutoff writeback sync
  Currently cutoff writeback and cutoff writeback sync are fixed values
  defined by macros. There is no way for people to set a larger value and
  test the resulting performance behavior. Now they can be specified as
  module load time parameters (bch_cutoff_writeback and
  bch_cutoff_writeback_sync), which do not change after the bcache module
  is loaded. People can now test and observe the behavior of their own
  cache device with larger cutoff writeback values. The following
  patches cover these 2 options:
  - bcache: add MODULE_DESCRIPTION information
  - bcache: make cutoff_writeback and cutoff_writeback_sync tunnable
  - bcache: set writeback_percent in a flexible range

- A more flexible writeback_percent range
  Currently writeback_percent is limited to the range [0, 40], because the
  cutoff writeback value is defined as 40 by a macro. This patch permits
  the value to be specified in the range [0, bch_cutoff_writeback], where
  the maximum value of bch_cutoff_writeback can be 70. People now have a
  more flexible writeback_percent parameter range to test and observe how
  the bcache writeback code behaves on their own cache hardware. The
  following patch covers this change:
  - bcache: set writeback_percent in a flexible range

If anyone else is interested in writeback performance tuning with these
tunable options, I would appreciate it if you share any better performance
numbers you find with non-default option values.

Thanks in advance.

Coly Li

---
Coly Li (5):
  bcache: introduce force_wake_up_gc()
  bcache: option to automatically run gc thread after writeback
accomplished
  bcache: add MODULE_DESCRIPTION information
  bcache: make cutoff_writeback and cutoff_writeback_sync tunnable
  bcache: set writeback_percent in a flexible range

 drivers/md/bcache/bcache.h| 14 ++
 drivers/md/bcache/btree.h | 18 +
 drivers/md/bcache/super.c | 45 +--
 drivers/md/bcache/sysfs.c | 36 +++---
 drivers/md/bcache/writeback.c | 27 ++
 drivers/md/bcache/writeback.h | 12 ++--
 6 files changed, 132 insertions(+), 20 deletions(-)

-- 
2.16.4



[PATCH 3/5] bcache: add MODULE_DESCRIPTION information

2018-11-22 Thread Coly Li
This patch moves MODULE_AUTHOR and MODULE_LICENSE to the end of super.c, and
adds MODULE_DESCRIPTION("Bcache: a Linux block layer cache").

This is preparation for adding module parameters.

Signed-off-by: Coly Li 
---
 drivers/md/bcache/super.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 5b59d44656c0..61d3b63fa617 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -25,9 +25,6 @@
 #include 
 #include 
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kent Overstreet ");
-
 static const char bcache_magic[] = {
0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
@@ -2469,3 +2466,7 @@ static int __init bcache_init(void)
 
 module_exit(bcache_exit);
 module_init(bcache_init);
+
+MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
+MODULE_AUTHOR("Kent Overstreet ");
+MODULE_LICENSE("GPL");
-- 
2.16.4



[PATCH 4/5] bcache: make cutoff_writeback and cutoff_writeback_sync tunnable

2018-11-22 Thread Coly Li
Currently the cutoff writeback and cutoff writeback sync thresholds are
defined as the static values CUTOFF_WRITEBACK (40) and
CUTOFF_WRITEBACK_SYNC (70). Most of the time they work fine, but when
people want to do research on bcache writeback mode performance tuning,
there is no way to modify the soft and hard cutoff writeback values.

This patch introduces two module parameters, bch_cutoff_writeback_sync and
bch_cutoff_writeback, which permit people to tune the values when loading
bcache.ko. If they are not specified at module load time, the current
values CUTOFF_WRITEBACK_SYNC and CUTOFF_WRITEBACK are used as defaults and
nothing changes.

When people want to tune these two values:
- cutoff_writeback can be set in the range [1, 70]
- cutoff_writeback_sync can be set in the range [1, 90]
- cutoff_writeback is always <= cutoff_writeback_sync

The default values are strongly recommended for most users and most
workloads. But if people want to take the risk of doing research on new
writeback cutoff tuning for their own workload, now they can.

Signed-off-by: Coly Li 
---
 drivers/md/bcache/super.c | 40 
 drivers/md/bcache/sysfs.c |  7 +++
 drivers/md/bcache/writeback.h | 10 --
 3 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 61d3b63fa617..4dee119c3664 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -25,6 +25,9 @@
 #include 
 #include 
 
+unsigned int bch_cutoff_writeback;
+unsigned int bch_cutoff_writeback_sync;
+
 static const char bcache_magic[] = {
0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
@@ -2420,6 +2423,32 @@ static void bcache_exit(void)
mutex_destroy(&bch_register_lock);
 }
 
+/* Check and fixup module parameters */
+static void check_module_parameters(void)
+{
+   if (bch_cutoff_writeback_sync == 0)
+   bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
+   else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
+   pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u",
+   bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
+   bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
+   }
+
+   if (bch_cutoff_writeback == 0)
+   bch_cutoff_writeback = CUTOFF_WRITEBACK;
+   else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
+   pr_warn("set bch_cutoff_writeback (%u) to max value %u",
+   bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
+   bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
+   }
+
+   if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
+   pr_warn("set bch_cutoff_writeback (%u) to %u",
+   bch_cutoff_writeback, bch_cutoff_writeback_sync);
+   bch_cutoff_writeback = bch_cutoff_writeback_sync;
+   }
+}
+
 static int __init bcache_init(void)
 {
static const struct attribute *files[] = {
@@ -2428,6 +2457,8 @@ static int __init bcache_init(void)
NULL
};
 
+   check_module_parameters();
+
mutex_init(&bch_register_lock);
init_waitqueue_head(&unregister_wait);
register_reboot_notifier(&reboot);
@@ -2464,9 +2495,18 @@ static int __init bcache_init(void)
return -ENOMEM;
 }
 
+/*
+ * Module hooks
+ */
 module_exit(bcache_exit);
 module_init(bcache_init);
 
+module_param(bch_cutoff_writeback, uint, 0);
+MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
+
+module_param(bch_cutoff_writeback_sync, uint, 0);
+MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
+
 MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
 MODULE_AUTHOR("Kent Overstreet ");
 MODULE_LICENSE("GPL");
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index a38a74dffda4..27fbc2dd1734 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -88,6 +88,8 @@ read_attribute(writeback_keys_done);
 read_attribute(writeback_keys_failed);
 read_attribute(io_errors);
 read_attribute(congested);
+read_attribute(cutoff_writeback);
+read_attribute(cutoff_writeback_sync);
 rw_attribute(congested_read_threshold_us);
 rw_attribute(congested_write_threshold_us);
 
@@ -669,6 +671,9 @@ SHOW(__bch_cache_set)
sysfs_print(congested_write_threshold_us,
c->congested_write_threshold_us);
 
+   sysfs_print(cutoff_writeback, bch_cutoff_writeback);
+   sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);
+
sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
sysfs_printf(verify,"%i", c->verify);
sysfs_printf(key_merging_disabled,  "%i", c->key_merging_disabled);
@@ -866,6 +871,8 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_copy_gc_enabled,
 

[PATCH 5/5] bcache: set writeback_percent in a flexible range

2018-11-22 Thread Coly Li
Because CUTOFF_WRITEBACK is defined as 40, before the change to dynamic
cutoff writeback values, writeback_percent was limited to
[0, CUTOFF_WRITEBACK]. Any value larger than CUTOFF_WRITEBACK was
fixed up to 40.

Now the cutoff writeback limit is a dynamic value, bch_cutoff_writeback,
so writeback_percent can use the more flexible range
[0, bch_cutoff_writeback]. The flexibility is that this range can be
extended to be larger or smaller than [0, 40], depending on how
bch_cutoff_writeback is specified.

The default value is still strongly recommended for most users and most
workloads. But people who want to do research on bcache writeback
performance tuning now have the chance to specify a more flexible
writeback_percent in the range [0, 70].

Signed-off-by: Coly Li 
---
 drivers/md/bcache/sysfs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 27fbc2dd1734..36de4d52d60a 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -267,7 +267,8 @@ STORE(__cached_dev)
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
 
-   sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
+   sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
+   0, bch_cutoff_writeback);
 
if (attr == &sysfs_writeback_rate) {
ssize_t ret;
-- 
2.16.4



Re: [PATCH 8/8] aio: support for IO polling

2018-11-22 Thread Jens Axboe
On 11/22/18 4:13 AM, Jan Kara wrote:
> 
> On Tue 20-11-18 10:19:53, Jens Axboe wrote:
>> +/*
>> + * We can't just wait for polled events to come to us, we have to actively
>> + * find and complete them.
>> + */
>> +static void aio_iopoll_reap_events(struct kioctx *ctx)
>> +{
>> +if (!(ctx->flags & IOCTX_FLAG_IOPOLL))
>> +return;
>> +
>> +while (!list_empty_careful(&ctx->poll_submitted) ||
>> +       !list_empty(&ctx->poll_completing)) {
>> +unsigned int nr_events = 0;
>> +
>> +__aio_iopoll_check(ctx, NULL, &nr_events, 1, UINT_MAX);
>> +}
>> +}
>> +
>> +static int aio_iopoll_check(struct kioctx *ctx, long min_nr, long nr,
>> +struct io_event __user *event)
>> +{
>> +unsigned int nr_events = 0;
>> +int ret = 0;
>> +
>> +/* Only allow one thread polling at a time */
>> +if (test_and_set_bit(0, &ctx->getevents_busy))
>> +return -EBUSY;
>> +
>> +while (!nr_events || !need_resched()) {
>> +int tmin = 0;
>> +
>> +if (nr_events < min_nr)
>> +tmin = min_nr - nr_events;
>> +
>> +ret = __aio_iopoll_check(ctx, event, &nr_events, tmin, nr);
>> +if (ret <= 0)
>> +break;
>> +ret = 0;
>> +}
>> +
>> +clear_bit(0, &ctx->getevents_busy);
>> +return nr_events ? nr_events : ret;
>> +}
> 
> Hum, what if userspace calls io_destroy() while another process is polling
> for events on the same kioctx? It seems we'd be reaping events from two
> processes in parallel in that case which will result in various
> "interesting" effects like ctx->poll_completing list corruption...

I've replaced the ->getevents_busy with a mutex, and we also protect
the ->dead check inside that mutex. That ensures that destroy can't
proceed while a potential caller is inside getevents(), and that
getevents() sees if the ctx is being destroyed.
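
A minimal sketch of that scheme -- assuming a ctx->getevents_lock mutex
and a ctx->dead flag, which are illustrative names here rather than the
actual follow-up patch -- would be:

	static int aio_iopoll_check(struct kioctx *ctx, long min_nr, long nr,
				    struct io_event __user *event)
	{
		unsigned int nr_events = 0;
		int ret = 0;

		/* serialize pollers against each other and against io_destroy() */
		mutex_lock(&ctx->getevents_lock);
		if (ctx->dead) {
			mutex_unlock(&ctx->getevents_lock);
			return -EINVAL;
		}

		while (!nr_events || !need_resched()) {
			int tmin = 0;

			if (nr_events < min_nr)
				tmin = min_nr - nr_events;

			ret = __aio_iopoll_check(ctx, event, &nr_events, tmin, nr);
			if (ret <= 0)
				break;
			ret = 0;
		}

		mutex_unlock(&ctx->getevents_lock);
		return nr_events ? nr_events : ret;
	}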

-- 
Jens Axboe



Re: [PATCH blktests] Add use of logger so that syslog files show when each test starts

2018-11-22 Thread Theodore Y. Ts'o
Ping?

- Ted

On Mon, Oct 29, 2018 at 12:15:57PM -0400, Theodore Ts'o wrote:
> Signed-off-by: Theodore Ts'o 
> ---
>  check | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/check b/check
> index f6c3537..ebd87c0 100755
> --- a/check
> +++ b/check
> @@ -319,6 +319,7 @@ _call_test() {
>   local dmesg_marker=""
>   CHECK_DMESG=0
>   fi
> + $LOGGER_PROG "run blktests $TEST_NAME"
>  
>   trap _cleanup EXIT
>   if ! TMPDIR="$(mktemp --tmpdir -p "$OUTPUT" -d 
> "tmpdir.${TEST_NAME//\//.}.XXX")"; then
> @@ -578,6 +579,8 @@ fi
>  eval set -- "$TEMP"
>  unset TEMP
>  
> +LOGGER_PROG="$(type -P logger)" || LOGGER_PROG=true
> +
>  if [[ -r config ]]; then
>   # shellcheck disable=SC1091
>   . config
> -- 
> 2.18.0.rc0
>