We have repeatedly received reports that SEEK_HOLE and SEEK_DATA are slow on certain filesystems and/or under certain circumstances. That is why we generally try to avoid them: it is why bdrv_co_block_status() has the @want_zero parameter, and why qcow2 has metadata preallocation detection, so that we do not fall through to the protocol layer to discover which blocks are zero unless that is really necessary (i.e., for metadata-preallocated images).
In addition to those measures, we can also try to speed up zero detection by letting file-posix cache some hole location information, namely where the next hole after the most recently queried offset is. This helps especially for images that are (nearly) fully allocated, which is coincidentally also the case where querying for zero information cannot gain us much. Note that this of course only works so long as we have no concurrent writers to the image, which is the case when the WRITE capability is not shared. Alternatively (or perhaps as an improvement in the future), we could let file-posix keep track of what it knows is zero and what it knows is non-zero with bitmaps, which would help images that actually have a significant number of holes (where the implementation here cannot do much). But for such images, SEEK_HOLE/DATA are generally faster (they do not need to seek through the whole file), and the performance lost by querying the block status does not feel as bad because it is outweighed by the performance that can be saved by special-casing zeroed areas, so focusing on images that are (nearly) fully allocated is more important.
Signed-off-by: Max Reitz <[email protected]> --- block/file-posix.c | 81 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) diff --git a/block/file-posix.c b/block/file-posix.c index 05079b40ca..2ca0a2e05b 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -172,6 +172,11 @@ typedef struct BDRVRawState { } stats; PRManager *pr_mgr; + + bool can_cache_next_zero_offset; + bool next_zero_offset_valid; + uint64_t next_zero_offset_from; + uint64_t next_zero_offset; } BDRVRawState; typedef struct BDRVRawReopenState { @@ -2049,7 +2054,25 @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { + BDRVRawState *s = bs->opaque; + assert(flags == 0); + + /* + * If offset is just above s->next_zero_offset, the hole that was + * reportedly there might be removed from the file (because only + * whole filesystem clusters can be zeroed). But that does not + * matter, because block-status does not care about whether there + * actually is a hole, but just about whether there are zeroes + * there - and this write will not make those zeroes non-zero. 
+ */ + if (s->next_zero_offset_valid && + offset <= s->next_zero_offset && + offset + bytes > s->next_zero_offset) + { + s->next_zero_offset_valid = false; + } + return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); } @@ -2183,6 +2206,10 @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset, struct stat st; int ret; + if (s->next_zero_offset_valid && offset < s->next_zero_offset) { + s->next_zero_offset_valid = false; + } + if (fstat(s->fd, &st)) { ret = -errno; error_setg_errno(errp, -ret, "Failed to fstat() the file"); @@ -2616,8 +2643,17 @@ static int coroutine_fn raw_co_delete_file(BlockDriverState *bs, static int find_allocation(BlockDriverState *bs, off_t start, off_t *data, off_t *hole) { -#if defined SEEK_HOLE && defined SEEK_DATA BDRVRawState *s = bs->opaque; + + if (s->next_zero_offset_valid) { + if (start >= s->next_zero_offset_from && start < s->next_zero_offset) { + *data = start; + *hole = s->next_zero_offset; + return 0; + } + } + +#if defined SEEK_HOLE && defined SEEK_DATA off_t offs; /* @@ -2716,6 +2752,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, int64_t *map, BlockDriverState **file) { + BDRVRawState *s = bs->opaque; off_t data = 0, hole = 0; int ret; @@ -2734,6 +2771,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, } ret = find_allocation(bs, offset, &data, &hole); + s->next_zero_offset_valid = false; if (ret == -ENXIO) { /* Trailing hole */ *pnum = bytes; @@ -2761,6 +2799,12 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, } ret = BDRV_BLOCK_DATA; + + if (s->can_cache_next_zero_offset) { + s->next_zero_offset_valid = true; + s->next_zero_offset_from = offset; + s->next_zero_offset = hole; + } } else { /* On a hole, compute bytes to the beginning of the next extent. 
*/ assert(hole == offset); @@ -2910,6 +2954,13 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev) RawPosixAIOData acb; int ret; + if (s->next_zero_offset_valid && + offset <= s->next_zero_offset && + offset + bytes > s->next_zero_offset_from) + { + s->next_zero_offset_valid = false; + } + acb = (RawPosixAIOData) { .bs = bs, .aio_fildes = s->fd, @@ -2941,6 +2992,17 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, RawPosixAIOData acb; ThreadPoolFunc *handler; + if (s->next_zero_offset_valid && + offset < s->next_zero_offset && + offset + bytes > s->next_zero_offset_from) + { + if (offset > s->next_zero_offset_from) { + s->next_zero_offset = offset; + } else { + s->next_zero_offset_valid = false; + } + } + #ifdef CONFIG_FALLOCATE if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { BdrvTrackedRequest *req; @@ -3155,6 +3217,15 @@ static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared) raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL); s->perm = perm; s->shared_perm = shared; + + /* + * We can only cache anything if there are no external writers on + * the image. + */ + s->can_cache_next_zero_offset = !(shared & BLK_PERM_WRITE); + if (!s->can_cache_next_zero_offset) { + s->next_zero_offset_valid = false; + } } static void raw_abort_perm_update(BlockDriverState *bs) @@ -3203,6 +3274,14 @@ static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs, return -EIO; } + /* Same as in raw_co_pwritev() */ + if (s->next_zero_offset_valid && + dst_offset <= s->next_zero_offset && + dst_offset + bytes > s->next_zero_offset_from) + { + s->next_zero_offset_valid = false; + } + acb = (RawPosixAIOData) { .bs = bs, .aio_type = QEMU_AIO_COPY_RANGE, -- 2.29.2
