Short writes can happen, too, not just short reads.  The difference from
aio=native is that the kernel will already retry the tail of short
requests internally -- so it is harder to reproduce.  But if the
tail of a short request returns an error to the kernel, we will see it
in userspace still.  To reproduce this, apply the following patch on top
of the one shown in HEAD^ (again %s/escaped // to apply):

escaped diff --git a/block/export/fuse.c b/block/export/fuse.c
escaped index 67dc50a412..2b98489a32 100644
escaped --- a/block/export/fuse.c
escaped +++ b/block/export/fuse.c
@@ -1059,8 +1059,15 @@ fuse_co_read(FuseExport *exp, void **bufptr, uint64_t 
offset, uint32_t size)
     int64_t blk_len;
     void *buf;
     int ret;
+    static uint32_t error_size;

-    size = MIN(size, 4096);
+    if (error_size == size) {
+        error_size = 0;
+        return -EIO;
+    } else if (size > 4096) {
+        error_size = size - 4096;
+        size = 4096;
+    }

     /* Limited by max_read, should not happen */
     if (size > FUSE_MAX_READ_BYTES) {
@@ -1111,8 +1118,15 @@ fuse_co_write(FuseExport *exp, struct fuse_write_out 
*out,
 {
     int64_t blk_len;
     int ret;
+    static uint32_t error_size;

-    size = MIN(size, 4096);
+    if (error_size == size) {
+        error_size = 0;
+        return -EIO;
+    } else if (size > 4096) {
+        error_size = size - 4096;
+        size = 4096;
+    }

     QEMU_BUILD_BUG_ON(FUSE_MAX_WRITE_BYTES > BDRV_REQUEST_MAX_BYTES);
     /* Limited by max_write, should not happen */

I know this is a bit artificial because to produce this, there must be
an I/O error somewhere anyway, but if it does happen, qemu will
interpret it as ENOSPC for short writes, which is incorrect.  So I
believe we need to resubmit the tail to maybe have it succeed now, or at
least get the correct error code.

Reproducer as before:
$ ./qemu-img create -f raw test.raw 8k
Formatting 'test.raw', fmt=raw size=8192
$ ./qemu-io -f raw -c 'write -P 42 0 8k' test.raw
wrote 8192/8192 bytes at offset 0
8 KiB, 1 ops; 00.00 sec (64.804 MiB/sec and 8294.9003 ops/sec)
$ hexdump -C test.raw
00000000  2a 2a 2a 2a 2a 2a 2a 2a  2a 2a 2a 2a 2a 2a 2a 2a  |****************|
*
00002000
$ storage-daemon/qemu-storage-daemon \
    --blockdev file,node-name=test,filename=test.raw \
    --export fuse,id=exp,node-name=test,mountpoint=test.raw,writable=true

$ ./qemu-io --image-opts -c 'read -P 23 0 8k' \
    driver=file,filename=test.raw,cache.direct=on,aio=io_uring
read 8192/8192 bytes at offset 0
8 KiB, 1 ops; 00.00 sec (58.481 MiB/sec and 7485.5342 ops/sec)
$ ./qemu-io --image-opts -c 'write -P 23 0 8k' \
    driver=file,filename=test.raw,cache.direct=on,aio=io_uring
write failed: No space left on device
$ hexdump -C test.raw
00000000  17 17 17 17 17 17 17 17  17 17 17 17 17 17 17 17  |................|
*
00001000  2a 2a 2a 2a 2a 2a 2a 2a  2a 2a 2a 2a 2a 2a 2a 2a  |****************|
*
00002000

So short reads already work (because there is code for that), but short
writes incorrectly produce ENOSPC.  This patch fixes that by
resubmitting not only the tail of short reads but also that of short writes.

Signed-off-by: Hanna Czenczek <[email protected]>
---
 block/io_uring.c   | 83 ++++++++++++++++++++++++++--------------------
 block/trace-events |  2 +-
 2 files changed, 48 insertions(+), 37 deletions(-)

diff --git a/block/io_uring.c b/block/io_uring.c
index cb131d3b8b..61b54647ae 100644
--- a/block/io_uring.c
+++ b/block/io_uring.c
@@ -27,10 +27,10 @@ typedef struct {
     BdrvRequestFlags flags;
 
     /*
-     * Buffered reads may require resubmission, see
-     * luring_resubmit_short_read().
+     * Short reads/writes require resubmission, see
+     * luring_resubmit_short_io().
      */
-    int total_read;
+    int total_done;
     QEMUIOVector resubmit_qiov;
 
     CqeHandler cqe_handler;
@@ -44,6 +44,10 @@ static void luring_prep_sqe(struct io_uring_sqe *sqe, void 
*opaque)
     int fd = req->fd;
     BdrvRequestFlags flags = req->flags;
 
+    if (req->resubmit_qiov.iov != NULL) {
+        qiov = &req->resubmit_qiov;
+    }
+
     switch (req->type) {
     case QEMU_AIO_WRITE:
     {
@@ -51,7 +55,8 @@ static void luring_prep_sqe(struct io_uring_sqe *sqe, void 
*opaque)
         if (luring_flags != 0 || qiov->niov > 1) {
 #ifdef HAVE_IO_URING_PREP_WRITEV2
             io_uring_prep_writev2(sqe, fd, qiov->iov,
-                                  qiov->niov, offset, luring_flags);
+                                  qiov->niov, offset + req->total_done,
+                                  luring_flags);
 #else
             /*
              * FUA should only be enabled with HAVE_IO_URING_PREP_WRITEV2, see
@@ -59,12 +64,14 @@ static void luring_prep_sqe(struct io_uring_sqe *sqe, void 
*opaque)
              */
             assert(luring_flags == 0);
 
-            io_uring_prep_writev(sqe, fd, qiov->iov, qiov->niov, offset);
+            io_uring_prep_writev(sqe, fd, qiov->iov, qiov->niov,
+                                 offset + req->total_done);
 #endif
         } else {
             /* The man page says non-vectored is faster than vectored */
             struct iovec *iov = qiov->iov;
-            io_uring_prep_write(sqe, fd, iov->iov_base, iov->iov_len, offset);
+            io_uring_prep_write(sqe, fd, iov->iov_base, iov->iov_len,
+                                offset + req->total_done);
         }
         break;
     }
@@ -73,17 +80,14 @@ static void luring_prep_sqe(struct io_uring_sqe *sqe, void 
*opaque)
         break;
     case QEMU_AIO_READ:
     {
-        if (req->resubmit_qiov.iov != NULL) {
-            qiov = &req->resubmit_qiov;
-        }
         if (qiov->niov > 1) {
             io_uring_prep_readv(sqe, fd, qiov->iov, qiov->niov,
-                                offset + req->total_read);
+                                offset + req->total_done);
         } else {
             /* The man page says non-vectored is faster than vectored */
             struct iovec *iov = qiov->iov;
             io_uring_prep_read(sqe, fd, iov->iov_base, iov->iov_len,
-                               offset + req->total_read);
+                               offset + req->total_done);
         }
         break;
     }
@@ -98,21 +102,26 @@ static void luring_prep_sqe(struct io_uring_sqe *sqe, void 
*opaque)
 }
 
 /**
- * luring_resubmit_short_read:
+ * luring_resubmit_short_io:
  *
- * Short reads are rare but may occur. The remaining read request needs to be
- * resubmitted.
+ * Short reads and writes are rare but may occur.  The remaining request needs
+ * to be resubmitted.
+ *
+ * For example, short reads can be reproduced by a FUSE export deliberately
+ * executing short reads.  The tail of short writes is generally resubmitted by
+ * io-uring in the kernel, but if that resubmission encounters an I/O error, 
the
+ * already submitted portion will be returned as a short write.
  */
-static void luring_resubmit_short_read(LuringRequest *req, int nread)
+static void luring_resubmit_short_io(LuringRequest *req, int ndone)
 {
     QEMUIOVector *resubmit_qiov;
     size_t remaining;
 
-    trace_luring_resubmit_short_read(req, nread);
+    trace_luring_resubmit_short_io(req, ndone);
 
-    /* Update read position */
-    req->total_read += nread;
-    remaining = req->qiov->size - req->total_read;
+    /* Update I/O position */
+    req->total_done += ndone;
+    remaining = req->qiov->size - req->total_done;
 
     /* Shorten qiov */
     resubmit_qiov = &req->resubmit_qiov;
@@ -121,7 +130,7 @@ static void luring_resubmit_short_read(LuringRequest *req, 
int nread)
     } else {
         qemu_iovec_reset(resubmit_qiov);
     }
-    qemu_iovec_concat(resubmit_qiov, req->qiov, req->total_read, remaining);
+    qemu_iovec_concat(resubmit_qiov, req->qiov, req->total_done, remaining);
 
     aio_add_sqe(luring_prep_sqe, req, &req->cqe_handler);
 }
@@ -153,26 +162,28 @@ static void luring_cqe_handler(CqeHandler *cqe_handler)
             return;
         }
     } else if (req->qiov) {
-        /* total_read is non-zero only for resubmitted read requests */
-        int total_bytes = ret + req->total_read;
+        /* total_done is non-zero only for resubmitted requests */
+        int total_bytes = ret + req->total_done;
 
         if (total_bytes == req->qiov->size) {
             ret = 0;
-        } else {
+        } else if (ret > 0 && (req->type == QEMU_AIO_READ ||
+                               req->type == QEMU_AIO_WRITE)) {
             /* Short Read/Write */
-            if (req->type == QEMU_AIO_READ) {
-                if (ret > 0) {
-                    luring_resubmit_short_read(req, ret);
-                    return;
-                }
-
-                /* Pad with zeroes */
-                qemu_iovec_memset(req->qiov, total_bytes, 0,
-                                  req->qiov->size - total_bytes);
-                ret = 0;
-            } else {
-                ret = -ENOSPC;
-            }
+            luring_resubmit_short_io(req, ret);
+            return;
+        } else if (req->type == QEMU_AIO_READ) {
+            /* Read ret == 0: EOF, pad with zeroes */
+            qemu_iovec_memset(req->qiov, total_bytes, 0,
+                              req->qiov->size - total_bytes);
+            ret = 0;
+        } else {
+            /*
+             * Normal write ret == 0 means ENOSPC.
+             * For zone-append, we treat any 0 <= ret < qiov->size as ENOSPC,
+             * too, because resubmitting the tail seems a little unsafe.
+             */
+            ret = -ENOSPC;
         }
     }
 
diff --git a/block/trace-events b/block/trace-events
index d170fc96f1..950c82d4b8 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -64,7 +64,7 @@ file_paio_submit(void *acb, void *opaque, int64_t offset, int 
count, int type) "
 # io_uring.c
 luring_cqe_handler(void *req, int ret) "req %p ret %d"
 luring_co_submit(void *bs, void *req, int fd, uint64_t offset, size_t nbytes, 
int type) "bs %p req %p fd %d offset %" PRId64 " nbytes %zd type %d"
-luring_resubmit_short_read(void *req, int nread) "req %p nread %d"
+luring_resubmit_short_io(void *req, int ndone) "req %p ndone %d"
 
 # qcow2.c
 qcow2_add_task(void *co, void *bs, void *pool, const char *action, int 
cluster_type, uint64_t host_offset, uint64_t offset, uint64_t bytes, void 
*qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d 
file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p 
qiov_offset %zu"
-- 
2.53.0


Reply via email to