Add an iomap implementation of fops->iopoll() and wire it up for
XFS.

Move the bio cookie and the last submitted request queue out of the
submit-side union in struct iomap_dio so they remain visible to a
poller, factor bio submission into a new iomap_dio_submit_bio() helper
that publishes both fields, and stash the dio in iocb->private so that
iomap_dio_iopoll() can find it and feed the cookie to blk_poll().

Signed-off-by: Jens Axboe <ax...@kernel.dk>
---
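Usage note (commentary, not part of the commit): with this hooked up, a
synchronous O_DIRECT read issued with RWF_HIPRI spins in the blk_poll()
loop at the bottom of iomap_dio_rw() instead of sleeping in
io_schedule().  A minimal userspace sketch, assuming a file on XFS whose
backing queue has polling enabled:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

#define BUF_SIZE 4096

int main(int argc, char **argv)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
		return 1;
	}

	/* O_DIRECT so the read goes through iomap_dio_rw() */
	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT requires an aligned buffer */
	if (posix_memalign(&buf, BUF_SIZE, BUF_SIZE)) {
		perror("posix_memalign");
		return 1;
	}

	iov.iov_base = buf;
	iov.iov_len = BUF_SIZE;

	/* RWF_HIPRI sets IOCB_HIPRI, asking for polled completion */
	ret = preadv2(fd, &iov, 1, 0, RWF_HIPRI);
	if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes\n", ret);

	free(buf);
	close(fd);
	return ret < 0;
}
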
 fs/iomap.c            | 50 +++++++++++++++++++++++++++++--------------
 fs/xfs/xfs_file.c     |  1 +
 include/linux/iomap.h |  1 +
 3 files changed, 36 insertions(+), 16 deletions(-)
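
For the async side, the new hook is what an in-kernel consumer would
drive; a rough, hypothetical sketch of such a caller (the function name,
the completed flag, and its raising by ->ki_complete are all assumptions
for illustration, not code from this series):

#include <linux/fs.h>
#include <linux/sched.h>

/* Hypothetical in-kernel caller of the new hook; not part of this patch. */
static int poll_kiocb_to_completion(struct kiocb *kiocb, bool *completed)
{
	struct file *file = kiocb->ki_filp;
	int ret;

	if (!file->f_op->iopoll)
		return -EOPNOTSUPP;

	while (!READ_ONCE(*completed)) {
		/* spin == true: keep polling the queue until we find work */
		ret = file->f_op->iopoll(kiocb, true);
		if (ret < 0)
			return ret;
		cond_resched();
	}
	return 0;
}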

diff --git a/fs/iomap.c b/fs/iomap.c
index 74c1f37f0fd6..faf96198f99a 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1419,14 +1419,14 @@ struct iomap_dio {
        unsigned                flags;
        int                     error;
        bool                    wait_for_completion;
+       blk_qc_t                cookie;
+       struct request_queue    *last_queue;
 
        union {
                /* used during submission and for synchronous completion: */
                struct {
                        struct iov_iter         *iter;
                        struct task_struct      *waiter;
-                       struct request_queue    *last_queue;
-                       blk_qc_t                cookie;
                } submit;
 
                /* used for aio completion: */
@@ -1436,6 +1436,30 @@ struct iomap_dio {
        };
 };
 
+int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+{
+       struct iomap_dio *dio = kiocb->private;
+       struct request_queue *q = READ_ONCE(dio->last_queue);
+
+       if (!q || READ_ONCE(dio->cookie) == BLK_QC_T_NONE)
+               return 0;
+       return blk_poll(q, READ_ONCE(dio->cookie), spin);
+}
+EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
+
+static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
+               struct bio *bio)
+{
+       atomic_inc(&dio->ref);
+
+       /*
+        * iomap_dio_iopoll() can race with us.  Store the cookie first:
+        * a non-NULL last_queue marks that we are ready to poll.
+        */
+       WRITE_ONCE(dio->cookie, submit_bio(bio));
+       WRITE_ONCE(dio->last_queue, bdev_get_queue(iomap->bdev));
+}
+
 static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 {
        struct kiocb *iocb = dio->iocb;
@@ -1548,7 +1572,7 @@ static void iomap_dio_bio_end_io(struct bio *bio)
        }
 }
 
-static blk_qc_t
+static void
 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
                unsigned len)
 {
@@ -1568,9 +1592,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
        get_page(page);
        __bio_add_page(bio, page, len, 0);
        bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
-
-       atomic_inc(&dio->ref);
-       return submit_bio(bio);
+       iomap_dio_submit_bio(dio, iomap, bio);
 }
 
 static loff_t
@@ -1676,11 +1698,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
                copied += n;
 
                nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
-
-               atomic_inc(&dio->ref);
-
-               dio->submit.last_queue = bdev_get_queue(iomap->bdev);
-               dio->submit.cookie = submit_bio(bio);
+               iomap_dio_submit_bio(dio, iomap, bio);
        } while (nr_pages);
 
        if (need_zeroout) {
@@ -1782,6 +1800,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        dio = kmalloc(sizeof(*dio), GFP_KERNEL);
        if (!dio)
                return -ENOMEM;
+       iocb->private = dio;
 
        dio->iocb = iocb;
        atomic_set(&dio->ref, 1);
@@ -1791,11 +1810,11 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        dio->error = 0;
        dio->flags = 0;
        dio->wait_for_completion = is_sync_kiocb(iocb);
+       dio->cookie = BLK_QC_T_NONE;
+       dio->last_queue = NULL;
 
        dio->submit.iter = iter;
        dio->submit.waiter = current;
-       dio->submit.cookie = BLK_QC_T_NONE;
-       dio->submit.last_queue = NULL;
 
        if (iov_iter_rw(iter) == READ) {
                if (pos >= dio->i_size)
@@ -1894,9 +1913,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                                break;
 
                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                           !dio->submit.last_queue ||
-                           !blk_poll(dio->submit.last_queue,
-                                        dio->submit.cookie, true))
+                           !dio->last_queue ||
+                           !blk_poll(dio->last_queue, dio->cookie, true))
                                io_schedule();
                }
                __set_current_state(TASK_RUNNING);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 53c9ab8fb777..603e705781a4 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1203,6 +1203,7 @@ const struct file_operations xfs_file_operations = {
        .write_iter     = xfs_file_write_iter,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
+       .iopoll         = iomap_dio_iopoll,
        .unlocked_ioctl = xfs_file_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 9a4258154b25..0fefb5455bda 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -162,6 +162,7 @@ typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
                unsigned flags);
 ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
 
 #ifdef CONFIG_SWAP
 struct file;
-- 
2.17.1
