A previous commit deemed it safe to use __set_current_state() for IRQ-driven
O_DIRECT, but that isn't necessarily the case. Be safer and only apply
that optimization to polled IO, where we know the task is going to find
the completions itself.

Fixes: 849a370016a5 ("block: avoid ordered task state change for polled IO")
Signed-off-by: Jens Axboe <ax...@kernel.dk>

---

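For context, here is a minimal sketch (not part of the patch; the dio->waiter
and end_io naming is just illustrative of the existing wait loops) of why the
ordered set_current_state() matters when completion arrives from IRQ context:
the completion side clears the wait condition and then calls
wake_up_process(), so the sleeper must publish its state before re-checking
the condition or the wakeup can be missed. Polled IO never relies on that
remote wakeup, since the task reaps the completion itself via blk_poll(), so
the cheaper, un-ordered __set_current_state() is fine there.

	/* waiter side, IRQ-driven completion (condensed sketch) */
	for (;;) {
		/* ordered store + barrier, pairs with wake_up_process() */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* completion side, e.g. from the bio ->bi_end_io handler */
	struct task_struct *waiter = dio->waiter;

	WRITE_ONCE(dio->waiter, NULL);
	wake_up_process(waiter);
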
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e1886cc7048f..b5fba2922504 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -193,6 +193,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs, *bvec;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
+       bool is_poll;
        struct bio bio;
        ssize_t ret;
        blk_qc_t qc;
@@ -232,18 +233,21 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
-       if (iocb->ki_flags & IOCB_HIPRI)
+       is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
+       if (is_poll)
                bio.bi_opf |= REQ_HIPRI;
 
        qc = submit_bio(&bio);
        for (;;) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               if (is_poll)
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+               else
+                       set_current_state(TASK_UNINTERRUPTIBLE);
 
                if (!READ_ONCE(bio.bi_private))
                        break;
 
-               if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc, true))
+               if (!is_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -426,13 +430,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                return -EIOCBQUEUED;
 
        for (;;) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               if (is_poll)
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+               else
+                       set_current_state(TASK_UNINTERRUPTIBLE);
 
                if (!READ_ONCE(dio->waiter))
                        break;
 
-               if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                   !blk_poll(bdev_get_queue(bdev), qc, true))
+               if (!is_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
diff --git a/fs/iomap.c b/fs/iomap.c
index 9a5bf1e8925b..30c5b7d9aca9 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1790,6 +1790,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos, start = pos;
        loff_t end = iocb->ki_pos + count - 1, ret = 0;
+       bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
        unsigned int flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;
@@ -1908,13 +1909,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                        return -EIOCBQUEUED;
 
                for (;;) {
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       if (is_poll)
+                               __set_current_state(TASK_UNINTERRUPTIBLE);
+                       else
+                               set_current_state(TASK_UNINTERRUPTIBLE);
 
                        if (!READ_ONCE(dio->submit.waiter))
                                break;
 
-                       if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                           !dio->submit.last_queue ||
+                       if (!is_poll || !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
                                         dio->submit.cookie, true))
                                io_schedule();

-- 
Jens Axboe
