When deciding whether to fire a timeout based on the number of completions,
ignore CQEs emitted by other timeouts.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 19 +++----------------
 1 file changed, 3 insertions(+), 16 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 006ac57af842..fb8ec4b00375 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1098,33 +1098,20 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx)
        spin_unlock_irq(&ctx->completion_lock);
 }
 
-static inline bool io_check_in_range(u32 pos, u32 start, u32 end)
-{
-       /* if @end < @start, check for [end, MAX_UINT] + [MAX_UINT, start] */
-       return (pos - start) <= (end - start);
-}
-
 static void __io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-       u32 end, start;
-
-       start = end = ctx->cached_cq_tail;
        do {
                struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
                                                        struct io_kiocb, list);
 
                if (req->flags & REQ_F_TIMEOUT_NOSEQ)
                        break;
-               /*
-                * multiple timeouts may have the same target,
-                * check that @req is in [first_tail, cur_tail]
-                */
-               if (!io_check_in_range(req->timeout.target_seq, start, end))
+               if (req->timeout.target_seq != ctx->cached_cq_tail
+                                       - atomic_read(&ctx->cq_timeouts))
                        break;
 
                list_del_init(&req->list);
                io_kill_timeout(req);
-               end = ctx->cached_cq_tail;
        } while (!list_empty(&ctx->timeout_list));
 }
 
@@ -4688,7 +4675,7 @@ static int io_timeout(struct io_kiocb *req)
                goto add;
        }
 
-       tail = ctx->cached_cq_tail;
+       tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
        req->timeout.target_seq = tail + off;
 
        /*
-- 
2.24.0

Reply via email to