If we get a keyed wakeup for an aio poll waitqueue and we can acquire the
ctx_lock without spinning, we can complete the iocb straight from the
wakeup callback and avoid a context switch.

Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: Avi Kivity <[email protected]>
---

Fix a subject line typo and improve a comment.

 fs/aio.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
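
For readers skimming the change: below is a minimal userspace sketch of the
"complete inline if the lock is uncontended, otherwise defer to a worker"
pattern this patch applies.  The struct and function names are illustrative
only; the real code operates on the aio ctx_lock and uses schedule_work().

/*
 * Userspace sketch of the trylock fast path, assuming made-up names.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct completion_ctx {
	pthread_mutex_t lock;	/* stands in for ctx->ctx_lock */
	int completed;		/* stands in for the completion ring */
};

/* Slow path: hand the completion off to a worker (like schedule_work()). */
static void defer_to_worker(struct completion_ctx *ctx)
{
	printf("deferring completion to worker context\n");
}

/*
 * Wakeup callback: if the lock can be taken without blocking, complete the
 * request right here and avoid a context switch; otherwise fall back to the
 * worker, as the existing code always did.
 */
static void wakeup(struct completion_ctx *ctx)
{
	if (pthread_mutex_trylock(&ctx->lock) == 0) {
		ctx->completed++;		/* inline completion */
		pthread_mutex_unlock(&ctx->lock);
		printf("completed inline, total=%d\n", ctx->completed);
		return;
	}
	defer_to_worker(ctx);
}

int main(void)
{
	struct completion_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.completed = 0,
	};

	wakeup(&ctx);	/* uncontended here, so it completes inline */
	return 0;
}

The point of the trylock is that the wakeup callback runs in a context where
spinning on a contended lock would be unacceptable, so contention simply falls
back to the existing deferred path.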

diff --git a/fs/aio.c b/fs/aio.c
index 2fd19521d8a8..5943098a87c6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1672,13 +1672,29 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                void *key)
 {
        struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
        __poll_t mask = key_to_poll(key);
 
        req->woken = true;
 
        /* for instances that support it check for an event match first: */
-       if (mask && !(mask & req->events))
-               return 0;
+       if (mask) {
+               if (!(mask & req->events))
+                       return 0;
+
+               /*
+                * Try to complete the iocb inline if we can to avoid a costly
+                * context switch.
+                */
+               if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+                       list_del(&iocb->ki_list);
+                       spin_unlock(&iocb->ki_ctx->ctx_lock);
+
+                       list_del_init(&req->wait.entry);
+                       aio_poll_complete(iocb, mask);
+                       return 1;
+               }
+       }
 
        list_del_init(&req->wait.entry);
        schedule_work(&req->work);
-- 
2.18.0
