Similarly to how we use state->ios_left to know how many file references
we will need to grab, we can use it to allocate the aio_kiocb's we need
in bulk.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/aio.c | 47 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 8 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index e9dbaedda7ae..a385e7c06bfa 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -240,6 +240,8 @@ struct aio_kiocb {
        };
 };
 
+#define AIO_IOPOLL_BATCH       8
+
 struct aio_submit_state {
        struct kioctx *ctx;
 
@@ -254,6 +256,13 @@ struct aio_submit_state {
        struct list_head req_list;
        unsigned int req_count;
 
+       /*
+        * aio_kiocb alloc cache
+        */
+       void *iocbs[AIO_IOPOLL_BATCH];
+       unsigned int free_iocbs;
+       unsigned int cur_iocb;
+
        /*
         * File reference cache
         */
@@ -1113,15 +1122,35 @@ static void aio_iocb_init(struct kioctx *ctx, struct aio_kiocb *req)
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
  */
-static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
+static struct aio_kiocb *aio_get_req(struct kioctx *ctx,
+                                    struct aio_submit_state *state)
 {
        struct aio_kiocb *req;
 
-       req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
-       if (unlikely(!req))
-               return NULL;
+       if (!state)
+               req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
+       else if (!state->free_iocbs) {
+               int size;
+
+               size = min_t(int, state->ios_left, ARRAY_SIZE(state->iocbs));
+               size = kmem_cache_alloc_bulk(kiocb_cachep, GFP_KERNEL, size,
+                                               state->iocbs);
+               if (size < 0)
+                       return ERR_PTR(size);
+               else if (!size)
+                       return ERR_PTR(-ENOMEM);
+               state->free_iocbs = size - 1;
+               state->cur_iocb = 1;
+               req = state->iocbs[0];
+       } else {
+               req = state->iocbs[state->cur_iocb];
+               state->free_iocbs--;
+               state->cur_iocb++;
+       }
+
+       if (req)
+               aio_iocb_init(ctx, req);
 
-       aio_iocb_init(ctx, req);
        return req;
 }
 
@@ -1359,8 +1388,6 @@ static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
        return ret < 0 || *i >= min_nr;
 }
 
-#define AIO_IOPOLL_BATCH       8
-
 /*
  * Process completed iocb iopoll entries, copying the result to userspace.
  */
@@ -2371,7 +2398,7 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
                return -EAGAIN;
 
        ret = -EAGAIN;
-       req = aio_get_req(ctx);
+       req = aio_get_req(ctx, state);
-       if (unlikely(!req))
+       if (unlikely(IS_ERR_OR_NULL(req)))
                goto out_put_reqs_available;
 
@@ -2503,6 +2530,9 @@ static void aio_submit_state_end(struct aio_submit_state *state)
        if (!list_empty(&state->req_list))
                aio_flush_state_reqs(state->ctx, state);
        aio_file_put(state);
+       if (state->free_iocbs)
+               kmem_cache_free_bulk(kiocb_cachep, state->free_iocbs,
+                                       &state->iocbs[state->cur_iocb]);
 }
 
 /*
@@ -2514,6 +2544,7 @@ static void aio_submit_state_start(struct aio_submit_state *state,
        state->ctx = ctx;
        INIT_LIST_HEAD(&state->req_list);
        state->req_count = 0;
+       state->free_iocbs = 0;
        state->file = NULL;
        state->ios_left = max_ios;
 #ifdef CONFIG_BLOCK
-- 
2.17.1

Reply via email to