Calling mempool_alloc in a loop is not deadlock-safe unless the maximum number of elements each caller allocates, multiplied by the maximum number of threads using the pool, is less than the minimum pool size. Use the new mempool_alloc_bulk helper to allocate all missing elements in one pass, which removes this deadlock risk.

This also means that allocations not satisfied from the pool now go through alloc_pages_bulk, which can be significantly faster than a loop over individual page allocations.
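
For illustration only (not part of the change below), the allocation and freeing pattern looks roughly like the sketch that follows. The alloc_bounce_pages/free_bounce_pages helpers are made-up names for this example, and the mempool_alloc_bulk/mempool_free_bulk calling conventions are inferred from how the patch uses them:

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/*
 * Sketch only, with hypothetical helper names: fill a page array of size
 * nr without calling mempool_alloc in a loop.  alloc_pages_bulk() only
 * fills slots that are still NULL, and mempool_alloc_bulk() then provides
 * whatever the page allocator could not, so each thread waits on the pool
 * at most once.
 */
static void alloc_bounce_pages(mempool_t *pool, struct page **pages,
		unsigned int nr)
{
	unsigned int nr_allocated;

	/* alloc_pages_bulk() expects unfilled slots to be zeroed */
	memset(pages, 0, sizeof(struct page *) * nr);
	nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr, pages);
	if (nr_allocated < nr)
		mempool_alloc_bulk(pool, (void **)pages, nr, nr_allocated);
}

static void free_bounce_pages(mempool_t *pool, struct page **pages,
		unsigned int nr)
{
	unsigned int returned;

	/* elements the pool does not absorb go back to the page allocator */
	returned = mempool_free_bulk(pool, (void **)pages, nr);
	if (returned < nr)
		release_pages(pages + returned, nr - returned);
}

The actual code below additionally reuses the tail of the bio_vec array as the temporary page array to avoid a separate allocation, and wraps the allocation in memalloc_noio_save()/memalloc_noio_restore() instead of passing GFP_NOIO directly, as described in the comments in the diff.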
Signed-off-by: Christoph Hellwig <[email protected]>
---
 block/blk-crypto-fallback.c | 70 ++++++++++++++++++++++++++++---------
 1 file changed, 53 insertions(+), 17 deletions(-)

diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 58b35c5d6949..1db4aa4d812a 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -22,7 +22,7 @@
 #include "blk-cgroup.h"
 #include "blk-crypto-internal.h"
 
-static unsigned int num_prealloc_bounce_pg = 32;
+static unsigned int num_prealloc_bounce_pg = BIO_MAX_VECS;
 module_param(num_prealloc_bounce_pg, uint, 0);
 MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");
@@ -144,11 +144,21 @@ static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
	struct bio *src_bio = enc_bio->bi_private;
-	int i;
+	struct page **pages = (struct page **)enc_bio->bi_io_vec;
+	struct bio_vec *bv;
+	unsigned int i;
 
-	for (i = 0; i < enc_bio->bi_vcnt; i++)
-		mempool_free(enc_bio->bi_io_vec[i].bv_page,
-			     blk_crypto_bounce_page_pool);
+	/*
+	 * Use the same trick as the alloc side to avoid the need for an extra
+	 * pages array.
+	 */
+	bio_for_each_bvec_all(bv, enc_bio, i)
+		pages[i] = bv->bv_page;
+
+	i = mempool_free_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+			enc_bio->bi_vcnt);
+	if (i < enc_bio->bi_vcnt)
+		release_pages(pages + i, enc_bio->bi_vcnt - i);
 
	if (enc_bio->bi_status)
		cmpxchg(&src_bio->bi_status, 0, enc_bio->bi_status);
@@ -157,9 +167,14 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
	bio_endio(src_bio);
 }
 
+#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))
+
 static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
-		unsigned int nr_segs)
+		unsigned int nr_segs, struct page ***pages_ret)
 {
+	unsigned int memflags = memalloc_noio_save();
+	unsigned int nr_allocated;
+	struct page **pages;
	struct bio *bio;
 
	nr_segs = min(nr_segs, BIO_MAX_VECS);
@@ -174,6 +189,30 @@ static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
	bio->bi_write_stream = bio_src->bi_write_stream;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio_clone_blkg_association(bio, bio_src);
+
+	/*
+	 * Move page array up in the allocated memory for the bio vecs as far as
+	 * possible so that we can start filling biovecs from the beginning
+	 * without overwriting the temporary page array.
+	 */
+	static_assert(PAGE_PTRS_PER_BVEC > 1);
+	pages = (struct page **)bio->bi_io_vec;
+	pages += nr_segs * (PAGE_PTRS_PER_BVEC - 1);
+
+	/*
+	 * Try a bulk allocation first.  This could leave random pages in the
+	 * array unallocated, but we'll fix that up later in mempool_alloc_bulk.
+	 *
+	 * Note: alloc_pages_bulk needs the array to be zeroed, as it assumes
+	 * any non-zero slot already contains a valid allocation.
+	 */
+	memset(pages, 0, sizeof(struct page *) * nr_segs);
+	nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr_segs, pages);
+	if (nr_allocated < nr_segs)
+		mempool_alloc_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+				nr_segs, nr_allocated);
+	memalloc_noio_restore(memflags);
+	*pages_ret = pages;
	return bio;
 }
 
@@ -210,6 +249,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
+	struct page **enc_pages;
	unsigned int enc_idx;
	struct bio *enc_bio;
	unsigned int j;
@@ -227,15 +267,13 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 
	/* Encrypt each page in the source bio */
 new_bio:
-	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs);
+	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs, &enc_pages);
	enc_idx = 0;
	for (;;) {
		struct bio_vec src_bv =
			bio_iter_iovec(src_bio, src_bio->bi_iter);
-		struct page *enc_page;
+		struct page *enc_page = enc_pages[enc_idx];
 
-		enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
-					 GFP_NOIO);
		__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
			       src_bv.bv_offset);
 
@@ -246,10 +284,8 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
		/* Encrypt each data unit in this page */
		for (j = 0; j < src_bv.bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
-			if (crypto_skcipher_encrypt(ciph_req)) {
-				enc_idx++;
-				goto out_free_bounce_pages;
-			}
+			if (crypto_skcipher_encrypt(ciph_req))
+				goto out_free_enc_bio;
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
@@ -278,9 +314,9 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
	submit_bio(enc_bio);
	return;
 
-out_free_bounce_pages:
-	while (enc_idx > 0)
-		mempool_free(enc_bio->bi_io_vec[--enc_idx].bv_page,
+out_free_enc_bio:
+	for (enc_idx = 0; enc_idx < enc_bio->bi_max_vecs; enc_idx++)
+		mempool_free(enc_bio->bi_io_vec[enc_idx].bv_page,
			     blk_crypto_bounce_page_pool);
	bio_put(enc_bio);
	cmpxchg(&src_bio->bi_status, 0, BLK_STS_IOERR);
-- 
2.47.3
