The current code in blk_crypto_fallback_encrypt_bio is inefficient and prone to deadlocks under memory pressure: it first walks the passed-in plaintext bio to see how much of it can fit into a single encrypted bio using up to BIO_MAX_VECS PAGE_SIZE segments, and then allocates a plaintext clone of that size, only to allocate another bio for the ciphertext later. While the plaintext clone uses a bioset to avoid deadlocks when allocations could fail, the ciphertext one uses bio_kmalloc, which is a no-go in the file system I/O path.
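In outline, the current flow is roughly the following (condensed from the code removed below, error handling trimmed):

	/* pass 1: walk the plaintext bio and split off anything beyond
	 * BIO_MAX_VECS single-page segments (uses the crypto_bio_split
	 * bioset, so this part is deadlock-safe) */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	/* pass 2: walk it again to clone the bvecs into a bounce bio,
	 * allocated with plain bio_kmalloc(GFP_NOIO) */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);

	/* pass 3: swap each cloned page for a mempool bounce page and
	 * encrypt into it */
	for (i = 0; i < enc_bio->bi_vcnt; i++)
		/* ... mempool_alloc() + crypto_skcipher_encrypt() ... */;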
Switch blk_crypto_fallback_encrypt_bio to walk the source plaintext bio while consuming its bi_iter, without cloning it, and instead allocate a ciphertext bio at the beginning and whenever we fill up the previous one. The existing bio_set for the plaintext clones is reused for the ciphertext bios to remove the deadlock risk.

Signed-off-by: Christoph Hellwig <[email protected]>
---
 block/blk-crypto-fallback.c | 162 ++++++++++++++----------------------
 1 file changed, 63 insertions(+), 99 deletions(-)

diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 86b27f96051a..1f58010fb437 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -152,35 +152,26 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 	src_bio->bi_status = enc_bio->bi_status;
 
-	bio_uninit(enc_bio);
-	kfree(enc_bio);
+	bio_put(enc_bio);
 	bio_endio(src_bio);
 }
 
-static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
+static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
+		unsigned int nr_segs)
 {
-	unsigned int nr_segs = bio_segments(bio_src);
-	struct bvec_iter iter;
-	struct bio_vec bv;
 	struct bio *bio;
 
-	bio = bio_kmalloc(nr_segs, GFP_NOIO);
-	if (!bio)
-		return NULL;
-	bio_init_inline(bio, bio_src->bi_bdev, nr_segs, bio_src->bi_opf);
+	bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
+			GFP_NOIO, &crypto_bio_split);
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
+	bio->bi_private = bio_src;
+	bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
 	bio->bi_ioprio = bio_src->bi_ioprio;
 	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_write_stream = bio_src->bi_write_stream;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
-	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
-	bio_for_each_segment(bv, bio_src, iter)
-		bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
 	bio_clone_blkg_association(bio, bio_src);
-
 	return bio;
 }
 
@@ -208,32 +199,6 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
 	return true;
 }
 
-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
-{
-	struct bio *bio = *bio_ptr;
-	unsigned int i = 0;
-	unsigned int num_sectors = 0;
-	struct bio_vec bv;
-	struct bvec_iter iter;
-
-	bio_for_each_segment(bv, bio, iter) {
-		num_sectors += bv.bv_len >> SECTOR_SHIFT;
-		if (++i == BIO_MAX_VECS)
-			break;
-	}
-
-	if (num_sectors < bio_sectors(bio)) {
-		bio = bio_submit_split_bioset(bio, num_sectors,
-				&crypto_bio_split);
-		if (!bio)
-			return false;
-
-		*bio_ptr = bio;
-	}
-
-	return true;
-}
-
 union blk_crypto_iv {
 	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
@@ -257,34 +222,22 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
  */
 static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 {
-	struct bio *src_bio, *enc_bio;
-	struct bio_crypt_ctx *bc;
-	struct blk_crypto_keyslot *slot;
-	int data_unit_size;
+	struct bio *src_bio = *bio_ptr;
+	struct bio_crypt_ctx *bc = src_bio->bi_crypt_context;
+	int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 	struct skcipher_request *ciph_req = NULL;
+	struct blk_crypto_keyslot *slot;
 	DECLARE_CRYPTO_WAIT(wait);
 	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	struct scatterlist src, dst;
 	union blk_crypto_iv iv;
-	unsigned int i, j;
+	struct bio *enc_bio = NULL;
+	unsigned int nr_segs;
+	unsigned int enc_idx = 0;
+	unsigned int j;
 	bool ret = false;
 	blk_status_t blk_st;
 
-	/* Split the bio if it's too big for single page bvec */
-	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
-		return false;
-
-	src_bio = *bio_ptr;
-	bc = src_bio->bi_crypt_context;
-	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
-
-	/* Allocate bounce bio for encryption */
-	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
-	if (!enc_bio) {
-		src_bio->bi_status = BLK_STS_RESOURCE;
-		return false;
-	}
-
 	/*
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
@@ -293,7 +246,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		src_bio->bi_status = blk_st;
-		goto out_put_enc_bio;
+		return false;
 	}
 
 	/* and then allocate an skcipher_request for it */
@@ -309,61 +262,72 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
 				   iv.bytes);
 
-	/* Encrypt each page in the bounce bio */
-	for (i = 0; i < enc_bio->bi_vcnt; i++) {
-		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
-		struct page *plaintext_page = enc_bvec->bv_page;
-		struct page *ciphertext_page =
-			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);
-
-		enc_bvec->bv_page = ciphertext_page;
+	/* Encrypt each page in the origin bio */
+	nr_segs = bio_segments(src_bio);
+	for (;;) {
+		struct bio_vec src_bv =
+			bio_iter_iovec(src_bio, src_bio->bi_iter);
+		struct page *enc_page;
 
-		if (!ciphertext_page) {
-			src_bio->bi_status = BLK_STS_RESOURCE;
-			goto out_free_bounce_pages;
+		if (!enc_bio) {
+			enc_bio = blk_crypto_alloc_enc_bio(src_bio,
+					min(nr_segs, BIO_MAX_VECS));
 		}
 
-		sg_set_page(&src, plaintext_page, data_unit_size,
-			    enc_bvec->bv_offset);
-		sg_set_page(&dst, ciphertext_page, data_unit_size,
-			    enc_bvec->bv_offset);
+		enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
+				GFP_NOIO);
+		__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
+				src_bv.bv_offset);
+
+		sg_set_page(&src, src_bv.bv_page, data_unit_size,
+			    src_bv.bv_offset);
+		sg_set_page(&dst, enc_page, data_unit_size, src_bv.bv_offset);
 
 		/* Encrypt each data unit in this page */
-		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
+		for (j = 0; j < src_bv.bv_len; j += data_unit_size) {
 			blk_crypto_dun_to_iv(curr_dun, &iv);
 			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
-					    &wait)) {
-				i++;
-				src_bio->bi_status = BLK_STS_IOERR;
-				goto out_free_bounce_pages;
-			}
+					    &wait))
+				goto out_ioerror;
 			bio_crypt_dun_increment(curr_dun, 1);
 			src.offset += data_unit_size;
 			dst.offset += data_unit_size;
 		}
+
+		bio_advance_iter_single(src_bio, &src_bio->bi_iter,
+				src_bv.bv_len);
+		if (!src_bio->bi_iter.bi_size)
+			break;
+
+		if (++enc_idx == enc_bio->bi_max_vecs) {
+			/*
+			 * Each encrypted bio will call bio_endio in the
+			 * completion handler, so ensure the remaining count
+			 * matches the number of submitted bios.
+			 */
+			bio_inc_remaining(src_bio);
+			submit_bio(enc_bio);
+			enc_bio = NULL;
+			enc_idx = 0;
+		}
+		nr_segs--;
 	}
 
-	enc_bio->bi_private = src_bio;
-	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
 	*bio_ptr = enc_bio;
 	ret = true;
-
-	enc_bio = NULL;
-	goto out_free_ciph_req;
-
-out_free_bounce_pages:
-	while (i > 0)
-		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
-			     blk_crypto_bounce_page_pool);
 out_free_ciph_req:
 	skcipher_request_free(ciph_req);
 out_release_keyslot:
 	blk_crypto_put_keyslot(slot);
-out_put_enc_bio:
-	if (enc_bio)
-		bio_uninit(enc_bio);
-	kfree(enc_bio);
 	return ret;
+
+out_ioerror:
+	while (enc_idx > 0)
+		mempool_free(enc_bio->bi_io_vec[enc_idx--].bv_page,
			     blk_crypto_bounce_page_pool);
+	bio_put(enc_bio);
+	src_bio->bi_status = BLK_STS_IOERR;
+	goto out_free_ciph_req;
 }
 
 /*
@@ -537,7 +501,7 @@ static int blk_crypto_fallback_init(void)
 
 	get_random_bytes(blank_key, sizeof(blank_key));
 
-	err = bioset_init(&crypto_bio_split, 64, 0, 0);
+	err = bioset_init(&crypto_bio_split, 64, 0, BIOSET_NEED_BVECS);
 	if (err)
 		goto out;
-- 
2.47.3
