On Fri, Dec 19, 2025 at 12:02:44PM -0800, Eric Biggers wrote:
> On Wed, Dec 17, 2025 at 07:06:50AM +0100, Christoph Hellwig wrote:
> > new_bio:
> > - enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs);
> > + enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs, &enc_pages);
> > enc_idx = 0;
> > for (;;) {
> > struct bio_vec src_bv =
> > bio_iter_iovec(src_bio, src_bio->bi_iter);
> > - struct page *enc_page;
> > + struct page *enc_page = enc_pages[enc_idx];
> >
> > - enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
> > - GFP_NOIO);
> > __bio_add_page(enc_bio, enc_page, src_bv.bv_len,
> > src_bv.bv_offset);
> >
> > @@ -246,10 +284,8 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
> > /* Encrypt each data unit in this page */
> > for (j = 0; j < src_bv.bv_len; j += data_unit_size) {
> > blk_crypto_dun_to_iv(curr_dun, &iv);
> > - if (crypto_skcipher_encrypt(ciph_req)) {
> > - enc_idx++;
> > - goto out_free_bounce_pages;
> > - }
> > + if (crypto_skcipher_encrypt(ciph_req))
> > + goto out_free_enc_bio;
> > bio_crypt_dun_increment(curr_dun, 1);
> > src.offset += data_unit_size;
> > dst.offset += data_unit_size;
> > @@ -278,9 +314,9 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
> > submit_bio(enc_bio);
> > return;
> >
> > -out_free_bounce_pages:
> > - while (enc_idx > 0)
> > - mempool_free(enc_bio->bi_io_vec[--enc_idx].bv_page,
> > +out_free_enc_bio:
> > + for (enc_idx = 0; enc_idx < enc_bio->bi_max_vecs; enc_idx++)
> > + mempool_free(enc_bio->bi_io_vec[enc_idx].bv_page,
> > blk_crypto_bounce_page_pool);
> > bio_put(enc_bio);
> > cmpxchg(&src_bio->bi_status, 0, BLK_STS_IOERR);
>
> The error handling at out_free_enc_bio is still broken, I'm afraid.
> It's not taking into account that some of the pages may have been moved
> into bvecs and some have not.
>
> I think it needs something like the following:
>
> diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
> index 23e097197450..d6760404b76c 100644
> --- a/block/blk-crypto-fallback.c
> +++ b/block/blk-crypto-fallback.c
> @@ -272,7 +272,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
> for (;;) {
> struct bio_vec src_bv =
> bio_iter_iovec(src_bio, src_bio->bi_iter);
> - struct page *enc_page = enc_pages[enc_idx];
> + struct page *enc_page;
>
> if (!IS_ALIGNED(src_bv.bv_len | src_bv.bv_offset,
> data_unit_size)) {
> @@ -280,6 +280,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
> goto out_free_enc_bio;
> }
>
> + enc_page = enc_pages[enc_idx++];
> __bio_add_page(enc_bio, enc_page, src_bv.bv_len,
> src_bv.bv_offset);
>
> @@ -305,7 +306,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
> break;
>
> nr_segs--;
> - if (++enc_idx == enc_bio->bi_max_vecs) {
> + if (enc_idx == enc_bio->bi_max_vecs) {
> /*
> * For each additional encrypted bio submitted,
> * increment the source bio's remaining count. Each
> @@ -323,9 +324,11 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
> return;
>
> out_free_enc_bio:
> -	for (enc_idx = 0; enc_idx < enc_bio->bi_max_vecs; enc_idx++)
> -		mempool_free(enc_bio->bi_io_vec[enc_idx].bv_page,
> +	for (j = 0; j < enc_idx; j++)
> +		mempool_free(enc_bio->bi_io_vec[j].bv_page,
> blk_crypto_bounce_page_pool);
> + for (; j < enc_bio->bi_max_vecs; j++)
> + mempool_free(enc_pages[j], blk_crypto_bounce_page_pool);
> bio_put(enc_bio);
> bio_endio(src_bio);
> }
Also, this shows that the per-segment 'nr_segs--' is a bit out of place
(just as the '++enc_idx' was).  nr_segs is only used when allocating a
bio, so it only needs to be adjusted when starting a new one:
submit_bio(enc_bio);
nr_segs -= nr_enc_pages;
goto new_bio;
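
In context, that would look roughly like this (just a sketch against the
quoted hunks, assuming nr_enc_pages is the number of bounce pages backing
the bio that was just submitted):

new_bio:
	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs, &enc_pages);
	enc_idx = 0;
	for (;;) {
		...
		enc_page = enc_pages[enc_idx++];
		__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
			       src_bv.bv_offset);
		...
		/* no per-segment nr_segs-- needed here */
		if (enc_idx == enc_bio->bi_max_vecs) {
			/* bump src_bio's remaining count, then: */
			submit_bio(enc_bio);
			nr_segs -= nr_enc_pages;
			goto new_bio;
		}
	}
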
- Eric