Add a blk_crypto_submit_bio helper that submits the bio directly when
it is not encrypted or when the underlying device provides inline
encryption, and that otherwise handles the encryption before going
down into the low-level driver.  This reduces the risk of bio
reordering and keeps memory allocation as high up in the stack as
possible.
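
For illustration, the call-site conversion looks like this (a sketch
only; "bio" stands in for whatever bio a caller is about to submit):

	/*
	 * Before: the blk-crypto fallback ran from __submit_bio()
	 * via blk_crypto_bio_prep(), deep down in the stack.
	 */
	submit_bio(bio);

	/*
	 * After: the helper submits the bio directly when it has no
	 * crypt context or the device supports the crypto config
	 * natively, and otherwise invokes the blk-crypto fallback.
	 */
	blk_crypto_submit_bio(bio);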

Signed-off-by: Christoph Hellwig <[email protected]>
---
 Documentation/block/inline-encryption.rst |  6 ++++++
 block/blk-core.c                          | 10 +++++++---
 block/blk-crypto-internal.h               | 19 +++++++++++--------
 block/blk-crypto.c                        | 23 ++++++-----------------
 fs/buffer.c                               |  3 ++-
 fs/crypto/bio.c                           |  2 +-
 fs/ext4/page-io.c                         |  3 ++-
 fs/ext4/readpage.c                        |  9 +++++----
 fs/f2fs/data.c                            |  4 ++--
 fs/f2fs/file.c                            |  3 ++-
 fs/iomap/direct-io.c                      |  3 ++-
 include/linux/blk-crypto.h                | 22 ++++++++++++++++++++++
 12 files changed, 68 insertions(+), 39 deletions(-)

diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst
index 6380e6ab492b..7e0703a12dfb 100644
--- a/Documentation/block/inline-encryption.rst
+++ b/Documentation/block/inline-encryption.rst
@@ -206,6 +206,18 @@ it to a bio, given the blk_crypto_key and the data unit number that will be used
 for en/decryption.  Users don't need to worry about freeing the bio_crypt_ctx
 later, as that happens automatically when the bio is freed or reset.
 
+To submit a bio that uses inline encryption, users must call
+``blk_crypto_submit_bio()`` instead of the usual ``submit_bio()``.  This will
+submit the bio to the underlying driver if it supports inline crypto, or else
+call the blk-crypto fallback routines before submitting normal bios to the
+underlying drivers.
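+
+As a sketch (with ``key`` and ``dun`` prepared as described earlier in
+this document, and with error handling omitted)::
+
+	bio_crypt_set_ctx(bio, key, dun, GFP_NOFS);
+	blk_crypto_submit_bio(bio);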
+
 Finally, when done using inline encryption with a blk_crypto_key on a
 block_device, users must call ``blk_crypto_evict_key()``.  This ensures that
 the key is evicted from all keyslots it may be programmed into and unlinked from
diff --git a/block/blk-core.c b/block/blk-core.c
index f87e5f1a101f..a0bf5174e9e9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -628,9 +628,6 @@ static void __submit_bio(struct bio *bio)
        /* If plug is not used, add new plug here to cache nsecs time. */
        struct blk_plug plug;
 
-       if (unlikely(!blk_crypto_bio_prep(bio)))
-               return;
-
        blk_start_plug(&plug);
 
        if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
@@ -794,6 +791,13 @@ void submit_bio_noacct(struct bio *bio)
        if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
                goto not_supported;
 
+       if (bio_has_crypt_ctx(bio)) {
+               if (WARN_ON_ONCE(!bio_has_data(bio)))
+                       goto end_io;
+               if (!blk_crypto_supported(bio))
+                       goto not_supported;
+       }
+
        if (should_fail_bio(bio))
                goto end_io;
        bio_check_ro(bio);
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index d65023120341..742694213529 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -86,6 +86,12 @@ bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
 int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
                     void __user *argp);
 
+static inline bool blk_crypto_supported(struct bio *bio)
+{
+       return blk_crypto_config_supported_natively(bio->bi_bdev,
+                       &bio->bi_crypt_context->bc_key->crypto_cfg);
+}
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 static inline int blk_crypto_sysfs_register(struct gendisk *disk)
@@ -139,6 +145,11 @@ static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
        return -ENOTTY;
 }
 
+static inline bool blk_crypto_supported(struct bio *bio)
+{
+       return false;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
@@ -165,14 +176,6 @@ static inline void bio_crypt_do_front_merge(struct request *rq,
 #endif
 }
 
-bool __blk_crypto_bio_prep(struct bio *bio);
-static inline bool blk_crypto_bio_prep(struct bio *bio)
-{
-       if (bio_has_crypt_ctx(bio))
-               return __blk_crypto_bio_prep(bio);
-       return true;
-}
-
 blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
 static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
 {
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 0b2535d8dbcc..856d3c5b1fa0 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -242,25 +242,13 @@ void __blk_crypto_free_request(struct request *rq)
        rq->crypt_ctx = NULL;
 }
 
-/**
- * __blk_crypto_bio_prep - Prepare bio for inline encryption
- * @bio: bio to prepare
- *
- * If the bio crypt context provided for the bio is supported by the underlying
- * device's inline encryption hardware, do nothing.
- *
- * Otherwise, try to perform en/decryption for this bio by falling back to the
- * kernel crypto API.  For encryption this means submitting newly allocated
- * bios for the encrypted payload while keeping back the source bio until they
- * complete, while for reads the decryption happens in-place by a hooked in
- * completion handler.
- *
- * Caller must ensure bio has bio_crypt_ctx.
+/*
+ * Process a bio with a crypto context.  Returns true if the caller should
+ * submit the passed in bio, false if the bio is consumed.
  *
- * Return: true if @bio should be submitted to the driver by the caller, else
- * false.  Sets bio->bi_status, calls bio_endio and returns false on error.
+ * See the kerneldoc comment for blk_crypto_submit_bio for further details.
  */
-bool __blk_crypto_bio_prep(struct bio *bio)
+bool __blk_crypto_submit_bio(struct bio *bio)
 {
        const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
        struct block_device *bdev = bio->bi_bdev;
@@ -288,6 +276,7 @@ bool __blk_crypto_bio_prep(struct bio *bio)
 
        return true;
 }
+EXPORT_SYMBOL_GPL(__blk_crypto_submit_bio);
 
 int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                             gfp_t gfp_mask)
diff --git a/fs/buffer.c b/fs/buffer.c
index 838c0c571022..da18053f66e8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/capability.h>
 #include <linux/blkdev.h>
+#include <linux/blk-crypto.h>
 #include <linux/file.h>
 #include <linux/quotaops.h>
 #include <linux/highmem.h>
@@ -2821,7 +2822,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
                wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
        }
 
-       submit_bio(bio);
+       blk_crypto_submit_bio(bio);
 }
 
 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index c2b3ca100f8d..6da683ea69dc 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -105,7 +105,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
                }
 
                atomic_inc(&done.pending);
-               submit_bio(bio);
+               blk_crypto_submit_bio(bio);
        }
 
        fscrypt_zeroout_range_done(&done);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 39abfeec5f36..a8c95eee91b7 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -7,6 +7,7 @@
  * Written by Theodore Ts'o, 2010.
  */
 
+#include <linux/blk-crypto.h>
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/highuid.h>
@@ -401,7 +402,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
        if (bio) {
                if (io->io_wbc->sync_mode == WB_SYNC_ALL)
                        io->io_bio->bi_opf |= REQ_SYNC;
-               submit_bio(io->io_bio);
+               blk_crypto_submit_bio(io->io_bio);
        }
        io->io_bio = NULL;
 }
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index e7f2350c725b..49a6d36a8dba 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -36,6 +36,7 @@
 #include <linux/bio.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/blk-crypto.h>
 #include <linux/blkdev.h>
 #include <linux/highmem.h>
 #include <linux/prefetch.h>
@@ -345,7 +346,7 @@ int ext4_mpage_readpages(struct inode *inode,
                if (bio && (last_block_in_bio != first_block - 1 ||
                            !fscrypt_mergeable_bio(bio, inode, next_block))) {
                submit_and_realloc:
-                       submit_bio(bio);
+                       blk_crypto_submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
@@ -371,14 +372,14 @@ int ext4_mpage_readpages(struct inode *inode,
                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_folio)) {
-                       submit_bio(bio);
+                       blk_crypto_submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = first_block + blocks_per_folio - 1;
                continue;
        confused:
                if (bio) {
-                       submit_bio(bio);
+                       blk_crypto_submit_bio(bio);
                        bio = NULL;
                }
                if (!folio_test_uptodate(folio))
@@ -389,7 +390,7 @@ int ext4_mpage_readpages(struct inode *inode,
                ; /* A label shall be followed by a statement until C23 */
        }
        if (bio)
-               submit_bio(bio);
+               blk_crypto_submit_bio(bio);
        return 0;
 }
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c30e69392a62..c3dd8a5c8589 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -513,7 +513,7 @@ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
        trace_f2fs_submit_read_bio(sbi->sb, type, bio);
 
        iostat_update_submit_ctx(bio, type);
-       submit_bio(bio);
+       blk_crypto_submit_bio(bio);
 }
 
 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
@@ -522,7 +522,7 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
        WARN_ON_ONCE(is_read_io(bio_op(bio)));
        trace_f2fs_submit_write_bio(sbi->sb, type, bio);
        iostat_update_submit_ctx(bio, type);
-       submit_bio(bio);
+       blk_crypto_submit_bio(bio);
 }
 
 static void __submit_merged_bio(struct f2fs_bio_info *io)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d7047ca6b98d..914790f37915 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  */
+#include <linux/blk-crypto.h>
 #include <linux/fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/stat.h>
@@ -5046,7 +5047,7 @@ static void f2fs_dio_write_submit_io(const struct iomap_iter *iter,
        enum temp_type temp = f2fs_get_segment_temp(sbi, type);
 
        bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp);
-       submit_bio(bio);
+       blk_crypto_submit_bio(bio);
 }
 
 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 8e273408453a..4000c8596d9b 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2010 Red Hat, Inc.
  * Copyright (c) 2016-2025 Christoph Hellwig.
  */
+#include <linux/blk-crypto.h>
 #include <linux/fscrypt.h>
 #include <linux/pagemap.h>
 #include <linux/iomap.h>
@@ -74,7 +75,7 @@ static void iomap_dio_submit_bio(const struct iomap_iter *iter,
                dio->dops->submit_io(iter, bio, pos);
        } else {
                WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
-               submit_bio(bio);
+               blk_crypto_submit_bio(bio);
        }
 }
 
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 58b0c5254a67..887169f8feb0 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -171,6 +171,28 @@ static inline bool bio_has_crypt_ctx(struct bio *bio)
 
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
+bool __blk_crypto_submit_bio(struct bio *bio);
+
+/**
+ * blk_crypto_submit_bio - Submit a bio using inline encryption
+ * @bio: bio to submit
+ *
+ * If @bio has no crypto context, or the crypto context attached to @bio is
+ * supported by the underlying device's inline encryption hardware, just submit
+ * @bio.
+ *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. For encryption this means submitting newly allocated
+ * bios for the encrypted payload while keeping back the source bio until they
+ * complete, while for reads the decryption happens in-place by a hooked-in
+ * completion handler.
+ */
+static inline void blk_crypto_submit_bio(struct bio *bio)
+{
+       if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
+               submit_bio(bio);
+}
+
 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
 /**
  * bio_crypt_clone - clone bio encryption context
-- 
2.47.3

