On Thu, Jan 22, 2026 at 09:22:03AM +0100, Christoph Hellwig wrote:
> Look up the fsverity_info once in end_buffer_async_read_io, and then
> pass it along to the I/O completion workqueue in
> struct postprocess_bh_ctx.
>
> This amortizes the cost of the lookup, which matters once the lookup
> itself becomes less efficient.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> ---
> fs/buffer.c | 27 +++++++++++----------------
> 1 file changed, 11 insertions(+), 16 deletions(-)
>
> diff --git a/fs/buffer.c b/fs/buffer.c
> index 3982253b6805..f4b3297ef1b1 100644
> --- a/fs/buffer.c
> +++ b/fs/buffer.c
> @@ -302,6 +302,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
> struct postprocess_bh_ctx {
> struct work_struct work;
> struct buffer_head *bh;
> + struct fsverity_info *vi;
> };
>
> static void verify_bh(struct work_struct *work)
> @@ -309,25 +310,14 @@ static void verify_bh(struct work_struct *work)
> struct postprocess_bh_ctx *ctx =
> container_of(work, struct postprocess_bh_ctx, work);
> struct buffer_head *bh = ctx->bh;
> - struct inode *inode = bh->b_folio->mapping->host;
> bool valid;
>
> - valid = fsverity_verify_blocks(*fsverity_info_addr(inode), bh->b_folio,
> - bh->b_size, bh_offset(bh));
> + valid = fsverity_verify_blocks(ctx->vi, bh->b_folio, bh->b_size,
> + bh_offset(bh));
> end_buffer_async_read(bh, valid);
> kfree(ctx);
> }
>
> -static bool need_fsverity(struct buffer_head *bh)
> -{
> - struct folio *folio = bh->b_folio;
> - struct inode *inode = folio->mapping->host;
> -
> - return fsverity_active(inode) &&
> - /* needed by ext4 */
> - folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
> -}
> -
> static void decrypt_bh(struct work_struct *work)
> {
> struct postprocess_bh_ctx *ctx =
> @@ -337,7 +327,7 @@ static void decrypt_bh(struct work_struct *work)
>
> err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
> bh_offset(bh));
> - if (err == 0 && need_fsverity(bh)) {
> + if (err == 0 && ctx->vi) {
> /*
> * We use different work queues for decryption and for verity
> * because verity may require reading metadata pages that need
> @@ -359,15 +349,20 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
> {
> struct inode *inode = bh->b_folio->mapping->host;
> bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
> - bool verify = need_fsverity(bh);
> + struct fsverity_info *vi = NULL;
> +
> + /* needed by ext4 */
> + if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
> + vi = fsverity_get_info(inode);
Well, this is no longer a weird ext4ism, since f2fs also needs this,
right? Maybe this comment should read:
/*
* Merkle tree folios can be cached in the pagecache, but they
* must never be cached below the first base page offset beyond
* EOF because mmap can expose address space beyond EOF.
*/
if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
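
Just to spell it out, the whole assignment in end_buffer_async_read_io()
would then read something like this (untested, and assuming the
fsverity_get_info() call stays where your patch puts it):

	struct fsverity_info *vi = NULL;

	/*
	 * Merkle tree folios can be cached in the pagecache, but they
	 * must never be cached below the first base page offset beyond
	 * EOF because mmap can expose address space beyond EOF.
	 */
	if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
		vi = fsverity_get_info(inode);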
--D
>
> /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
> - if (uptodate && (decrypt || verify)) {
> + if (uptodate && (decrypt || vi)) {
> struct postprocess_bh_ctx *ctx =
> kmalloc(sizeof(*ctx), GFP_ATOMIC);
>
> if (ctx) {
> ctx->bh = bh;
> + ctx->vi = vi;
> if (decrypt) {
> INIT_WORK(&ctx->work, decrypt_bh);
> fscrypt_enqueue_decrypt_work(&ctx->work);
> --
> 2.47.3
>
>