Look up the fsverity_info once per read in btrfs_read_folio and btrfs_readahead and pass it down to btrfs_do_readpage for all blocks processed there, and do the same in end_bbio_data_read for all folios completed there. end_bbio_data_read is also changed to derive the inode from the btrfs_bio - while bbio->inode is optional, it is always set for buffered reads.

This amortizes the cost of the lookup once the lookup itself becomes less efficient.
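In pseudo-code, the idea is simply to hoist the lookup out of the per-block loop. A minimal sketch, not the actual btrfs code: read_block() and verify_block() are hypothetical stand-ins, while i_size_read() and fsverity_get_info() are the interfaces used in the patch below:

static int read_range(struct inode *inode, u64 start, u64 len)
{
	struct fsverity_info *vi = NULL;
	u64 cur;

	/*
	 * Look up the verity info once per read operation instead of once
	 * per block; reads entirely beyond i_size never need verification.
	 */
	if (start < i_size_read(inode))
		vi = fsverity_get_info(inode);

	for (cur = start; cur < start + len; cur += i_blocksize(inode)) {
		int ret = read_block(inode, cur);		/* hypothetical */

		if (ret)
			return ret;
		if (vi && !verify_block(vi, inode, cur))	/* hypothetical */
			return -EIO;
	}
	return 0;
}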

Signed-off-by: Christoph Hellwig <[email protected]>
Acked-by: David Sterba <[email protected]>
---
 fs/btrfs/extent_io.c | 54 +++++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 21 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 21430b7d8f27..24988520521c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -476,26 +476,25 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 			       end, page_ops);
 }
 
-static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
+static bool btrfs_verify_folio(struct fsverity_info *vi, struct folio *folio,
+			       u64 start, u32 len)
 {
 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 
-	if (!fsverity_active(folio->mapping->host) ||
-	    btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
-	    start >= i_size_read(folio->mapping->host))
+	if (!vi || btrfs_folio_test_uptodate(fs_info, folio, start, len))
 		return true;
 
-	return fsverity_verify_folio(*fsverity_info_addr(folio->mapping->host),
-			folio);
+	return fsverity_verify_folio(vi, folio);
 }
 
-static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
+static void end_folio_read(struct fsverity_info *vi, struct folio *folio,
+			   bool uptodate, u64 start, u32 len)
 {
 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 
 	ASSERT(folio_pos(folio) <= start &&
 	       start + len <= folio_next_pos(folio));
 
-	if (uptodate && btrfs_verify_folio(folio, start, len))
+	if (uptodate && btrfs_verify_folio(vi, folio, start, len))
 		btrfs_folio_set_uptodate(fs_info, folio, start, len);
 	else
 		btrfs_folio_clear_uptodate(fs_info, folio, start, len);
@@ -575,14 +574,19 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
 static void end_bbio_data_read(struct btrfs_bio *bbio)
 {
 	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+	struct inode *inode = &bbio->inode->vfs_inode;
 	struct bio *bio = &bbio->bio;
+	struct fsverity_info *vi = NULL;
 	struct folio_iter fi;
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
+
+	if (bbio->file_offset < i_size_read(inode))
+		vi = fsverity_get_info(inode);
+
 	bio_for_each_folio_all(fi, &bbio->bio) {
 		bool uptodate = !bio->bi_status;
 		struct folio *folio = fi.folio;
-		struct inode *inode = folio->mapping->host;
 		u64 start = folio_pos(folio) + fi.offset;
 
 		btrfs_debug(fs_info,
@@ -617,7 +621,7 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
 		}
 
 		/* Update page status and unlock. */
-		end_folio_read(folio, uptodate, start, fi.length);
+		end_folio_read(vi, folio, uptodate, start, fi.length);
 	}
 	bio_put(bio);
 }
@@ -992,7 +996,8 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
  * return 0 on success, otherwise return error
  */
 static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
-			     struct btrfs_bio_ctrl *bio_ctrl)
+			     struct btrfs_bio_ctrl *bio_ctrl,
+			     struct fsverity_info *vi)
 {
 	struct inode *inode = folio->mapping->host;
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
@@ -1030,16 +1035,16 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
 		if (cur >= last_byte) {
 			folio_zero_range(folio, pg_offset, end - cur + 1);
-			end_folio_read(folio, true, cur, end - cur + 1);
+			end_folio_read(vi, folio, true, cur, end - cur + 1);
 			break;
 		}
 		if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
-			end_folio_read(folio, true, cur, blocksize);
+			end_folio_read(vi, folio, true, cur, blocksize);
 			continue;
 		}
 		em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
 		if (IS_ERR(em)) {
-			end_folio_read(folio, false, cur, end + 1 - cur);
+			end_folio_read(vi, folio, false, cur, end + 1 - cur);
 			return PTR_ERR(em);
 		}
 		extent_offset = cur - em->start;
@@ -1116,12 +1121,12 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
 			folio_zero_range(folio, pg_offset, blocksize);
-			end_folio_read(folio, true, cur, blocksize);
+			end_folio_read(vi, folio, true, cur, blocksize);
 			continue;
 		}
 		/* the get_extent function already copied into the folio */
 		if (block_start == EXTENT_MAP_INLINE) {
-			end_folio_read(folio, true, cur, blocksize);
+			end_folio_read(vi, folio, true, cur, blocksize);
 			continue;
 		}
 
@@ -1318,7 +1323,8 @@ static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
 
 int btrfs_read_folio(struct file *file, struct folio *folio)
 {
-	struct btrfs_inode *inode = folio_to_inode(folio);
+	struct inode *vfs_inode = folio->mapping->host;
+	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
 	const u64 start = folio_pos(folio);
 	const u64 end = start + folio_size(folio) - 1;
 	struct extent_state *cached_state = NULL;
@@ -1327,10 +1333,13 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
 		.last_em_start = U64_MAX,
 	};
 	struct extent_map *em_cached = NULL;
+	struct fsverity_info *vi = NULL;
 	int ret;
 
 	lock_extents_for_read(inode, start, end, &cached_state);
-	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+	if (folio_pos(folio) < i_size_read(vfs_inode))
+		vi = fsverity_get_info(vfs_inode);
+	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, vi);
 	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 
 	btrfs_free_extent_map(em_cached);
@@ -2697,16 +2706,19 @@ void btrfs_readahead(struct readahead_control *rac)
 		.last_em_start = U64_MAX,
 	};
 	struct folio *folio;
-	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+	struct inode *vfs_inode = rac->mapping->host;
+	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
 	const u64 start = readahead_pos(rac);
 	const u64 end = start + readahead_length(rac) - 1;
 	struct extent_state *cached_state = NULL;
 	struct extent_map *em_cached = NULL;
+	struct fsverity_info *vi = NULL;
 
 	lock_extents_for_read(inode, start, end, &cached_state);
-
+	if (start < i_size_read(vfs_inode))
+		vi = fsverity_get_info(vfs_inode);
 	while ((folio = readahead_folio(rac)) != NULL)
-		btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
+		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, vi);
 
 	btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
-- 
2.47.3
