Add support for generating/verifying protection information in the file
system. This is largely done by setting the IOMAP_F_INTEGRITY flag and
letting iomap do all of the work. XFS just has to ensure that read
completions for data carrying integrity information are run from user
context.
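As a rough sketch of that write-side split (the exact hook in the generic
iomap writeback code is an assumption here and not something this patch
touches), the common submit path is expected to do more or less what the
zoned path below open-codes:

	/* sketch only: generic writeback submit, gated on the mapping flag */
	if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
		fs_bio_integrity_generate(&ioend->io_bio);
	submit_bio(&ioend->io_bio);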
For zoned writeback, XFS also has to generate the integrity data itself
as the zoned writeback path is not using the generic writeback_submit
implementation.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: "Darrick J. Wong" <[email protected]>
Tested-by: Anuj Gupta <[email protected]>
---
 fs/xfs/xfs_aops.c  | 49 ++++++++++++++++++++++++++++++++++++++++++----
 fs/xfs/xfs_iomap.c |  9 ++++++---
 2 files changed, 51 insertions(+), 7 deletions(-)

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c3c1e149fff4..bf985b5e73a0 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -22,6 +22,7 @@
 #include "xfs_icache.h"
 #include "xfs_zone_alloc.h"
 #include "xfs_rtgroup.h"
+#include <linux/bio-integrity.h>
 
 struct xfs_writepage_ctx {
 	struct iomap_writepage_ctx ctx;
@@ -661,6 +662,8 @@ xfs_zoned_writeback_submit(
 		bio_endio(&ioend->io_bio);
 		return error;
 	}
+	if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
+		fs_bio_integrity_generate(&ioend->io_bio);
 	xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone);
 	return 0;
 }
@@ -741,12 +744,45 @@ xfs_vm_bmap(
 	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
 }
 
+static void
+xfs_bio_submit_read(
+	const struct iomap_iter		*iter,
+	struct iomap_read_folio_ctx	*ctx)
+{
+	struct bio			*bio = ctx->read_ctx;
+
+	/* delay read completions to the ioend workqueue */
+	iomap_init_ioend(iter->inode, bio, ctx->read_ctx_file_offset, 0);
+	bio->bi_end_io = xfs_end_bio;
+	submit_bio(bio);
+}
+
+static const struct iomap_read_ops xfs_bio_read_integrity_ops = {
+	.read_folio_range	= iomap_bio_read_folio_range,
+	.submit_read		= xfs_bio_submit_read,
+	.bio_set		= &iomap_ioend_bioset,
+};
+
+static inline const struct iomap_read_ops *
+xfs_bio_read_ops(
+	const struct xfs_inode		*ip)
+{
+	if (bdev_has_integrity_csum(xfs_inode_buftarg(ip)->bt_bdev))
+		return &xfs_bio_read_integrity_ops;
+	return &iomap_bio_read_ops;
+}
+
 STATIC int
 xfs_vm_read_folio(
-	struct file		*unused,
-	struct folio		*folio)
+	struct file		*file,
+	struct folio		*folio)
 {
-	iomap_bio_read_folio(folio, &xfs_read_iomap_ops);
+	struct iomap_read_folio_ctx ctx = {
+		.cur_folio	= folio,
+		.ops		= xfs_bio_read_ops(XFS_I(file->f_mapping->host)),
+	};
+
+	iomap_read_folio(&xfs_read_iomap_ops, &ctx);
 	return 0;
 }
 
@@ -754,7 +790,12 @@ STATIC void
 xfs_vm_readahead(
 	struct readahead_control	*rac)
 {
-	iomap_bio_readahead(rac, &xfs_read_iomap_ops);
+	struct iomap_read_folio_ctx ctx = {
+		.rac	= rac,
+		.ops	= xfs_bio_read_ops(XFS_I(rac->mapping->host)),
+	};
+
+	iomap_readahead(&xfs_read_iomap_ops, &ctx);
 }
 
 static int
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 04f39ea15898..b5d70bcb63b9 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -143,11 +143,14 @@ xfs_bmbt_to_iomap(
 	}
 	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
 	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
-	if (mapping_flags & IOMAP_DAX)
+	iomap->flags = iomap_flags;
+	if (mapping_flags & IOMAP_DAX) {
 		iomap->dax_dev = target->bt_daxdev;
-	else
+	} else {
 		iomap->bdev = target->bt_bdev;
-	iomap->flags = iomap_flags;
+		if (bdev_has_integrity_csum(iomap->bdev))
+			iomap->flags |= IOMAP_F_INTEGRITY;
+	}
 
 	/*
 	 * If the inode is dirty for datasync purposes, let iomap know so it
-- 
2.47.3
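For reference, the end state of the flag setup after the xfs_iomap.c hunk
applies reads roughly as below; the surrounding context is reconstructed
from the diff, so treat the exact placement as approximate:

	/* xfs_bmbt_to_iomap(), with the hunk above applied */
	iomap->flags = iomap_flags;
	if (mapping_flags & IOMAP_DAX) {
		iomap->dax_dev = target->bt_daxdev;
	} else {
		iomap->bdev = target->bt_bdev;
		/* PI-capable block device: let iomap generate/verify PI */
		if (bdev_has_integrity_csum(iomap->bdev))
			iomap->flags |= IOMAP_F_INTEGRITY;
	}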
