bio_split_rw_at passes the queue's dma_alignment into bio_split_io_at, which already checks it unconditionally. Remove the len_align_mask argument from bio_split_io_at and switch all users of bio_split_rw_at to call bio_split_io_at directly.
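For illustration only (a sketch of the calling convention; the variable names are those of the existing callers, not new code):

	/* before: through the bio_split_rw_at() wrapper */
	split_sectors = bio_split_rw_at(bio, lim, &nr_segs, max_bytes);

	/* after: call the underlying helper directly with the same arguments */
	split_sectors = bio_split_io_at(bio, lim, &nr_segs, max_bytes);

bio_split_io_at checks the queue's dma_alignment unconditionally, so no caller needs to pass a separate length alignment mask any more.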
Signed-off-by: Christoph Hellwig <[email protected]>
---
 block/blk-map.c        |  2 +-
 block/blk-merge.c      | 12 +++++-------
 fs/btrfs/bio.c         |  2 +-
 fs/iomap/ioend.c       |  2 +-
 fs/xfs/xfs_zone_gc.c   |  2 +-
 include/linux/bio.h    |  2 +-
 include/linux/blkdev.h |  7 -------
 7 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 4533094d9458..106c6157c49b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -432,7 +432,7 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 	int ret;
 
 	/* check that the data layout matches the hardware restrictions */
-	ret = bio_split_io_at(bio, lim, &nr_segs, max_bytes, 0);
+	ret = bio_split_io_at(bio, lim, &nr_segs, max_bytes);
 	if (ret) {
 		/* if we would have to split the bio, copy instead */
 		if (ret > 0)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d3115d7469df..6cea8fb3e968 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -314,7 +314,6 @@ static inline unsigned int bvec_seg_gap(struct bio_vec *bvprv,
  * @lim: [in] queue limits to split based on
  * @segs: [out] number of segments in the bio with the first half of the sectors
  * @max_bytes: [in] maximum number of bytes per bio
- * @len_align_mask: [in] length alignment mask for each vector
  *
  * Find out if @bio needs to be split to fit the queue limits in @lim and a
  * maximum size of @max_bytes. Returns a negative error number if @bio can't be
@@ -322,15 +321,14 @@ static inline unsigned int bvec_seg_gap(struct bio_vec *bvprv,
  * @bio needs to be split.
  */
 int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
-		unsigned *segs, unsigned max_bytes, unsigned len_align_mask)
+		unsigned *segs, unsigned max_bytes)
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	unsigned nsegs = 0, bytes = 0, gaps = 0;
 	struct bvec_iter iter;
 
 	bio_for_each_bvec(bv, bio, iter) {
-		if (bv.bv_offset & lim->dma_alignment ||
-		    bv.bv_len & len_align_mask)
+		if (bv.bv_offset & lim->dma_alignment)
 			return -EINVAL;
 
 		/*
@@ -404,14 +402,14 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 		unsigned *nr_segs)
 {
 	return bio_submit_split(bio,
-			bio_split_rw_at(bio, lim, nr_segs,
+			bio_split_io_at(bio, lim, nr_segs,
 				get_max_io_size(bio, lim) << SECTOR_SHIFT));
 }
 
 /*
  * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
  *
- * But we want the nr_segs calculation provided by bio_split_rw_at, and having
+ * But we want the nr_segs calculation provided by bio_split_io_at, and having
  * a good sanity check that the submitter built the bio correctly is nice to
  * have as well.
  */
@@ -420,7 +418,7 @@ struct bio *bio_split_zone_append(struct bio *bio,
 {
 	int split_sectors;
 
-	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
+	split_sectors = bio_split_io_at(bio, lim, nr_segs,
 			lim->max_zone_append_sectors << SECTOR_SHIFT);
 	if (WARN_ON_ONCE(split_sectors > 0))
 		split_sectors = -EINVAL;
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index fa1d321a2fb8..c01154f8b956 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -726,7 +726,7 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
 	int sector_offset;
 
 	map_length = min(map_length, fs_info->max_zone_append_size);
-	sector_offset = bio_split_rw_at(&bbio->bio, &fs_info->limits,
+	sector_offset = bio_split_io_at(&bbio->bio, &fs_info->limits,
 			&nr_segs, map_length);
 	if (sector_offset) {
 		/*
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c
index 86f44922ed3b..41d60c7823b7 100644
--- a/fs/iomap/ioend.c
+++ b/fs/iomap/ioend.c
@@ -387,7 +387,7 @@ struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
 		max_len = min(max_len,
 			lim->max_zone_append_sectors << SECTOR_SHIFT);
 
-	sector_offset = bio_split_rw_at(bio, lim, &nr_segs, max_len);
+	sector_offset = bio_split_io_at(bio, lim, &nr_segs, max_len);
 	if (unlikely(sector_offset < 0))
 		return ERR_PTR(sector_offset);
 	if (!sector_offset)
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 3c52cc1497d4..554c96cb92c6 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -767,7 +767,7 @@ xfs_zone_gc_split_write(
 	if (!chunk->is_seq)
 		return NULL;
 
-	split_sectors = bio_split_rw_at(&chunk->bio, lim, &nsegs,
+	split_sectors = bio_split_io_at(&chunk->bio, lim, &nsegs,
 			lim->max_zone_append_sectors << SECTOR_SHIFT);
 	if (!split_sectors)
 		return NULL;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ad2d57908c1c..d1f38c47d2ee 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -323,7 +323,7 @@ void bio_trim(struct bio *bio, sector_t offset, sector_t size);
 extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp,
 		struct bio_set *bs);
 int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
-		unsigned *segs, unsigned max_bytes, unsigned len_align);
+		unsigned *segs, unsigned max_bytes);
 u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
 		u8 gaps_bit);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 72e34acd439c..38b0bc8c6011 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1864,13 +1864,6 @@ bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
 	return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
 }
 
-static inline int bio_split_rw_at(struct bio *bio,
-		const struct queue_limits *lim,
-		unsigned *segs, unsigned max_bytes)
-{
-	return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
-}
-
 #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
 
 #endif /* _LINUX_BLKDEV_H */
-- 
2.47.3
