Convert the __bio_add_page(..., virt_to_page(), ...) pattern to the new bio_add_virt_nofail helper that implements it, and use bio_add_vmalloc to insulate XFS from the details of adding vmalloc memory to a bio.
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 fs/xfs/xfs_buf.c | 27 ++++++++-------------------
 1 file changed, 8 insertions(+), 19 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1a2b3f06fa71..042a738b7fda 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1339,37 +1339,26 @@ xfs_buf_submit_bio(
 	if (is_vmalloc_addr(bp->b_addr)) {
 		unsigned int size = BBTOB(bp->b_length);
-		unsigned int alloc_size = roundup(size, PAGE_SIZE);
 		void *data = bp->b_addr;
+		unsigned int added;
 
-		bio = bio_alloc(bp->b_target->bt_bdev, alloc_size >> PAGE_SHIFT,
-				xfs_buf_bio_op(bp), GFP_NOIO);
+		bio = bio_alloc(bp->b_target->bt_bdev,
+				howmany(size, PAGE_SIZE), xfs_buf_bio_op(bp),
+				GFP_NOIO);
 
 		do {
-			unsigned int len = min(size, PAGE_SIZE);
-
-			ASSERT(offset_in_page(data) == 0);
-			__bio_add_page(bio, vmalloc_to_page(data), len, 0);
-			data += len;
-			size -= len;
+			added = bio_add_vmalloc(bio, data, size);
+			data += added;
+			size -= added;
 		} while (size);
-
-		flush_kernel_vmap_range(bp->b_addr, alloc_size);
 	} else {
 		/*
 		 * Single folio or slab allocation.  Must be contiguous and thus
 		 * only a single bvec is needed.
-		 *
-		 * This uses the page based bio add helper for now as that is
-		 * the lowest common denominator between folios and slab
-		 * allocations.  To be replaced with a better block layer
-		 * helper soon (hopefully).
 		 */
 		bio = bio_alloc(bp->b_target->bt_bdev, 1, xfs_buf_bio_op(bp),
 				GFP_NOIO);
-		__bio_add_page(bio, virt_to_page(bp->b_addr),
-				BBTOB(bp->b_length),
-				offset_in_page(bp->b_addr));
+		bio_add_virt_nofail(bio, bp->b_addr, BBTOB(bp->b_length));
 	}
 
 	bio->bi_private = bp;
-- 
2.47.2