Split bio_map_kern into a simple bio_map_virt that can use bio_add_virt_nofail for kernel direct mapping addresses, and a more complex bio_map_vmalloc that chunks up and maps vmalloc ranges using the bio_add_vmalloc helper.
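As a reference for review: the two helpers this relies on, bio_vmalloc_max_vecs and bio_add_vmalloc, are introduced earlier in the series. The sketch below only approximates their behavior from how they are used here (bio_vmalloc_max_vecs bounds the number of vectors needed for a vmalloc range; bio_add_vmalloc adds at most one page of the range per call and returns the number of bytes added, or 0 on failure). The actual bodies may differ, and the flush_kernel_vmap_range call dropped from the old bio_map_kern is presumably handled by the helper:

	/* Sketch only: bodies approximated from how the helpers are used below. */
	static inline unsigned int bio_vmalloc_max_vecs(void *vaddr,
			unsigned int len)
	{
		/* one vector per page touched by [vaddr, vaddr + len) */
		return DIV_ROUND_UP(offset_in_page(vaddr) + len, PAGE_SIZE);
	}

	static unsigned int bio_add_vmalloc(struct bio *bio, void *vaddr,
			unsigned int len)
	{
		unsigned int offset = offset_in_page(vaddr);
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE - offset);

		/* vmalloc memory is not physically contiguous, map page by page */
		if (bio_add_page(bio, vmalloc_to_page(vaddr), bytes, offset) < bytes)
			return 0;
		return bytes;
	}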
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 block/blk-map.c | 74 +++++++++++++++++++------------------------------
 1 file changed, 29 insertions(+), 45 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index ca6b55ac0da1..7742d3cb0499 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -317,65 +317,47 @@ static void bio_map_kern_endio(struct bio *bio)
 	kfree(bio);
 }
 
-/**
- * bio_map_kern - map kernel address into bio
- * @data: pointer to buffer to map
- * @len: length in bytes
- * @op: bio/request operation
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-static struct bio *bio_map_kern(void *data, unsigned int len,
-		enum req_op op, gfp_t gfp_mask)
+static struct bio *bio_map_virt(void *data, unsigned int len, enum req_op op,
+		gfp_t gfp_mask)
 {
-	unsigned long kaddr = (unsigned long)data;
-	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = kaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	bool is_vmalloc = is_vmalloc_addr(data);
-	struct page *page;
-	int offset, i;
 	struct bio *bio;
 
-	bio = bio_kmalloc(nr_pages, gfp_mask);
+	bio = bio_kmalloc(1, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
-	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, op);
-
-	if (is_vmalloc) {
-		flush_kernel_vmap_range(data, len);
-		bio->bi_private = data;
-	}
-
-	offset = offset_in_page(kaddr);
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
+	bio_init(bio, NULL, bio->bi_inline_vecs, 1, op);
+	bio_add_virt_nofail(bio, data, len);
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
 
-		if (len <= 0)
-			break;
+static struct bio *bio_map_vmalloc(void *data, unsigned int len, enum req_op op,
+		gfp_t gfp_mask)
+{
+	unsigned int nr_vecs = bio_vmalloc_max_vecs(data, len);
+	unsigned int added;
+	struct bio *bio;
 
-		if (bytes > len)
-			bytes = len;
+	bio = bio_kmalloc(nr_vecs, gfp_mask);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, op);
+	bio->bi_private = data;
+	bio->bi_end_io = bio_map_kern_endio;
 
-		if (!is_vmalloc)
-			page = virt_to_page(data);
-		else
-			page = vmalloc_to_page(data);
-		if (bio_add_page(bio, page, bytes, offset) < bytes) {
+	do {
+		added = bio_add_vmalloc(bio, data, len);
+		if (!added) {
 			/* we don't support partial mappings */
 			bio_uninit(bio);
 			kfree(bio);
 			return ERR_PTR(-EINVAL);
 		}
-		data += bytes;
-		len -= bytes;
-		offset = 0;
-	}
+		data += added;
+		len -= added;
+	} while (len);
 
-	bio->bi_end_io = bio_map_kern_endio;
 	return bio;
 }
 
 static void bio_copy_kern_endio(struct bio *bio)
@@ -713,8 +695,10 @@ int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
 	if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf) ||
 	    blk_queue_may_bounce(rq->q))
 		bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
+	else if (is_vmalloc_addr(kbuf))
+		bio = bio_map_vmalloc(kbuf, len, req_op(rq), gfp_mask);
 	else
-		bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);
+		bio = bio_map_virt(kbuf, len, req_op(rq), gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-- 
2.47.2