> +#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
That seems like an odd construct, as you don't call
flush_kernel_dcache_page.  From looking at who defines it, it seems
to be about the right set of architectures, but that might just be
a mix of chance and similar requirements for cache flushing.
> +static void bio_invalidate_vmalloc_pages(struct bio *bio)
> +{
> + if (bio->bi_private) {
> + struct bvec_iter_all iter_all;
> + struct bio_vec *bvec;
> + unsigned long len = 0;
> +
> + bio_for_each_segment_all(bvec, bio, iter_all)
> + len += bvec->bv_len;
> + invalidate_kernel_vmap_range(bio->bi_private, len);
We control the bio here, so we can iterate directly over the
segments instead of using the fairly expensive
bio_for_each_segment_all, which has to visit each page and build a
bvec for it.
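Something like this untested sketch is what I have in mind, walking
the bvec array the bio already owns:

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
	unsigned long len = 0;
	unsigned short i;

	if (!bio->bi_private)
		return;

	/* we built this bvec array ourselves, so just walk it directly */
	for (i = 0; i < bio->bi_vcnt; i++)
		len += bio->bi_io_vec[i].bv_len;
	invalidate_kernel_vmap_range(bio->bi_private, len);
}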
> + struct page *page;
> int offset, i;
> struct bio *bio;
>
> @@ -1508,6 +1529,12 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
> if (!bio)
> return ERR_PTR(-ENOMEM);
>
> + if (is_vmalloc) {
> + flush_kernel_vmap_range(data, len);
> + if ((!op_is_write(bio_op(bio))))
> + bio->bi_private = data;
> + }
We've just allocated the bio, so bio->bi_opf is not actually set at
this point, unfortunately.
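One way around that might be to always stash the pointer here and
defer the direction check to completion time, when bi_opf has been
set up - roughly (untested):

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

with bio_invalidate_vmalloc_pages then bailing out early for writes:

	if (!bio->bi_private || op_is_write(bio_op(bio)))
		return;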