Split the logic to allocate a new bio out into a separate helper and keep
only the fast path that adds more data to an existing bio in
iomap_bio_read_folio_range.

Signed-off-by: Christoph Hellwig <[email protected]>
---
 fs/iomap/bio.c | 69 +++++++++++++++++++++++++++-----------------------
 1 file changed, 37 insertions(+), 32 deletions(-)
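
[Reviewer note, not for the commit log: condensed from the hunk below, the
post-patch fast path reduces to a single contiguity-and-space check, with
bio submission, gfp selection, and the single-page allocation fallback all
moved into the new iomap_read_alloc_bio helper.  Sketch only; the diff is
authoritative.]

	static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
			struct iomap_read_folio_ctx *ctx, size_t plen)
	{
		struct folio *folio = ctx->cur_folio;
		struct bio *bio = ctx->read_ctx;

		/*
		 * Append to the current bio when one exists, it ends at the
		 * sector this range starts at, and bio_add_folio finds room.
		 * Otherwise fall back to the slow path, which submits the old
		 * bio and allocates a new one seeded with this folio.
		 */
		if (!bio ||
		    bio_end_sector(bio) != iomap_sector(&iter->iomap, iter->pos) ||
		    !bio_add_folio(bio, folio, plen,
				   offset_in_folio(folio, iter->pos)))
			iomap_read_alloc_bio(iter, ctx, plen);
		return 0;
	}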

diff --git a/fs/iomap/bio.c b/fs/iomap/bio.c
index fc045f2e4c45..578b1202e037 100644
--- a/fs/iomap/bio.c
+++ b/fs/iomap/bio.c
@@ -26,45 +26,50 @@ static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
                submit_bio(bio);
 }
 
-static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+static void iomap_read_alloc_bio(const struct iomap_iter *iter,
                struct iomap_read_folio_ctx *ctx, size_t plen)
 {
-       struct folio *folio = ctx->cur_folio;
        const struct iomap *iomap = &iter->iomap;
-       loff_t pos = iter->pos;
-       size_t poff = offset_in_folio(folio, pos);
-       loff_t length = iomap_length(iter);
-       sector_t sector;
+       unsigned int nr_vecs = DIV_ROUND_UP(iomap_length(iter), PAGE_SIZE);
+       struct folio *folio = ctx->cur_folio;
+       gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+       gfp_t orig_gfp = gfp;
        struct bio *bio = ctx->read_ctx;
 
-       sector = iomap_sector(iomap, pos);
-       if (!bio || bio_end_sector(bio) != sector ||
-           !bio_add_folio(bio, folio, plen, poff)) {
-               gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
-               gfp_t orig_gfp = gfp;
-               unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+       if (bio)
+               submit_bio(bio);
+
+       /* Same as readahead_gfp_mask: */
+       if (ctx->rac)
+               gfp |= __GFP_NORETRY | __GFP_NOWARN;
 
-               if (bio)
-                       submit_bio(bio);
+       /*
+        * If the bio_alloc fails, try it again for a single page to avoid
+        * having to deal with partial page reads.  This emulates what
+        * do_mpage_read_folio does.
+        */
+       bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ, gfp);
+       if (!bio)
+               bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
+       if (ctx->rac)
+               bio->bi_opf |= REQ_RAHEAD;
+       bio->bi_iter.bi_sector = iomap_sector(iomap, iter->pos);
+       bio->bi_end_io = iomap_read_end_io;
+       bio_add_folio_nofail(bio, folio, plen,
+                       offset_in_folio(folio, iter->pos));
+       ctx->read_ctx = bio;
+}
+
+static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+               struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+       struct folio *folio = ctx->cur_folio;
+       struct bio *bio = ctx->read_ctx;
 
-               if (ctx->rac) /* same as readahead_gfp_mask */
-                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
-               bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
-                                    gfp);
-               /*
-                * If the bio_alloc fails, try it again for a single page to
-                * avoid having to deal with partial page reads.  This emulates
-                * what do_mpage_read_folio does.
-                */
-               if (!bio)
-                       bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
-               if (ctx->rac)
-                       bio->bi_opf |= REQ_RAHEAD;
-               bio->bi_iter.bi_sector = sector;
-               bio->bi_end_io = iomap_read_end_io;
-               bio_add_folio_nofail(bio, folio, plen, poff);
-               ctx->read_ctx = bio;
-       }
+       if (!bio ||
+           bio_end_sector(bio) != iomap_sector(&iter->iomap, iter->pos) ||
+           !bio_add_folio(bio, folio, plen, offset_in_folio(folio, iter->pos)))
+               iomap_read_alloc_bio(iter, ctx, plen);
        return 0;
 }
 
-- 
2.47.3