For compressed read, we always submit page reads using page size.

This doesn't work well with subpage, as with subpage one page can contain
several sectors.
Such a submission will read a range beyond what we want, and cause
problems.

Thankfully, to make it subpage compatible, we only need to change how the
last page of the compressed extent is read.

Instead of always adding a full page to the compressed read bio, if we're
at the last page, calculate the size using compressed length, so that we
only add part of the range into the compressed read bio.

While we are at it, also change the PAGE_SIZE used in
lookup_extent_mapping() to sectorsize.
This modification won't cause any functional change, as
lookup_extent_mapping() can handle the case where the search range is
larger than found extent range.

Signed-off-by: Qu Wenruo <w...@suse.com>
---
 fs/btrfs/compression.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 6d203acfdeb3..3d16ca5d420d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -640,7 +640,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio_first_page_all(bio)),
-                                  PAGE_SIZE);
+                                  fs_info->sectorsize);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;
@@ -698,19 +698,33 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        refcount_set(&cb->pending_bios, 1);
 
        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+               u32 pg_len;
                int submit = 0;
 
+               /*
+                * To handle subpage case, we need to make sure the bio only
+                * covers the range we need.
+                *
+                * If we're at the last page, truncate the length to only cover
+                * the remaining part.
+                */
+               if (pg_index == nr_pages - 1)
+                       pg_len = min_t(u32, PAGE_SIZE,
+                                       compressed_len - pg_index * PAGE_SIZE);
+               else
+                       pg_len = PAGE_SIZE;
+
                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;
 
                if (comp_bio->bi_iter.bi_size)
-                       submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
+                       submit = btrfs_bio_fits_in_stripe(page, pg_len,
                                                          comp_bio, 0);
 
                page->mapping = NULL;
-               if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
-                   PAGE_SIZE) {
+               if (submit || bio_add_page(comp_bio, page, pg_len, 0) <
+                   pg_len) {
                        unsigned int nr_sectors;
 
                        ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
@@ -743,9 +757,9 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;
 
-                       bio_add_page(comp_bio, page, PAGE_SIZE, 0);
+                       bio_add_page(comp_bio, page, pg_len, 0);
                }
-               cur_disk_byte += PAGE_SIZE;
+               cur_disk_byte += pg_len;
        }
 
        ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
-- 
2.30.0

Reply via email to