On 2021/8/12 11:18, Fengnan Chang wrote:
Since a cluster is the basic unit of compression, a cluster is either
compressed as a whole or not at all. So we only need to check whether a
cluster is compressed once, for its first page; the remaining pages in
the same cluster can skip the check.
Signed-off-by: Fengnan Chang <[email protected]>
---
fs/f2fs/data.c | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
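For readers skimming the thread, here is a standalone sketch of the
caching idea (my simplified model, not the real f2fs code:
cluster_is_compressed() stands in for f2fs_is_compressed_cluster(),
and the names and numbers are made up; with log_cluster_size = 2 a
cluster covers 4 pages, so page->index >> log_cluster_size is the
page's cluster index):

	/*
	 * Reviewer's sketch, not f2fs code: a simplified model of the
	 * lookup caching this patch adds to f2fs_mpage_readpages().
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define LOG_CLUSTER_SIZE	2	/* 4 pages per cluster */
	#define NULL_CLUSTER	((unsigned long)-1)

	/* stand-in for f2fs_is_compressed_cluster(): pretend
	 * even-numbered clusters are compressed */
	static bool cluster_is_compressed(unsigned long cluster_idx)
	{
		return (cluster_idx % 2) == 0;
	}

	int main(void)
	{
		unsigned long cc_cluster_idx = NULL_CLUSTER; /* like cc.cluster_idx */
		unsigned long nc_cluster_idx = NULL_CLUSTER; /* the cache this patch adds */
		unsigned long lookups = 0;

		for (unsigned long page_idx = 0; page_idx < 16; page_idx++) {
			unsigned long cluster_idx = page_idx >> LOG_CLUSTER_SIZE;

			/* page merges into the compressed cluster being collected */
			if (cc_cluster_idx == cluster_idx) {
				printf("page %2lu: merged into compressed cluster %lu\n",
				       page_idx, cluster_idx);
				continue;
			}
			cc_cluster_idx = NULL_CLUSTER;	/* crossed a cluster boundary */

			/* fast path: cluster already known to be non-compressed */
			if (nc_cluster_idx == cluster_idx) {
				printf("page %2lu: single-page read (cached)\n", page_idx);
				continue;
			}

			lookups++;	/* the expensive once-per-cluster check */
			if (cluster_is_compressed(cluster_idx)) {
				cc_cluster_idx = cluster_idx;
				printf("page %2lu: starts compressed cluster %lu\n",
				       page_idx, cluster_idx);
			} else {
				nc_cluster_idx = cluster_idx;
				printf("page %2lu: single-page read\n", page_idx);
			}
		}

		/* 4 lookups for 16 pages; before the patch it was one per page */
		printf("expensive lookups: %lu\n", lookups);
		return 0;
	}

With 16 pages in 4 clusters this does the expensive lookup 4 times,
once per cluster; before the patch it ran once per page.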
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index df5e8d8c654e..fc0115a61082 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2299,6 +2299,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 		.nr_rpages = 0,
 		.nr_cpages = 0,
 	};
+	pgoff_t nc_cluster_idx = NULL_CLUSTER;
 #endif
 	unsigned nr_pages = rac ? readahead_count(rac) : 1;
 	unsigned max_nr_pages = nr_pages;
@@ -2328,15 +2329,27 @@ static int f2fs_mpage_readpages(struct inode *inode,
 							&last_block_in_bio,
 							rac != NULL, false);
 				f2fs_destroy_compress_ctx(&cc, false);
+				nc_cluster_idx = NULL_CLUSTER;
Can we get rid of this? nc_cluster_idx only records a cluster we
already found to be non-compressed, and submitting the pending
compressed cluster doesn't change that, so the reset here looks
unnecessary.
 				if (ret)
 					goto set_error_page;
 			}
-			ret = f2fs_is_compressed_cluster(inode, page->index);
-			if (ret < 0)
-				goto set_error_page;
-			else if (!ret)
-				goto read_single_page;
+			if (cc.cluster_idx == NULL_CLUSTER) {
+				if (nc_cluster_idx ==
+					page->index >> cc.log_cluster_size) {
+					goto read_single_page;
+				}
+
+				ret = f2fs_is_compressed_cluster(inode, page->index);
+				if (ret < 0)
+					goto set_error_page;
+				else if (!ret) {
+					nc_cluster_idx =
+						page->index >> cc.log_cluster_size;
+					goto read_single_page;
+				}
+				nc_cluster_idx = NULL_CLUSTER;
+			}
 			ret = f2fs_init_compress_ctx(&cc);
 			if (ret)
 				goto set_error_page;
@@ -2373,6 +2386,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
 							&last_block_in_bio,
 							rac != NULL, false);
 				f2fs_destroy_compress_ctx(&cc, false);
+				nc_cluster_idx = NULL_CLUSTER;
Ditto,
Thanks,
 			}
 		}
 #endif