When only the first block of a cluster is overwritten, the cluster is not
full, so f2fs_write_multi_pages() falls back to f2fs_write_raw_pages() and
the whole cluster becomes uncompressed, even though the data is still
compressible. This can significantly reduce random write benchmark scores.
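
For context, a minimal sketch of that fallback decision, paraphrased from
f2fs_write_multi_pages() in fs/f2fs/compress.c (the -EAGAIN retry, page
release and error handling are omitted, so this is not the exact kernel
code):

  /* Simplified sketch, not the exact kernel code. */
  int f2fs_write_multi_pages(struct compress_ctx *cc, int *submitted,
                  struct writeback_control *wbc, enum iostat_type io_type)
  {
          /* Compression is only attempted for a full, compressible cluster. */
          if (cluster_may_compress(cc)) {
                  if (!f2fs_compress_pages(cc))
                          return f2fs_write_compressed_pages(cc, submitted,
                                                  wbc, io_type);
          }

          /*
           * Partial cluster (e.g. only the first block was dirtied):
           * fall back to raw writeback, which leaves the whole cluster
           * uncompressed on disk.
           */
          return f2fs_write_raw_pages(cc, submitted, wbc, io_type);
  }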

root# dd if=/dev/zero of=./fio-test bs=1M count=1

root# sync

root# echo 3 > /proc/sys/vm/drop_caches

root# f2fs_io get_cblocks ./fio-test

root# dd if=/dev/zero of=./fio-test bs=4K count=1 oflag=direct conv=notrunc

w/o patch:
root# f2fs_io get_cblocks ./fio-test
189

w/ patch:
root# f2fs_io get_cblocks ./fio-test
192

Signed-off-by: Fengnan Chang <[email protected]>
---
 fs/f2fs/compress.c | 12 ++++++++++++
 fs/f2fs/data.c     |  7 +++++++
 fs/f2fs/f2fs.h     |  1 +
 3 files changed, 20 insertions(+)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index c1bf9ad4c220..c4f36ead6f17 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -857,6 +857,18 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
                f2fs_decompress_cluster(dic);
 }
 
+bool is_page_same_cluster(struct compress_ctx *cc, struct pagevec *pvec, int index)
+{
+       pgoff_t idx = cluster_idx(cc, pvec->pages[index]->index);
+       int i;
+
+       for (i = index + 1; i < index + cc->cluster_size; i++) {
+               if (cluster_idx(cc, pvec->pages[i]->index) != idx)
+                       return false;
+       }
+
+       return true;
+}
 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
 {
        if (cc->cluster_idx == NULL_CLUSTER)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f4fd6c246c9a..33ccabbe9f92 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3025,6 +3025,13 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
                                                                1)) {
                                                retry = 1;
                                                break;
+                                       } else if (ret2 && nr_pages - i < cc.cluster_size) {
+                                               retry = 1;
+                                               break;
+                                       } else if (ret2 && nr_pages - i >= cc.cluster_size &&
+                                               !is_page_same_cluster(&cc, &pvec, i)) {
+                                               retry = 1;
+                                               break;
+                                               retry = 1;
+                                               break;
                                        }
                                } else {
                                        goto lock_page;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 039a229e11c9..f225ea36bb60 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4031,6 +4031,7 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
+bool is_page_same_cluster(struct compress_ctx *cc, struct pagevec *pvec, int index);
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
 int f2fs_write_multi_pages(struct compress_ctx *cc,
                                                int *submitted,
-- 
2.32.0


