When overwriting only the first block of a cluster, since the cluster is
not full, f2fs_write_multi_pages() will fall back to
f2fs_write_raw_pages(), causing the whole cluster to become uncompressed
even though the data is compressible. This can reduce random write
benchmark scores significantly.

root# dd if=/dev/zero of=./fio-test bs=1M count=1

root# sync

root# echo 3 > /proc/sys/vm/drop_caches

root# f2fs_io get_cblocks ./fio-test

root# dd if=/dev/zero of=./fio-test bs=4K count=1 oflag=direct conv=notrunc

w/o patch:
root# f2fs_io get_cblocks ./fio-test
189

w/ patch:
root# f2fs_io get_cblocks ./fio-test
192

Signed-off-by: Fengnan Chang <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
---
 fs/f2fs/compress.c | 19 +++++++++++++++++++
 fs/f2fs/data.c     |  7 ++++---
 fs/f2fs/f2fs.h     |  2 ++
 3 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index c1bf9ad4c220..bec3176ca682 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -881,6 +881,25 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
 	return is_page_in_cluster(cc, index);
 }
 
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+                               int index, int nr_pages)
+{
+       unsigned long pgidx;
+       int i;
+
+       if (nr_pages - index < cc->cluster_size)
+               return false;
+
+       pgidx = pvec->pages[index]->index;
+
+       for (i = 1; i < cc->cluster_size; i++) {
+               if (pvec->pages[index + i]->index != pgidx + i)
+                       return false;
+       }
+
+       return true;
+}
+
 static bool cluster_has_invalid_data(struct compress_ctx *cc)
 {
        loff_t i_size = i_size_read(cc->inode);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f4fd6c246c9a..7a0e26f3e575 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3020,9 +3020,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
                                                done = 1;
                                                break;
                                        } else if (ret2 &&
-						!f2fs_compress_write_end(inode,
-								fsdata, page->index,
-								1)) {
+						(!f2fs_compress_write_end(inode,
+							fsdata, page->index, 1) ||
+						!f2fs_all_cluster_page_loaded(&cc,
+							&pvec, i, nr_pages))) {
                                                retry = 1;
                                                break;
                                        }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 039a229e11c9..6f8eda811350 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4030,6 +4030,8 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
 					block_t blkaddr);
 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+				int index, int nr_pages);
 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
 int f2fs_write_multi_pages(struct compress_ctx *cc,
-- 
2.32.0



_______________________________________________
Linux-f2fs-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

Reply via email to