Don't allocate a new page array to replace the old one when trimming
unneeded tail pages after compression; reuse the existing array instead.
This saves one page array allocation and free on every compressed page
write. Since cc->nr_cpages is shrunk to the compressed size while
cc->cpages keeps its original allocation, record the original size in a
new field, cc->raw_nr_cpages, so that page_array_free() releases the
array with the size it was allocated with.
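
For illustration, the resulting flow looks roughly like this (a
simplified, behaviorally equivalent sketch of the patched paths in
f2fs_compress_pages() and f2fs_write_compressed_pages(); the trim loop
is shown in its simpler equivalent form, error handling and unrelated
code omitted):

        cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
        cc->raw_nr_cpages = cc->nr_cpages;
        cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
        ...
        /* trim tail pages in place instead of copying into a new array */
        new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
        for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
                f2fs_compress_free_page(cc->cpages[i]);
                cc->cpages[i] = NULL;
        }
        cc->nr_cpages = new_nr_cpages;
        ...
        /* on the write path, free with the original allocation size */
        page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);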

Signed-off-by: Fengnan Chang <[email protected]>
---
 fs/f2fs/compress.c | 14 ++------------
 fs/f2fs/f2fs.h     |  1 +
 2 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 455561826c7d..43daafe382e7 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -618,7 +618,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
        const struct f2fs_compress_ops *cops =
                                f2fs_cops[fi->i_compress_algorithm];
        unsigned int max_len, new_nr_cpages;
-       struct page **new_cpages;
        u32 chksum = 0;
        int i, ret;
 
@@ -633,6 +632,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
        max_len = COMPRESS_HEADER_SIZE + cc->clen;
        cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+       cc->raw_nr_cpages = cc->nr_cpages;
 
        cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
        if (!cc->cpages) {
@@ -683,13 +683,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
        new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
 
-       /* Now we're going to cut unnecessary tail pages */
-       new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
-       if (!new_cpages) {
-               ret = -ENOMEM;
-               goto out_vunmap_cbuf;
-       }
-
        /* zero out any unused part of the last page */
        memset(&cc->cbuf->cdata[cc->clen], 0,
                        (new_nr_cpages * PAGE_SIZE) -
@@ -700,7 +693,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 
        for (i = 0; i < cc->nr_cpages; i++) {
                if (i < new_nr_cpages) {
-                       new_cpages[i] = cc->cpages[i];
                        continue;
                }
                f2fs_compress_free_page(cc->cpages[i]);
@@ -710,8 +702,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
        if (cops->destroy_compress_ctx)
                cops->destroy_compress_ctx(cc);
 
-       page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
-       cc->cpages = new_cpages;
        cc->nr_cpages = new_nr_cpages;
 
        trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
@@ -1330,7 +1320,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
        spin_unlock(&fi->i_size_lock);
 
        f2fs_put_rpages(cc);
-       page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+       page_array_free(cc->inode, cc->cpages, cc->raw_nr_cpages);
        cc->cpages = NULL;
        f2fs_destroy_compress_ctx(cc, false);
        return 0;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 867f2c5d9559..8b1f84d88a65 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1454,6 +1454,7 @@ struct compress_ctx {
        unsigned int nr_rpages;         /* total page number in rpages */
        struct page **cpages;           /* pages store compressed data in cluster */
        unsigned int nr_cpages;         /* total page number in cpages */
+       unsigned int raw_nr_cpages;     /* originally allocated page number in cpages */
        void *rbuf;                     /* virtual mapped address on rpages */
        struct compress_data *cbuf;     /* virtual mapped address on cpages */
        size_t rlen;                    /* valid data length in rbuf */
-- 
2.29.0