From: Chao Yu <[email protected]>

In the call path below, no page is ever cached into or grabbed from
the @pagepool list:
- z_erofs_readpage
 - z_erofs_do_read_page
  - preload_compressed_pages
  - erofs_allocpage

Let's get rid of this unneeded parameter.
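
For illustration, a minimal standalone sketch of the idea, using a
hypothetical allocpage_sketch() helper rather than the real
erofs_allocpage(): a caller that keeps no pagepool (such as the readpage
path) simply passes NULL, and the allocator falls back to alloc_page().

	/* Sketch only; hypothetical helper, not the actual fs/erofs code. */
	#include <linux/gfp.h>
	#include <linux/list.h>
	#include <linux/mm.h>

	/* Take a page from the optional pool if supplied, else allocate one. */
	static struct page *allocpage_sketch(struct list_head *pool, gfp_t gfp)
	{
		struct page *page;

		if (pool && !list_empty(pool)) {
			/* reuse a pooled page and detach it from the list */
			page = lru_to_page(pool);
			list_del(&page->lru);
		} else {
			/* no pool provided: fall back to a plain allocation */
			page = alloc_page(gfp);
		}
		return page;
	}

	/* A pagepool-less caller, mirroring the staging page hunk below. */
	static struct page *alloc_staging_sketch(void)
	{
		return allocpage_sketch(NULL, GFP_NOFS | __GFP_NOFAIL);
	}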

Signed-off-by: Chao Yu <[email protected]>
---
 fs/erofs/utils.c |  2 +-
 fs/erofs/zdata.c | 14 ++++++--------
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index de9986d2f82f..7a6e5456b0b8 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -11,7 +11,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
 {
        struct page *page;
 
-       if (!list_empty(pool)) {
+       if (pool && !list_empty(pool)) {
                page = lru_to_page(pool);
                DBG_BUGON(page_ref_count(page) != 1);
                list_del(&page->lru);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 6c939def00f9..f218f58f4159 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -153,8 +153,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock);
 
 static void preload_compressed_pages(struct z_erofs_collector *clt,
                                     struct address_space *mc,
-                                    enum z_erofs_cache_alloctype type,
-                                    struct list_head *pagepool)
+                                    enum z_erofs_cache_alloctype type)
 {
        const struct z_erofs_pcluster *pcl = clt->pcl;
        const unsigned int clusterpages = BIT(pcl->clusterbits);
@@ -562,8 +561,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-                               struct page *page,
-                               struct list_head *pagepool)
+                               struct page *page)
 {
        struct inode *const inode = fe->inode;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -621,7 +619,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                cache_strategy = DONTALLOC;
 
        preload_compressed_pages(clt, MNGD_MAPPING(sbi),
-                                cache_strategy, pagepool);
+                                cache_strategy);
 
 hitted:
        /*
@@ -653,7 +651,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
        /* should allocate an additional staging page for pagevec */
        if (err == -EAGAIN) {
                struct page *const newpage =
-                       erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
+                       erofs_allocpage(NULL, GFP_NOFS | __GFP_NOFAIL);
 
                newpage->mapping = Z_EROFS_MAPPING_STAGING;
                err = z_erofs_attach_page(clt, newpage,
@@ -1282,7 +1280,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 
        f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
 
-       err = z_erofs_do_read_page(&f, page, &pagepool);
+       err = z_erofs_do_read_page(&f, page);
        (void)z_erofs_collector_end(&f.clt);
 
        /* if some compressed cluster ready, need submit them anyway */
@@ -1341,7 +1339,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
                /* traversal in reverse order */
                head = (void *)page_private(page);
 
-               err = z_erofs_do_read_page(&f, page, &pagepool);
+               err = z_erofs_do_read_page(&f, page);
                if (err)
                        erofs_err(inode->i_sb,
                                  "readahead error at page %lu @ nid %llu",
-- 
2.22.0
