From: Gao Xiang <hsiang...@redhat.com>

When picking up inplace I/O pages, they should be traversed in reverse
order, in line with the traversal order of file-backed online pages.
Also, the page index should be updated together with the page pointer
when preloading compressed pages.

Previously, only page-sized pclustersize was supported, so this was not
an issue. Also rename `compressedpages' to `icpage_ptr' to reflect its
functionality.

Acked-by: Chao Yu <yuch...@huawei.com>
Signed-off-by: Gao Xiang <hsiang...@redhat.com>
---
 fs/erofs/zdata.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index db296d324333..78e4b598ecca 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -204,7 +204,8 @@ struct z_erofs_collector {
 
        struct z_erofs_pcluster *pcl, *tailpcl;
        struct z_erofs_collection *cl;
-       struct page **compressedpages;
+       /* a pointer used to pick up inplace I/O pages */
+       struct page **icpage_ptr;
        z_erofs_next_pcluster_t owned_head;
 
        enum z_erofs_collectmode mode;
@@ -238,17 +239,19 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
                                     enum z_erofs_cache_alloctype type,
                                     struct list_head *pagepool)
 {
-       const struct z_erofs_pcluster *pcl = clt->pcl;
-       struct page **pages = clt->compressedpages;
-       pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
+       struct z_erofs_pcluster *pcl = clt->pcl;
        bool standalone = true;
        gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+       struct page **pages;
+       pgoff_t index;
 
        if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
                return;
 
-       for (; pages < pcl->compressed_pages + pcl->pclusterpages; ++pages) {
+       pages = pcl->compressed_pages;
+       index = pcl->obj.index;
+       for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
                struct page *page;
                compressed_page_t t;
                struct page *newpage = NULL;
@@ -360,16 +363,14 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
 }
 
 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
-                                         struct page *page)
+static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+                                  struct page *page)
 {
        struct z_erofs_pcluster *const pcl = clt->pcl;
 
-       while (clt->compressedpages <
-              pcl->compressed_pages + pcl->pclusterpages) {
-               if (!cmpxchg(clt->compressedpages++, NULL, page))
+       while (clt->icpage_ptr > pcl->compressed_pages)
+               if (!cmpxchg(--clt->icpage_ptr, NULL, page))
                        return true;
-       }
        return false;
 }
 
@@ -576,9 +577,8 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
        z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
                                  clt->cl->pagevec, clt->cl->vcnt);
 
-       clt->compressedpages = clt->pcl->compressed_pages;
-       if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
-               clt->compressedpages += clt->pcl->pclusterpages;
+       /* since file-backed online pages are traversed in reverse order */
+       clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages;
        return 0;
 }
 
-- 
2.20.1
