On 2023/8/17 16:28, Gao Xiang wrote:
Some preparation logic should be part of z_erofs_pcluster_begin()
instead of z_erofs_do_read_page().  Let's move it now.

Signed-off-by: Gao Xiang <[email protected]>
---
  fs/erofs/zdata.c | 59 +++++++++++++++++++++---------------------------
  1 file changed, 26 insertions(+), 33 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 4ed99346c4e1..30ecdfe41836 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -852,7 +852,10 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
  static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
  {
        struct erofs_map_blocks *map = &fe->map;
+       struct super_block *sb = fe->inode->i_sb;
+       erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
        struct erofs_workgroup *grp = NULL;
+       void *mptr;
        int ret;
DBG_BUGON(fe->pcl);
@@ -861,8 +864,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
        DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
if (!(map->m_flags & EROFS_MAP_META)) {
-               grp = erofs_find_workgroup(fe->inode->i_sb,
-                                          map->m_pa >> PAGE_SHIFT);
+               grp = erofs_find_workgroup(sb, blknr);
        } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
                DBG_BUGON(1);
                return -EFSCORRUPTED;
@@ -881,9 +883,24 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
        } else if (ret) {
                return ret;
        }
+
        z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
                                Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
-       /* since file-backed online pages are traversed in reverse order */
+       if (!z_erofs_is_inline_pcluster(fe->pcl)) {
+               /* bind cache first when cached decompression is preferred */
+               z_erofs_bind_cache(fe);
+       } else {

Nitpick, mptr can be defined here.

Reviewed-by: Chao Yu <[email protected]>

Thanks,

+               mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
+               if (IS_ERR(mptr)) {
+                       ret = PTR_ERR(mptr);
+                       erofs_err(sb, "failed to get inline data %d", ret);
+                       return ret;
+               }
+               get_page(map->buf.page);
+               WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+               fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+       }
+       /* file-backed inplace I/O pages are traversed in reverse order */
        fe->icur = z_erofs_pclusterpages(fe->pcl);
        return 0;
  }
@@ -982,39 +999,15 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                err = z_erofs_map_blocks_iter(inode, map, 0);
                if (err)
                        goto out;
-       } else {
-               if (fe->pcl)
-                       goto hitted;
-               /* didn't get a valid pcluster previously (very rare) */
-       }
-
-       if (!(map->m_flags & EROFS_MAP_MAPPED) ||
-           map->m_flags & EROFS_MAP_FRAGMENT)
+       } else if (fe->pcl) {
                goto hitted;
+       }
-       err = z_erofs_pcluster_begin(fe);
-       if (err)
-               goto out;
-
-       if (z_erofs_is_inline_pcluster(fe->pcl)) {
-               void *mp;
-
-               mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
-                                       erofs_blknr(inode->i_sb, map->m_pa),
-                                       EROFS_NO_KMAP);
-               if (IS_ERR(mp)) {
-                       err = PTR_ERR(mp);
-                       erofs_err(inode->i_sb,
-                                 "failed to get inline page, err %d", err);
+       if ((map->m_flags & EROFS_MAP_MAPPED) &&
+           !(map->m_flags & EROFS_MAP_FRAGMENT)) {
+               err = z_erofs_pcluster_begin(fe);
+               if (err)
                        goto out;
-               }
-               get_page(fe->map.buf.page);
-               WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
-                          fe->map.buf.page);
-               fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
-       } else {
-               /* bind cache first when cached decompression is preferred */
-               z_erofs_bind_cache(fe);
        }
  hitted:
        /*

Reply via email to