Hi Chao,

I split this into two patches along with upstreamed change.

https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test

Thanks,

On 04/27, Chao Yu wrote:
> This patch changes as below:
> - remove unneeded check condition in __cluster_may_compress()
> - rename __cluster_may_compress() to cluster_has_invalid_data() for
> better readability
> - add cp_error check in f2fs_write_compressed_pages() like we did
> in f2fs_write_single_data_page()
> 
> Signed-off-by: Chao Yu <[email protected]>
> ---
> v2:
> - rename function for better readability
> - add cp_error check in f2fs_write_compressed_pages()
>  fs/f2fs/compress.c | 22 +++++++++++-----------
>  1 file changed, 11 insertions(+), 11 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 6e46a00c1930..53f78befed8f 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -888,9 +888,8 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, 
> pgoff_t index)
>       return is_page_in_cluster(cc, index);
>  }
>  
> -static bool __cluster_may_compress(struct compress_ctx *cc)
> +static bool cluster_has_invalid_data(struct compress_ctx *cc)
>  {
> -     struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
>       loff_t i_size = i_size_read(cc->inode);
>       unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
>       int i;
> @@ -898,18 +897,13 @@ static bool __cluster_may_compress(struct compress_ctx 
> *cc)
>       for (i = 0; i < cc->cluster_size; i++) {
>               struct page *page = cc->rpages[i];
>  
> -             f2fs_bug_on(sbi, !page);
> -
> -             if (unlikely(f2fs_cp_error(sbi)))
> -                     return false;
> -             if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
> -                     return false;
> +             f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
>  
>               /* beyond EOF */
>               if (page->index >= nr_pages)
> -                     return false;
> +                     return true;
>       }
> -     return true;
> +     return false;
>  }
>  
>  static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
> @@ -985,7 +979,7 @@ static bool cluster_may_compress(struct compress_ctx *cc)
>               return false;
>       if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
>               return false;
> -     return __cluster_may_compress(cc);
> +     return !cluster_has_invalid_data(cc);
>  }
>  
>  static void set_cluster_writeback(struct compress_ctx *cc)
> @@ -1232,6 +1226,12 @@ static int f2fs_write_compressed_pages(struct 
> compress_ctx *cc,
>       loff_t psize;
>       int i, err;
>  
> +     /* we should bypass data pages to let the kworker jobs proceed */
> +     if (unlikely(f2fs_cp_error(sbi))) {
> +             mapping_set_error(cc->rpages[0]->mapping, -EIO);
> +             goto out_free;
> +     }
> +
>       if (IS_NOQUOTA(inode)) {
>               /*
>                * We need to wait for node_write to avoid block allocation 
> during
> -- 
> 2.29.2


_______________________________________________
Linux-f2fs-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel

Reply via email to