[f2fs-dev] [PATCH 1/3] f2fs: fix wrong value of tracepoint parameter

2020-05-27 Thread Chao Yu
In f2fs_lookup(), we should set @err correctly before printing it
in the tracepoint.

Signed-off-by: Chao Yu 
---
 fs/f2fs/namei.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 63b34a161cf4..2c2f8c36c828 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -504,6 +504,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
err = PTR_ERR(page);
goto out;
}
+   err = -ENOENT;
goto out_splice;
}
 
@@ -549,7 +550,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
 #endif
new = d_splice_alias(inode, dentry);
err = PTR_ERR_OR_ZERO(new);
-   trace_f2fs_lookup_end(dir, dentry, ino, err);
+   trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err);
return new;
 out_iput:
iput(inode);
-- 
2.18.0.rc1
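For context, a simplified sketch of the lookup exit path after this patch (paraphrased
from the hunks above and the surrounding function; names and ordering are approximate,
not a full quote of f2fs_lookup()):

	de = __f2fs_find_entry(dir, &fname, &page);
	if (!de) {
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out;
		}
		/* no directory entry: report -ENOENT instead of a stale err */
		err = -ENOENT;
		goto out_splice;
	}
	...
out_splice:
	new = d_splice_alias(inode, dentry);
	err = PTR_ERR_OR_ZERO(new);
	/* a NULL return from d_splice_alias() is traced as -ENOENT rather than 0 */
	trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err);
	return new;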





[f2fs-dev] [PATCH 3/3] f2fs: fix to cover meta flush with cp_lock

2020-05-27 Thread Chao Yu
meta inode page should be flushed under cp_lock, fix it.

Signed-off-by: Chao Yu 
---
 fs/f2fs/file.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f7de2a1da528..0fcae4d90074 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2260,7 +2260,9 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
+   mutex_lock(&sbi->cp_mutex);
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
+   mutex_unlock(&sbi->cp_mutex);
f2fs_stop_checkpoint(sbi, false);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
-- 
2.18.0.rc1





[f2fs-dev] [PATCH 2/3] f2fs: remove unneeded return value of __insert_discard_tree()

2020-05-27 Thread Chao Yu
We never use the return value of __insert_discard_tree(), so remove it.

Signed-off-by: Chao Yu 
---
 fs/f2fs/segment.c | 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1c48ec866b8c..ebbadde6cbce 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1221,7 +1221,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
return err;
 }
 
-static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
+static void __insert_discard_tree(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len,
struct rb_node **insert_p,
@@ -1230,7 +1230,6 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct rb_node **p;
struct rb_node *parent = NULL;
-   struct discard_cmd *dc = NULL;
bool leftmost = true;
 
if (insert_p && insert_parent) {
@@ -1242,12 +1241,8 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
				lstart, &leftmost);
 do_insert:
-   dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
+   __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
p, leftmost);
-   if (!dc)
-   return NULL;
-
-   return dc;
 }
 
 static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
-- 
2.18.0.rc1





[f2fs-dev] [PATCH] f2fs_io: add randread

2020-05-27 Thread Daeho Jeong
From: Daeho Jeong 

I've added a new command to evaluate random read.

Signed-off-by: Daeho Jeong 
---
 tools/f2fs_io/f2fs_io.c | 64 +
 1 file changed, 64 insertions(+)

diff --git a/tools/f2fs_io/f2fs_io.c b/tools/f2fs_io/f2fs_io.c
index d1889ff..30544c1 100644
--- a/tools/f2fs_io/f2fs_io.c
+++ b/tools/f2fs_io/f2fs_io.c
@@ -551,6 +551,69 @@ static void do_read(int argc, char **argv, const struct cmd_desc *cmd)
exit(0);
 }
 
+#define randread_desc "random read data from file"
+#define randread_help  \
+"f2fs_io randread [chunk_size in 4kb] [count] [IO] [file_path]\n\n"\
+"Do random read data in file_path\n"   \
+"IO can be\n"  \
+"  buffered : buffered IO\n"   \
+"  dio  : direct IO\n" \
+
+static void do_randread(int argc, char **argv, const struct cmd_desc *cmd)
+{
+   u64 buf_size = 0, ret = 0, read_cnt = 0;
+   u64 idx, end_idx, aligned_size;
+   char *buf = NULL;
+   unsigned bs, count, i;
+   int flags = 0;
+   int fd;
+   time_t t;
+   struct stat stbuf;
+
+   if (argc != 5) {
+   fputs("Excess arguments\n\n", stderr);
+   fputs(cmd->cmd_help, stderr);
+   exit(1);
+   }
+
+   bs = atoi(argv[1]);
+   if (bs > 1024)
+   die("Too big chunk size - limit: 4MB");
+   buf_size = bs * 4096;
+
+   buf = aligned_xalloc(4096, buf_size);
+
+   count = atoi(argv[2]);
+   if (!strcmp(argv[3], "dio"))
+   flags |= O_DIRECT;
+   else if (strcmp(argv[3], "buffered"))
+   die("Wrong IO type");
+
+   fd = xopen(argv[4], O_RDONLY | flags, 0);
+
+   if (fstat(fd, &stbuf) != 0)
+   die_errno("fstat of source file failed");
+
+   aligned_size = (u64)stbuf.st_size & ~((u64)(4096 - 1));
+   if (aligned_size < buf_size)
+   die("File is too small to random read");
+   end_idx = (u64)(aligned_size - buf_size) / (u64)4096 + 1;
+
+   srand((unsigned) time(&t));
+
+   for (i = 0; i < count; i++) {
+   idx = rand() % end_idx;
+
+   ret = pread(fd, buf, buf_size, 4096 * idx);
+   if (ret != buf_size)
+   break;
+
+   read_cnt += ret;
+   }
+   printf("Read %"PRIu64" bytes\n", read_cnt);
+   exit(0);
+}
+
 struct file_ext {
__u32 f_pos;
__u32 start_blk;
@@ -841,6 +904,7 @@ const struct cmd_desc cmd_list[] = {
CMD(fallocate),
CMD(write),
CMD(read),
+   CMD(randread),
CMD(fiemap),
CMD(gc_urgent),
CMD(defrag_file),
-- 
2.27.0.rc0.183.gde8f92d652-goog
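Example usage (illustrative; the mount point and file name below are hypothetical, and
the target file must be at least chunk_size * 4KB):

	f2fs_io randread 16 1000 dio /mnt/f2fs/testfile

This issues 1000 pread() calls of 64KB (16 x 4KB) each at random 4KB-aligned offsets
within the file, using direct IO, and prints the total number of bytes read.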





Re: [f2fs-dev] [PATCH 3/3] f2fs: fix to cover meta flush with cp_lock

2020-05-27 Thread Jaegeuk Kim
On 05/28, Chao Yu wrote:
> On 2020/5/28 5:02, Jaegeuk Kim wrote:
> > On 05/27, Chao Yu wrote:
> >> meta inode page should be flushed under cp_lock, fix it.
> > 
> > It doesn't matter for this case, yes?
> 
> It's not related to discard issue.

I meant we really need this or not. :P

> 
> Now, I got some progress, I can reproduce that bug occasionally.
> 
> Thanks,
> 
> > 
> >>
> >> Signed-off-by: Chao Yu 
> >> ---
> >>  fs/f2fs/file.c | 2 ++
> >>  1 file changed, 2 insertions(+)
> >>
> >> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> >> index f7de2a1da528..0fcae4d90074 100644
> >> --- a/fs/f2fs/file.c
> >> +++ b/fs/f2fs/file.c
> >> @@ -2260,7 +2260,9 @@ static int f2fs_ioc_shutdown(struct file *filp, 
> >> unsigned long arg)
> >>set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
> >>break;
> >>case F2FS_GOING_DOWN_METAFLUSH:
> >> +  mutex_lock(&sbi->cp_mutex);
> >>f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
> >> +  mutex_unlock(&sbi->cp_mutex);
> >>f2fs_stop_checkpoint(sbi, false);
> >>set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
> >>break;
> >> -- 
> >> 2.18.0.rc1
> > .
> > 




Re: [f2fs-dev] [PATCH 3/3] f2fs: fix to cover meta flush with cp_lock

2020-05-27 Thread Chao Yu
On 2020/5/28 5:02, Jaegeuk Kim wrote:
> On 05/27, Chao Yu wrote:
>> meta inode page should be flushed under cp_lock, fix it.
> 
> It doesn't matter for this case, yes?

It's not related to discard issue.

Now, I got some progress, I can reproduce that bug occasionally.

Thanks,

> 
>>
>> Signed-off-by: Chao Yu 
>> ---
>>  fs/f2fs/file.c | 2 ++
>>  1 file changed, 2 insertions(+)
>>
>> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
>> index f7de2a1da528..0fcae4d90074 100644
>> --- a/fs/f2fs/file.c
>> +++ b/fs/f2fs/file.c
>> @@ -2260,7 +2260,9 @@ static int f2fs_ioc_shutdown(struct file *filp, 
>> unsigned long arg)
>>  set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
>>  break;
>>  case F2FS_GOING_DOWN_METAFLUSH:
>> +mutex_lock(&sbi->cp_mutex);
>>  f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
>> +mutex_unlock(&sbi->cp_mutex);
>>  f2fs_stop_checkpoint(sbi, false);
>>  set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
>>  break;
>> -- 
>> 2.18.0.rc1
> .
> 




Re: [f2fs-dev] [PATCH v3] f2fs: avoid infinite loop to wait for flushing node pages at cp_error

2020-05-27 Thread Chao Yu
On 2020/5/28 4:56, Jaegeuk Kim wrote:
> On 05/27, Chao Yu wrote:
>> On 2020/5/26 9:56, Jaegeuk Kim wrote:
>>> On 05/26, Chao Yu wrote:
 On 2020/5/26 9:11, Chao Yu wrote:
> On 2020/5/25 23:06, Jaegeuk Kim wrote:
>> On 05/25, Chao Yu wrote:
>>> On 2020/5/25 11:56, Jaegeuk Kim wrote:
 Shutdown test is sometimes hung, since it keeps trying to flush dirty node pages
>>
>> 71.07% 0.01%  kworker/u256:1+  [kernel.kallsyms]  [k] wb_writeback
>>  --71.06%--wb_writeback
>>     --68.96%--__writeback_inodes_wb
>>        --68.95%--writeback_sb_inodes
>>           --65.08%--__writeback_single_inode
>>              --64.35%--do_writepages
>>                 --59.83%--f2fs_write_node_pages
>>                    --59.74%--f2fs_sync_node_pages
>>                       --27.91%--pagevec_lookup_range_tag
>>                          --27.90%--find_get_pages_range_tag
>>
>> Before umount, kworker will always hold one core, which doesn't look reasonable.
>> To avoid that, could we just allow node writes? Since node writes are out-of-place
>> updates and checkpoint is not allowed, we don't need to worry about their effect on
>> data from the previous checkpoint, and it can reduce the memory footprint taken up
>> by node pages.
> 
> It can cause some roll-forward recovery?

Yup, recovery should be considered,

Later fsync() will fail due to:

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
return -EIO;


And we need to adjust f2fs_fsync_node_pages() to handle in-process fsyncing node
pages as well:

if (f2fs_cp_error()) {
set_fsync_mark(page, 0);
set_dentry_mark(page, 0);
if (atomic) {
unlock & put page;
ret = -EIO;
break;
}
}

ret = __write_node_page();
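A slightly more concrete version of that sketch (illustration only; it assumes the
existing loop variables in f2fs_fsync_node_pages(), and the __write_node_page()
argument list is written from memory):

		if (unlikely(f2fs_cp_error(sbi))) {
			/* nothing flushed from here on can be used by
			 * roll-forward recovery, so clear both marks */
			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);
			if (atomic) {
				/* atomic fsync must not silently drop pages */
				unlock_page(page);
				ret = -EIO;
				break;
			}
		}

		ret = __write_node_page(page, atomic && page == last_page,
					&submitted, wbc, true,
					FS_NODE_IO, seq_id);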

Thanks,

> 
>>
>> Thanks,
>>
>>>
>>> IMO, for umount case, we should drop dirty reference and dirty pages on 
>>> meta/data
>>> pages like we change for node pages to avoid potential dead loop...
>>
>> I believe we're doing for them. :P
>
> Actually, I mean do we need to drop dirty meta/data pages explicitly as 
> below:
>
> diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> index 3dc3ac6fe143..4c08fd0a680a 100644
> --- a/fs/f2fs/checkpoint.c
> +++ b/fs/f2fs/checkpoint.c
> @@ -299,8 +299,15 @@ static int __f2fs_write_meta_page(struct page *page,
>
>   trace_f2fs_writepage(page, META);
>
> - if (unlikely(f2fs_cp_error(sbi)))
> + if (unlikely(f2fs_cp_error(sbi))) {
> + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
> + ClearPageUptodate(page);
> + dec_page_count(sbi, F2FS_DIRTY_META);
> + unlock_page(page);
> + return 0;
> + }
>   goto redirty_out;
> + }
>   if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
>   goto redirty_out;
>   if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 48a622b95b76..94b342802513 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2682,6 +2682,12 @@ int f2fs_write_single_data_page(struct page *page, 
> int *submitted,
>
>   /* we should bypass data pages to proceed the kworkder jobs */
>   if (unlikely(f2fs_cp_error(sbi))) {
> + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
> + ClearPageUptodate(page);
> + inode_dec_dirty_pages(inode);
> + unlock_page(page);
> + return 0;
> + }

 Oh, I notice previously, we will drop non-directory inode's dirty pages 
 directly,
 however, during umount, we'd better drop directory inode's dirty pages as 
 well, right?
>>>
>>> Hmm, I remember I dropped them before. Need to double check.
>>>

>   

Re: [f2fs-dev] [PATCH 3/3] f2fs: fix to cover meta flush with cp_lock

2020-05-27 Thread Chao Yu
On 2020/5/28 9:26, Jaegeuk Kim wrote:
> On 05/28, Chao Yu wrote:
>> On 2020/5/28 5:02, Jaegeuk Kim wrote:
>>> On 05/27, Chao Yu wrote:
 meta inode page should be flushed under cp_lock, fix it.
>>>
>>> It doesn't matter for this case, yes?
>>
>> It's not related to discard issue.
> 
> I meant we really need this or not. :P

Yes, let's keep that rule: flush meta pages under cp_lock; otherwise the
checkpoint flush order may be broken due to a race, right? The checkpoint
should write the second cp pack page only after flushing all meta pages.
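To illustrate the ordering concern (a hypothetical interleaving, not a reproduced trace):

	do_checkpoint(), holding cp_mutex          ioctl(F2FS_GOING_DOWN_METAFLUSH)
	---------------------------------          --------------------------------
	flush all dirty meta pages
	                                           f2fs_sync_meta_pages() issues its
	                                           own meta writes concurrently
	write the second cp pack page
	(assumes all meta is already on disk)

Taking sbi->cp_mutex around the ioctl's f2fs_sync_meta_pages() serializes it against a
running checkpoint, which is what the patch does.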

> 
>>
>> Now, I got some progress, I can reproduce that bug occasionally.
>>
>> Thanks,
>>
>>>

 Signed-off-by: Chao Yu 
 ---
  fs/f2fs/file.c | 2 ++
  1 file changed, 2 insertions(+)

 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
 index f7de2a1da528..0fcae4d90074 100644
 --- a/fs/f2fs/file.c
 +++ b/fs/f2fs/file.c
 @@ -2260,7 +2260,9 @@ static int f2fs_ioc_shutdown(struct file *filp, 
 unsigned long arg)
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
 +  mutex_lock(&sbi->cp_mutex);
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
 +  mutex_unlock(&sbi->cp_mutex);
f2fs_stop_checkpoint(sbi, false);
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
 -- 
 2.18.0.rc1
>>> .
>>>
> .
> 




Re: [f2fs-dev] [PATCH] f2fs: protect new segment allocation in expand_inode_data

2020-05-27 Thread Chao Yu
On 2020/5/27 12:02, Daeho Jeong wrote:
> From: Daeho Jeong 
> 
> Found a new segment allocation without f2fs_lock_op() in
> expand_inode_data(), so when we do fallocate() for a pinned file
> and trigger checkpoints very frequently and simultaneously, F2FS gets
> stuck in the below code of do_checkpoint() forever.
> 
>   f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
>   /* Wait for all dirty meta pages to be submitted for IO */
> <= if fallocate() here,
>   f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); <= it'll wait forever.
> 
> Signed-off-by: Daeho Jeong 

Reviewed-by: Chao Yu 

Thanks,
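For reference, the diff itself isn't quoted in this reply; the general shape of the fix
is to take f2fs_lock_op() around the new segment allocation so it cannot race with a
running checkpoint. A rough sketch of the fallocate/expand path (function and field
names as assumed here, from memory):

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);	/* block checkpoint during the allocation */
		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);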




Re: [f2fs-dev] [PATCH] f2fs: fix retry logic in f2fs_write_cache_pages()

2020-05-27 Thread Chao Yu
On 2020/5/27 10:20, Sahitya Tummala wrote:
> In case a compressed file is getting overwritten, the current retry
> logic doesn't include the current page to be retried now, as it sets
> the new start index as 0 and the new end index as writeback_index - 1.
> This causes the corresponding cluster to be uncompressed and written
> as normal pages without compression. Fix this by allowing writeback to
> be retried for the current page as well (in case of a compressed page
> getting retried due to an index mismatch with the cluster index), so
> that this cluster can be written compressed in case of overwrite.
> 
> Signed-off-by: Sahitya Tummala 
> ---
>  fs/f2fs/data.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 4af5fcd..bfd1df4 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -3024,7 +3024,7 @@ static int f2fs_write_cache_pages(struct address_space 
> *mapping,
>   if ((!cycled && !done) || retry) {

IMO, we add the retry logic in the wrong place; you can see that the cycled value
is zero only if wbc->range_cyclic is true, and in that case writeback_index is valid.

However, if retry is true and wbc->range_cyclic is false, then writeback_index
would be an uninitialized variable.
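To illustrate (paraphrasing the surrounding f2fs_write_cache_pages() code, not an
exact quote):

	pgoff_t writeback_index;	/* only assigned in the range_cyclic branch */

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index;
		index = writeback_index;
		cycled = (index == 0);
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		cycled = 1;		/* writeback_index is left untouched here */
	}
	...
	if ((!cycled && !done) || retry) {
		cycled = 1;
		index = 0;
		/* with retry set and !range_cyclic, this branch is entered even
		 * though writeback_index was never initialized above */
		end = writeback_index - 1;
		goto retry;
	}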

Thoughts?

Thanks,

>   cycled = 1;
>   index = 0;
> - end = writeback_index - 1;
> + end = retry ? -1 : writeback_index - 1;
>   goto retry;
>   }
>   if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
> 




Re: [f2fs-dev] [PATCH] f2fs: compress: don't compress any datas after cp stop

2020-05-27 Thread Chao Yu
Jaegeuk, could you please review this patch?

On 2020/5/26 9:55, Chao Yu wrote:
> During compressed data writeback, we need to drop dirty pages like we did
> for non-compressed pages if cp stops; however, there is no need to compress
> any data in that case, so let's detect the cp stop condition in
> cluster_may_compress() to avoid redundant compressing and let the following
> f2fs_write_raw_pages() drop dirty pages correctly.
> 
> Signed-off-by: Chao Yu 
> ---
>  fs/f2fs/compress.c | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index bf152c0d79fe..a53578a89211 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -849,6 +849,8 @@ static bool cluster_may_compress(struct compress_ctx *cc)
>   return false;
>   if (!f2fs_cluster_is_full(cc))
>   return false;
> + if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
> + return false;
>   return __cluster_may_compress(cc);
>  }
>  
> 




Re: [f2fs-dev] [PATCH] f2fs: fix retry logic in f2fs_write_cache_pages()

2020-05-27 Thread Chao Yu
On 2020/5/28 10:45, Chao Yu wrote:
> On 2020/5/27 10:20, Sahitya Tummala wrote:
>> In case a compressed file is getting overwritten, the current retry
>> logic doesn't include the current page to be retried now, as it sets
>> the new start index as 0 and the new end index as writeback_index - 1.
>> This causes the corresponding cluster to be uncompressed and written
>> as normal pages without compression. Fix this by allowing writeback to
>> be retried for the current page as well (in case of a compressed page
>> getting retried due to an index mismatch with the cluster index), so
>> that this cluster can be written compressed in case of overwrite.
>>
>> Signed-off-by: Sahitya Tummala 
>> ---
>>  fs/f2fs/data.c | 2 +-
>>  1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
>> index 4af5fcd..bfd1df4 100644
>> --- a/fs/f2fs/data.c
>> +++ b/fs/f2fs/data.c
>> @@ -3024,7 +3024,7 @@ static int f2fs_write_cache_pages(struct address_space 
>> *mapping,
>>  if ((!cycled && !done) || retry) {
> 
> IMO, we add the retry logic in the wrong place; you can see that the cycled value
> is zero only if wbc->range_cyclic is true, and in that case writeback_index is valid.
> 
> However, if retry is true and wbc->range_cyclic is false, then writeback_index
> would be an uninitialized variable.
> 
> Thoughts?
> 
> Thanks,
> 
>>  cycled = 1;
>>  index = 0;
>> -end = writeback_index - 1;

BTW, I notice that the range_cyclic writeback flow was refactored in the commit
below, and the skeleton of f2fs_write_cache_pages() was copied from
mm/page-writeback.c::write_cache_pages(), so I guess we need to follow that change.

64081362e8ff ("mm/page-writeback.c: fix range_cyclic writeback vs writepages deadlock")

Thanks,

>> +end = retry ? -1 : writeback_index - 1;
>>  goto retry;
>>  }
>>  if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
>>
> 
> 
> .
> 




Re: [f2fs-dev] [PATCH 3/3] f2fs: fix to cover meta flush with cp_lock

2020-05-27 Thread Jaegeuk Kim
On 05/27, Chao Yu wrote:
> meta inode page should be flushed under cp_lock, fix it.

It doesn't matter for this case, yes?

> 
> Signed-off-by: Chao Yu 
> ---
>  fs/f2fs/file.c | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index f7de2a1da528..0fcae4d90074 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -2260,7 +2260,9 @@ static int f2fs_ioc_shutdown(struct file *filp, 
> unsigned long arg)
>   set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
>   break;
>   case F2FS_GOING_DOWN_METAFLUSH:
> + mutex_lock(&sbi->cp_mutex);
>   f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
> + mutex_unlock(&sbi->cp_mutex);
>   f2fs_stop_checkpoint(sbi, false);
>   set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
>   break;
> -- 
> 2.18.0.rc1




Re: [f2fs-dev] [PATCH v3] f2fs: avoid infinite loop to wait for flushing node pages at cp_error

2020-05-27 Thread Jaegeuk Kim
On 05/27, Chao Yu wrote:
> On 2020/5/26 9:56, Jaegeuk Kim wrote:
> > On 05/26, Chao Yu wrote:
> >> On 2020/5/26 9:11, Chao Yu wrote:
> >>> On 2020/5/25 23:06, Jaegeuk Kim wrote:
>  On 05/25, Chao Yu wrote:
> > On 2020/5/25 11:56, Jaegeuk Kim wrote:
> >> Shutdown test is sometimes hung, since it keeps trying to flush dirty node pages
> 
> 71.07% 0.01%  kworker/u256:1+  [kernel.kallsyms]  [k] wb_writeback
>  --71.06%--wb_writeback
>     --68.96%--__writeback_inodes_wb
>        --68.95%--writeback_sb_inodes
>           --65.08%--__writeback_single_inode
>              --64.35%--do_writepages
>                 --59.83%--f2fs_write_node_pages
>                    --59.74%--f2fs_sync_node_pages
>                       --27.91%--pagevec_lookup_range_tag
>                          --27.90%--find_get_pages_range_tag
> 
> Before umount, kworker will always hold one core, which doesn't look reasonable.
> To avoid that, could we just allow node writes? Since node writes are out-of-place
> updates and checkpoint is not allowed, we don't need to worry about their effect on
> data from the previous checkpoint, and it can reduce the memory footprint taken up
> by node pages.

It can cause some roll-forward recovery?

> 
> Thanks,
> 
> >
> > IMO, for umount case, we should drop dirty reference and dirty pages on 
> > meta/data
> > pages like we change for node pages to avoid potential dead loop...
> 
>  I believe we're doing for them. :P
> >>>
> >>> Actually, I mean do we need to drop dirty meta/data pages explicitly as 
> >>> below:
> >>>
> >>> diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
> >>> index 3dc3ac6fe143..4c08fd0a680a 100644
> >>> --- a/fs/f2fs/checkpoint.c
> >>> +++ b/fs/f2fs/checkpoint.c
> >>> @@ -299,8 +299,15 @@ static int __f2fs_write_meta_page(struct page *page,
> >>>
> >>>   trace_f2fs_writepage(page, META);
> >>>
> >>> - if (unlikely(f2fs_cp_error(sbi)))
> >>> + if (unlikely(f2fs_cp_error(sbi))) {
> >>> + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
> >>> + ClearPageUptodate(page);
> >>> + dec_page_count(sbi, F2FS_DIRTY_META);
> >>> + unlock_page(page);
> >>> + return 0;
> >>> + }
> >>>   goto redirty_out;
> >>> + }
> >>>   if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
> >>>   goto redirty_out;
> >>>   if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
> >>> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> >>> index 48a622b95b76..94b342802513 100644
> >>> --- a/fs/f2fs/data.c
> >>> +++ b/fs/f2fs/data.c
> >>> @@ -2682,6 +2682,12 @@ int f2fs_write_single_data_page(struct page *page, 
> >>> int *submitted,
> >>>
> >>>   /* we should bypass data pages to proceed the kworkder jobs */
> >>>   if (unlikely(f2fs_cp_error(sbi))) {
> >>> + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
> >>> + ClearPageUptodate(page);
> >>> + inode_dec_dirty_pages(inode);
> >>> + unlock_page(page);
> >>> + return 0;
> >>> + }
> >>
> >> Oh, I notice previously, we will drop non-directory inode's dirty pages 
> >> directly,
> >> however, during umount, we'd better drop directory inode's dirty pages as 
> >> well, right?
> > 
> > Hmm, I remember I dropped them before. Need to double check.
> > 
> >>
> >>>   mapping_set_error(page->mapping, -EIO);
> >>>   /*
> >>>* don't drop any dirty dentry pages for keeping lastest
> >>>
> 
> >
> > Thanks,
> >
> >> in an infinite loop. Let's drop dirty pages at umount in that case.
> >>
> >> Signed-off-by: Jaegeuk Kim 
> >> ---
> >> v3:
> >>  - fix wrong unlock
> >>
> >> v2:
> >>  - fix typos
> >>
> >>  fs/f2fs/node.c | 9 -
> >>  1 file changed, 8 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> >> index e632de10aedab..e0bb0f7e0506e 100644
> >> --- a/fs/f2fs/node.c
> >> +++ b/fs/f2fs/node.c
> >>