Let's avoid holding the cp_rwsem write lock across f2fs_sync_node_pages() in block_operations(); this shrinks the write lock's coverage during checkpoint and gives foreground operations more chances to grab the cp_rwsem read lock.
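For illustration only (this sketch is not part of the patch): the idea is the common pattern of dropping the writer lock across the slow flush, re-taking it afterwards and re-checking, with a bounded number of lockless retries before falling back to flushing under the lock. Below is a minimal user-space sketch of that pattern using pthreads; every name in it (cp_lock, dirty_pages, flush_dirty_pages, checkpoint) is a hypothetical stand-in for the f2fs counterparts.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t cp_lock = PTHREAD_RWLOCK_INITIALIZER;
static int dirty_pages = 5;		/* pretend there is dirty node state */

static void flush_dirty_pages(void)	/* stands in for the slow writeback */
{
	if (dirty_pages > 0)
		dirty_pages--;
}

static void checkpoint(void)
{
	bool lockless = true;
	unsigned int retry_cnt = 3;

retry:
	pthread_rwlock_wrlock(&cp_lock);
	while (dirty_pages) {
		if (!retry_cnt--)		/* retried enough, stop dropping the lock */
			lockless = false;
		if (lockless) {
			/* drop the writer lock so readers (foreground ops)
			 * can make progress during the slow flush */
			pthread_rwlock_unlock(&cp_lock);
			flush_dirty_pages();
			goto retry;		/* re-take the lock and re-check */
		}
		/* fallback: flush while still holding the writer lock */
		flush_dirty_pages();
	}
	/* dirty state drained: do the real checkpoint work under the lock */
	printf("checkpoint done\n");
	pthread_rwlock_unlock(&cp_lock);
}

int main(void)
{
	checkpoint();
	return 0;
}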
Signed-off-by: Chao Yu <[email protected]>
---
 fs/f2fs/checkpoint.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f7cb4277de70..7596faa0be45 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1190,6 +1190,8 @@ static int block_operations(struct f2fs_sb_info *sbi)
 		.for_reclaim = 0,
 	};
 	int err = 0, cnt = 0;
+	bool sync_lockless = true;
+	unsigned int retry_cnt = 3;
 
 	/*
 	 * Let's flush inline_data in dirty node pages.
@@ -1249,15 +1251,25 @@ static int block_operations(struct f2fs_sb_info *sbi)
 
 	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
 		f2fs_up_write(&sbi->node_write);
+		if (!retry_cnt--)
+			sync_lockless = false;
+		if (sync_lockless) {
+			f2fs_up_write(&sbi->node_change);
+			f2fs_unlock_all(sbi);
+		}
 		atomic_inc(&sbi->wb_sync_req[NODE]);
 		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
 		atomic_dec(&sbi->wb_sync_req[NODE]);
 		if (err) {
-			f2fs_up_write(&sbi->node_change);
-			f2fs_unlock_all(sbi);
+			if (!sync_lockless) {
+				f2fs_up_write(&sbi->node_change);
+				f2fs_unlock_all(sbi);
+			}
 			return err;
 		}
 		cond_resched();
+		if (sync_lockless)
+			goto retry_flush_quotas;
 		goto retry_flush_nodes;
 	}
-- 
2.25.1


_______________________________________________
Linux-f2fs-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
