Commit fb235dc06fac ("btrfs: qgroup: Move half of the qgroup accounting time out of commit trans") makes btrfs_qgroup_extent_record::old_roots populated at insert time.
It's OK for most cases as btrfs_qgroup_extent_record is inserted at delayed ref head insert time, which has a less restrictive lock context. But later delayed subtree scan optimization will need to insert btrfs_qgroup_extent_record with path write lock held, where triggering a backref walk can easily lead to deadlock. So this patch introduces two new internal functions, qgroup_trace_extent() and qgroup_trace_leaf_items(), with a new @exec_post parameter to indicate whether we need to initialize the backref walk right now. Also modifies btrfs_qgroup_account_extents() not to trigger a kernel warning. Signed-off-by: Qu Wenruo <w...@suse.com> --- fs/btrfs/qgroup.c | 51 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 45868fd76209..6c674ac29b90 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1580,8 +1580,16 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, return 0; } -int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, - u64 num_bytes, gfp_t gfp_flag) +/* + * Insert qgroup extent record for extent at @bytenr, @num_bytes. + * + * @bytenr: bytenr of the extent + * @num_bytes: length of the extent + * @exec_post: whether to exec the post insert work + * will init backref walk if set to true. 
+ */ +static int qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, + u64 num_bytes, gfp_t gfp_flag, bool exec_post) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_qgroup_extent_record *record; @@ -1607,11 +1615,27 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, kfree(record); return 0; } - return btrfs_qgroup_trace_extent_post(fs_info, record); + if (exec_post) + return btrfs_qgroup_trace_extent_post(fs_info, record); + return 0; } -int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, - struct extent_buffer *eb) +int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, + u64 num_bytes, gfp_t gfp_flag) +{ + return qgroup_trace_extent(trans, bytenr, num_bytes, gfp_flag, true); +} + +/* + * Insert qgroup extent record for leaf and all file extents in it + * + * @bytenr: bytenr of the leaf + * @num_bytes: length of the leaf + * @exec_post: whether to exec the post insert work + * will init backref walk if set to true. + */ +static int qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, + struct extent_buffer *eb, bool exec_post) { struct btrfs_fs_info *fs_info = trans->fs_info; int nr = btrfs_header_nritems(eb); @@ -1643,8 +1667,8 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); - ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes, - GFP_NOFS); + ret = qgroup_trace_extent(trans, bytenr, num_bytes, GFP_NOFS, + exec_post); if (ret) return ret; } @@ -1652,6 +1676,12 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, return 0; } +int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, + struct extent_buffer *eb) +{ + return qgroup_trace_leaf_items(trans, eb, true); +} + /* * Walk up the tree from the bottom, freeing leaves and any interior * nodes which have had all slots visited. 
If a node (leaf or @@ -2558,10 +2588,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) if (!ret) { /* - * Old roots should be searched when inserting qgroup - * extent record + * Most record->old_roots should have been populated at + * insert time. Although we still allow some records + * without old_roots populated. */ - if (WARN_ON(!record->old_roots)) { + if (!record->old_roots) { /* Search commit root to find old_roots */ ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0, -- 2.19.1