On 3.12.18 at 17:20, Josef Bacik wrote:
> From: Josef Bacik <jba...@fb.com>
>
> We were missing some quota cleanups in check_ref_cleanup, so break the
> ref head accounting cleanup into a helper and call that from both
> check_ref_cleanup and cleanup_ref_head. This will hopefully ensure that
> we don't screw up accounting in the future for other things that we add.
>
> Reviewed-by: Omar Sandoval <osan...@fb.com>
> Reviewed-by: Liu Bo <bo....@linux.alibaba.com>
> Signed-off-by: Josef Bacik <jba...@fb.com>
Doesn't this also need a stable tag? Furthermore, doesn't the missing
code dealing with total_bytes_pinned in check_ref_cleanup mean that
every time the last reference for a block was freed we were leaking
bytes in total_bytes_pinned? Shouldn't this have eventually led to
total_bytes_pinned dominating the usage in a space_info?
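
To make the drift I'm worried about concrete, here is a toy model of how I
read the accounting (plain userspace C, made-up helper names
queue_drop_ref/run_ref_head/early_ref_cleanup and a hard-coded 16k block
size, not actual btrfs code): the counter is bumped when a drop ref is
queued, and only the normal run path ever subtracts it again, so every head
that goes through the early-cleanup path leaves its bytes behind:

#include <stdio.h>

/* Stand-in for space_info->total_bytes_pinned (the real thing is a
 * percpu counter, a plain long is enough for illustration). */
static long total_bytes_pinned;

/* Queueing a drop ref accounts the bytes as soon-to-be pinned. */
static void queue_drop_ref(long num_bytes)
{
        total_bytes_pinned += num_bytes;
}

/* Normal run path (think cleanup_ref_head): subtracts them again. */
static void run_ref_head(long num_bytes)
{
        total_bytes_pinned -= num_bytes;
}

/* Early path (think check_ref_cleanup before this patch): the
 * subtraction simply never happens. */
static void early_ref_cleanup(long num_bytes)
{
        (void)num_bytes;
}

int main(void)
{
        int i;

        for (i = 0; i < 1000; i++) {
                /* Block freed the usual way: counter stays balanced. */
                queue_drop_ref(16384);
                run_ref_head(16384);

                /* Last ref dropped within the same transaction: the
                 * bytes added at queue time are never taken back. */
                queue_drop_ref(16384);
                early_ref_cleanup(16384);
        }
        printf("leaked from total_bytes_pinned: %ld bytes\n",
               total_bytes_pinned);
        return 0;
}

After 1000 such frees that model already sits on ~16M of phantom pinned
bytes, which is why I'd expect the real counter to eventually dominate the
space_info, unless I'm misreading the accounting.
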
Codewise lgtm:
Reviewed-by: Nikolay Borisov <nbori...@suse.com>
> ---
> fs/btrfs/extent-tree.c | 67 +++++++++++++++++++++++++++++---------------------
> 1 file changed, 39 insertions(+), 28 deletions(-)
>
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index c36b3a42f2bb..e3ed3507018d 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -2443,6 +2443,41 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
>         return ret ? ret : 1;
> }
>
> +static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
> +                                        struct btrfs_delayed_ref_head *head)
> +{
> +        struct btrfs_fs_info *fs_info = trans->fs_info;
> +        struct btrfs_delayed_ref_root *delayed_refs =
> +                &trans->transaction->delayed_refs;
> +
> +        if (head->total_ref_mod < 0) {
> +                struct btrfs_space_info *space_info;
> +                u64 flags;
> +
> +                if (head->is_data)
> +                        flags = BTRFS_BLOCK_GROUP_DATA;
> +                else if (head->is_system)
> +                        flags = BTRFS_BLOCK_GROUP_SYSTEM;
> +                else
> +                        flags = BTRFS_BLOCK_GROUP_METADATA;
> +                space_info = __find_space_info(fs_info, flags);
> +                ASSERT(space_info);
> +                percpu_counter_add_batch(&space_info->total_bytes_pinned,
> +                                         -head->num_bytes,
> +                                         BTRFS_TOTAL_BYTES_PINNED_BATCH);
> +
> +                if (head->is_data) {
> +                        spin_lock(&delayed_refs->lock);
> +                        delayed_refs->pending_csums -= head->num_bytes;
> +                        spin_unlock(&delayed_refs->lock);
> +                }
> +        }
> +
> +        /* Also free its reserved qgroup space */
> +        btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
> +                                      head->qgroup_reserved);
> +}
> +
> static int cleanup_ref_head(struct btrfs_trans_handle *trans,
>                             struct btrfs_delayed_ref_head *head)
> {
> @@ -2478,31 +2513,6 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
>         spin_unlock(&head->lock);
>         spin_unlock(&delayed_refs->lock);
>
> -        trace_run_delayed_ref_head(fs_info, head, 0);
> -
> -        if (head->total_ref_mod < 0) {
> -                struct btrfs_space_info *space_info;
> -                u64 flags;
> -
> -                if (head->is_data)
> -                        flags = BTRFS_BLOCK_GROUP_DATA;
> -                else if (head->is_system)
> -                        flags = BTRFS_BLOCK_GROUP_SYSTEM;
> -                else
> -                        flags = BTRFS_BLOCK_GROUP_METADATA;
> -                space_info = __find_space_info(fs_info, flags);
> -                ASSERT(space_info);
> -                percpu_counter_add_batch(&space_info->total_bytes_pinned,
> -                                         -head->num_bytes,
> -                                         BTRFS_TOTAL_BYTES_PINNED_BATCH);
> -
> -                if (head->is_data) {
> -                        spin_lock(&delayed_refs->lock);
> -                        delayed_refs->pending_csums -= head->num_bytes;
> -                        spin_unlock(&delayed_refs->lock);
> -                }
> -        }
> -
>         if (head->must_insert_reserved) {
>                 btrfs_pin_extent(fs_info, head->bytenr,
>                                  head->num_bytes, 1);
> @@ -2512,9 +2522,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
>                 }
>         }
>
> -        /* Also free its reserved qgroup space */
> -        btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
> -                                      head->qgroup_reserved);
> +        cleanup_ref_head_accounting(trans, head);
> +
> +        trace_run_delayed_ref_head(fs_info, head, 0);
>         btrfs_delayed_ref_unlock(head);
>         btrfs_put_delayed_ref_head(head);
>         return 0;
> @@ -6991,6 +7001,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
>         if (head->must_insert_reserved)
>                 ret = 1;
>
> +        cleanup_ref_head_accounting(trans, head);
>         mutex_unlock(&head->mutex);
>         btrfs_put_delayed_ref_head(head);
>         return ret;
>