It's no longer expected to call this function with an open transaction,
so all the hacks concerning this can be removed. In fact, it would
constitute a bug to call this function with a transaction already held,
so WARN in that case.

Signed-off-by: Nikolay Borisov <nbori...@suse.com>
---
 fs/btrfs/qgroup.c | 35 +++++++----------------------------
 1 file changed, 7 insertions(+), 28 deletions(-)

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fbef95bc3557..c9e82e0c88e3 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3535,37 +3535,19 @@ static int try_flush_qgroup(struct btrfs_root *root)
 {
        struct btrfs_trans_handle *trans;
        int ret;
-       bool can_commit = true;
 
-       /*
-        * If current process holds a transaction, we shouldn't flush, as we
-        * assume all space reservation happens before a transaction handle is
-        * held.
-        *
-        * But there are cases like btrfs_delayed_item_reserve_metadata() where
-        * we try to reserve space with one transction handle already held.
-        * In that case we can't commit transaction, but at least try to end it
-        * and hope the started data writes can free some space.
-        */
-       if (current->journal_info &&
-           current->journal_info != BTRFS_SEND_TRANS_STUB)
-               can_commit = false;
+       /* Can't hold an open transaction or we run the risk of deadlocking. */
+       ASSERT(current->journal_info == NULL ||
+              current->journal_info == BTRFS_SEND_TRANS_STUB);
+       if (WARN_ON(current->journal_info &&
+                    current->journal_info != BTRFS_SEND_TRANS_STUB))
+               return 0;
 
        /*
         * We don't want to run flush again and again, so if there is a running
         * one, we won't try to start a new flush, but exit directly.
         */
        if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
-               /*
-                * We are already holding a transaction, thus we can block other
-                * threads from flushing.  So exit right now. This increases
-                * the chance of EDQUOT for heavy load and near limit cases.
-                * But we can argue that if we're already near limit, EDQUOT is
-                * unavoidable anyway.
-                */
-               if (!can_commit)
-                       return 0;
-
                wait_event(root->qgroup_flush_wait,
                        !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
                return 0;
@@ -3582,10 +3564,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
                goto out;
        }
 
-       if (can_commit)
-               ret = btrfs_commit_transaction(trans);
-       else
-               ret = btrfs_end_transaction(trans);
+       ret = btrfs_commit_transaction(trans);
 out:
        clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
        wake_up(&root->qgroup_flush_wait);
-- 
2.25.1

Reply via email to