From 2d03c3a6f1cf3db05212f9c3745e2ea81baf8b85 Mon Sep 17 00:00:00 2001
From: wangyugui <wangyugui@e16-tech.com>
Date: Sat, 27 Mar 2021 23:00:23 +0800
Subject: [PATCH] btrfs: add __GFP_NOFAIL to kmem_cache allocations

Pass __GFP_NOFAIL in addition to GFP_NOFS for the kmem_cache
allocations in btrfs (and for the GFP masks handed down to the
extent-io helpers), so that these small metadata allocations retry
instead of returning NULL under memory pressure.  The existing
NULL/-ENOMEM checks at the call sites are left in place.
---
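For illustration, the resulting call-site pattern (a simplified
excerpt, reusing the names from the fs/btrfs/delayed-ref.c hunks
below) looks as follows; since __GFP_NOFAIL makes the slab allocator
retry indefinitely instead of returning NULL, the retained error
branch is effectively unreachable:

	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep,
			       GFP_NOFS | __GFP_NOFAIL);
	if (!ref)	/* not taken: __GFP_NOFAIL does not fail */
		return -ENOMEM;
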
 fs/btrfs/backref.c          | 19 +++++++++----------
 fs/btrfs/ctree.c            |  2 +-
 fs/btrfs/delayed-inode.c    |  2 +-
 fs/btrfs/delayed-ref.c      | 10 +++++-----
 fs/btrfs/delayed-ref.h      |  2 +-
 fs/btrfs/disk-io.c          |  2 +-
 fs/btrfs/extent_io.c        | 12 ++++++------
 fs/btrfs/extent_map.c       |  2 +-
 fs/btrfs/file.c             |  2 +-
 fs/btrfs/free-space-cache.c | 18 +++++++-----------
 fs/btrfs/ordered-data.c     |  2 +-
 fs/btrfs/transaction.c      |  2 +-
 12 files changed, 35 insertions(+), 40 deletions(-)

diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f47c152..943a67e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -736,8 +736,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 		while ((node = ulist_next(parents, &uiter))) {
 			struct prelim_ref *new_ref;
 
-			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
-						   GFP_NOFS);
+			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache, GFP_NOFS | __GFP_NOFAIL);
 			if (!new_ref) {
 				free_pref(ref);
 				ret = -ENOMEM;
@@ -990,7 +989,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 		case BTRFS_SHARED_BLOCK_REF_KEY:
 			ret = add_direct_ref(fs_info, preftrees,
 					     *info_level + 1, offset,
-					     bytenr, 1, NULL, GFP_NOFS);
+					     bytenr, 1, NULL, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		case BTRFS_SHARED_DATA_REF_KEY: {
 			struct btrfs_shared_data_ref *sdref;
@@ -1000,13 +999,13 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 			count = btrfs_shared_data_ref_count(leaf, sdref);
 
 			ret = add_direct_ref(fs_info, preftrees, 0, offset,
-					     bytenr, count, sc, GFP_NOFS);
+					     bytenr, count, sc, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
 			ret = add_indirect_ref(fs_info, preftrees, offset,
 					       NULL, *info_level + 1,
-					       bytenr, 1, NULL, GFP_NOFS);
+					       bytenr, 1, NULL, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			struct btrfs_extent_data_ref *dref;
@@ -1029,7 +1028,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 
 			ret = add_indirect_ref(fs_info, preftrees, root,
 					       &key, 0, bytenr, count,
-					       sc, GFP_NOFS);
+					       sc, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		}
 		default:
@@ -1084,7 +1083,7 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
 			/* SHARED DIRECT METADATA backref */
 			ret = add_direct_ref(fs_info, preftrees,
 					     info_level + 1, key.offset,
-					     bytenr, 1, NULL, GFP_NOFS);
+					     bytenr, 1, NULL, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		case BTRFS_SHARED_DATA_REF_KEY: {
 			/* SHARED DIRECT FULL backref */
@@ -1096,14 +1095,14 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
 			count = btrfs_shared_data_ref_count(leaf, sdref);
 			ret = add_direct_ref(fs_info, preftrees, 0,
 					     key.offset, bytenr, count,
-					     sc, GFP_NOFS);
+					     sc, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		}
 		case BTRFS_TREE_BLOCK_REF_KEY:
 			/* NORMAL INDIRECT METADATA backref */
 			ret = add_indirect_ref(fs_info, preftrees, key.offset,
 					       NULL, info_level + 1, bytenr,
-					       1, NULL, GFP_NOFS);
+					       1, NULL, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		case BTRFS_EXTENT_DATA_REF_KEY: {
 			/* NORMAL INDIRECT DATA backref */
@@ -1127,7 +1126,7 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
 			root = btrfs_extent_data_ref_root(leaf, dref);
 			ret = add_indirect_ref(fs_info, preftrees, root,
 					       &key, 0, bytenr, count,
-					       sc, GFP_NOFS);
+					       sc, GFP_NOFS | __GFP_NOFAIL);
 			break;
 		}
 		default:
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 34b929b..1e71538 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -75,7 +75,7 @@ size_t __attribute_const__ btrfs_get_num_csums(void)
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
-	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
+	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS | __GFP_NOFAIL);
 }
 
 /* this also releases the path */
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index bf25401..1431208 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -133,7 +133,7 @@ again:
 	if (node)
 		return node;
 
-	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
+	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS | __GFP_NOFAIL);
 	if (!node)
 		return ERR_PTR(-ENOMEM);
 	btrfs_init_delayed_node(node, root, ino);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 63be7d0..dd4354a 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -951,11 +951,11 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 
 	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
 	BUG_ON(extent_op && extent_op->is_data);
-	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!ref)
 		return -ENOMEM;
 
-	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!head_ref) {
 		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 		return -ENOMEM;
@@ -1044,7 +1044,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 	u8 ref_type;
 
 	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
-	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!ref)
 		return -ENOMEM;
 
@@ -1060,7 +1060,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 	ref->offset = offset;
 
 
-	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!head_ref) {
 		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
 		return -ENOMEM;
@@ -1121,7 +1121,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_head *head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 
-	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!head_ref)
 		return -ENOMEM;
 
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index e22fba2..fe8fc23 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -296,7 +296,7 @@ static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
 static inline struct btrfs_delayed_extent_op *
 btrfs_alloc_delayed_extent_op(void)
 {
-	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS | __GFP_NOFAIL);
 }
 
 static inline void
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41b718c..35f5d83 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -736,7 +736,7 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 {
 	struct btrfs_end_io_wq *end_io_wq;
 
-	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
+	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS | __GFP_NOFAIL);
 	if (!end_io_wq)
 		return BLK_STS_RESOURCE;
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 910769d..8c28623 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1227,7 +1227,7 @@ again:
 		 * extent state allocations are needed. We'll only know this
 		 * after locking the tree.
 		 */
-		prealloc = alloc_extent_state(GFP_NOFS);
+		prealloc = alloc_extent_state(GFP_NOFS | __GFP_NOFAIL);
 		if (!prealloc && !first_iteration)
 			return -ENOMEM;
 	}
@@ -1412,7 +1412,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	 */
 	BUG_ON(bits & EXTENT_LOCKED);
 
-	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
+	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS | __GFP_NOFAIL,
 			      changeset);
 }
 
@@ -1428,7 +1428,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		     struct extent_state **cached)
 {
 	return __clear_extent_bit(tree, start, end, bits, wake, delete,
-				  cached, GFP_NOFS, NULL);
+				  cached, GFP_NOFS | __GFP_NOFAIL, NULL);
 }
 
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1440,7 +1440,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	 */
 	BUG_ON(bits & EXTENT_LOCKED);
 
-	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
+	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS | __GFP_NOFAIL,
 				  changeset);
 }
 
@@ -1457,7 +1457,7 @@ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	while (1) {
 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
 				     EXTENT_LOCKED, &failed_start,
-				     cached_state, GFP_NOFS, NULL);
+				     cached_state, GFP_NOFS | __GFP_NOFAIL, NULL);
 		if (err == -EEXIST) {
 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 			start = failed_start;
@@ -1474,7 +1474,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 	u64 failed_start;
 
 	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
-			     &failed_start, NULL, GFP_NOFS, NULL);
+			     &failed_start, NULL, GFP_NOFS | __GFP_NOFAIL, NULL);
 	if (err == -EEXIST) {
 		if (failed_start > start)
 			clear_extent_bit(tree, start, failed_start - 1,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 4a8e02f..2179330 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -50,7 +50,7 @@ void extent_map_tree_init(struct extent_map_tree *tree)
 struct extent_map *alloc_extent_map(void)
 {
 	struct extent_map *em;
-	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
+	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS | __GFP_NOFAIL);
 	if (!em)
 		return NULL;
 	RB_CLEAR_NODE(&em->rb_node);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 0e155f0..16a1582 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -151,7 +151,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	else
 		transid = inode->root->last_trans;
 
-	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
+	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!defrag)
 		return -ENOMEM;
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 9988dec..faa3a42 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -782,8 +782,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		goto free_cache;
 
 	while (num_entries) {
-		e = kmem_cache_zalloc(btrfs_free_space_cachep,
-				      GFP_NOFS);
+		e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS | __GFP_NOFAIL);
 		if (!e) {
 			ret = -ENOMEM;
 			goto free_cache;
@@ -814,8 +813,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 		} else {
 			ASSERT(num_bitmaps);
 			num_bitmaps--;
-			e->bitmap = kmem_cache_zalloc(
-					btrfs_free_space_bitmap_cachep, GFP_NOFS);
+			e->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS | __GFP_NOFAIL);
 			if (!e->bitmap) {
 				ret = -ENOMEM;
 				kmem_cache_free(
@@ -2238,8 +2236,7 @@ new_bitmap:
 
 		/* no pre-allocated info, allocate a new one */
 		if (!info) {
-			info = kmem_cache_zalloc(btrfs_free_space_cachep,
-						 GFP_NOFS);
+			info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS | __GFP_NOFAIL);
 			if (!info) {
 				spin_lock(&ctl->tree_lock);
 				ret = -ENOMEM;
@@ -2248,8 +2245,7 @@ new_bitmap:
 		}
 
 		/* allocate the bitmap */
-		info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
-						 GFP_NOFS);
+		info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS | __GFP_NOFAIL);
 		info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
 		spin_lock(&ctl->tree_lock);
 		if (!info->bitmap) {
@@ -2479,7 +2475,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
 
 	ASSERT(!btrfs_is_zoned(fs_info));
 
-	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
+	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!info)
 		return -ENOMEM;
 
@@ -4011,7 +4007,7 @@ int test_add_free_space_entry(struct btrfs_block_group *cache,
 
 again:
 	if (!info) {
-		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
+		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS | __GFP_NOFAIL);
 		if (!info)
 			return -ENOMEM;
 	}
@@ -4029,7 +4025,7 @@ again:
 	}
 
 	if (!map) {
-		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
+		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS | __GFP_NOFAIL);
 		if (!map) {
 			kmem_cache_free(btrfs_free_space_cachep, info);
 			return -ENOMEM;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 985a215..dbeb810 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -186,7 +186,7 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
 		if (ret < 0)
 			return ret;
 	}
-	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
+	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS | __GFP_NOFAIL);
 	if (!entry)
 		return -ENOMEM;
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index acff6bb..9c916d9 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -653,7 +653,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 			goto reserve_fail;
 	}
 again:
-	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
+	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS | __GFP_NOFAIL);
 	if (!h) {
 		ret = -ENOMEM;
 		goto alloc_fail;
-- 
2.30.2

