btree node scan needs to not use the btree node cache: that causes
interference from prior failed reads and parallel workers.

Instead we need to allocate btree nodes that don't live in the btree
cache, so that we can call bch2_btree_node_read_done() directly.

This patch tweaks the low level helpers so they don't touch the btree
cache lists.

Cc: Nikita Ofitserov <himi...@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstr...@linux.dev>
---
 fs/bcachefs/btree_cache.c | 21 ++++++++++++---------
 fs/bcachefs/btree_cache.h |  1 +
 fs/bcachefs/debug.c       |  2 --
 3 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index a3631a903ecf..702c8f7081d7 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -86,7 +86,7 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
        six_unlock_intent(&b->c.lock);
 }
 
-static void __btree_node_data_free(struct btree_cache *bc, struct btree *b)
+void __btree_node_data_free(struct btree *b)
 {
        BUG_ON(!list_empty(&b->list));
        BUG_ON(btree_node_hashed(b));
@@ -113,16 +113,17 @@ static void __btree_node_data_free(struct btree_cache *bc, struct btree *b)
        munmap(b->aux_data, btree_aux_data_bytes(b));
 #endif
        b->aux_data = NULL;
-
-       btree_node_to_freedlist(bc, b);
 }
 
 static void btree_node_data_free(struct btree_cache *bc, struct btree *b)
 {
        BUG_ON(list_empty(&b->list));
        list_del_init(&b->list);
+
+       __btree_node_data_free(b);
+
        --bc->nr_freeable;
-       __btree_node_data_free(bc, b);
+       btree_node_to_freedlist(bc, b);
 }
 
 static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
@@ -199,8 +200,6 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
        }
 
        bch2_btree_lock_init(&b->c, 0, GFP_KERNEL);
-
-       __bch2_btree_node_to_freelist(bc, b);
        return b;
 }
 
@@ -526,7 +525,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
                        --touched;;
                } else if (!btree_node_reclaim(c, b)) {
                        __bch2_btree_node_hash_remove(bc, b);
-                       __btree_node_data_free(bc, b);
+                       __btree_node_data_free(b);
+                       btree_node_to_freedlist(bc, b);
 
                        freed++;
                        bc->nr_freed++;
@@ -667,9 +667,12 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
        bch2_recalc_btree_reserve(c);
 
-       for (i = 0; i < bc->nr_reserve; i++)
-               if (!__bch2_btree_node_mem_alloc(c))
+       for (i = 0; i < bc->nr_reserve; i++) {
+               struct btree *b = __bch2_btree_node_mem_alloc(c);
+               if (!b)
                        goto err;
+               __bch2_btree_node_to_freelist(bc, b);
+       }
 
        list_splice_init(&bc->live[0].list, &bc->freeable);
 
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 3264801cbcbe..649e9dfd178a 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -30,6 +30,7 @@ void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsig
 void bch2_btree_cache_cannibalize_unlock(struct btree_trans *);
 int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);
 
+void __btree_node_data_free(struct btree *);
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
 struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
 
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 901f643ead83..79d64052215c 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -153,8 +153,6 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
                c->verify_data = __bch2_btree_node_mem_alloc(c);
                if (!c->verify_data)
                        goto out;
-
-               list_del_init(&c->verify_data->list);
        }
 
        BUG_ON(b->nsets != 1);
-- 
2.50.0


Reply via email to