This should help us identify any performance issues with the btree node
cache.

Signed-off-by: Daniel Hill <[email protected]>
---
 fs/bcachefs/btree_cache.c | 35 +++++++++++++++++++++++++++++++++++
 fs/bcachefs/btree_types.h |  3 +++
 2 files changed, 38 insertions(+)

diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 799750464969..8d2954ab3598 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -418,6 +418,9 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        flags = memalloc_nofs_save();
        mutex_lock(&bc->lock);
 
+       free_percpu(bc->hits);
+       free_percpu(bc->misses);
+
        if (c->verify_data)
                list_move(&c->verify_data->list, &bc->live);
 
@@ -467,6 +470,11 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
        unsigned i;
        int ret = 0;
 
+       bc->hits   = __alloc_percpu(sizeof(u64) * BTREE_MAX_DEPTH, sizeof(u64));
+       bc->misses = __alloc_percpu(sizeof(u64) * BTREE_MAX_DEPTH, sizeof(u64));
+       if (!bc->hits || !bc->misses)
+               goto err;
+
        ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
        if (ret)
                goto err;
@@ -833,6 +841,7 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
        struct btree *b;
        struct bset_tree *t;
        bool need_relock = false;
+       bool is_hit = true;
        int ret;
 
        EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -852,6 +861,8 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
                if (!b)
                        goto retry;
 
+               is_hit = false;
+
                if (IS_ERR(b))
                        return b;
        } else {
@@ -925,6 +936,11 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
        EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
        btree_check_header(c, b);
 
+       if (is_hit)
+               this_cpu_inc(bc->hits[level]);
+       else
+               this_cpu_inc(bc->misses[level]);
+
        return b;
 }
 
@@ -1029,6 +1045,7 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
        struct bset_tree *t;
+       bool is_hit = true;
        int ret;
 
        EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -1055,6 +1072,8 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
                    !bch2_btree_cache_cannibalize_lock(trans, NULL))
                        goto retry;
 
+               is_hit = false;
+
                if (IS_ERR(b))
                        goto out;
        } else {
@@ -1100,6 +1119,11 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
        EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
        btree_check_header(c, b);
 out:
+       if (is_hit)
+               this_cpu_inc(bc->hits[level]);
+       else
+               this_cpu_inc(bc->misses[level]);
+
        bch2_btree_cache_cannibalize_unlock(trans);
        return b;
 }
@@ -1225,4 +1249,15 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
        prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
        prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
        prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+
+       for (int i = 0; i < BTREE_MAX_DEPTH; i++) {
+               u64 hits = percpu_u64_get(&c->btree_cache.hits[i]);
+               u64 misses = percpu_u64_get(&c->btree_cache.misses[i]);
+               u64 total = hits + misses;
+
+               prt_printf(out, "level %i misses: ", i);
+               bch2_prt_percent_rational_u64(out, misses, total, 0);
+               prt_printf(out, " %llu+%llu/%llu", hits, misses, total);
+               prt_newline(out);
+       }
 }
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index b2ebf143c3b7..a10ab188908d 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -179,6 +179,9 @@ struct btree_cache {
        struct bbpos            pinned_nodes_end;
        u64                     pinned_nodes_leaf_mask;
        u64                     pinned_nodes_interior_mask;
+
+       u64 __percpu *hits;
+       u64 __percpu *misses;
 };
 
 struct btree_node_iter {
-- 
2.43.0


Reply via email to