author     Kent Overstreet <kent.overstreet@linux.dev>  2024-05-05 09:47:53 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>  2024-05-08 17:29:24 -0400
commit     e11ecc6133b345a6c5919ae35920c384c6ff6f95 (patch)
tree       9368e5431046f5e5a2268bda6cc9b003fa83b173
parent     c6705091342c06c963015dac07ede417d2e0ad04 (diff)
bcachefs: Improve sysfs internal/btree_cache
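
Track the number of cached nodes per btree ID, and print btree cache
sizes in sysfs as human-readable byte totals (node count *
btree_node_size) alongside the raw node counts, including a per-btree
breakdown.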
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  fs/bcachefs/btree_cache.c | 33 ++++++++++++++++++++++++++++-----
-rw-r--r--  fs/bcachefs/btree_types.h |  2 ++
2 files changed, 30 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index debdd7dc0477..2c226e0cc263 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -162,6 +162,9 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 
 	/* Cause future lookups for this node to fail: */
 	b->hash_val = 0;
+
+	if (b->c.btree_id < BTREE_ID_NR)
+		--bc->used_by_btree[b->c.btree_id];
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -169,8 +172,11 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 	BUG_ON(b->hash_val);
 	b->hash_val = btree_ptr_hash_val(&b->key);
 
-	return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
-					     bch_btree_cache_params);
+	int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
+						bch_btree_cache_params);
+	if (!ret && b->c.btree_id < BTREE_ID_NR)
+		bc->used_by_btree[b->c.btree_id]++;
+	return ret;
 }
 
 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
@@ -1269,9 +1275,26 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
 	       stats.failed);
 }
 
+static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
+				 const char *label, unsigned nr)
+{
+	prt_printf(out, "%s\t", label);
+	prt_human_readable_u64(out, nr * c->opts.btree_node_size);
+	prt_printf(out, " (%u)\n", nr);
+}
+
 void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
 {
-	prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
-	prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
-	prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+	const struct btree_cache *bc = &c->btree_cache;
+
+	if (!out->nr_tabstops)
+		printbuf_tabstop_push(out, 24);
+
+	prt_btree_cache_line(out, c, "total:", bc->used);
+	prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+	prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
+	prt_newline(out);
+
+	for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
+		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
 }
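
As a rough illustration of the output format bch2_btree_cache_to_text now
produces, here is a small standalone userspace sketch. It uses plain stdio
rather than the kernel printbuf API, and the btree names, node counts, and
256 KiB node size below are made-up example values:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's prt_human_readable_u64(): it
 * truncates rather than rounds, which is close enough for illustration. */
static void print_human_readable_u64(uint64_t v)
{
	static const char *units[] = { "B", "KiB", "MiB", "GiB", "TiB" };
	unsigned u = 0;

	while (v >= 1024 && u < 4) {
		v >>= 10;
		u++;
	}
	printf("%llu %s", (unsigned long long) v, units[u]);
}

/* Mirrors the shape of prt_btree_cache_line(): label, human-readable byte
 * total, raw node count. A fixed 24-column field stands in for the
 * printbuf tabstop pushed in bch2_btree_cache_to_text(). */
static void print_btree_cache_line(const char *label, unsigned nr,
				   unsigned btree_node_size)
{
	printf("%-24s", label);
	print_human_readable_u64((uint64_t) nr * btree_node_size);
	printf(" (%u)\n", nr);
}

int main(void)
{
	unsigned btree_node_size = 256 << 10;	/* assumed 256 KiB nodes */

	print_btree_cache_line("total:", 100, btree_node_size);
	print_btree_cache_line("nr dirty:", 12, btree_node_size);
	print_btree_cache_line("extents", 40, btree_node_size);
	print_btree_cache_line("inodes", 9, btree_node_size);
	return 0;
}

Each line comes out as e.g. "total:                  25 MiB (100)",
matching the "label<tab>size (count)" shape that prt_btree_cache_line()
emits via the printbuf tabstop.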
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 4ff5213219a5..76364bd4347e 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -166,6 +166,8 @@ struct btree_cache {
 	atomic_t		dirty;
 	struct shrinker		*shrink;
 
+	unsigned		used_by_btree[BTREE_ID_NR];
+
 	/*
 	 * If we need to allocate memory for a new btree node and that
 	 * allocation fails, we can cannibalize another node in the btree cache