diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2021-11-27 16:13:41 -0500 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2022-10-03 22:51:37 -0400 |
commit | 9503493f11a5d3403d9f11b60f107b485c5a60df (patch) | |
tree | 94c2c35941e46724accf35ddadca7c63c83a8982 | |
parent | 8b56fc6472efd456e5663a1b191ae9ee1761bbfe (diff) |
bcachefs: Improve tracing of btree_path leaks
This patch plumbs the btree_path->ip_allocated field back to where the
btree_iter that owns it was first initialized - meaning it will be much
easier to figure out which btree_iter wasn't exited properly.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r-- | fs/bcachefs/btree_iter.c | 60 | ||||
-rw-r--r-- | fs/bcachefs/btree_iter.h | 6 | ||||
-rw-r--r-- | fs/bcachefs/btree_types.h | 3 | ||||
-rw-r--r-- | fs/bcachefs/btree_update_interior.c | 7 |
4 files changed, 52 insertions(+), 24 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 6e30c7a345cc..2e6442cf2881 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -25,6 +25,15 @@ static inline void btree_path_list_remove(struct btree_trans *, struct btree_pat static inline void btree_path_list_add(struct btree_trans *, struct btree_path *, struct btree_path *); +static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter) +{ +#ifdef CONFIG_BCACHEFS_DEBUG + return iter->ip_allocated; +#else + return 0; +#endif +} + static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *); /* @@ -1610,14 +1619,15 @@ static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btr inline struct btree_path * __must_check bch2_btree_path_make_mut(struct btree_trans *trans, - struct btree_path *path, bool intent) + struct btree_path *path, bool intent, + unsigned long ip) { if (path->ref > 1 || path->preserve) { __btree_path_put(path, intent); path = btree_path_clone(trans, path, intent); path->preserve = false; #ifdef CONFIG_BCACHEFS_DEBUG - path->ip_allocated = _RET_IP_; + path->ip_allocated = ip; #endif btree_trans_verify_sorted(trans); } @@ -1628,7 +1638,7 @@ bch2_btree_path_make_mut(struct btree_trans *trans, static struct btree_path * __must_check btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos new_pos, - bool intent) + bool intent, unsigned long ip) { int cmp = bpos_cmp(new_pos, path->pos); unsigned l = path->level; @@ -1639,7 +1649,7 @@ btree_path_set_pos(struct btree_trans *trans, if (!cmp) return path; - path = bch2_btree_path_make_mut(trans, path, intent); + path = bch2_btree_path_make_mut(trans, path, intent, ip); path->pos = new_pos; path->should_be_locked = false; @@ -1815,7 +1825,7 @@ static struct btree_path *btree_path_alloc(struct btree_trans *trans, struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached, enum btree_id btree_id, struct bpos pos, 
unsigned locks_want, unsigned level, - bool intent) + bool intent, unsigned long ip) { struct btree_path *path, *path_pos = NULL; int i; @@ -1838,7 +1848,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached, path_pos->btree_id == btree_id && path_pos->level == level) { __btree_path_get(path_pos, intent); - path = btree_path_set_pos(trans, path_pos, pos, intent); + path = btree_path_set_pos(trans, path_pos, pos, intent, ip); path->preserve = true; } else { path = btree_path_alloc(trans, path_pos); @@ -1858,7 +1868,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached, for (i = 0; i < ARRAY_SIZE(path->l); i++) path->l[i].b = BTREE_ITER_NO_NODE_INIT; #ifdef CONFIG_BCACHEFS_DEBUG - path->ip_allocated = _RET_IP_; + path->ip_allocated = ip; #endif btree_trans_verify_sorted(trans); } @@ -1936,7 +1946,8 @@ bch2_btree_iter_traverse(struct btree_iter *iter) iter->path = btree_path_set_pos(iter->trans, iter->path, btree_iter_search_key(iter), - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); if (ret) @@ -1971,7 +1982,8 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) iter->k.p = iter->pos = b->key.k.p; iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p, - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); iter->path->should_be_locked = true; BUG_ON(iter->path->uptodate); out: @@ -2030,7 +2042,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) */ path = iter->path = btree_path_set_pos(trans, path, bpos_successor(iter->pos), - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); path->level = iter->min_depth; @@ -2052,7 +2065,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) iter->k.p = iter->pos = b->key.k.p; iter->path = 
btree_path_set_pos(trans, iter->path, b->key.k.p, - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); iter->path->should_be_locked = true; BUG_ON(iter->path->uptodate); out: @@ -2111,7 +2125,8 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) while (1) { iter->path = btree_path_set_pos(trans, iter->path, search_key, - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { @@ -2187,7 +2202,8 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) cmp = bpos_cmp(k.k->p, iter->path->pos); if (cmp) { iter->path = bch2_btree_path_make_mut(trans, iter->path, - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); iter->path->pos = k.k->p; btree_path_check_sort(trans, iter->path, cmp); } @@ -2239,7 +2255,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) while (1) { iter->path = btree_path_set_pos(trans, iter->path, search_key, - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { @@ -2369,7 +2386,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) search_key = btree_iter_search_key(iter); iter->path = btree_path_set_pos(trans, iter->path, search_key, - iter->flags & BTREE_ITER_INTENT); + iter->flags & BTREE_ITER_INTENT, + btree_iter_ip_allocated(iter)); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) @@ -2583,7 +2601,8 @@ static void __bch2_trans_iter_init(struct btree_trans *trans, unsigned btree_id, struct bpos pos, unsigned locks_want, unsigned depth, - unsigned flags) + unsigned flags, + unsigned long ip) { EBUG_ON(trans->restarted); @@ -2609,6 +2628,9 @@ static void __bch2_trans_iter_init(struct 
btree_trans *trans, iter->k.type = KEY_TYPE_deleted; iter->k.p = pos; iter->k.size = 0; +#ifdef CONFIG_BCACHEFS_DEBUG + iter->ip_allocated = ip; +#endif iter->path = bch2_path_get(trans, flags & BTREE_ITER_CACHED, @@ -2616,7 +2638,7 @@ static void __bch2_trans_iter_init(struct btree_trans *trans, iter->pos, locks_want, depth, - flags & BTREE_ITER_INTENT); + flags & BTREE_ITER_INTENT, ip); } void bch2_trans_iter_init(struct btree_trans *trans, @@ -2625,7 +2647,7 @@ void bch2_trans_iter_init(struct btree_trans *trans, unsigned flags) { __bch2_trans_iter_init(trans, iter, btree_id, pos, - 0, 0, flags); + 0, 0, flags, _RET_IP_); } void bch2_trans_node_iter_init(struct btree_trans *trans, @@ -2640,7 +2662,7 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, BTREE_ITER_NOT_EXTENTS| __BTREE_ITER_ALL_SNAPSHOTS| BTREE_ITER_ALL_SNAPSHOTS| - flags); + flags, _RET_IP_); BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH)); BUG_ON(iter->path->level != depth); BUG_ON(iter->min_depth != depth); diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 31d2dda7ca05..26eb90a7eab8 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -130,11 +130,13 @@ __trans_next_path_with_node(struct btree_trans *trans, struct btree *b, (_path)->idx + 1)) struct btree_path * __must_check -bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *, bool); +bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *, + bool, unsigned long); int __must_check bch2_btree_path_traverse(struct btree_trans *, struct btree_path *, unsigned); struct btree_path *bch2_path_get(struct btree_trans *, bool, enum btree_id, - struct bpos, unsigned, unsigned, bool); + struct bpos, unsigned, unsigned, bool, + unsigned long); inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *); #ifdef CONFIG_BCACHEFS_DEBUG diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 0d0a719f738f..2c2e2f794b8f 100644 --- 
a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -291,6 +291,9 @@ struct btree_iter { * bch2_btree_iter_next_slot() can correctly advance pos. */ struct bkey k; +#ifdef CONFIG_BCACHEFS_DEBUG + unsigned long ip_allocated; +#endif }; struct btree_key_cache { diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 61c7757bd3ca..dfff972551ee 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -1590,8 +1590,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, ? bpos_predecessor(b->data->min_key) : bpos_successor(b->data->max_key); - sib_path = bch2_path_get(trans, false, path->btree_id, - sib_pos, U8_MAX, level, true); + sib_path = bch2_path_get(trans, false, path->btree_id, sib_pos, + U8_MAX, level, true, _THIS_IP_); ret = bch2_btree_path_traverse(trans, sib_path, false); if (ret) goto err; @@ -1888,7 +1888,8 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, bch2_trans_copy_iter(&iter2, iter); iter2.path = bch2_btree_path_make_mut(trans, iter2.path, - iter2.flags & BTREE_ITER_INTENT); + iter2.flags & BTREE_ITER_INTENT, + _THIS_IP_); BUG_ON(iter2.path->level != b->c.level); BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p)); |