diff options
author:    Kent Overstreet <kent.overstreet@linux.dev>  2022-09-25 16:42:53 -0400
committer: Kent Overstreet <kent.overstreet@linux.dev>  2022-10-03 23:55:17 -0400
commit:    d3e275713324b596e7a7874581796880a29e7536 (patch)
tree:      dcfae00bc2a7ea5752fb685b65a2ca6e762a9d3b
parent:    344b02d69a14058fa0763469c2062979849113de (diff)
bcachefs: bch2_btree_node_relock_notrace()
Most of the node_relock_fail trace events are generated from
bch2_btree_path_verify_level(), when debugcheck_iterators is enabled -
but we're not interested in these trace events, they don't indicate that
we're in a slowpath.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  fs/bcachefs/btree_iter.c    |  2
-rw-r--r--  fs/bcachefs/btree_locking.c |  6
-rw-r--r--  fs/bcachefs/btree_locking.h | 16
3 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 33bd16a04e4d..c11e8e8168c4 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -179,7 +179,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
 	if (!btree_path_node(path, level))
 		return;
 
-	if (!bch2_btree_node_relock(trans, path, level))
+	if (!bch2_btree_node_relock_notrace(trans, path, level))
 		return;
 
 	BUG_ON(!btree_path_pos_in_node(path, l->b));
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 44cce642ef70..339d44ce37a6 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -367,7 +367,8 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *trans,
-			      struct btree_path *path, unsigned level)
+			      struct btree_path *path, unsigned level,
+			      bool trace)
 {
 	struct btree *b = btree_path_node(path, level);
 	int want = __btree_lock_want(path, level);
@@ -382,7 +383,8 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 		return true;
 	}
 fail:
-	trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
+	if (trace)
+		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
 	return false;
 }
 
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 4aaf6e5e74ef..d91b42bf1de1 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -311,7 +311,7 @@ bch2_btree_node_lock_write(struct btree_trans *trans,
 bool bch2_btree_path_relock_norestart(struct btree_trans *,
 			struct btree_path *, unsigned long);
 
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
 
 static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 					  struct btree_path *path, unsigned level)
@@ -322,7 +322,19 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 
 	return likely(btree_node_locked(path, level)) ||
 		(!IS_ERR_OR_NULL(path->l[level].b) &&
-		 __bch2_btree_node_relock(trans, path, level));
+		 __bch2_btree_node_relock(trans, path, level, true));
+}
+
+static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
+						  struct btree_path *path, unsigned level)
+{
+	EBUG_ON(btree_node_locked(path, level) &&
+		!btree_node_write_locked(path, level) &&
+		btree_node_locked_type(path, level) != __btree_lock_want(path, level));
+
+	return likely(btree_node_locked(path, level)) ||
+		(!IS_ERR_OR_NULL(path->l[level].b) &&
+		 __bch2_btree_node_relock(trans, path, level, false));
 }
 
 static inline int bch2_btree_path_relock(struct btree_trans *trans,