author     Kent Overstreet <kent.overstreet@linux.dev>    2022-09-25 16:42:53 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-03-13 11:34:37 -0400
commit     d1e24ee89c33dcd190a25fd5f8267e00f8672dbe
tree       63e1219d6c2dbf78b680911a127546a73dcba55e
parent     5a62a02b2a5f95ebc871ed6942f2e34d6aaf3a7a
bcachefs: bch2_btree_node_relock_notrace()
Most of the node_relock_fail trace events are generated from
bch2_btree_path_verify_level() when debugcheck_iterators is enabled,
but we're not interested in these trace events; they don't indicate
that we're in a slowpath.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
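
The shape of the change is simple: the slow-path worker grows a bool trace
argument, and a second inline wrapper passes false so that debug-only callers
(such as the verify path mentioned above) skip the tracepoint. Below is a
minimal standalone C sketch of that pattern; the names do_relock(), relock()
and relock_notrace() are hypothetical stand-ins for illustration, not bcachefs
APIs.

#include <stdbool.h>
#include <stdio.h>

/* Worker: does the real work and only emits the trace event when asked to. */
static bool do_relock(int level, bool trace)
{
	bool ok = false;	/* pretend the relock attempt failed */

	if (!ok && trace)
		printf("trace: relock_fail at level %d\n", level);
	return ok;
}

/* Normal callers: a failure here is interesting, so trace it. */
static inline bool relock(int level)
{
	return do_relock(level, true);
}

/* Debug/verify callers: failures are expected noise, so stay silent. */
static inline bool relock_notrace(int level)
{
	return do_relock(level, false);
}

int main(void)
{
	relock(0);		/* prints a trace line on failure */
	relock_notrace(0);	/* silent on failure */
	return 0;
}

In the patch below, __bch2_btree_node_relock() plays the role of do_relock():
bch2_btree_node_relock() passes trace=true, and the new
bch2_btree_node_relock_notrace() passes trace=false for the
bch2_btree_path_verify_level() caller.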
-rw-r--r--  fs/bcachefs/btree_iter.c    |  2
-rw-r--r--  fs/bcachefs/btree_locking.c |  6
-rw-r--r--  fs/bcachefs/btree_locking.h | 16
3 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 88a6ccc0a1e5..3a564c4a5f5e 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -167,7 +167,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
 	if (!btree_path_node(path, level))
 		return;
 
-	if (!bch2_btree_node_relock(trans, path, level))
+	if (!bch2_btree_node_relock_notrace(trans, path, level))
 		return;
 
 	BUG_ON(!btree_path_pos_in_node(path, l->b));
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 628682f472bc..ee9a902f7851 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -401,7 +401,8 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *trans,
-			      struct btree_path *path, unsigned level)
+			      struct btree_path *path, unsigned level,
+			      bool trace)
 {
 	struct btree *b = btree_path_node(path, level);
 	int want = __btree_lock_want(path, level);
@@ -416,7 +417,8 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 		return true;
 	}
 fail:
-	trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
+	if (trace)
+		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
 	return false;
 }
 
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 863341fcf1ea..873d3b6d0a4e 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -318,7 +318,7 @@ static inline int bch2_btree_path_relock(struct btree_trans *trans,
 		: __bch2_btree_path_relock(trans, path, trace_ip);
 }
 
-bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
 
 static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 					  struct btree_path *path, unsigned level)
@@ -329,7 +329,19 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 
 	return likely(btree_node_locked(path, level)) ||
 		(!IS_ERR_OR_NULL(path->l[level].b) &&
-		 __bch2_btree_node_relock(trans, path, level));
+		 __bch2_btree_node_relock(trans, path, level, true));
+}
+
+static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
+						  struct btree_path *path, unsigned level)
+{
+	EBUG_ON(btree_node_locked(path, level) &&
+		!btree_node_write_locked(path, level) &&
+		btree_node_locked_type(path, level) != __btree_lock_want(path, level));
+
+	return likely(btree_node_locked(path, level)) ||
+		(!IS_ERR_OR_NULL(path->l[level].b) &&
+		 __bch2_btree_node_relock(trans, path, level, false));
 }
 
 /* upgrade */