diff options
author    Kent Overstreet <kent.overstreet@gmail.com>  2022-01-08 22:59:58 -0500
committer Kent Overstreet <kent.overstreet@gmail.com>  2022-01-09 00:44:06 -0500
commit    a419efdd56977c37499a87e350c8ec706e07fcdc (patch)
tree      4bcd7e64289157a2ba7bd832e4d05ed8a4a36b5f
parent    1574b3f2398ebd6fc797b583b375b83e8257c45c (diff)
bcachefs: Tracepoint improvements
This improves the transaction restart tracepoints - adding distinct
tracepoints for all the locations and reasons a transaction might have
been restarted, and ensuring that there's a tracepoint for every
transaction restart.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
 fs/bcachefs/btree_cache.c        |   4 +
 fs/bcachefs/btree_iter.c         |  33 +++++--
 fs/bcachefs/btree_key_cache.c    |   3 +-
 include/trace/events/bcachefs.h  | 102 +++++++++++++++++++-
 4 files changed, 127 insertions(+), 15 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index fc6c4d4cd02f..986d08d708cc 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -666,6 +666,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c, * been freed: */ if (trans && !bch2_btree_node_relock(trans, path, level + 1)) { + trace_trans_restart_relock_parent_for_fill(trans->fn, + _THIS_IP_, btree_id, &path->pos); btree_trans_restart(trans); return ERR_PTR(-EINTR); } @@ -713,6 +715,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c, } if (!six_relock_type(&b->c.lock, lock_type, seq)) { + trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_, + btree_id, &path->pos); btree_trans_restart(trans); return ERR_PTR(-EINTR); } diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 2ae4e523ff3b..3f126cfc32d9 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -178,19 +178,25 @@ bool __bch2_btree_node_relock(struct btree_trans *trans, int want = __btree_lock_want(path, level); if (!is_btree_node(path, level)) - return false; + goto fail; if (race_fault()) - return false; + goto fail; if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || (btree_node_lock_seq_matches(path, b, level) && btree_node_lock_increment(trans, b, level, want))) { mark_btree_node_locked(path, level, want); return true; - } else { - return false; } +fail: + trace_btree_node_relock_fail(trans->fn, _RET_IP_, + path->btree_id, + &path->pos, + (unsigned long) b, + path->l[level].lock_seq, + is_btree_node(path, level) ? 
b->c.lock.state.seq : 0); + return false; } bool bch2_btree_node_upgrade(struct btree_trans *trans, @@ -237,7 +243,7 @@ success: static inline bool btree_path_get_locks(struct btree_trans *trans, struct btree_path *path, - bool upgrade, unsigned long trace_ip) + bool upgrade) { unsigned l = path->level; int fail_idx = -1; @@ -440,6 +446,8 @@ bool bch2_btree_path_relock_intent(struct btree_trans *trans, if (!bch2_btree_node_relock(trans, path, l)) { __bch2_btree_path_unlock(path); btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_, + path->btree_id, &path->pos); btree_trans_restart(trans); return false; } @@ -452,10 +460,13 @@ __flatten static bool bch2_btree_path_relock(struct btree_trans *trans, struct btree_path *path, unsigned long trace_ip) { - bool ret = btree_path_get_locks(trans, path, false, trace_ip); + bool ret = btree_path_get_locks(trans, path, false); - if (!ret) + if (!ret) { + trace_trans_restart_relock_path(trans->fn, trace_ip, + path->btree_id, &path->pos); btree_trans_restart(trans); + } return ret; } @@ -469,7 +480,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans, path->locks_want = new_locks_want; - if (btree_path_get_locks(trans, path, true, _THIS_IP_)) + if (btree_path_get_locks(trans, path, true)) return true; /* @@ -497,7 +508,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans, linked->btree_id == path->btree_id && linked->locks_want < new_locks_want) { linked->locks_want = new_locks_want; - btree_path_get_locks(trans, linked, true, _THIS_IP_); + btree_path_get_locks(trans, linked, true); } return false; @@ -1962,7 +1973,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans, locks_want = min(locks_want, BTREE_MAX_DEPTH); if (locks_want > path->locks_want) { path->locks_want = locks_want; - btree_path_get_locks(trans, path, true, _THIS_IP_); + btree_path_get_locks(trans, path, true); } return path; @@ -2099,6 +2110,8 @@ struct btree 
*bch2_btree_iter_next_node(struct btree_iter *iter) __bch2_btree_path_unlock(path); path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS; path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS; + trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_, + path->btree_id, &path->pos); btree_trans_restart(trans); ret = -EINTR; goto err; diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 1d7b101224f1..faed51e7f4b8 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -222,7 +222,8 @@ static int btree_key_cache_fill(struct btree_trans *trans, goto err; if (!bch2_btree_node_relock(trans, ck_path, 0)) { - trace_transaction_restart_ip(trans->fn, _THIS_IP_); + trace_trans_restart_relock_key_cache_fill(trans->fn, + _THIS_IP_, ck_path->btree_id, &ck_path->pos); ret = btree_trans_restart(trans); goto err; } diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h index 295dcd60e704..8f10d13b27d5 100644 --- a/include/trace/events/bcachefs.h +++ b/include/trace/events/bcachefs.h @@ -346,6 +346,52 @@ TRACE_EVENT(btree_cache_scan, __entry->ret) ); +TRACE_EVENT(btree_node_relock_fail, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos, + unsigned long node, + u32 iter_lock_seq, + u32 node_lock_seq), + TP_ARGS(trans_fn, caller_ip, btree_id, pos, node, iter_lock_seq, node_lock_seq), + + TP_STRUCT__entry( + __array(char, trans_fn, 24 ) + __array(char, caller, 32 ) + __field(u8, btree_id ) + __field(u64, pos_inode ) + __field(u64, pos_offset ) + __field(u32, pos_snapshot ) + __field(unsigned long, node ) + __field(u32, iter_lock_seq ) + __field(u32, node_lock_seq ) + ), + + TP_fast_assign( + strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn)); + snprintf(__entry->caller, sizeof(__entry->caller), "%pS", (void *) caller_ip); + __entry->btree_id = btree_id; + __entry->pos_inode = pos->inode; + __entry->pos_offset = pos->offset; + 
__entry->pos_snapshot = pos->snapshot; + __entry->node = node; + __entry->iter_lock_seq = iter_lock_seq; + __entry->node_lock_seq = node_lock_seq; + ), + + TP_printk("%s %s btree %u pos %llu:%llu:%u, node %lu iter seq %u lock seq %u", + __entry->trans_fn, + __entry->caller, + __entry->btree_id, + __entry->pos_inode, + __entry->pos_offset, + __entry->pos_snapshot, + __entry->node, + __entry->iter_lock_seq, + __entry->node_lock_seq) +); + /* Garbage collection */ DEFINE_EVENT(btree_node, btree_gc_rewrite_node, @@ -621,7 +667,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter, TP_STRUCT__entry( __array(char, trans_fn, 24 ) - __field(unsigned long, caller_ip ) + __array(char, caller, 32 ) __field(u8, btree_id ) __field(u64, pos_inode ) __field(u64, pos_offset ) @@ -630,16 +676,16 @@ DECLARE_EVENT_CLASS(transaction_restart_iter, TP_fast_assign( strncpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn)); - __entry->caller_ip = caller_ip; + snprintf(__entry->caller, sizeof(__entry->caller), "%pS", (void *) caller_ip); __entry->btree_id = btree_id; __entry->pos_inode = pos->inode; __entry->pos_offset = pos->offset; __entry->pos_snapshot = pos->snapshot; ), - TP_printk("%s %pS btree %u pos %llu:%llu:%u", + TP_printk("%s %s btree %u pos %llu:%llu:%u", __entry->trans_fn, - (void *) __entry->caller_ip, + __entry->caller, __entry->btree_id, __entry->pos_inode, __entry->pos_offset, @@ -694,6 +740,54 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_relock, TP_ARGS(trans_fn, caller_ip, btree_id, pos) ); +DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_fn, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_fn, caller_ip, btree_id, pos) +); + 
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_fn, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_fn, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_fn, caller_ip, btree_id, pos) +); + +DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent, + TP_PROTO(const char *trans_fn, + unsigned long caller_ip, + enum btree_id btree_id, + struct bpos *pos), + TP_ARGS(trans_fn, caller_ip, btree_id, pos) +); + DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse, TP_PROTO(const char *trans_fn, unsigned long caller_ip, |