Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r-- | fs/bcachefs/btree_iter.c | 106
1 file changed, 63 insertions, 43 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index bdb068e9d263..8840531266d7 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -170,8 +170,8 @@ success:
 	return true;
 }

-static inline bool btree_iter_get_locks(struct btree_iter *iter,
-					bool upgrade, bool trace)
+static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
+					unsigned long trace_ip)
 {
 	unsigned l = iter->level;
 	int fail_idx = -1;
@@ -183,16 +183,17 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
 		if (!(upgrade
 		      ? bch2_btree_node_upgrade(iter, l)
 		      : bch2_btree_node_relock(iter, l))) {
-			if (trace)
-				(upgrade
-				 ? trace_node_upgrade_fail
-				 : trace_node_relock_fail)(l, iter->l[l].lock_seq,
-						is_btree_node(iter, l)
-						? 0
-						: (unsigned long) iter->l[l].b,
-						is_btree_node(iter, l)
-						? iter->l[l].b->c.lock.state.seq
-						: 0);
+			(upgrade
+			 ? trace_node_upgrade_fail
+			 : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+					iter->btree_id, &iter->real_pos,
+					l, iter->l[l].lock_seq,
+					is_btree_node(iter, l)
+					? 0
+					: (unsigned long) iter->l[l].b,
+					is_btree_node(iter, l)
+					? iter->l[l].b->c.lock.state.seq
+					: 0);
 			fail_idx = l;

 			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
@@ -372,9 +373,9 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif

 __flatten
-bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
 {
-	return btree_iter_get_locks(iter, false, trace);
+	return btree_iter_get_locks(iter, false, trace_ip);
 }

 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
@@ -386,7 +387,7 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter,

 	iter->locks_want = new_locks_want;

-	if (btree_iter_get_locks(iter, true, true))
+	if (btree_iter_get_locks(iter, true, _THIS_IP_))
 		return true;

 	/*
@@ -414,7 +415,7 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
 		    linked->btree_id == iter->btree_id &&
 		    linked->locks_want < new_locks_want) {
 			linked->locks_want = new_locks_want;
-			btree_iter_get_locks(linked, true, false);
+			btree_iter_get_locks(linked, true, _THIS_IP_);
 		}

 	return false;
@@ -460,8 +461,9 @@ bool bch2_trans_relock(struct btree_trans *trans)
 	struct btree_iter *iter;

 	trans_for_each_iter(trans, iter)
-		if (!bch2_btree_iter_relock(iter, true)) {
-			trace_trans_restart_relock(trans->ip);
+		if (!bch2_btree_iter_relock(iter, _RET_IP_)) {
+			trace_trans_restart_relock(trans->ip, _RET_IP_,
+					iter->btree_id, &iter->real_pos);
 			return false;
 		}
 	return true;
@@ -1175,7 +1177,8 @@ err:

 static int btree_iter_traverse_one(struct btree_iter *, unsigned long);

-static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
+				     unsigned long trace_ip)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter *iter;
@@ -1192,7 +1195,7 @@ retry_all:
 	relock_fail = false;

 	trans_for_each_iter(trans, iter) {
-		if (!bch2_btree_iter_relock(iter, true))
+		if (!bch2_btree_iter_relock(iter, _THIS_IP_))
 			relock_fail = true;
 		sorted[nr_sorted++] = iter->idx;
 	}
@@ -1269,13 +1272,13 @@ out:

 	trans->in_traverse_all = false;

-	trace_trans_traverse_all(trans->ip);
+	trace_trans_traverse_all(trans->ip, trace_ip);
 	return ret;
 }

 int bch2_btree_iter_traverse_all(struct btree_trans *trans)
 {
-	return __btree_iter_traverse_all(trans, 0);
+	return __btree_iter_traverse_all(trans, 0, _RET_IP_);
 }

 static inline bool btree_iter_good_node(struct btree_iter *iter,
@@ -1320,6 +1323,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 				   unsigned long trace_ip)
 {
 	unsigned depth_want = iter->level;
+	int ret = 0;

 	/*
 	 * if we need interior nodes locked, call btree_iter_relock() to make
@@ -1327,16 +1331,18 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 	 */
 	if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
 	    iter->locks_want > 1)
-		bch2_btree_iter_relock(iter, false);
+		bch2_btree_iter_relock(iter, _THIS_IP_);

-	if (btree_iter_type(iter) == BTREE_ITER_CACHED)
-		return bch2_btree_iter_traverse_cached(iter);
+	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+		ret = bch2_btree_iter_traverse_cached(iter);
+		goto out;
+	}

 	if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
-		return 0;
+		goto out;

 	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
-		return 0;
+		goto out;

 	iter->level = btree_iter_up_until_good_node(iter, 0);

@@ -1347,12 +1353,18 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 	 * btree_iter_lock_root() comes next and that it can't fail
 	 */
 	while (iter->level > depth_want) {
-		int ret = btree_iter_node(iter, iter->level)
+		ret = btree_iter_node(iter, iter->level)
 			? btree_iter_down(iter, trace_ip)
 			: btree_iter_lock_root(iter, depth_want, trace_ip);
 		if (unlikely(ret)) {
-			if (ret == 1)
-				return 0;
+			if (ret == 1) {
+				/*
+				 * Got to the end of the btree (in
+				 * BTREE_ITER_NODES mode)
+				 */
+				ret = 0;
+				goto out;
+			}

 			iter->level = depth_want;

@@ -1364,14 +1376,16 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 				iter->l[iter->level].b =
 					BTREE_ITER_NO_NODE_DOWN;
 			}
-			return ret;
+			goto out;
 		}
 	}

 	iter->uptodate = BTREE_ITER_NEED_PEEK;
-
+out:
+	trace_iter_traverse(iter->trans->ip, trace_ip,
+			    iter->btree_id, &iter->real_pos, ret);
 	bch2_btree_iter_verify(iter);
-	return 0;
+	return ret;
 }

 static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
@@ -1382,7 +1396,7 @@ static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 	ret = bch2_trans_cond_resched(trans) ?:
 		btree_iter_traverse_one(iter, _RET_IP_);
 	if (unlikely(ret))
-		ret = __btree_iter_traverse_all(trans, ret);
+		ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);

 	return ret;
 }
@@ -1498,6 +1512,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)

 static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
 {
+	struct bpos old_pos = iter->real_pos;
 	int cmp = bpos_cmp(new_pos, iter->real_pos);
 	unsigned l = iter->level;

@@ -1508,7 +1523,7 @@ static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_p

 	if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
 		btree_node_unlock(iter, 0);
-		iter->l[0].b = BTREE_ITER_NO_NODE_UP;
+		iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
 		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
 		return;
 	}
@@ -1537,6 +1552,11 @@ out:
 		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

 	bch2_btree_iter_verify(iter);
+#ifdef CONFIG_BCACHEFS_DEBUG
+	trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
+				  iter->btree_id,
+				  &old_pos, &new_pos, l);
+#endif
 }

 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
@@ -2055,13 +2075,6 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 			best = iter;
 	}

-	trace_trans_get_iter(_RET_IP_, trans->ip,
-			     btree_id,
-			     &real_pos, locks_want,
-			     best ? &best->real_pos : &pos_min,
-			     best ? best->locks_want : 0,
-			     best ? best->uptodate : BTREE_ITER_NEED_TRAVERSE);
-
 	if (!best) {
 		iter = btree_trans_iter_alloc(trans);
 		bch2_btree_iter_init(trans, iter, btree_id);
@@ -2090,7 +2103,7 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 	locks_want = min(locks_want, BTREE_MAX_DEPTH);
 	if (locks_want > iter->locks_want) {
 		iter->locks_want = locks_want;
-		btree_iter_get_locks(iter, true, false);
+		btree_iter_get_locks(iter, true, _THIS_IP_);
 	}

 	while (iter->level != depth) {
@@ -2108,6 +2121,13 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 	bch2_btree_iter_set_pos(iter, pos);
 	btree_iter_set_search_pos(iter, real_pos);

+	trace_trans_get_iter(_RET_IP_, trans->ip,
+			     btree_id,
+			     &real_pos, locks_want, iter->uptodate,
+			     best ? &best->real_pos : &pos_min,
+			     best ? best->locks_want : U8_MAX,
+			     best ? best->uptodate : U8_MAX);
+
 	return iter;
 }