 fs/bcachefs/btree_iter.c    | 12 ++++++++----
 fs/bcachefs/btree_iter.h    |  4 +---
 fs/bcachefs/btree_locking.h |  3 +++
 3 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 85c8bd3b396b..ee463f36823c 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -860,7 +860,8 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
		     i < iter->locks_want && iter->nodes[i];
		     i++)
			if (!bch2_btree_node_relock(iter, i)) {
-				while (iter->nodes[iter->level] &&
+				while (iter->level < BTREE_MAX_DEPTH &&
+				       iter->nodes[iter->level] &&
				       iter->level + 1 < iter->locks_want)
					btree_iter_up(iter);
				break;
@@ -871,7 +872,8 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
	 * If the current node isn't locked, go up until we have a locked node
	 * or run out of nodes:
	 */
-	while (iter->nodes[iter->level] &&
+	while (iter->level < BTREE_MAX_DEPTH &&
+	       iter->nodes[iter->level] &&
	       !(is_btree_node(iter, iter->level) &&
		 bch2_btree_node_relock(iter, iter->level) &&
		 btree_iter_pos_cmp(iter->pos,
@@ -883,7 +885,8 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
	 * If we've got a btree node locked (i.e. we aren't about to relock the
	 * root) - advance its node iterator if necessary:
	 */
-	if (iter->nodes[iter->level]) {
+	if (iter->level < BTREE_MAX_DEPTH &&
+	    iter->nodes[iter->level]) {
		struct bkey_s_c k;

		while ((k = __btree_iter_peek_all(iter)).k &&
@@ -955,7 +958,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)

	btree_iter_up(iter);

-	if (!iter->nodes[iter->level])
+	if (iter->level == BTREE_MAX_DEPTH ||
+	    !iter->nodes[iter->level])
		return NULL;

	/* parent node usually won't be locked: redo traversal if necessary */
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index a7fdba824eb6..eb196a3affb5 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -50,10 +50,8 @@ struct btree_iter {
	 * always fail (but since freeing a btree node takes a write lock on the
	 * node, which increments the node's lock seq, that's not actually
	 * necessary in that example).
-	 *
-	 * One extra slot for a sentinel NULL:
	 */
-	struct btree		*nodes[BTREE_MAX_DEPTH + 1];
+	struct btree		*nodes[BTREE_MAX_DEPTH];
	struct btree_node_iter	node_iters[BTREE_MAX_DEPTH];

	/*
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index acfe5b59df56..ca2992ba385a 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -92,6 +92,7 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
	int lock_type = btree_node_locked_type(iter, level);

	EBUG_ON(!level && iter->flags & BTREE_ITER_UPTODATE);
+	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&iter->nodes[level]->lock, lock_type);
@@ -106,6 +107,8 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos,
				   struct btree_iter *iter,
				   enum six_lock_type type)
{
+	EBUG_ON(level >= BTREE_MAX_DEPTH);
+
	return likely(six_trylock_type(&b->lock, type)) ||
		__bch2_btree_node_lock(b, pos, level, iter, type);
}
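
A minimal standalone sketch of the pattern this patch trades away, assuming nothing beyond what the diff shows; MAX_DEPTH, struct node, struct iter and iter_has_node() below are hypothetical stand-ins for BTREE_MAX_DEPTH, struct btree, struct btree_iter and the open-coded checks in the patch, not bcachefs code:

/*
 * Illustration only -- hypothetical types, not bcachefs code.
 */
#include <stdbool.h>
#include <stddef.h>

#define MAX_DEPTH 4

struct node;

struct iter {
	unsigned	level;
	/*
	 * Before this patch the array was nodes[MAX_DEPTH + 1], so
	 * nodes[MAX_DEPTH] was a sentinel NULL and nodes[level] could
	 * safely be read even after walking up past the deepest level.
	 */
	struct node	*nodes[MAX_DEPTH];
};

/*
 * With the sentinel slot gone, level == MAX_DEPTH (iterator walked
 * above the root) must be caught by an explicit bounds check before
 * indexing -- the check the patch adds in front of every
 * iter->nodes[iter->level] access:
 */
static bool iter_has_node(const struct iter *iter)
{
	return iter->level < MAX_DEPTH && iter->nodes[iter->level] != NULL;
}

The EBUG_ON(level >= BTREE_MAX_DEPTH) assertions added in btree_locking.h enforce the same invariant on the lock and unlock paths, where indexing one past the shrunken array would otherwise go unnoticed.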