author		Kent Overstreet <kent.overstreet@gmail.com>	2018-02-01 02:26:52 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2018-05-22 00:44:18 -0400
commit		b497a715b9fd583376639355672b2af0f8f3e819
tree		02c52f2cb93581aa9f7f6b86dce37cf31fb3d3f2
parent		891abdbcb623ccbba196c6d09abff752cd7d01b9
bcachefs: struct btree_iter_level

Introduce a struct for the per-level state of a btree iterator: the
parallel iter->nodes[] and iter->node_iters[] arrays become a single
iter->l[] array of struct btree_iter_level, so everything belonging to
one btree level lives together and helpers that operate on a single
level can take a pointer to it.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--	fs/bcachefs/bset.h			  4
-rw-r--r--	fs/bcachefs/btree_cache.c		  4
-rw-r--r--	fs/bcachefs/btree_gc.c			  6
-rw-r--r--	fs/bcachefs/btree_io.c			 12
-rw-r--r--	fs/bcachefs/btree_iter.c		239
-rw-r--r--	fs/bcachefs/btree_iter.h		 32
-rw-r--r--	fs/bcachefs/btree_locking.h		 15
-rw-r--r--	fs/bcachefs/btree_update_interior.c	 20
-rw-r--r--	fs/bcachefs/btree_update_interior.h	 31
-rw-r--r--	fs/bcachefs/btree_update_leaf.c		 47
-rw-r--r--	fs/bcachefs/debug.c			 15
-rw-r--r--	fs/bcachefs/extents.c			 76
12 files changed, 262 insertions, 239 deletions
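
For orientation, a minimal standalone sketch of the layout change this patch makes (BTREE_MAX_DEPTH and the names l, b, iter follow the patch; the stub types and the before/after struct names are illustrative only):

/* Before: two parallel per-level arrays, always indexed in tandem. */
#define BTREE_MAX_DEPTH		4

struct btree;					/* opaque stand-in */
struct btree_node_iter { unsigned data; };	/* stand-in for the real state */

struct btree_iter_before {
	struct btree		*nodes[BTREE_MAX_DEPTH];
	struct btree_node_iter	node_iters[BTREE_MAX_DEPTH];
};

/* After: one struct per level, so everything belonging to a level
 * lives together and per-level helpers can take a pointer to it: */
struct btree_iter_after {
	struct btree_iter_level {
		struct btree		*b;
		struct btree_node_iter	iter;
	} l[BTREE_MAX_DEPTH];
};

The mechanical consequence, visible throughout the diff, is that iter->nodes[x] becomes iter->l[x].b and iter->node_iters[x] becomes iter->l[x].iter.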
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index c195cd913f6e..135d93f12db4 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -493,14 +493,14 @@ static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
static inline struct bkey_packed *
__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
- struct btree *b)
+ struct btree *b)
{
return __btree_node_offset_to_key(b, iter->data->k);
}
static inline struct bkey_packed *
bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
- struct btree *b)
+ struct btree *b)
{
return bch2_btree_node_iter_end(iter)
? NULL
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index a439760a1dfd..21044901facb 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -746,7 +746,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
struct btree *ret;
unsigned level = b->level;
- parent = iter->nodes[level + 1];
+ parent = btree_iter_node(iter, level + 1);
if (!parent)
return NULL;
@@ -755,7 +755,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
return ERR_PTR(-EINTR);
}
- node_iter = iter->node_iters[parent->level];
+ node_iter = iter->l[parent->level].iter;
k = bch2_btree_node_iter_peek_all(&node_iter, parent);
BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 43e64d2746bc..635086638ba8 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -589,7 +589,7 @@ static void recalc_packed_keys(struct btree *b)
static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
struct btree *old_nodes[GC_MERGE_NODES])
{
- struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
+ struct btree *parent = btree_node_parent(iter, old_nodes[0]);
unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
unsigned blocks = btree_blocks(c) * 2 / 3;
struct btree *new_nodes[GC_MERGE_NODES];
@@ -775,7 +775,7 @@ next:
BUG_ON(!bch2_keylist_empty(&keylist));
- BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
+ BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0]));
@@ -868,7 +868,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
* and the nodes in our sliding window might not have the same
* parent anymore - blow away the sliding window:
*/
- if (iter.nodes[iter.level + 1] &&
+ if (btree_iter_node(&iter, iter.level + 1) &&
!btree_node_intent_locked(&iter, iter.level + 1))
memset(merge + 1, 0,
(GC_MERGE_NODES - 1) * sizeof(merge[0]));
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index ee636847a8e0..f4acfcdc3f21 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -827,13 +827,13 @@ void bch2_btree_build_aux_trees(struct btree *b)
* Returns true if we sorted (i.e. invalidated iterators)
*/
void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+ struct btree_iter *iter)
{
struct btree_node_entry *bne;
bool did_sort;
EBUG_ON(!(b->lock.state.seq & 1));
- EBUG_ON(iter && iter->nodes[b->level] != b);
+ EBUG_ON(iter && iter->l[b->level].b != b);
did_sort = btree_node_compact(c, b, iter);
@@ -1467,15 +1467,13 @@ retry:
goto err;
/* has node been freed? */
- if (iter.nodes[b->level] != b) {
+ if (iter.l[b->level].b != b) {
/* node has been freed: */
- if (!btree_node_dying(b))
- panic("foo4\n");
+ BUG_ON(!btree_node_dying(b));
goto out;
}
- if (!btree_node_hashed(b))
- panic("foo5\n");
+ BUG_ON(!btree_node_hashed(b));
bkey_copy(&tmp.k, &b->key);
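
As an aside, the debug panics above become assertions; a one-line illustration of the generic kernel idiom (not specific to bcachefs):

/* BUG_ON(cond) oopses with a backtrace when cond is true, so the
 * condition itself documents the invariant being checked; the old
 * if (!cond) panic("fooN\n") form told a crash-log reader nothing: */
BUG_ON(!btree_node_dying(b));	/* replaces panic("foo4\n") */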
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 93689542cf8c..dcb923615fb2 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -14,7 +14,7 @@
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
- return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
+ return iter->l[l].b && iter->l[l].b != BTREE_ITER_NOT_END;
}
/* Btree node locking: */
@@ -27,7 +27,7 @@ void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
struct btree_iter *linked;
- EBUG_ON(iter->nodes[b->level] != b);
+ EBUG_ON(iter->l[b->level].b != b);
EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
for_each_linked_btree_node(iter, b, linked)
@@ -43,14 +43,14 @@ void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
struct btree_iter *linked;
unsigned readers = 0;
- EBUG_ON(iter->nodes[b->level] != b);
+ EBUG_ON(iter->l[b->level].b != b);
EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);
if (six_trylock_write(&b->lock))
return;
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[b->level] == b &&
+ if (linked->l[b->level].b == b &&
btree_node_read_locked(linked, b->level))
readers++;
@@ -71,10 +71,10 @@ void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
}
}
-bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
struct btree_iter *linked;
- struct btree *b = iter->nodes[level];
+ struct btree *b = iter->l[level].b;
int want = btree_lock_want(iter, level);
int have = btree_node_locked_type(iter, level);
@@ -93,7 +93,7 @@ bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
goto success;
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b &&
+ if (linked->l[level].b == b &&
btree_node_locked_type(linked, level) == want &&
iter->lock_seq[level] == b->lock.state.seq) {
btree_node_unlock(iter, level);
@@ -112,7 +112,7 @@ bool bch2_btree_iter_relock(struct btree_iter *iter)
{
unsigned l;
- for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
+ for (l = iter->level; l < iter->locks_want && iter->l[l].b; l++)
if (!bch2_btree_node_relock(iter, l))
return false;
@@ -139,7 +139,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
iter->nodes_locked != iter->nodes_intent_locked);
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b &&
+ if (linked->l[level].b == b &&
btree_node_locked_type(linked, level) == type) {
six_lock_increment(&b->lock, type);
return true;
@@ -212,7 +212,7 @@ static void btree_iter_drop_extra_locks(struct btree_iter *iter)
btree_node_unlock(iter, l);
} else {
if (btree_node_intent_locked(iter, l)) {
- six_lock_downgrade(&iter->nodes[l]->lock);
+ six_lock_downgrade(&iter->l[l].b->lock);
iter->nodes_intent_locked ^= 1 << l;
}
break;
@@ -277,13 +277,13 @@ int bch2_btree_iter_unlock(struct btree_iter *iter)
#ifdef CONFIG_BCACHEFS_DEBUG
static void __bch2_btree_iter_verify(struct btree_iter *iter,
- struct btree *b)
+ struct btree *b)
{
- struct btree_node_iter *node_iter = &iter->node_iters[b->level];
- struct btree_node_iter tmp = *node_iter;
+ struct btree_iter_level *l = &iter->l[b->level];
+ struct btree_node_iter tmp = l->iter;
struct bkey_packed *k;
- bch2_btree_node_iter_verify(node_iter, b);
+ bch2_btree_node_iter_verify(&l->iter, b);
/*
* For interior nodes, the iterator will have skipped past
@@ -302,7 +302,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
buf, iter->pos.inode, iter->pos.offset);
}
- k = bch2_btree_node_iter_peek_all(node_iter, b);
+ k = bch2_btree_node_iter_peek_all(&l->iter, b);
if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
iter->flags & BTREE_ITER_IS_EXTENTS)) {
char buf[100];
@@ -318,7 +318,7 @@ void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- if (iter->nodes[b->level] == b)
+ if (iter->l[b->level].b == b)
__bch2_btree_iter_verify(iter, b);
for_each_linked_btree_node(iter, b, linked)
@@ -439,18 +439,18 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
{
struct btree_iter *linked;
- if (node_iter != &iter->node_iters[b->level])
+ if (node_iter != &iter->l[b->level].iter)
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
- if (iter->nodes[b->level] == b)
+ if (iter->l[b->level].b == b)
__bch2_btree_node_iter_fix(iter, b,
- &iter->node_iters[b->level], t,
+ &iter->l[b->level].iter, t,
where, clobber_u64s, new_u64s);
for_each_linked_btree_node(iter, b, linked)
__bch2_btree_node_iter_fix(linked, b,
- &linked->node_iters[b->level], t,
+ &linked->l[b->level].iter, t,
where, clobber_u64s, new_u64s);
/* interior node iterators are... special... */
@@ -459,50 +459,48 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
}
/* peek_all() doesn't skip deleted keys */
-static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
+static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
+ struct btree_iter_level *l)
{
- struct btree *b = iter->nodes[iter->level];
struct bkey_packed *k =
- bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
+ bch2_btree_node_iter_peek_all(&l->iter, l->b);
struct bkey_s_c ret;
- EBUG_ON(!btree_node_locked(iter, iter->level));
+ EBUG_ON(!btree_node_locked(iter, l - iter->l));
if (!k)
return bkey_s_c_null;
- ret = bkey_disassemble(b, k, &iter->k);
+ ret = bkey_disassemble(l->b, k, &iter->k);
if (debug_check_bkeys(iter->c))
- bch2_bkey_debugcheck(iter->c, b, ret);
+ bch2_bkey_debugcheck(iter->c, l->b, ret);
return ret;
}
-static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
+static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
+ struct btree_iter_level *l)
{
- struct btree *b = iter->nodes[iter->level];
- struct bkey_packed *k =
- bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
+ struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
struct bkey_s_c ret;
- EBUG_ON(!btree_node_locked(iter, iter->level));
+ EBUG_ON(!btree_node_locked(iter, l - iter->l));
if (!k)
return bkey_s_c_null;
- ret = bkey_disassemble(b, k, &iter->k);
+ ret = bkey_disassemble(l->b, k, &iter->k);
if (debug_check_bkeys(iter->c))
- bch2_bkey_debugcheck(iter->c, b, ret);
+ bch2_bkey_debugcheck(iter->c, l->b, ret);
return ret;
}
-static inline void __btree_iter_advance(struct btree_iter *iter)
+static inline void __btree_iter_advance(struct btree_iter_level *l)
{
- bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
- iter->nodes[iter->level]);
+ bch2_btree_node_iter_advance(&l->iter, l->b);
}
/*
@@ -510,27 +508,28 @@ static inline void __btree_iter_advance(struct btree_iter *iter)
*/
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
- struct btree *parent;
+ struct btree_iter_level *l;
+ unsigned plevel;
bool parent_locked;
struct bkey_packed *k;
if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
return;
- parent = btree_node_parent(iter, b);
- if (!parent)
+ plevel = b->level + 1;
+ if (!btree_iter_node(iter, plevel))
return;
- parent_locked = btree_node_locked(iter, b->level + 1);
+ parent_locked = btree_node_locked(iter, plevel);
- if (!bch2_btree_node_relock(iter, b->level + 1))
+ if (!bch2_btree_node_relock(iter, plevel))
return;
- k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
- parent);
+ l = &iter->l[plevel];
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
if (!k ||
bkey_deleted(k) ||
- bkey_cmp_left_packed(parent, k, &b->key.k.p)) {
+ bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
@@ -543,25 +542,27 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
btree_node_unlock(iter, b->level + 1);
}
+static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
+ struct btree *b)
+{
+ return iter->btree_id == b->btree_id &&
+ bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
+ btree_iter_pos_cmp(iter->pos, &b->key.k,
+ iter->flags & BTREE_ITER_IS_EXTENTS);
+}
+
static inline void __btree_iter_init(struct btree_iter *iter,
struct btree *b)
{
- bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos,
+ struct btree_iter_level *l = &iter->l[b->level];
+
+ bch2_btree_node_iter_init(&l->iter, b, iter->pos,
iter->flags & BTREE_ITER_IS_EXTENTS,
btree_node_is_extents(b));
/* Skip to first non-whiteout: */
if (b->level)
- bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
-}
-
-static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
- struct btree *b)
-{
- return iter->btree_id == b->btree_id &&
- bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
- btree_iter_pos_cmp(iter->pos, &b->key.k,
- iter->flags & BTREE_ITER_IS_EXTENTS);
+ bch2_btree_node_iter_peek(&l->iter, b);
}
static inline void btree_iter_node_set(struct btree_iter *iter,
@@ -573,7 +574,7 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
EBUG_ON(b->lock.state.seq & 1);
iter->lock_seq[b->level] = b->lock.state.seq;
- iter->nodes[b->level] = b;
+ iter->l[b->level].b = b;
__btree_iter_init(iter, b);
}
@@ -635,10 +636,10 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
unsigned level = b->level;
- if (iter->nodes[level] == b) {
+ if (iter->l[level].b == b) {
iter->flags &= ~BTREE_ITER_UPTODATE;
btree_node_unlock(iter, level);
- iter->nodes[level] = BTREE_ITER_NOT_END;
+ iter->l[level].b = BTREE_ITER_NOT_END;
}
}
@@ -677,7 +678,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
* that depth
*/
iter->level = depth_want;
- iter->nodes[iter->level] = NULL;
+ iter->l[iter->level].b = NULL;
return 0;
}
@@ -690,8 +691,8 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
b->level == iter->level &&
!race_fault())) {
for (i = 0; i < iter->level; i++)
- iter->nodes[i] = BTREE_ITER_NOT_END;
- iter->nodes[iter->level] = b;
+ iter->l[i].b = BTREE_ITER_NOT_END;
+ iter->l[iter->level].b = b;
mark_btree_node_locked(iter, iter->level, lock_type);
btree_iter_node_set(iter, b);
@@ -706,52 +707,55 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
noinline
static void btree_iter_prefetch(struct btree_iter *iter)
{
- struct btree *b = iter->nodes[iter->level + 1];
- struct btree_node_iter node_iter = iter->node_iters[iter->level + 1];
+ struct btree_iter_level *l = &iter->l[iter->level];
+ struct btree_node_iter node_iter = l->iter;
struct bkey_packed *k;
BKEY_PADDED(k) tmp;
- unsigned nr = iter->level ? 1 : 8;
- bool was_locked = btree_node_locked(iter, iter->level + 1);
+ unsigned nr = iter->level > 1 ? 1 : 8;
+ bool was_locked = btree_node_locked(iter, iter->level);
while (nr) {
- if (!bch2_btree_node_relock(iter, iter->level + 1))
+ if (!bch2_btree_node_relock(iter, iter->level))
return;
- bch2_btree_node_iter_advance(&node_iter, b);
- k = bch2_btree_node_iter_peek(&node_iter, b);
+ bch2_btree_node_iter_advance(&node_iter, l->b);
+ k = bch2_btree_node_iter_peek(&node_iter, l->b);
if (!k)
break;
- bch2_bkey_unpack(b, &tmp.k, k);
+ bch2_bkey_unpack(l->b, &tmp.k, k);
bch2_btree_node_prefetch(iter->c, &tmp.k,
- iter->level, iter->btree_id);
+ iter->level - 1,
+ iter->btree_id);
}
if (!was_locked)
- btree_node_unlock(iter, iter->level + 1);
+ btree_node_unlock(iter, iter->level);
}
static inline int btree_iter_down(struct btree_iter *iter)
{
+ struct btree_iter_level *l = &iter->l[iter->level];
struct btree *b;
- struct bkey_s_c k = __btree_iter_peek(iter);
unsigned level = iter->level - 1;
enum six_lock_type lock_type = btree_lock_want(iter, level);
BKEY_PADDED(k) tmp;
- bkey_reassemble(&tmp.k, k);
+ bch2_bkey_unpack(l->b, &tmp.k,
+ bch2_btree_node_iter_peek(&l->iter, l->b));
b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type);
if (unlikely(IS_ERR(b)))
return PTR_ERR(b);
- iter->level = level;
mark_btree_node_locked(iter, level, lock_type);
btree_iter_node_set(iter, b);
if (iter->flags & BTREE_ITER_PREFETCH)
btree_iter_prefetch(iter);
+ iter->level = level;
+
return 0;
}
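
Note the subtle reordering in btree_iter_down(): iter->level is now decremented only after btree_iter_prefetch() runs, so prefetch addresses the node being descended from via iter->level rather than iter->level + 1 (and nr = iter->level > 1 ? 1 : 8 compensates, still prefetching 8 children only when the child is a leaf). A condensed view of the new ordering (comments mine):

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);	/* iter->level is still the parent */

	iter->level = level;			/* ...and only now do we descend */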
@@ -832,7 +836,7 @@ io_error:
BUG_ON(ret != -EIO);
iter->flags |= BTREE_ITER_ERROR;
- iter->nodes[iter->level] = NULL;
+ iter->l[iter->level].b = NULL;
goto out;
}
@@ -849,22 +853,22 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
unsigned depth_want = iter->level;
- if (unlikely(!iter->nodes[iter->level]))
+ if (unlikely(!iter->l[iter->level].b))
return 0;
iter->flags &= ~(BTREE_ITER_UPTODATE|BTREE_ITER_AT_END_OF_LEAF);
/* make sure we have all the intent locks we need - ugh */
- if (unlikely(iter->nodes[iter->level] &&
+ if (unlikely(iter->l[iter->level].b &&
iter->level + 1 < iter->locks_want)) {
unsigned i;
for (i = iter->level + 1;
- i < iter->locks_want && iter->nodes[i];
+ i < iter->locks_want && iter->l[i].b;
i++)
if (!bch2_btree_node_relock(iter, i)) {
while (iter->level < BTREE_MAX_DEPTH &&
- iter->nodes[iter->level] &&
+ iter->l[iter->level].b &&
iter->level + 1 < iter->locks_want)
btree_iter_up(iter);
break;
@@ -875,12 +879,11 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
* If the current node isn't locked, go up until we have a locked node
* or run out of nodes:
*/
- while (iter->level < BTREE_MAX_DEPTH &&
- iter->nodes[iter->level] &&
+ while (btree_iter_node(iter, iter->level) &&
!(is_btree_node(iter, iter->level) &&
bch2_btree_node_relock(iter, iter->level) &&
btree_iter_pos_cmp(iter->pos,
- &iter->nodes[iter->level]->key.k,
+ &iter->l[iter->level].b->key.k,
iter->flags & BTREE_ITER_IS_EXTENTS)))
btree_iter_up(iter);
@@ -888,14 +891,14 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
* If we've got a btree node locked (i.e. we aren't about to relock the
* root) - advance its node iterator if necessary:
*/
- if (iter->level < BTREE_MAX_DEPTH &&
- iter->nodes[iter->level]) {
+ if (btree_iter_node(iter, iter->level)) {
+ struct btree_iter_level *l = &iter->l[iter->level];
struct bkey_s_c k;
- while ((k = __btree_iter_peek_all(iter)).k &&
+ while ((k = __btree_iter_peek_all(iter, l)).k &&
!btree_iter_pos_cmp(iter->pos, k.k,
iter->flags & BTREE_ITER_IS_EXTENTS))
- __btree_iter_advance(iter);
+ __btree_iter_advance(l);
}
/*
@@ -905,13 +908,12 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
* btree_iter_lock_root() comes next and that it can't fail
*/
while (iter->level > depth_want) {
- int ret = iter->level < BTREE_MAX_DEPTH &&
- iter->nodes[iter->level]
+ int ret = btree_iter_node(iter, iter->level)
? btree_iter_down(iter)
: btree_iter_lock_root(iter, depth_want);
if (unlikely(ret)) {
iter->level = depth_want;
- iter->nodes[iter->level] = BTREE_ITER_NOT_END;
+ iter->l[iter->level].b = BTREE_ITER_NOT_END;
return ret;
}
}
@@ -943,7 +945,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
if (ret)
return ERR_PTR(ret);
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].b;
if (b) {
EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
@@ -962,8 +964,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
btree_iter_up(iter);
- if (iter->level == BTREE_MAX_DEPTH ||
- !iter->nodes[iter->level])
+ if (!btree_iter_node(iter, iter->level))
return NULL;
/* parent node usually won't be locked: redo traversal if necessary */
@@ -971,7 +972,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].b;
if (!b)
return b;
@@ -988,7 +989,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].b;
}
iter->pos = b->key.k.p;
@@ -1000,22 +1001,21 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
struct bkey_packed *k;
EBUG_ON(iter->level != 0);
EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
EBUG_ON(!btree_node_locked(iter, 0));
- EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);
+ EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);
- while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
- !btree_iter_pos_cmp_packed(b, &new_pos, k,
+ while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
+ !btree_iter_pos_cmp_packed(l->b, &new_pos, k,
iter->flags & BTREE_ITER_IS_EXTENTS))
- bch2_btree_node_iter_advance(node_iter, b);
+ __btree_iter_advance(l);
if (!k &&
- !btree_iter_pos_cmp(new_pos, &b->key.k,
+ !btree_iter_pos_cmp(new_pos, &l->b->key.k,
iter->flags & BTREE_ITER_IS_EXTENTS))
iter->flags |= BTREE_ITER_AT_END_OF_LEAF;
@@ -1034,10 +1034,11 @@ void bch2_btree_iter_advance_pos(struct btree_iter *iter)
{
if (iter->flags & BTREE_ITER_UPTODATE &&
!(iter->flags & BTREE_ITER_WITH_HOLES)) {
+ struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
- __btree_iter_advance(iter);
- k = __btree_iter_peek(iter);
+ __btree_iter_advance(l);
+ k = __btree_iter_peek(iter, l);
if (likely(k.k)) {
iter->pos = bkey_start_pos(k.k);
return;
@@ -1056,16 +1057,19 @@ void bch2_btree_iter_advance_pos(struct btree_iter *iter)
/* XXX: expensive */
void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
+ struct btree_iter_level *l = &iter->l[iter->level];
+
/* incapable of rewinding across nodes: */
- BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
+ BUG_ON(bkey_cmp(pos, l->b->data->min_key) < 0);
iter->pos = pos;
iter->flags &= ~BTREE_ITER_UPTODATE;
- __btree_iter_init(iter, iter->nodes[iter->level]);
+ __btree_iter_init(iter, l->b);
}
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
+ struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
int ret;
@@ -1073,18 +1077,17 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
(iter->btree_id == BTREE_ID_EXTENTS));
if (iter->flags & BTREE_ITER_UPTODATE) {
- struct btree *b = iter->nodes[0];
struct bkey_packed *k =
- __bch2_btree_node_iter_peek_all(&iter->node_iters[0], b);
+ __bch2_btree_node_iter_peek_all(&l->iter, l->b);
struct bkey_s_c ret = {
.k = &iter->k,
- .v = bkeyp_val(&b->format, k)
+ .v = bkeyp_val(&l->b->format, k)
};
EBUG_ON(!btree_node_locked(iter, 0));
if (debug_check_bkeys(iter->c))
- bch2_bkey_debugcheck(iter->c, b, ret);
+ bch2_bkey_debugcheck(iter->c, l->b, ret);
return ret;
}
@@ -1095,7 +1098,7 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
return bkey_s_c_err(ret);
}
- k = __btree_iter_peek(iter);
+ k = __btree_iter_peek(iter, l);
if (likely(k.k)) {
/*
* iter->pos should always be equal to the key we just
@@ -1109,7 +1112,7 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
return k;
}
- iter->pos = iter->nodes[0]->key.k.p;
+ iter->pos = l->b->key.k.p;
if (!bkey_cmp(iter->pos, POS_MAX)) {
iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
@@ -1123,6 +1126,7 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
{
+ struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
struct bkey n;
int ret;
@@ -1139,7 +1143,7 @@ struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
return bkey_s_c_err(ret);
}
- k = __btree_iter_peek_all(iter);
+ k = __btree_iter_peek_all(iter, l);
recheck:
if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
/* hole */
@@ -1153,7 +1157,7 @@ recheck:
}
if (!k.k)
- k.k = &iter->nodes[0]->key.k;
+ k.k = &l->b->key.k;
bch2_key_resize(&n,
min_t(u64, KEY_SIZE_MAX,
@@ -1170,7 +1174,7 @@ recheck:
} else if (!bkey_deleted(k.k)) {
return k;
} else {
- __btree_iter_advance(iter);
+ __btree_iter_advance(l);
}
}
}
@@ -1180,6 +1184,8 @@ void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
unsigned locks_want, unsigned depth,
unsigned flags)
{
+ unsigned i;
+
EBUG_ON(depth >= BTREE_MAX_DEPTH);
EBUG_ON(locks_want > BTREE_MAX_DEPTH);
@@ -1191,8 +1197,9 @@ void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
iter->locks_want = locks_want;
iter->nodes_locked = 0;
iter->nodes_intent_locked = 0;
- memset(iter->nodes, 0, sizeof(iter->nodes));
- iter->nodes[iter->level] = BTREE_ITER_NOT_END;
+ for (i = 0; i < ARRAY_SIZE(iter->l); i++)
+ iter->l[i].b = NULL;
+ iter->l[iter->level].b = BTREE_ITER_NOT_END;
iter->next = iter;
prefetch(c->btree_roots[btree_id].b);
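
The recurring convention in this file: resolve the level once, pass the struct btree_iter_level * down, and recover the numeric level by pointer subtraction where an assertion needs it. A distilled sketch (names from the patch; the surrounding function is assumed):

	struct btree_iter_level *l = &iter->l[iter->level];
	unsigned level = l - iter->l;		/* == iter->level */

	/* per-level helpers no longer re-index iter->l on every call: */
	EBUG_ON(!btree_node_locked(iter, level));
	k = __btree_iter_peek(iter, l);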
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 75105dd3d1f7..8b0a4f4cb82f 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -39,22 +39,12 @@ struct btree_iter {
nodes_locked:4,
nodes_intent_locked:4;
- u32 lock_seq[BTREE_MAX_DEPTH];
+ struct btree_iter_level {
+ struct btree *b;
+ struct btree_node_iter iter;
+ } l[BTREE_MAX_DEPTH];
- /*
- * NOTE: Never set iter->nodes to NULL except in btree_iter_lock_root().
- *
- * This is because iter->nodes[iter->level] == NULL is how
- * btree_iter_next_node() knows that it's finished with a depth first
- * traversal. Just unlocking a node (with btree_node_unlock()) is fine,
- * and if you really don't want that node used again (e.g. btree_split()
- * freed it) decrementing lock_seq will cause bch2_btree_node_relock() to
- * always fail (but since freeing a btree node takes a write lock on the
- * node, which increments the node's lock seq, that's not actually
- * necessary in that example).
- */
- struct btree *nodes[BTREE_MAX_DEPTH];
- struct btree_node_iter node_iters[BTREE_MAX_DEPTH];
+ u32 lock_seq[BTREE_MAX_DEPTH];
/*
* Current unpacked key - so that bch2_btree_iter_next()/
@@ -73,12 +63,16 @@ struct btree_iter {
struct btree_iter *next;
};
+static inline struct btree *btree_iter_node(struct btree_iter *iter,
+ unsigned level)
+{
+ return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
+}
+
static inline struct btree *btree_node_parent(struct btree_iter *iter,
struct btree *b)
{
- unsigned level = b->level + 1;
-
- return level < BTREE_MAX_DEPTH ? iter->nodes[level] : NULL;
+ return btree_iter_node(iter, b->level + 1);
}
static inline bool btree_iter_linked(const struct btree_iter *iter)
@@ -112,7 +106,7 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b,
* sequence number is incremented by taking and releasing write
* locks and is even when unlocked:
*/
- } while (linked->nodes[b->level] != b ||
+ } while (linked->l[b->level].b != b ||
linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);
return linked;
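
btree_iter_node() folds in the depth bound that callers previously open-coded; a condensed before/after of a typical call site (the parent lookup, as in btree_cache.c above):

/* Before: the bound had to be checked by hand at each call site: */
parent = b->level + 1 < BTREE_MAX_DEPTH
	? iter->nodes[b->level + 1]
	: NULL;

/* After: out-of-range levels simply yield NULL: */
parent = btree_iter_node(iter, b->level + 1);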
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index ca2992ba385a..f526f9083f7f 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -95,7 +95,7 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (lock_type != BTREE_NODE_UNLOCKED)
- six_unlock_type(&iter->nodes[level]->lock, lock_type);
+ six_unlock_type(&iter->l[level].b->lock, lock_type);
mark_btree_node_unlocked(iter, level);
}
@@ -113,10 +113,21 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos,
__bch2_btree_node_lock(b, pos, level, iter, type);
}
-bool bch2_btree_node_relock(struct btree_iter *, unsigned);
+bool __bch2_btree_node_relock(struct btree_iter *, unsigned);
+
+static inline bool bch2_btree_node_relock(struct btree_iter *iter,
+ unsigned level)
+{
+ return likely(btree_lock_want(iter, level) ==
+ btree_node_locked_type(iter, level)) ||
+ __bch2_btree_node_relock(iter, level);
+}
+
bool bch2_btree_iter_relock(struct btree_iter *);
void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
void bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
#endif /* _BCACHEFS_BTREE_LOCKING_H */
+
+
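
bch2_btree_node_relock() is now split into a cheap inline fast path and the renamed out-of-line slow path; annotated, the shape of the idiom is:

static inline bool bch2_btree_node_relock(struct btree_iter *iter,
					  unsigned level)
{
	return likely(btree_lock_want(iter, level) ==		/* two loads and a   */
		      btree_node_locked_type(iter, level)) ||	/* compare, no call  */
	       __bch2_btree_node_relock(iter, level);		/* slow path, called */
}

The likely() hint plus the short-circuit || keep the common already-locked case free of a function call, while the real relock logic stays out of line so the inline wrapper is small enough to expand at every call site.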
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 028ce252a43f..cf91da51299a 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1480,7 +1480,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
}
/* Don't screw up @iter's position: */
- node_iter = iter->node_iters[b->level];
+ node_iter = iter->l[b->level].iter;
/*
* btree_split(), btree_gc_coalesce() will insert keys before
@@ -1501,9 +1501,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
btree_update_updated_node(as, b);
for_each_linked_btree_node(iter, b, linked)
- bch2_btree_node_iter_peek(&linked->node_iters[b->level],
- b);
- bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
+ bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
+ bch2_btree_node_iter_peek(&iter->l[b->level].iter, b);
bch2_btree_iter_verify(iter, b);
@@ -1541,7 +1540,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
unsigned btree_reserve_flags)
{
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].b;
struct btree_update *as;
struct closure cl;
int ret = 0;
@@ -1597,9 +1596,10 @@ out:
return ret;
}
-int bch2_foreground_maybe_merge(struct bch_fs *c,
- struct btree_iter *iter,
- enum btree_node_sibling sib)
+int __bch2_foreground_maybe_merge(struct bch_fs *c,
+ struct btree_iter *iter,
+ unsigned level,
+ enum btree_node_sibling sib)
{
struct btree_update *as;
struct bkey_format_state new_s;
@@ -1612,10 +1612,10 @@ int bch2_foreground_maybe_merge(struct bch_fs *c,
closure_init_stack(&cl);
retry:
- if (!bch2_btree_node_relock(iter, iter->level))
+ if (!bch2_btree_node_relock(iter, level))
return 0;
- b = iter->nodes[iter->level];
+ b = iter->l[level].b;
parent = btree_node_parent(iter, b);
if (!parent)
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 31a8f7f9cea6..0b58ccc904a4 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -2,6 +2,7 @@
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#include "btree_cache.h"
+#include "btree_locking.h"
#include "btree_update.h"
struct btree_reserve {
@@ -147,8 +148,34 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *,
void bch2_btree_insert_node(struct btree_update *, struct btree *,
struct btree_iter *, struct keylist *);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
-int bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
- enum btree_node_sibling);
+
+int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
+ unsigned, enum btree_node_sibling);
+
+static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
+ struct btree_iter *iter,
+ unsigned level,
+ enum btree_node_sibling sib)
+{
+ struct btree *b;
+
+ if (!bch2_btree_node_relock(iter, level))
+ return 0;
+
+ b = iter->l[level].b;
+ if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
+ return 0;
+
+ return __bch2_foreground_maybe_merge(c, iter, level, sib);
+}
+
+static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
+ struct btree_iter *iter,
+ unsigned level)
+{
+ bch2_foreground_maybe_merge_sibling(c, iter, level, btree_prev_sib);
+ bch2_foreground_maybe_merge_sibling(c, iter, level, btree_next_sib);
+}
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
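
The merge entry points follow the same pattern: the inline bch2_foreground_maybe_merge_sibling() does the cheap relock and sib_u64s threshold checks, and only __bch2_foreground_maybe_merge() is out of line. A hedged usage sketch mirroring the btree_update_leaf.c call site below (the surrounding transaction loop is assumed):

	/* After a commit shrinks a leaf, opportunistically merge it with
	 * both neighbours; 0 is the leaf level, and the inline wrapper
	 * returns early while the node is still above the threshold: */
	bch2_foreground_maybe_merge(c, i->iter, 0);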
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index c9073c2e47ba..40e5b57f0711 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -130,7 +130,7 @@ void bch2_btree_journal_key(struct btree_insert *trans,
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].b;
struct btree_write *w = btree_current_write(b);
EBUG_ON(iter->level || b->level);
@@ -171,13 +171,13 @@ bch2_insert_fixup_key(struct btree_insert *trans,
struct btree_insert_entry *insert)
{
struct btree_iter *iter = insert->iter;
+ struct btree_iter_level *l = &iter->l[0];
EBUG_ON(iter->level);
EBUG_ON(insert->k->k.u64s >
- bch_btree_keys_u64s_remaining(trans->c, iter->nodes[0]));
+ bch_btree_keys_u64s_remaining(trans->c, l->b));
- if (bch2_btree_bset_insert_key(iter, iter->nodes[0],
- &iter->node_iters[0],
+ if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
insert->k))
bch2_btree_journal_key(trans, iter, insert->k);
@@ -185,32 +185,16 @@ bch2_insert_fixup_key(struct btree_insert *trans,
return BTREE_INSERT_OK;
}
-static int inline foreground_maybe_merge(struct bch_fs *c,
- struct btree_iter *iter,
- enum btree_node_sibling sib)
-{
- struct btree *b;
-
- if (!btree_node_locked(iter, iter->level))
- return 0;
-
- b = iter->nodes[iter->level];
- if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
- return 0;
-
- return bch2_foreground_maybe_merge(c, iter, sib);
-}
-
/**
* btree_insert_key - insert one key into a leaf node
*/
static enum btree_insert_ret
-btree_insert_key(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+btree_insert_key_leaf(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].b;
enum btree_insert_ret ret;
int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
int old_live_u64s = b->nr.live_u64s;
@@ -246,7 +230,7 @@ static bool same_leaf_as_prev(struct btree_insert *trans,
* point to the same leaf node they'll always be adjacent now:
*/
return i != trans->entries &&
- i[0].iter->nodes[0] == i[-1].iter->nodes[0];
+ i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
#define trans_for_each_entry(trans, i) \
@@ -275,7 +259,8 @@ static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- bch2_btree_node_lock_for_insert(c, i->iter->nodes[0], i->iter);
+ bch2_btree_node_lock_for_insert(c, i->iter->l[0].b,
+ i->iter);
}
static void multi_unlock_write(struct btree_insert *trans)
@@ -284,7 +269,7 @@ static void multi_unlock_write(struct btree_insert *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
+ bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
}
static inline void btree_trans_sort(struct btree_insert *trans)
@@ -376,7 +361,7 @@ retry:
if (!i->done) {
u64s += i->k->k.u64s + i->extra_res;
if (!bch2_btree_node_insert_fits(c,
- i->iter->nodes[0], u64s)) {
+ i->iter->l[0].b, u64s)) {
split = i->iter;
goto unlock;
}
@@ -391,7 +376,7 @@ retry:
if (i->done)
continue;
- switch (btree_insert_key(trans, i)) {
+ switch (btree_insert_key_leaf(trans, i)) {
case BTREE_INSERT_OK:
i->done = true;
break;
@@ -437,10 +422,8 @@ unlock:
goto out;
trans_for_each_entry(trans, i)
- if (!same_leaf_as_prev(trans, i)) {
- foreground_maybe_merge(c, i->iter, btree_prev_sib);
- foreground_maybe_merge(c, i->iter, btree_next_sib);
- }
+ if (!same_leaf_as_prev(trans, i))
+ bch2_foreground_maybe_merge(c, i->iter, 0);
out:
/* make sure we didn't lose an error: */
if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 4f4e861f2dec..b765914fe4e1 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -318,20 +318,21 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(err = btree_iter_err(k))) {
- struct btree *b = iter.nodes[0];
- struct btree_node_iter *node_iter = &iter.node_iters[0];
- struct bkey_packed *_k = bch2_btree_node_iter_peek(node_iter, b);
+ struct btree_iter_level *l = &iter.l[0];
+ struct bkey_packed *_k =
+ bch2_btree_node_iter_peek(&l->iter, l->b);
- if (iter.nodes[0] != prev_node) {
- i->bytes = bch2_print_btree_node(i->c, b, i->buf,
+ if (l->b != prev_node) {
+ i->bytes = bch2_print_btree_node(i->c, l->b, i->buf,
sizeof(i->buf));
err = flush_buf(i);
if (err)
break;
}
- prev_node = iter.nodes[0];
+ prev_node = l->b;
- i->bytes = bch2_bkey_print_bfloat(b, _k, i->buf, sizeof(i->buf));
+ i->bytes = bch2_bkey_print_bfloat(l->b, _k, i->buf,
+ sizeof(i->buf));
err = flush_buf(i);
if (err)
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 7e7c38efde44..ab01c65f2b0a 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -1022,10 +1022,10 @@ struct extent_insert_state {
};
static void bch2_add_sectors(struct extent_insert_state *s,
- struct bkey_s_c k, u64 offset, s64 sectors)
+ struct bkey_s_c k, u64 offset, s64 sectors)
{
struct bch_fs *c = s->trans->c;
- struct btree *b = s->insert->iter->nodes[0];
+ struct btree *b = s->insert->iter->l[0].b;
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
@@ -1079,7 +1079,7 @@ static bool bch2_extent_merge_inline(struct bch_fs *,
static enum btree_insert_ret
extent_insert_should_stop(struct extent_insert_state *s)
{
- struct btree *b = s->insert->iter->nodes[0];
+ struct btree *b = s->insert->iter->l[0].b;
/*
* Check if we have sufficient space in both the btree node and the
@@ -1101,19 +1101,18 @@ extent_insert_should_stop(struct extent_insert_state *s)
static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
struct bkey_i *insert)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
- struct bset_tree *t = bset_tree_last(b);
+ struct btree_iter_level *l = &iter->l[0];
+ struct bset_tree *t = bset_tree_last(l->b);
struct bkey_packed *where =
- bch2_btree_node_iter_bset_pos(node_iter, b, t);
- struct bkey_packed *prev = bch2_bkey_prev(b, t, where);
+ bch2_btree_node_iter_bset_pos(&l->iter, l->b, t);
+ struct bkey_packed *prev = bch2_bkey_prev(l->b, t, where);
struct bkey_packed *next_live_key = where;
unsigned clobber_u64s;
if (prev)
where = bkey_next(prev);
- while (next_live_key != btree_bkey_last(b, t) &&
+ while (next_live_key != btree_bkey_last(l->b, t) &&
bkey_deleted(next_live_key))
next_live_key = bkey_next(next_live_key);
@@ -1127,18 +1126,19 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
goto drop_deleted_keys;
- if (next_live_key != btree_bkey_last(b, t) &&
+ if (next_live_key != btree_bkey_last(l->b, t) &&
bch2_extent_merge_inline(c, iter, bkey_to_packed(insert),
next_live_key, false))
goto drop_deleted_keys;
- bch2_bset_insert(b, node_iter, where, insert, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter, t, where,
+ bch2_bset_insert(l->b, &l->iter, where, insert, clobber_u64s);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter, t, where,
clobber_u64s, where->u64s);
return;
drop_deleted_keys:
- bch2_bset_delete(b, where, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
+ bch2_bset_delete(l->b, where, clobber_u64s);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter, t,
+ where, clobber_u64s, 0);
}
static void extent_insert_committed(struct extent_insert_state *s)
@@ -1181,8 +1181,7 @@ static void extent_insert_committed(struct extent_insert_state *s)
}
if (debug_check_bkeys(c))
- bch2_bkey_debugcheck(c, iter->nodes[iter->level],
- bkey_i_to_s_c(&split.k));
+ bch2_bkey_debugcheck(c, iter->l[0].b, bkey_i_to_s_c(&split.k));
bch2_btree_journal_key(s->trans, iter, &split.k);
@@ -1224,7 +1223,7 @@ __extent_insert_advance_pos(struct extent_insert_state *s,
static enum btree_insert_ret
extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
{
- struct btree *b = s->insert->iter->nodes[0];
+ struct btree *b = s->insert->iter->l[0].b;
struct bpos next_pos = bpos_min(s->insert->k->k.p,
k.k ? k.k->p : b->key.k.p);
enum btree_insert_ret ret;
@@ -1287,8 +1286,9 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
{
struct bch_fs *c = s->trans->c;
struct btree_iter *iter = s->insert->iter;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree *b = l->b;
+ struct btree_node_iter *node_iter = &l->iter;
enum btree_insert_ret ret;
switch (overlap) {
@@ -1397,8 +1397,9 @@ bch2_delete_fixup_extent(struct extent_insert_state *s)
{
struct bch_fs *c = s->trans->c;
struct btree_iter *iter = s->insert->iter;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree *b = l->b;
+ struct btree_node_iter *node_iter = &l->iter;
struct bkey_packed *_k;
struct bkey unpacked;
struct bkey_i *insert = s->insert->k;
@@ -1544,8 +1545,9 @@ bch2_insert_fixup_extent(struct btree_insert *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree *b = l->b;
+ struct btree_node_iter *node_iter = &l->iter;
struct bkey_packed *_k;
struct bkey unpacked;
enum btree_insert_ret ret = BTREE_INSERT_OK;
@@ -2176,8 +2178,7 @@ static bool extent_merge_one_overlapping(struct btree_iter *iter,
struct bkey_packed *k, struct bkey uk,
bool check, bool could_pack)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
BUG_ON(!bkey_deleted(k));
@@ -2185,10 +2186,10 @@ static bool extent_merge_one_overlapping(struct btree_iter *iter,
return !bkey_packed(k) || could_pack;
} else {
uk.p = new_pos;
- extent_save(b, node_iter, k, &uk);
- bch2_bset_fix_invalidated_key(b, t, k);
- bch2_btree_node_iter_fix(iter, b, node_iter, t,
- k, k->u64s, k->u64s);
+ extent_save(l->b, &l->iter, k, &uk);
+ bch2_bset_fix_invalidated_key(l->b, t, k);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter, t,
+ k, k->u64s, k->u64s);
return true;
}
}
@@ -2196,8 +2197,9 @@ static bool extent_merge_one_overlapping(struct btree_iter *iter,
static bool extent_merge_do_overlapping(struct btree_iter *iter,
struct bkey *m, bool back_merge)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree *b = l->b;
+ struct btree_node_iter *node_iter = &l->iter;
struct bset_tree *t;
struct bkey_packed *k;
struct bkey uk;
@@ -2288,8 +2290,8 @@ static bool bch2_extent_merge_inline(struct bch_fs *c,
struct bkey_packed *r,
bool back_merge)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree *b = iter->l[0].b;
+ struct btree_node_iter *node_iter = &iter->l[0].iter;
const struct bkey_format *f = &b->format;
struct bset_tree *t = bset_tree_last(b);
struct bkey_packed *m;
@@ -2332,8 +2334,8 @@ static bool bch2_extent_merge_inline(struct bch_fs *c,
if (back_merge)
bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
- bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
- t, m, m->u64s, m->u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter,
+ t, m, m->u64s, m->u64s);
if (!back_merge)
bkey_copy(packed_to_bkey(l), &li.k);
@@ -2350,8 +2352,8 @@ static bool bch2_extent_merge_inline(struct bch_fs *c,
extent_i_save(b, m, &li.k);
bch2_bset_fix_invalidated_key(b, t, m);
- bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
- t, m, m->u64s, m->u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter,
+ t, m, m->u64s, m->u64s);
return true;
default:
BUG();