summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2018-02-03 23:05:23 -0500
committerKent Overstreet <kent.overstreet@gmail.com>2018-02-05 00:30:26 -0500
commit27897c54be64fbeb57b67754496bc2ad1e4da2c0 (patch)
tree4e3c81d7fc0b126a944235e2e74d71fdcfe0f435
parenta9886f394062c85a2c63477d0619397f14701ae4 (diff)
bcachefs: optimize bch2_btree_iter_traverse() calls
Reduce unnecessary calls, and also call it from bch2_btree_insert_at() when needed.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--fs/bcachefs/alloc.c8
-rw-r--r--fs/bcachefs/btree_cache.c10
-rw-r--r--fs/bcachefs/btree_iter.c160
-rw-r--r--fs/bcachefs/btree_iter.h31
-rw-r--r--fs/bcachefs/btree_locking.h1
-rw-r--r--fs/bcachefs/btree_update_leaf.c43
-rw-r--r--fs/bcachefs/fs-io.c8
7 files changed, 147 insertions, 114 deletions
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index 5d679ed3b168..f6592de9b931 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -343,7 +343,7 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
do {
- ret = bch2_btree_iter_traverse(iter);
+ ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
if (ret)
break;
@@ -393,7 +393,7 @@ int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
return 0;
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL);
bch2_btree_iter_unlock(&iter);
@@ -407,7 +407,7 @@ static int bch2_alloc_write(struct bch_fs *c, struct bch_dev *ca)
int ret = 0;
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
down_read(&ca->bucket_lock);
for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
@@ -826,7 +826,7 @@ static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
int ret = 0;
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
/*
* XXX: if ca->nr_invalidated != 0, just return if we'd block doing the
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index a9202ed585d9..0bde449ec745 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -787,9 +787,13 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
ret = bch2_btree_node_get(c, iter, &tmp.k, level, SIX_LOCK_intent);
}
- if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
- six_unlock_intent(&ret->lock);
- ret = ERR_PTR(-EINTR);
+ if (!bch2_btree_node_relock(iter, level)) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
+
+ if (!IS_ERR(ret)) {
+ six_unlock_intent(&ret->lock);
+ ret = ERR_PTR(-EINTR);
+ }
}
return ret;
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 3bb989f38023..9c4ba9542a61 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -10,6 +10,10 @@
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
+static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *,
+ struct btree_iter_level *,
+ struct bkey *);
+
#define BTREE_ITER_NOT_END ((struct btree *) 1)
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
@@ -112,10 +116,16 @@ bool bch2_btree_iter_relock(struct btree_iter *iter)
{
unsigned l;
- for (l = iter->level; l < iter->locks_want && iter->l[l].b; l++)
- if (!bch2_btree_node_relock(iter, l))
+ for (l = iter->level;
+ l < max_t(unsigned, iter->locks_want, 1) && iter->l[l].b;
+ l++)
+ if (!bch2_btree_node_relock(iter, l)) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
return false;
+ }
+ if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
+ iter->uptodate = BTREE_ITER_NEED_PEEK;
return true;
}
@@ -255,7 +265,7 @@ bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
static void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
- iter->flags &= ~BTREE_ITER_UPTODATE;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
while (iter->nodes_locked)
btree_node_unlock(iter, __ffs(iter->nodes_locked));
@@ -348,8 +358,15 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
/* didn't find the bset in the iterator - might have to readd it: */
if (new_u64s &&
btree_iter_pos_cmp_packed(b, &iter->pos, where,
- iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter->flags & BTREE_ITER_IS_EXTENTS)) {
bch2_btree_node_iter_push(node_iter, b, where, end);
+
+ if (!b->level &&
+ node_iter == &iter->l[0].iter)
+ bkey_disassemble(b,
+ bch2_btree_node_iter_peek_all(node_iter, b),
+ &iter->k);
+ }
return;
found:
set->end = (int) set->end + shift;
@@ -362,16 +379,20 @@ found:
btree_iter_pos_cmp_packed(b, &iter->pos, where,
iter->flags & BTREE_ITER_IS_EXTENTS)) {
set->k = offset;
- bch2_btree_node_iter_sort(node_iter, b);
} else if (set->k < offset + clobber_u64s) {
set->k = offset + new_u64s;
if (set->k == set->end)
*set = node_iter->data[--node_iter->used];
- bch2_btree_node_iter_sort(node_iter, b);
} else {
set->k = (int) set->k + shift;
+ goto iter_current_key_not_modified;
}
+ bch2_btree_node_iter_sort(node_iter, b);
+ if (!b->level && node_iter == &iter->l[0].iter)
+ __btree_iter_peek_all(iter, &iter->l[0], &iter->k);
+iter_current_key_not_modified:
+
/*
* Interior nodes are special because iterators for interior nodes don't
* obey the usual invariants regarding the iterator position:
@@ -467,10 +488,14 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
bch2_btree_node_iter_peek_all(&l->iter, l->b);
struct bkey_s_c ret;
- EBUG_ON(!btree_node_locked(iter, l - iter->l));
-
- if (!k)
+ if (!k) {
+ /*
+ * signal to bch2_btree_iter_peek_slot() that we're currently at
+ * a hole
+ */
+ iter->k.type = KEY_TYPE_DELETED;
return bkey_s_c_null;
+ }
ret = bkey_disassemble(l->b, k, u);
@@ -486,10 +511,10 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
struct bkey_s_c ret;
- EBUG_ON(!btree_node_locked(iter, l - iter->l));
-
- if (!k)
+ if (!k) {
+ iter->k.type = KEY_TYPE_DELETED;
return bkey_s_c_null;
+ }
ret = bkey_disassemble(l->b, k, &iter->k);
@@ -638,7 +663,7 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
unsigned level = b->level;
if (iter->l[level].b == b) {
- iter->flags &= ~BTREE_ITER_UPTODATE;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
btree_node_unlock(iter, level);
iter->l[level].b = BTREE_ITER_NOT_END;
}
@@ -859,7 +884,7 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
if (unlikely(!iter->l[iter->level].b))
return 0;
- iter->flags &= ~(BTREE_ITER_UPTODATE|BTREE_ITER_AT_END_OF_LEAF);
+ iter->flags &= ~BTREE_ITER_AT_END_OF_LEAF;
/* make sure we have all the intent locks we need - ugh */
if (unlikely(iter->l[iter->level].b &&
@@ -929,6 +954,7 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
}
}
+ iter->uptodate = BTREE_ITER_NEED_PEEK;
return 0;
}
@@ -936,6 +962,9 @@ int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
{
int ret;
+ if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
+ return 0;
+
ret = __bch2_btree_iter_traverse(iter);
if (unlikely(ret))
ret = btree_iter_traverse_error(iter, ret);
@@ -979,6 +1008,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
return NULL;
/* parent node usually won't be locked: redo traversal if necessary */
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
@@ -996,6 +1026,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
: bkey_successor(iter->pos);
iter->level = depth;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
@@ -1025,33 +1056,24 @@ void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_
iter->flags & BTREE_ITER_IS_EXTENTS))
__btree_iter_advance(l);
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+
if (!k &&
!btree_iter_pos_cmp(new_pos, &l->b->key.k,
- iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter->flags & BTREE_ITER_IS_EXTENTS)) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
iter->flags |= BTREE_ITER_AT_END_OF_LEAF;
+ }
iter->pos = new_pos;
- iter->flags &= ~BTREE_ITER_UPTODATE;
}
void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
iter->pos = new_pos;
- iter->flags &= ~BTREE_ITER_UPTODATE;
-}
-
-/* XXX: expensive */
-void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
-{
- struct btree_iter_level *l = &iter->l[iter->level];
-
- /* incapable of rewinding across nodes: */
- BUG_ON(bkey_cmp(pos, l->b->data->min_key) < 0);
- iter->pos = pos;
- iter->flags &= ~BTREE_ITER_UPTODATE;
- __btree_iter_init(iter, l->b);
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
@@ -1064,7 +1086,7 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
(iter->btree_id == BTREE_ID_EXTENTS));
EBUG_ON(iter->flags & BTREE_ITER_SLOTS);
- if (iter->flags & BTREE_ITER_UPTODATE) {
+ if (iter->uptodate == BTREE_ITER_UPTODATE) {
struct bkey_packed *k =
__bch2_btree_node_iter_peek_all(&l->iter, l->b);
struct bkey_s_c ret = {
@@ -1079,36 +1101,39 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
return ret;
}
+ if (iter->uptodate == BTREE_ITER_END)
+ return bkey_s_c_null;
+
while (1) {
ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ if (unlikely(ret))
return bkey_s_c_err(ret);
- }
k = __btree_iter_peek(iter, l);
- if (likely(k.k)) {
- /*
- * iter->pos should always be equal to the key we just
- * returned - except extents can straddle iter->pos:
- */
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
- bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- iter->pos = bkey_start_pos(k.k);
-
- iter->flags |= BTREE_ITER_UPTODATE;
- return k;
- }
+ if (likely(k.k))
+ break;
+ /* got to the end of the leaf, iterator needs to be traversed: */
iter->pos = l->b->key.k.p;
-
if (!bkey_cmp(iter->pos, POS_MAX)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ iter->uptodate = BTREE_ITER_END;
return bkey_s_c_null;
}
iter->pos = btree_type_successor(iter->btree_id, iter->pos);
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
}
+
+ /*
+ * iter->pos should always be equal to the key we just
+ * returned - except extents can straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+ iter->pos = bkey_start_pos(k.k);
+
+ iter->uptodate = BTREE_ITER_UPTODATE;
+ return k;
}
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
@@ -1120,33 +1145,30 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
(iter->btree_id == BTREE_ID_EXTENTS));
EBUG_ON(iter->flags & BTREE_ITER_SLOTS);
- if (unlikely(!(iter->flags & BTREE_ITER_UPTODATE))) {
+ if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
k = bch2_btree_iter_peek(iter);
if (IS_ERR_OR_NULL(k.k))
return k;
}
- EBUG_ON(!(iter->flags & BTREE_ITER_UPTODATE));
- EBUG_ON(bch2_btree_node_iter_end(&l->iter));
-
- bch2_btree_node_iter_advance(&l->iter, l->b);
+ __btree_iter_advance(l);
k = __btree_iter_peek(iter, l);
if (likely(k.k)) {
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) < 0);
-
iter->pos = bkey_start_pos(k.k);
return k;
}
- iter->flags &= ~BTREE_ITER_UPTODATE;
+ /* got to the end of the leaf, iterator needs to be traversed: */
iter->pos = l->b->key.k.p;
-
if (!bkey_cmp(iter->pos, POS_MAX)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ iter->uptodate = BTREE_ITER_END;
return bkey_s_c_null;
}
iter->pos = btree_type_successor(iter->btree_id, iter->pos);
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+
return bch2_btree_iter_peek(iter);
}
@@ -1167,7 +1189,7 @@ recheck:
if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
EBUG_ON(bkey_cmp(k.k->p, iter->pos) < 0);
EBUG_ON(bkey_deleted(k.k));
- iter->flags |= BTREE_ITER_UPTODATE;
+ iter->uptodate = BTREE_ITER_UPTODATE;
return k;
}
@@ -1178,11 +1200,10 @@ recheck:
if (unlikely(!k.k &&
!btree_iter_pos_cmp(iter->pos, &l->b->key.k,
iter->flags & BTREE_ITER_IS_EXTENTS))) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ if (unlikely(ret))
return bkey_s_c_err(ret);
- }
goto recheck;
}
@@ -1193,8 +1214,10 @@ recheck:
if (iter->flags & BTREE_ITER_IS_EXTENTS) {
if (n.p.offset == KEY_OFFSET_MAX) {
- if (n.p.inode == KEY_INODE_MAX)
+ if (n.p.inode == KEY_INODE_MAX) {
+ iter->uptodate = BTREE_ITER_END;
return bkey_s_c_null;
+ }
iter->pos = bkey_successor(iter->pos);
goto recheck;
@@ -1214,7 +1237,7 @@ recheck:
}
iter->k = n;
- iter->flags |= BTREE_ITER_UPTODATE;
+ iter->uptodate = BTREE_ITER_UPTODATE;
return (struct bkey_s_c) { &iter->k, NULL };
}
@@ -1227,7 +1250,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
(iter->btree_id == BTREE_ID_EXTENTS));
EBUG_ON(!(iter->flags & BTREE_ITER_SLOTS));
- if (iter->flags & BTREE_ITER_UPTODATE) {
+ if (iter->uptodate == BTREE_ITER_UPTODATE) {
struct bkey_s_c ret = { .k = &iter->k };;
if (!bkey_deleted(&iter->k))
@@ -1241,18 +1264,19 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
return ret;
}
+ if (iter->uptodate == BTREE_ITER_END)
+ return bkey_s_c_null;
+
ret = bch2_btree_iter_traverse(iter);
- if (unlikely(ret)) {
- iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ if (unlikely(ret))
return bkey_s_c_err(ret);
- }
return __bch2_btree_iter_peek_slot(iter);
}
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
- if (unlikely(!(iter->flags & BTREE_ITER_UPTODATE))) {
+ if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
struct bkey_s_c k;
k = bch2_btree_iter_peek_slot(iter);
@@ -1281,6 +1305,7 @@ void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
iter->c = c;
iter->pos = pos;
iter->flags = flags;
+ iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
iter->btree_id = btree_id;
iter->level = depth;
iter->locks_want = locks_want;
@@ -1336,4 +1361,5 @@ void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
__bch2_btree_iter_unlock(dst);
memcpy(dst, src, offsetof(struct btree_iter, next));
dst->nodes_locked = dst->nodes_intent_locked = 0;
+ dst->uptodate = BTREE_ITER_NEED_RELOCK;
}
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 96bd04e2114b..318b04242d52 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -6,20 +6,27 @@
#include "btree_types.h"
#include "bset.h"
-#define BTREE_ITER_UPTODATE (1 << 0)
-#define BTREE_ITER_SLOTS (1 << 1)
-#define BTREE_ITER_INTENT (1 << 2)
-#define BTREE_ITER_PREFETCH (1 << 3)
+#define BTREE_ITER_SLOTS (1 << 0)
+#define BTREE_ITER_INTENT (1 << 1)
+#define BTREE_ITER_PREFETCH (1 << 2)
/*
* Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
* @pos or the first key strictly greater than @pos
*/
-#define BTREE_ITER_IS_EXTENTS (1 << 4)
+#define BTREE_ITER_IS_EXTENTS (1 << 3)
/*
* indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
*/
-#define BTREE_ITER_AT_END_OF_LEAF (1 << 5)
-#define BTREE_ITER_ERROR (1 << 6)
+#define BTREE_ITER_AT_END_OF_LEAF (1 << 4)
+#define BTREE_ITER_ERROR (1 << 5)
+
+enum btree_iter_uptodate {
+ BTREE_ITER_UPTODATE = 0,
+ BTREE_ITER_NEED_PEEK = 1,
+ BTREE_ITER_NEED_RELOCK = 2,
+ BTREE_ITER_NEED_TRAVERSE = 3,
+ BTREE_ITER_END = 4,
+};
/*
* @pos - iterator's current position
@@ -33,7 +40,8 @@ struct btree_iter {
struct bpos pos;
u8 flags;
- enum btree_id btree_id:8;
+ unsigned uptodate:4;
+ enum btree_id btree_id:4;
unsigned level:4,
locks_want:4,
nodes_locked:4,
@@ -63,6 +71,12 @@ struct btree_iter {
struct btree_iter *next;
};
+static inline void btree_iter_set_dirty(struct btree_iter *iter,
+ enum btree_iter_uptodate u)
+{
+ iter->uptodate = max_t(unsigned, iter->uptodate, u);
+}
+
static inline struct btree *btree_iter_node(struct btree_iter *iter,
unsigned level)
{
@@ -170,7 +184,6 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
-void bch2_btree_iter_rewind(struct btree_iter *, struct bpos);
void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
enum btree_id, struct bpos,
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 4d8ad8ee8273..0581f44a103e 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -91,7 +91,6 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
int lock_type = btree_node_locked_type(iter, level);
- EBUG_ON(!level && (iter->flags & BTREE_ITER_UPTODATE));
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (lock_type != BTREE_NODE_UNLOCKED)
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 6b53418480bd..4b252b6d5e01 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -200,7 +200,7 @@ btree_insert_key_leaf(struct btree_insert *trans,
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
- iter->flags &= ~BTREE_ITER_UPTODATE;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
ret = !btree_node_is_extents(b)
? bch2_insert_fixup_key(trans, insert)
@@ -319,9 +319,16 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
return -EROFS;
retry_locks:
ret = -EINTR;
- trans_for_each_entry(trans, i)
+ trans_for_each_entry(trans, i) {
if (!bch2_btree_iter_set_locks_want(i->iter, 1))
goto err;
+
+ if (i->iter->uptodate == BTREE_ITER_NEED_TRAVERSE) {
+ ret = bch2_btree_iter_traverse(i->iter);
+ if (ret)
+ goto err;
+ }
+ }
retry:
trans->did_work = false;
u64s = 0;
@@ -413,17 +420,19 @@ unlock:
if (ret)
goto err;
- /*
- * hack: iterators are inconsistent when they hit end of leaf, until
- * traversed again
- */
trans_for_each_entry(trans, i)
if (i->iter->flags & BTREE_ITER_AT_END_OF_LEAF)
goto out;
- trans_for_each_entry(trans, i)
- if (!same_leaf_as_prev(trans, i))
+ trans_for_each_entry(trans, i) {
+ /*
+ * iterators are inconsistent when they hit end of leaf, until
+ * traversed again
+ */
+ if (i->iter->uptodate < BTREE_ITER_NEED_TRAVERSE &&
+ !same_leaf_as_prev(trans, i))
bch2_foreground_maybe_merge(c, i->iter, 0);
+ }
out:
/* make sure we didn't lose an error: */
if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
@@ -497,12 +506,7 @@ int bch2_btree_insert_list_at(struct btree_iter *iter,
bch2_verify_keylist_sorted(keys);
while (!bch2_keylist_empty(keys)) {
- /* need to traverse between each insert */
- int ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
-
- ret = bch2_btree_insert_at(iter->c, disk_res, hook,
+ int ret = bch2_btree_insert_at(iter->c, disk_res, hook,
journal_seq, flags,
BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
if (ret)
@@ -528,20 +532,15 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
u64 *journal_seq, int flags)
{
struct btree_iter iter;
- int ret, ret2;
+ int ret;
bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
-
- ret = bch2_btree_iter_traverse(&iter);
- if (unlikely(ret))
- goto out;
-
ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
BTREE_INSERT_ENTRY(&iter, k));
-out: ret2 = bch2_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
- return ret ?: ret2;
+ return ret;
}
/*
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 106d4d22a543..1bffddf6ce51 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -407,10 +407,6 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
hook.need_inode_update = false;
do {
- ret = bch2_btree_iter_traverse(&extent_iter);
- if (ret)
- goto err;
-
/* XXX: inode->i_size locking */
k = bch2_keylist_front(keys);
if (min(k->k.p.offset << 9, op->new_i_size) >
@@ -2279,10 +2275,6 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
bch2_btree_iter_set_pos(&src,
POS(dst.pos.inode, dst.pos.offset + (len >> 9)));
- ret = bch2_btree_iter_traverse(&dst);
- if (ret)
- goto btree_iter_err;
-
k = bch2_btree_iter_peek_slot(&src);
if ((ret = btree_iter_err(k)))
goto btree_iter_err;