summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2018-02-01 04:19:11 -0500
committerKent Overstreet <kent.overstreet@gmail.com>2018-02-05 00:30:22 -0500
commita9886f394062c85a2c63477d0619397f14701ae4 (patch)
tree340304b3bf3b0088a751d1e1638428e353525fa4
parent9d836524abc8c4a819c931a00dbb150b194490f3 (diff)
bcachefs: btree_iter_next()/btree_iter_next_slot()
previously, bch2_btree_iter_advance_pos() would use iter->k to calculate the next iterator position, assuming iter->k had been set by bch2_btree_iter_peek()/peek_with_holes(); this was busted because it was possible in certain codepaths to call bch2_btree_iter_advance_pos() when either iter->k hadn't been initialized, or bch2_btree_iter_traverse() -> btree_iter_down() had scribbled over it. This gets rid of bch2_btree_iter_advance_pos, in favor of bch2_btree_iter_next() and bch2_btree_iter_next_slot(), which both just call peek() internally if the iterator isn't uptodate. It's a performance improvement, too. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--fs/bcachefs/bset.c11
-rw-r--r--fs/bcachefs/btree_cache.c3
-rw-r--r--fs/bcachefs/btree_iter.c215
-rw-r--r--fs/bcachefs/btree_iter.h45
-rw-r--r--fs/bcachefs/btree_locking.h2
-rw-r--r--fs/bcachefs/btree_update.h2
-rw-r--r--fs/bcachefs/btree_update_leaf.c32
-rw-r--r--fs/bcachefs/debug.c13
-rw-r--r--fs/bcachefs/dirent.c7
-rw-r--r--fs/bcachefs/extents.c2
-rw-r--r--fs/bcachefs/fs-io.c29
-rw-r--r--fs/bcachefs/fs.c4
-rw-r--r--fs/bcachefs/fsck.c4
-rw-r--r--fs/bcachefs/inode.c44
-rw-r--r--fs/bcachefs/inode.h2
-rw-r--r--fs/bcachefs/io.c6
-rw-r--r--fs/bcachefs/migrate.c2
-rw-r--r--fs/bcachefs/move.c8
-rw-r--r--fs/bcachefs/quota.c4
-rw-r--r--fs/bcachefs/str_hash.h78
20 files changed, 269 insertions, 244 deletions
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 718f094bdfa7..a07d554092f7 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -264,9 +264,9 @@ void bch2_verify_key_order(struct btree *b,
#else
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
- struct btree *b,
- struct bkey_packed *k) {}
+static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
+ struct btree *b,
+ struct bkey_packed *k) {}
#endif
@@ -1654,8 +1654,9 @@ void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
struct btree *b)
{
+#ifdef CONFIG_BCACHEFS_DEBUG
struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b);
-
+#endif
iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
EBUG_ON(iter->data->k > iter->data->end);
@@ -1667,7 +1668,9 @@ void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
btree_node_iter_sift(iter, b, 0);
+#ifdef CONFIG_BCACHEFS_DEBUG
bch2_btree_node_iter_next_check(iter, b, k);
+#endif
}
/*
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 21044901facb..a9202ed585d9 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -347,6 +347,9 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
while (!list_empty(&bc->live)) {
b = list_first_entry(&bc->live, struct btree, list);
+ BUG_ON(btree_node_read_in_flight(b) ||
+ btree_node_write_in_flight(b));
+
if (btree_node_dirty(b))
bch2_btree_complete_write(c, b, btree_current_write(b));
clear_btree_node_dirty(b);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index dcb923615fb2..3bb989f38023 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -460,7 +460,8 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
- struct btree_iter_level *l)
+ struct btree_iter_level *l,
+ struct bkey *u)
{
struct bkey_packed *k =
bch2_btree_node_iter_peek_all(&l->iter, l->b);
@@ -471,7 +472,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
if (!k)
return bkey_s_c_null;
- ret = bkey_disassemble(l->b, k, &iter->k);
+ ret = bkey_disassemble(l->b, k, u);
if (debug_check_bkeys(iter->c))
bch2_bkey_debugcheck(iter->c, l->b, ret);
@@ -741,6 +742,8 @@ static inline int btree_iter_down(struct btree_iter *iter)
enum six_lock_type lock_type = btree_lock_want(iter, level);
BKEY_PADDED(k) tmp;
+ BUG_ON(!btree_node_locked(iter, iter->level));
+
bch2_bkey_unpack(l->b, &tmp.k,
bch2_btree_node_iter_peek(&l->iter, l->b));
@@ -882,6 +885,11 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
while (btree_iter_node(iter, iter->level) &&
!(is_btree_node(iter, iter->level) &&
bch2_btree_node_relock(iter, iter->level) &&
+
+ /*
+ * XXX: correctly using BTREE_ITER_UPTODATE should make
+ * comparing iter->pos against node's key unnecessary
+ */
btree_iter_pos_cmp(iter->pos,
&iter->l[iter->level].b->key.k,
iter->flags & BTREE_ITER_IS_EXTENTS)))
@@ -890,12 +898,15 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
/*
* If we've got a btree node locked (i.e. we aren't about to relock the
* root) - advance its node iterator if necessary:
+ *
+ * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
*/
if (btree_iter_node(iter, iter->level)) {
struct btree_iter_level *l = &iter->l[iter->level];
struct bkey_s_c k;
+ struct bkey u;
- while ((k = __btree_iter_peek_all(iter, l)).k &&
+ while ((k = __btree_iter_peek_all(iter, l, &u)).k &&
!btree_iter_pos_cmp(iter->pos, k.k,
iter->flags & BTREE_ITER_IS_EXTENTS))
__btree_iter_advance(l);
@@ -1030,30 +1041,6 @@ void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
iter->flags &= ~BTREE_ITER_UPTODATE;
}
-void bch2_btree_iter_advance_pos(struct btree_iter *iter)
-{
- if (iter->flags & BTREE_ITER_UPTODATE &&
- !(iter->flags & BTREE_ITER_WITH_HOLES)) {
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_s_c k;
-
- __btree_iter_advance(l);
- k = __btree_iter_peek(iter, l);
- if (likely(k.k)) {
- iter->pos = bkey_start_pos(k.k);
- return;
- }
- }
-
- /*
- * We use iter->k instead of iter->pos for extents: iter->pos will be
- * equal to the start of the extent we returned, but we need to advance
- * to the end of the extent we returned.
- */
- bch2_btree_iter_set_pos(iter,
- btree_type_successor(iter->btree_id, iter->k.p));
-}
-
/* XXX: expensive */
void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
@@ -1075,6 +1062,7 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
(iter->btree_id == BTREE_ID_EXTENTS));
+ EBUG_ON(iter->flags & BTREE_ITER_SLOTS);
if (iter->flags & BTREE_ITER_UPTODATE) {
struct bkey_packed *k =
@@ -1116,7 +1104,6 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
if (!bkey_cmp(iter->pos, POS_MAX)) {
iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
- bch2_btree_iter_unlock(iter);
return bkey_s_c_null;
}
@@ -1124,59 +1111,161 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
}
}
-struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
- struct bkey n;
- int ret;
EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
(iter->btree_id == BTREE_ID_EXTENTS));
+ EBUG_ON(iter->flags & BTREE_ITER_SLOTS);
+
+ if (unlikely(!(iter->flags & BTREE_ITER_UPTODATE))) {
+ k = bch2_btree_iter_peek(iter);
+ if (IS_ERR_OR_NULL(k.k))
+ return k;
+ }
+
+ EBUG_ON(!(iter->flags & BTREE_ITER_UPTODATE));
+ EBUG_ON(bch2_btree_node_iter_end(&l->iter));
+
+ bch2_btree_node_iter_advance(&l->iter, l->b);
+ k = __btree_iter_peek(iter, l);
+ if (likely(k.k)) {
+ EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) < 0);
+
+ iter->pos = bkey_start_pos(k.k);
+ return k;
+ }
iter->flags &= ~BTREE_ITER_UPTODATE;
+ iter->pos = l->b->key.k.p;
- while (1) {
+ if (!bkey_cmp(iter->pos, POS_MAX)) {
+ iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ return bkey_s_c_null;
+ }
+
+ iter->pos = btree_type_successor(iter->btree_id, iter->pos);
+ return bch2_btree_iter_peek(iter);
+}
+
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_slot(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_s_c k;
+ struct bkey n;
+ int ret;
+
+recheck:
+ while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
+ bkey_deleted(k.k) &&
+ bkey_cmp(bkey_start_pos(k.k), iter->pos) == 0)
+ __btree_iter_advance(l);
+
+ if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
+ EBUG_ON(bkey_cmp(k.k->p, iter->pos) < 0);
+ EBUG_ON(bkey_deleted(k.k));
+ iter->flags |= BTREE_ITER_UPTODATE;
+ return k;
+ }
+
+ /*
+ * If we got to the end of the node, check if we need to traverse to the
+ * next node:
+ */
+ if (unlikely(!k.k &&
+ !btree_iter_pos_cmp(iter->pos, &l->b->key.k,
+ iter->flags & BTREE_ITER_IS_EXTENTS))) {
ret = bch2_btree_iter_traverse(iter);
if (unlikely(ret)) {
iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
return bkey_s_c_err(ret);
}
- k = __btree_iter_peek_all(iter, l);
-recheck:
- if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
- /* hole */
- bkey_init(&n);
- n.p = iter->pos;
-
- if (iter->flags & BTREE_ITER_IS_EXTENTS) {
- if (n.p.offset == KEY_OFFSET_MAX) {
- iter->pos = bkey_successor(iter->pos);
- goto recheck;
- }
-
- if (!k.k)
- k.k = &l->b->key.k;
-
- bch2_key_resize(&n,
- min_t(u64, KEY_SIZE_MAX,
- (k.k->p.inode == n.p.inode
- ? bkey_start_offset(k.k)
- : KEY_OFFSET_MAX) -
- n.p.offset));
-
- EBUG_ON(!n.size);
- }
+ goto recheck;
+ }
- iter->k = n;
- return (struct bkey_s_c) { &iter->k, NULL };
- } else if (!bkey_deleted(k.k)) {
- return k;
- } else {
- __btree_iter_advance(l);
+ /* hole */
+ bkey_init(&n);
+ n.p = iter->pos;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+ if (n.p.offset == KEY_OFFSET_MAX) {
+ if (n.p.inode == KEY_INODE_MAX)
+ return bkey_s_c_null;
+
+ iter->pos = bkey_successor(iter->pos);
+ goto recheck;
}
+
+ if (!k.k)
+ k.k = &l->b->key.k;
+
+ bch2_key_resize(&n,
+ min_t(u64, KEY_SIZE_MAX,
+ (k.k->p.inode == n.p.inode
+ ? bkey_start_offset(k.k)
+ : KEY_OFFSET_MAX) -
+ n.p.offset));
+
+ EBUG_ON(!n.size);
}
+
+ iter->k = n;
+ iter->flags |= BTREE_ITER_UPTODATE;
+ return (struct bkey_s_c) { &iter->k, NULL };
+}
+
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+{
+ struct btree_iter_level *l = &iter->l[0];
+ int ret;
+
+ EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
+ (iter->btree_id == BTREE_ID_EXTENTS));
+ EBUG_ON(!(iter->flags & BTREE_ITER_SLOTS));
+
+ if (iter->flags & BTREE_ITER_UPTODATE) {
+ struct bkey_s_c ret = { .k = &iter->k };;
+
+ if (!bkey_deleted(&iter->k))
+ ret.v = bkeyp_val(&l->b->format,
+ __bch2_btree_node_iter_peek_all(&l->iter, l->b));
+
+ EBUG_ON(!btree_node_locked(iter, 0));
+
+ if (debug_check_bkeys(iter->c))
+ bch2_bkey_debugcheck(iter->c, l->b, ret);
+ return ret;
+ }
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret)) {
+ iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
+ return bkey_s_c_err(ret);
+ }
+
+ return __bch2_btree_iter_peek_slot(iter);
+}
+
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
+{
+ if (unlikely(!(iter->flags & BTREE_ITER_UPTODATE))) {
+ struct bkey_s_c k;
+
+ k = bch2_btree_iter_peek_slot(iter);
+ if (btree_iter_err(k))
+ return k;
+ }
+
+ iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
+
+ if (!bkey_deleted(&iter->k))
+ __btree_iter_advance(&iter->l[0]);
+
+ return __bch2_btree_iter_peek_slot(iter);
}
void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 8b0a4f4cb82f..96bd04e2114b 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -7,7 +7,7 @@
#include "bset.h"
#define BTREE_ITER_UPTODATE (1 << 0)
-#define BTREE_ITER_WITH_HOLES (1 << 1)
+#define BTREE_ITER_SLOTS (1 << 1)
#define BTREE_ITER_INTENT (1 << 2)
#define BTREE_ITER_PREFETCH (1 << 3)
/*
@@ -48,7 +48,7 @@ struct btree_iter {
/*
* Current unpacked key - so that bch2_btree_iter_next()/
- * bch2_btree_iter_next_with_holes() can correctly advance pos.
+ * bch2_btree_iter_next_slot() can correctly advance pos.
*/
struct bkey k;
@@ -163,10 +163,13 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
-void bch2_btree_iter_advance_pos(struct btree_iter *);
void bch2_btree_iter_rewind(struct btree_iter *, struct bpos);
void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
@@ -179,8 +182,8 @@ static inline void bch2_btree_iter_init(struct btree_iter *iter,
{
__bch2_btree_iter_init(iter, c, btree_id, pos,
flags & BTREE_ITER_INTENT ? 1 : 0, 0,
- btree_id == BTREE_ID_EXTENTS
- ? BTREE_ITER_IS_EXTENTS : 0);
+ (btree_id == BTREE_ID_EXTENTS
+ ? BTREE_ITER_IS_EXTENTS : 0)|flags);
}
void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
@@ -229,16 +232,30 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_WITH_HOLES
- ? bch2_btree_iter_peek_with_holes(iter)
+ return flags & BTREE_ITER_SLOTS
+ ? bch2_btree_iter_peek_slot(iter)
: bch2_btree_iter_peek(iter);
}
+static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
+ unsigned flags)
+{
+ return flags & BTREE_ITER_SLOTS
+ ? bch2_btree_iter_next_slot(iter)
+ : bch2_btree_iter_next(iter);
+}
+
#define for_each_btree_key(_iter, _c, _btree_id, _start, _flags, _k) \
for (bch2_btree_iter_init((_iter), (_c), (_btree_id), \
- (_start), (_flags)); \
- !IS_ERR_OR_NULL(((_k) = __bch2_btree_iter_peek(_iter, _flags)).k);\
- bch2_btree_iter_advance_pos(_iter))
+ (_start), (_flags)), \
+ (_k) = __bch2_btree_iter_peek(_iter, _flags); \
+ !IS_ERR_OR_NULL((_k).k); \
+ (_k) = __bch2_btree_iter_next(_iter, _flags))
+
+#define for_each_btree_key_continue(_iter, _flags, _k) \
+ for ((_k) = __bch2_btree_iter_peek(_iter, _flags); \
+ !IS_ERR_OR_NULL((_k).k); \
+ (_k) = __bch2_btree_iter_next(_iter, _flags))
static inline int btree_iter_err(struct bkey_s_c k)
{
@@ -251,16 +268,10 @@ static inline int btree_iter_err(struct bkey_s_c k)
*/
static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
{
- struct btree_iter *linked;
-
if (need_resched()) {
- for_each_linked_btree_iter(iter, linked)
- bch2_btree_iter_unlock(linked);
bch2_btree_iter_unlock(iter);
schedule();
} else if (race_fault()) {
- for_each_linked_btree_iter(iter, linked)
- bch2_btree_iter_unlock(linked);
bch2_btree_iter_unlock(iter);
}
}
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index f526f9083f7f..4d8ad8ee8273 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -91,7 +91,7 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
int lock_type = btree_node_locked_type(iter, level);
- EBUG_ON(!level && iter->flags & BTREE_ITER_UPTODATE);
+ EBUG_ON(!level && (iter->flags & BTREE_ITER_UPTODATE));
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (lock_type != BTREE_NODE_UNLOCKED)
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index c7c2930650d3..f357095d5b4f 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -120,8 +120,6 @@ int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *,
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
struct disk_reservation *,
struct extent_insert_hook *, u64 *, int flags);
-int bch2_btree_update(struct bch_fs *, enum btree_id,
- struct bkey_i *, u64 *);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
struct bpos, struct bpos, struct bversion,
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 40e5b57f0711..6b53418480bd 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -544,38 +544,6 @@ out: ret2 = bch2_btree_iter_unlock(&iter);
return ret ?: ret2;
}
-/**
- * bch_btree_update - like bch2_btree_insert(), but asserts that we're
- * overwriting an existing key
- */
-int bch2_btree_update(struct bch_fs *c, enum btree_id id,
- struct bkey_i *k, u64 *journal_seq)
-{
- struct btree_iter iter;
- struct bkey_s_c u;
- int ret;
-
- EBUG_ON(id == BTREE_ID_EXTENTS);
-
- bch2_btree_iter_init(&iter, c, id, k->k.p,
- BTREE_ITER_INTENT);
-
- u = bch2_btree_iter_peek_with_holes(&iter);
- ret = btree_iter_err(u);
- if (ret)
- return ret;
-
- if (bkey_deleted(u.k)) {
- bch2_btree_iter_unlock(&iter);
- return -ENOENT;
- }
-
- ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
- BTREE_INSERT_ENTRY(&iter, k));
- bch2_btree_iter_unlock(&iter);
- return ret;
-}
-
/*
* bch_btree_delete_range - delete everything within a given range
*
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index b765914fe4e1..00e0de167b32 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -212,10 +212,10 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
if (!i->size)
return i->ret;
- bch2_btree_iter_init(&iter, i->c, i->id, i->from, BTREE_ITER_PREFETCH);
+ for_each_btree_key(&iter, i->c, i->id, i->from,
+ BTREE_ITER_PREFETCH, k) {
+ i->from = iter.pos;
- while ((k = bch2_btree_iter_peek(&iter)).k &&
- !(err = btree_iter_err(k))) {
bch2_bkey_val_to_text(i->c, bkey_type(0, i->id),
i->buf, sizeof(i->buf), k);
i->bytes = strlen(i->buf);
@@ -223,9 +223,6 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
i->buf[i->bytes] = '\n';
i->bytes++;
- bch2_btree_iter_advance_pos(&iter);
- i->from = iter.pos;
-
err = flush_buf(i);
if (err)
break;
@@ -233,7 +230,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
if (!i->size)
break;
}
- bch2_btree_iter_unlock(&iter);
+ err = bch2_btree_iter_unlock(&iter) ?: err;
return err < 0 ? err : i->ret;
}
@@ -338,7 +335,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
if (err)
break;
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next(&iter);
i->from = iter.pos;
err = flush_buf(i);
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index a900d39781a1..6bdece3a7637 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -213,12 +213,13 @@ int bch2_dirent_rename(struct bch_fs *c,
int ret = -ENOMEM;
bch2_btree_iter_init(&src_iter, c, BTREE_ID_DIRENTS, src_pos,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_btree_iter_init(&dst_iter, c, BTREE_ID_DIRENTS, dst_pos,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_btree_iter_link(&src_iter, &dst_iter);
- bch2_btree_iter_init(&whiteout_iter, c, BTREE_ID_DIRENTS, src_pos, 0);
+ bch2_btree_iter_init(&whiteout_iter, c, BTREE_ID_DIRENTS, src_pos,
+ BTREE_ITER_SLOTS);
bch2_btree_iter_link(&src_iter, &whiteout_iter);
if (mode == BCH_RENAME_EXCHANGE) {
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index ab01c65f2b0a..c2469167efea 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -2370,7 +2370,7 @@ int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size)
end.offset += size;
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
- BTREE_ITER_WITH_HOLES, k) {
+ BTREE_ITER_SLOTS, k) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index f2cec012c3c8..106d4d22a543 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -400,7 +400,7 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
BTREE_ITER_INTENT);
bch2_btree_iter_init(&inode_iter, wop->c, BTREE_ID_INODES,
POS(extent_iter.pos.inode, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
hook.op = op;
hook.hook.fn = bchfs_extent_update_hook;
@@ -423,7 +423,7 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
if (!btree_iter_linked(&inode_iter))
bch2_btree_iter_link(&extent_iter, &inode_iter);
- inode = bch2_btree_iter_peek_with_holes(&inode_iter);
+ inode = bch2_btree_iter_peek_slot(&inode_iter);
if ((ret = btree_iter_err(inode)))
goto err;
@@ -994,7 +994,7 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
- k = bch2_btree_iter_peek_with_holes(iter);
+ k = bch2_btree_iter_peek_slot(iter);
BUG_ON(!k.k);
if (IS_ERR(k.k)) {
@@ -1067,7 +1067,8 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
.mapping = mapping, .nr_pages = nr_pages
};
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
+ BTREE_ITER_SLOTS);
INIT_LIST_HEAD(&readpages_iter.pages);
list_add(&readpages_iter.pages, pages);
@@ -1107,7 +1108,8 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
bio_add_page_contig(&rbio->bio, page);
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
+ BTREE_ITER_SLOTS);
bchfs_read(c, &iter, rbio, inum, NULL);
}
@@ -2236,9 +2238,10 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
bch2_btree_iter_init(&dst, c, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
/* position will be set from dst iter's position: */
- bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN, 0);
+ bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN,
+ BTREE_ITER_SLOTS);
bch2_btree_iter_link(&src, &dst);
/*
@@ -2280,7 +2283,7 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
if (ret)
goto btree_iter_err;
- k = bch2_btree_iter_peek_with_holes(&src);
+ k = bch2_btree_iter_peek_slot(&src);
if ((ret = btree_iter_err(k)))
goto btree_iter_err;
@@ -2356,7 +2359,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
int ret;
bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
inode_lock(&inode->v);
inode_dio_wait(&inode->v);
@@ -2403,20 +2406,20 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
struct bkey_i_reservation reservation;
struct bkey_s_c k;
- k = bch2_btree_iter_peek_with_holes(&iter);
+ k = bch2_btree_iter_peek_slot(&iter);
if ((ret = btree_iter_err(k)))
goto btree_iter_err;
/* already reserved */
if (k.k->type == BCH_RESERVATION &&
bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next_slot(&iter);
continue;
}
if (bkey_extent_is_data(k.k)) {
if (!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next_slot(&iter);
continue;
}
}
@@ -2645,7 +2648,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9),
- BTREE_ITER_WITH_HOLES, k) {
+ BTREE_ITER_SLOTS, k) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_next_pagecache_hole(&inode->v,
offset, MAX_LFS_FILESIZE);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index aba845b2d966..98f282b2598f 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -85,10 +85,10 @@ int __must_check __bch2_write_inode(struct bch_fs *c,
lockdep_assert_held(&inode->ei_update_lock);
bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(inum, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
do {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
if ((ret = btree_iter_err(k)))
goto out;
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index ce14aa782593..2991a0dd3830 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -231,7 +231,7 @@ static int hash_check_key(const struct bch_hash_desc desc,
return ret;
return 1;
}
- bch2_btree_iter_advance_pos(&h->iter);
+ bch2_btree_iter_next(&h->iter);
}
fsck_err:
return ret;
@@ -1081,7 +1081,7 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
if (nlinks_pos == iter.pos.inode)
genradix_iter_advance(&nlinks_iter, links);
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next(&iter);
bch2_btree_iter_cond_resched(&iter);
}
fsck_err:
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 71a24cc66886..797aa2a981e3 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -303,10 +303,10 @@ int bch2_inode_create(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
searched_from_start = true;
again:
bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(*hint, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
while (1) {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
u32 bi_generation = 0;
ret = btree_iter_err(k);
@@ -322,7 +322,7 @@ again:
if (iter.pos.inode == max)
goto out;
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next_slot(&iter);
break;
case BCH_INODE_GENERATION: {
@@ -415,9 +415,9 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
return ret;
bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(inode_nr, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
do {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
u32 bi_generation = 0;
ret = btree_iter_err(k);
@@ -474,7 +474,7 @@ int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
for_each_btree_key(&iter, c, BTREE_ID_INODES,
POS(inode_nr, 0),
- BTREE_ITER_WITH_HOLES, k) {
+ BTREE_ITER_SLOTS, k) {
switch (k.k->type) {
case BCH_INODE_FS:
ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode);
@@ -491,38 +491,6 @@ int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
return bch2_btree_iter_unlock(&iter) ?: ret;
}
-int bch2_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid,
- struct bkey_i_inode_blockdev *ret)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
-
- for_each_btree_key(&iter, c, BTREE_ID_INODES, POS(0, 0), 0, k) {
- if (k.k->p.inode >= BLOCKDEV_INODE_MAX)
- break;
-
- if (k.k->type == BCH_INODE_BLOCKDEV) {
- struct bkey_s_c_inode_blockdev inode =
- bkey_s_c_to_inode_blockdev(k);
-
- pr_debug("found inode %llu: %pU (u64s %u)",
- inode.k->p.inode, inode.v->i_uuid.b,
- inode.k->u64s);
-
- if (CACHED_DEV(inode.v) &&
- !memcmp(uuid, &inode.v->i_uuid, 16)) {
- bkey_reassemble(&ret->k_i, k);
- bch2_btree_iter_unlock(&iter);
- return 0;
- }
- }
-
- bch2_btree_iter_cond_resched(&iter);
- }
- bch2_btree_iter_unlock(&iter);
- return -ENOENT;
-}
-
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_inode_pack_test(void)
{
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index 8ebb6fb6d6d0..5c7aeadcb1a6 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -40,8 +40,6 @@ int bch2_inode_rm(struct bch_fs *, u64);
int bch2_inode_find_by_inum(struct bch_fs *, u64,
struct bch_inode_unpacked *);
-int bch2_cached_dev_inode_find_by_uuid(struct bch_fs *, uuid_le *,
- struct bkey_i_inode_blockdev *);
static inline struct timespec bch2_time_to_timespec(struct bch_fs *c, u64 time)
{
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index b148a89314b4..7cddbccd1938 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1452,9 +1452,9 @@ static void bch2_read_nodecode_retry(struct bch_fs *c, struct bch_read_bio *rbio
bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
POS(inode, bvec_iter.bi_sector),
- BTREE_ITER_WITH_HOLES);
+ BTREE_ITER_SLOTS);
retry:
- k = bch2_btree_iter_peek_with_holes(&iter);
+ k = bch2_btree_iter_peek_slot(&iter);
if (btree_iter_err(k)) {
bch2_btree_iter_unlock(&iter);
goto err;
@@ -1525,7 +1525,7 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
retry:
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
POS(inode, bvec_iter.bi_sector),
- BTREE_ITER_WITH_HOLES, k) {
+ BTREE_ITER_SLOTS, k) {
BKEY_PADDED(k) tmp;
struct extent_pick_ptr pick;
struct bvec_iter fragment;
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index 488b6b27a4fe..01c8896078f2 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -162,7 +162,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
bch2_bkey_devs(k));
if (ret)
break;
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next(&iter);
continue;
}
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 7c7f436c8926..a67e7a451e38 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -49,10 +49,10 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
while (1) {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
struct bkey_i_extent *insert, *new =
bkey_i_to_extent(bch2_keylist_front(keys));
BKEY_PADDED(k) _new, _insert;
@@ -143,7 +143,7 @@ nomatch:
&m->ctxt->stats->sectors_raced);
atomic_long_inc(&c->extent_migrate_raced);
trace_move_race(&new->k);
- bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_next_slot(&iter);
goto next;
}
out:
@@ -435,7 +435,7 @@ next:
atomic64_add(k.k->size * bch2_extent_nr_dirty_ptrs(k),
&stats->sectors_seen);
next_nondata:
- bch2_btree_iter_advance_pos(&stats->iter);
+ bch2_btree_iter_next(&stats->iter);
bch2_btree_iter_cond_resched(&stats->iter);
}
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index 92b0a2fde37c..6ab2c866a168 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -735,8 +735,8 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
bch2_btree_iter_init(&iter, c, BTREE_ID_QUOTAS, new_quota.k.p,
- BTREE_ITER_WITH_HOLES|BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_with_holes(&iter);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = btree_iter_err(k);
if (unlikely(ret))
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 530cf0a49c84..0adb9a1c73ec 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -131,12 +131,11 @@ bch2_hash_lookup_at(const struct bch_hash_desc desc,
struct btree_iter *iter, const void *search)
{
u64 inode = iter->pos.inode;
+ struct bkey_s_c k;
- do {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
-
- if (btree_iter_err(k))
- return k;
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ if (iter->pos.inode != inode)
+ break;
if (k.k->type == desc.key_type) {
if (!desc.cmp_key(k, search))
@@ -147,11 +146,8 @@ bch2_hash_lookup_at(const struct bch_hash_desc desc,
/* hole, not found */
break;
}
-
- bch2_btree_iter_advance_pos(iter);
- } while (iter->pos.inode == inode);
-
- return bkey_s_c_err(-ENOENT);
+ }
+ return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
}
static inline struct bkey_s_c
@@ -160,12 +156,11 @@ bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
struct btree_iter *iter, struct bkey_s_c search)
{
u64 inode = iter->pos.inode;
+ struct bkey_s_c k;
- do {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
-
- if (btree_iter_err(k))
- return k;
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ if (iter->pos.inode != inode)
+ break;
if (k.k->type == desc.key_type) {
if (!desc.cmp_bkey(k, search))
@@ -176,11 +171,8 @@ bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
/* hole, not found */
break;
}
-
- bch2_btree_iter_advance_pos(iter);
- } while (iter->pos.inode == inode);
-
- return bkey_s_c_err(-ENOENT);
+ }
+ return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
}
static inline struct bkey_s_c
@@ -190,7 +182,8 @@ bch2_hash_lookup(const struct bch_hash_desc desc,
struct btree_iter *iter, const void *key)
{
bch2_btree_iter_init(iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)), 0);
+ POS(inode, desc.hash_key(info, key)),
+ BTREE_ITER_SLOTS);
return bch2_hash_lookup_at(desc, info, iter, key);
}
@@ -203,7 +196,7 @@ bch2_hash_lookup_intent(const struct bch_hash_desc desc,
{
bch2_btree_iter_init(iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
return bch2_hash_lookup_at(desc, info, iter, key);
}
@@ -211,20 +204,17 @@ bch2_hash_lookup_intent(const struct bch_hash_desc desc,
static inline struct bkey_s_c
bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
{
- while (1) {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
+ u64 inode = iter->pos.inode;
+ struct bkey_s_c k;
- if (btree_iter_err(k))
- return k;
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ if (iter->pos.inode != inode)
+ break;
if (k.k->type != desc.key_type)
return k;
-
- /* hash collision, keep going */
- bch2_btree_iter_advance_pos(iter);
- if (iter->pos.inode != k.k->p.inode)
- return bkey_s_c_err(-ENOENT);
}
+ return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
}
static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
@@ -235,7 +225,7 @@ static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
{
bch2_btree_iter_init(iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
return bch2_hash_hole_at(desc, iter);
}
@@ -245,16 +235,12 @@ static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
struct btree_iter *iter,
struct btree_iter *start)
{
+ struct bkey_s_c k;
+
bch2_btree_iter_set_pos(iter,
btree_type_successor(start->btree_id, start->pos));
- while (1) {
- struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
- int ret = btree_iter_err(k);
-
- if (ret)
- return ret;
-
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
if (k.k->type != desc.key_type &&
k.k->type != desc.whiteout_type)
return false;
@@ -262,9 +248,8 @@ static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
if (k.k->type == desc.key_type &&
desc.hash_bkey(info, k) <= start->pos.offset)
return true;
-
- bch2_btree_iter_advance_pos(iter);
}
+ return btree_iter_err(k);
}
static inline int bch2_hash_set(const struct bch_hash_desc desc,
@@ -279,9 +264,9 @@ static inline int bch2_hash_set(const struct bch_hash_desc desc,
bch2_btree_iter_init(&hashed_slot, c, desc.btree_id,
POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_btree_iter_init(&iter, c, desc.btree_id, hashed_slot.pos,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_btree_iter_link(&hashed_slot, &iter);
retry:
/*
@@ -354,7 +339,7 @@ static inline int bch2_hash_delete_at(const struct bch_hash_desc desc,
int ret = -ENOENT;
bch2_btree_iter_init(&whiteout_iter, iter->c, desc.btree_id,
- iter->pos, 0);
+ iter->pos, BTREE_ITER_SLOTS);
bch2_btree_iter_link(iter, &whiteout_iter);
ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, iter);
@@ -385,9 +370,10 @@ static inline int bch2_hash_delete(const struct bch_hash_desc desc,
bch2_btree_iter_init(&iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)), 0);
+ POS(inode, desc.hash_key(info, key)),
+ BTREE_ITER_SLOTS);
bch2_btree_iter_link(&iter, &whiteout_iter);
retry:
k = bch2_hash_lookup_at(desc, info, &iter, key);