author    Kent Overstreet <kent.overstreet@gmail.com>  2018-06-02 20:18:12 -0400
committer Kent Overstreet <kent.overstreet@gmail.com>  2018-06-03 21:01:03 -0400
commit    37b6d3515758331ab0cabe4c9d680947bb57fe29
tree      db8fab7ccf6f956c5b7ada4b77dac0b66312b0f4
parent    98e46637b68323d1b82473c5eacba6d521f7a7d1
bcachefs: btree iter refactoring
-rw-r--r--  fs/bcachefs/btree_iter.c             | 64
-rw-r--r--  fs/bcachefs/btree_iter.h             | 75
-rw-r--r--  fs/bcachefs/btree_update_interior.c  |  7
-rw-r--r--  fs/bcachefs/btree_update_leaf.c      |  7
4 files changed, 83 insertions(+), 70 deletions(-)
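
Before the per-file hunks, the shape of the refactoring in one place: the old for_each_linked_btree_iter()/for_each_linked_btree_node() loops excluded the head iterator, so nearly every call site needed a duplicated statement for @iter itself; the new for_each_btree_iter() includes it. A minimal standalone sketch of the two iteration styles, using toy types rather than the kernel's (struct toy_iter and the toy macros are hypothetical):

    #include <stddef.h>

    /* Toy stand-in for struct btree_iter: iterators form a circular list. */
    struct toy_iter {
    	struct toy_iter	*next;	/* points back to itself when unlinked */
    };

    /* Old style: visit the iterators linked with @_iter, excluding @_iter. */
    #define for_each_linked_toy_iter(_iter, _l)				\
    	for ((_l) = (_iter)->next;					\
    	     (_l) != (_iter);						\
    	     (_l) = (_l)->next)

    /* New style: visit every iterator in the list, @_iter included. */
    #define for_each_toy_iter(_iter, _l)				\
    	for ((_l) = (_iter);						\
    	     (_l);							\
    	     (_l) = ((_l)->next != (_iter) ? (_l)->next : NULL))

With the inclusive form, call sites such as bch2_btree_iter_unlock() below lose their trailing "and once more for @iter" statement.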
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 8009acc717d7..d047dd656b47 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -34,11 +34,9 @@ void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
EBUG_ON(iter->l[b->level].b != b);
EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
- for_each_linked_btree_node(iter, b, linked)
+ for_each_btree_iter_with_node(iter, b, linked)
linked->lock_seq[b->level] += 2;
- iter->lock_seq[b->level] += 2;
-
six_unlock_write(&b->lock);
}
@@ -103,7 +101,7 @@ success:
return true;
}
-bool bch2_btree_iter_relock(struct btree_iter *iter)
+static bool __bch2_btree_iter_relock(struct btree_iter *iter)
{
unsigned l = iter->level;
@@ -124,6 +122,17 @@ bool bch2_btree_iter_relock(struct btree_iter *iter)
return true;
}
+bool bch2_btree_iter_relock(struct btree_iter *iter)
+{
+ struct btree_iter *linked;
+ bool ret = true;
+
+ for_each_btree_iter(iter, linked)
+ ret &= __bch2_btree_iter_relock(linked);
+
+ return ret;
+}
+
/* Slowpath: */
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
unsigned level,
@@ -243,7 +252,7 @@ bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
iter->locks_want = new_locks_want;
btree_iter_drop_extra_locks(iter);
- if (bch2_btree_iter_relock(iter))
+ if (__bch2_btree_iter_relock(iter))
return true;
/*
@@ -271,9 +280,8 @@ int bch2_btree_iter_unlock(struct btree_iter *iter)
{
struct btree_iter *linked;
- for_each_linked_btree_iter(iter, linked)
+ for_each_btree_iter(iter, linked)
__bch2_btree_iter_unlock(linked);
- __bch2_btree_iter_unlock(iter);
return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}
@@ -324,11 +332,8 @@ void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- if (iter->l[b->level].b == b)
- __bch2_btree_iter_verify(iter, b);
-
- for_each_linked_btree_node(iter, b, linked)
- __bch2_btree_iter_verify(iter, b);
+ for_each_btree_iter_with_node(iter, b, linked)
+ __bch2_btree_iter_verify(linked, b);
}
#endif
@@ -460,12 +465,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
- if (iter->l[b->level].b == b)
- __bch2_btree_node_iter_fix(iter, b,
- &iter->l[b->level].iter, t,
- where, clobber_u64s, new_u64s);
-
- for_each_linked_btree_node(iter, b, linked)
+ for_each_btree_iter_with_node(iter, b, linked)
__bch2_btree_node_iter_fix(linked, b,
&linked->l[b->level].iter, t,
where, clobber_u64s, new_u64s);
@@ -672,7 +672,6 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
unsigned level = b->level;
if (iter->l[level].b == b) {
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
btree_node_unlock(iter, level);
iter->l[level].b = BTREE_ITER_NOT_END;
}
@@ -686,9 +685,8 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- for_each_linked_btree_node(iter, b, linked)
+ for_each_btree_iter_with_node(iter, b, linked)
__btree_iter_init(linked, b);
- __btree_iter_init(iter, b);
}
static inline int btree_iter_lock_root(struct btree_iter *iter,
@@ -1032,17 +1030,21 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
return NULL;
}
- /* parent node usually won't be locked: redo traversal if necessary */
- btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return NULL;
+ if (!bch2_btree_node_relock(iter, iter->level)) {
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return NULL;
+ }
b = iter->l[iter->level].b;
BUG_ON(!b);
if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
- /* Haven't gotten to the end of the parent node: */
+ /*
+ * Haven't gotten to the end of the parent node: go back down to
+ * the next child node
+ */
/* ick: */
iter->pos = iter->btree_id == BTREE_ID_INODES
@@ -1367,13 +1369,11 @@ void bch2_btree_iter_unlink(struct btree_iter *iter)
if (!btree_iter_linked(iter))
return;
- for_each_linked_btree_iter(iter, linked) {
-
+ for_each_linked_btree_iter(iter, linked)
if (linked->next == iter) {
linked->next = iter->next;
return;
}
- }
BUG();
}
@@ -1386,9 +1386,9 @@ void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
iter->next = new;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- unsigned nr_iters = 1;
+ unsigned nr_iters = 0;
- for_each_linked_btree_iter(iter, new)
+ for_each_btree_iter(iter, new)
nr_iters++;
BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
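
A detail worth pausing on in the hunks above: the new bch2_btree_iter_relock() folds the per-iterator relock across the whole list with '&=' rather than '&&', so one iterator failing to relock doesn't stop the remaining iterators from being relocked. Continuing the toy sketch from above (toy_relock_one() is a hypothetical stand-in for __bch2_btree_iter_relock()):

    #include <stdbool.h>

    static bool toy_relock_one(struct toy_iter *it);	/* stand-in */

    static bool toy_relock_all(struct toy_iter *iter)
    {
    	struct toy_iter *l;
    	bool ret = true;

    	/* '&=' always evaluates the call: no short-circuit on failure. */
    	for_each_toy_iter(iter, l)
    		ret &= toy_relock_one(l);

    	return ret;
    }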
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 0097a2a20a18..e7e8ae209d0b 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -28,40 +28,47 @@ static inline bool btree_iter_linked(const struct btree_iter *iter)
return iter->next != iter;
}
-/**
- * for_each_linked_btree_iter - iterate over all iterators linked with @_iter
- */
-#define for_each_linked_btree_iter(_iter, _linked) \
- for ((_linked) = (_iter)->next; \
- (_linked) != (_iter); \
- (_linked) = (_linked)->next)
+static inline bool __iter_has_node(const struct btree_iter *iter,
+ const struct btree *b)
+{
+ /*
+ * We don't compare the low bits of the lock sequence numbers because
+ * @iter might have taken a write lock on @b, and we don't want to skip
+ * the linked iterator if the sequence numbers were equal before taking
+ * that write lock. The lock sequence number is incremented by taking
+ * and releasing write locks and is even when unlocked:
+ */
+
+ return iter->l[b->level].b == b &&
+ iter->lock_seq[b->level] >> 1 == b->lock.state.seq >> 1;
+}
static inline struct btree_iter *
-__next_linked_btree_node(struct btree_iter *iter, struct btree *b,
- struct btree_iter *linked)
-{
- do {
- linked = linked->next;
-
- if (linked == iter)
- return NULL;
-
- /*
- * We don't compare the low bits of the lock sequence numbers
- * because @iter might have taken a write lock on @b, and we
- * don't want to skip the linked iterator if the sequence
- * numbers were equal before taking that write lock. The lock
- * sequence number is incremented by taking and releasing write
- * locks and is even when unlocked:
- */
- } while (linked->l[b->level].b != b ||
- linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);
+__next_linked_iter(struct btree_iter *iter, struct btree_iter *linked)
+{
+ return linked->next != iter ? linked->next : NULL;
+}
+
+static inline struct btree_iter *
+__next_iter_with_node(struct btree_iter *iter, struct btree *b,
+ struct btree_iter *linked)
+{
+ while (linked && !__iter_has_node(linked, b))
+ linked = __next_linked_iter(iter, linked);
return linked;
}
/**
- * for_each_linked_btree_node - iterate over all iterators linked with @_iter
+ * for_each_btree_iter - iterate over all iterators linked with @_iter,
+ * including @_iter
+ */
+#define for_each_btree_iter(_iter, _linked) \
+ for ((_linked) = (_iter); (_linked); \
+ (_linked) = __next_linked_iter(_iter, _linked))
+
+/**
+ * for_each_btree_iter_with_node - iterate over all iterators linked with @_iter
* that also point to @_b
*
* @_b is assumed to be locked by @_iter
@@ -69,9 +76,19 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b,
* Filters out iterators that don't have a valid btree_node iterator for @_b -
* i.e. iterators for which bch2_btree_node_relock() would not succeed.
*/
-#define for_each_linked_btree_node(_iter, _b, _linked) \
+#define for_each_btree_iter_with_node(_iter, _b, _linked) \
for ((_linked) = (_iter); \
- ((_linked) = __next_linked_btree_node(_iter, _b, _linked));)
+ ((_linked) = __next_iter_with_node(_iter, _b, _linked)); \
+ (_linked) = __next_linked_iter(_iter, _linked))
+
+/**
+ * for_each_linked_btree_iter - iterate over all iterators linked with @_iter,
+ * _not_ including @_iter
+ */
+#define for_each_linked_btree_iter(_iter, _linked) \
+ for ((_linked) = (_iter)->next; \
+ (_linked) != (_iter); \
+ (_linked) = (_linked)->next)
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
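
The __iter_has_node() comment above is easiest to check with concrete numbers. The six-lock sequence counter is incremented by taking and by releasing a write lock, and is even when write-unlocked, so it is odd exactly while a write lock is held; comparing seq >> 1 therefore matches generations while ignoring an in-flight write lock. (This is also why bch2_btree_node_unlock_write() in the first file adds 2 to each linked iterator's lock_seq: a full write lock/unlock cycle advances the node's counter by two.) A worked sketch under those assumptions:

    #include <assert.h>

    int main(void)
    {
    	unsigned node_seq = 4;	/* node write-unlocked: even */
    	unsigned iter_seq = 4;	/* generation the iterator last saw */

    	node_seq += 1;		/* take write lock: 5, odd */

    	/* A raw compare would wrongly treat the iterator as stale... */
    	assert(iter_seq != node_seq);
    	/* ...but shifting off the write-lock bit still matches: 2 == 2. */
    	assert(iter_seq >> 1 == node_seq >> 1);

    	node_seq += 1;		/* release write lock: 6, a new generation */
    	assert(iter_seq >> 1 != node_seq >> 1);	/* 2 != 3: genuinely stale */
    	return 0;
    }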
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 65b345db2931..9b644dc794e7 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1492,9 +1492,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
btree_update_updated_node(as, b);
- for_each_linked_btree_node(iter, b, linked)
+ for_each_btree_iter_with_node(iter, b, linked)
bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
- bch2_btree_node_iter_peek(&iter->l[b->level].iter, b);
bch2_btree_iter_verify(iter, b);
}
@@ -1572,11 +1571,9 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
* We already have a disk reservation and open buckets pinned; this
* allocation must not block:
*/
- for_each_linked_btree_iter(iter, linked)
+ for_each_btree_iter(iter, linked)
if (linked->btree_id == BTREE_ID_EXTENTS)
flags |= BTREE_INSERT_USE_RESERVE;
- if (iter->btree_id == BTREE_ID_EXTENTS)
- flags |= BTREE_INSERT_USE_RESERVE;
closure_init_stack(&cl);
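
The btree_update_interior.c hunks above use both forms: bch2_btree_insert_keys_interior() the filtered for_each_btree_iter_with_node(), bch2_btree_split_leaf() the inclusive for_each_btree_iter(). The filtered variant walks the same circular list but skips iterators whose cached node pointer or lock sequence no longer matches @b, i.e. iterators bch2_btree_node_relock() would reject. In the toy terms used earlier (toy_iter_has_node() is a hypothetical stand-in for __iter_has_node()):

    static bool toy_iter_has_node(struct toy_iter *it, const void *node);

    static struct toy_iter *
    toy_next_with_node(struct toy_iter *iter, const void *node,
    		   struct toy_iter *l)
    {
    	/* Advance past iterators without a valid view of @node. */
    	while (l && !toy_iter_has_node(l, node))
    		l = (l->next != iter ? l->next : NULL);
    	return l;
    }

    #define for_each_toy_iter_with_node(_iter, _node, _l)		\
    	for ((_l) = (_iter);						\
    	     ((_l) = toy_next_with_node(_iter, _node, _l));		\
    	     (_l) = ((_l)->next != (_iter) ? (_l)->next : NULL))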
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 3cfac2954d77..fee65c0c8a8e 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -420,6 +420,9 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
unsigned flags;
int ret;
+ /* for the sake of sanity: */
+ BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
+
trans_for_each_entry(trans, i) {
BUG_ON(i->iter->level);
BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
@@ -429,10 +432,6 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
BUG_ON(i->iter->uptodate == BTREE_ITER_END);
}
- /* for sanity purposes: */
- if (trans->nr > 1)
- trans->flags |= BTREE_INSERT_ATOMIC;
-
bubble_sort(trans->entries, trans->nr, btree_trans_cmp);
if (unlikely(!percpu_ref_tryget(&c->writes)))