author	Kent Overstreet <kent.overstreet@gmail.com>	2022-03-06 20:56:08 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2022-03-12 20:14:13 -0500
commit	6613d8471c6baedbd01ecc69b88a2e7fc3169938 (patch)
tree	dd64bbcad0153d0a85f361688f8761af574a73ad
parent	354d8bc719546419d8a051f100a041df45481b5c (diff)
bcachefs: Lock ordering asserts for traverse_all()
This adds new assertions that we always take locks in the correct order while running under traverse_all(). We've been seeing some livelocks and a bit of strange behaviour; these asserts help ensure that everything is working the way we expect.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--	fs/bcachefs/btree_iter.c	25
-rw-r--r--	fs/bcachefs/btree_key_cache.c	4
-rw-r--r--	fs/bcachefs/btree_locking.h	10
-rw-r--r--	fs/bcachefs/btree_types.h	1
4 files changed, 26 insertions(+), 14 deletions(-)
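The invariant this patch enforces is simple: while bch2_btree_path_traverse_all() is re-traversing paths in sorted order, a path must not take node locks if it sorts after the path currently being traversed (tracked by the new trans->traverse_all_idx, which starts at U8_MAX so the check cannot fire before the ordered pass begins). The following is a minimal, self-contained sketch of that check only; the toy_* names and the standalone main() are illustrative stand-ins for this note, not bcachefs code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_IDX_NONE	UINT8_MAX	/* "not currently in the ordered pass" */

struct toy_path {
	uint8_t	sorted_idx;	/* position of this path in the trans's sorted order */
	bool	locked;
};

struct toy_trans {
	bool	in_traverse_all;
	uint8_t	traverse_all_idx;	/* index currently being traversed */
};

static void toy_mark_locked(struct toy_trans *trans, struct toy_path *path)
{
	/*
	 * Mirror of the BUG_ON() this patch adds to mark_btree_node_locked():
	 * under traverse_all(), never lock a path that sorts after the one
	 * currently being traversed.
	 */
	assert(!(trans->in_traverse_all &&
		 path->sorted_idx > trans->traverse_all_idx));
	path->locked = true;
}

int main(void)
{
	struct toy_trans trans = {
		.in_traverse_all  = true,
		.traverse_all_idx = 1,
	};
	struct toy_path earlier = { .sorted_idx = 0 };
	struct toy_path current = { .sorted_idx = 1 };

	toy_mark_locked(&trans, &earlier);	/* ok: sorts before the current index */
	toy_mark_locked(&trans, &current);	/* ok: the path being traversed */

	printf("locked %d paths in order\n", earlier.locked + current.locked);
	return 0;
}

In the real patch, paths added mid-traversal at or before the current position bump traverse_all_idx (see btree_path_list_add() below), so the index keeps pointing at the path being worked on and the assert stays valid.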
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 23923503dc6e..780b77e72bb5 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -189,7 +189,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) {
- mark_btree_node_locked(path, level, want);
+ mark_btree_node_locked(trans, path, level, want);
return true;
}
fail:
@@ -240,7 +240,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
return false;
success:
- mark_btree_node_intent_locked(path, level);
+ mark_btree_node_intent_locked(trans, path, level);
return true;
}
@@ -1168,7 +1168,7 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
t != BTREE_NODE_UNLOCKED) {
btree_node_unlock(path, b->c.level);
six_lock_increment(&b->c.lock, t);
- mark_btree_node_locked(path, b->c.level, t);
+ mark_btree_node_locked(trans, path, b->c.level, t);
}
btree_path_level_init(trans, path, b);
@@ -1245,7 +1245,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
path->l[i].b = NULL;
- mark_btree_node_locked(path, path->level, lock_type);
+ mark_btree_node_locked(trans, path, path->level, lock_type);
btree_path_level_init(trans, path, b);
return 0;
}
@@ -1410,7 +1410,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
if (unlikely(ret))
goto err;
- mark_btree_node_locked(path, level, lock_type);
+ mark_btree_node_locked(trans, path, level, lock_type);
btree_path_level_init(trans, path, b);
if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
@@ -1443,6 +1443,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
trans->in_traverse_all = true;
retry_all:
trans->restarted = false;
+ trans->traverse_all_idx = U8_MAX;
trans_for_each_path(trans, path)
path->should_be_locked = false;
@@ -1475,9 +1476,9 @@ retry_all:
}
/* Now, redo traversals in correct order: */
- i = 0;
- while (i < trans->nr_sorted) {
- path = trans->paths + trans->sorted[i];
+ trans->traverse_all_idx = 0;
+ while (trans->traverse_all_idx < trans->nr_sorted) {
+ path = trans->paths + trans->sorted[trans->traverse_all_idx];
/*
* Traversing a path can cause another path to be added at about
@@ -1489,8 +1490,9 @@ retry_all:
goto retry_all;
if (ret)
goto err;
+ BUG_ON(path->uptodate);
} else {
- i++;
+ trans->traverse_all_idx++;
}
}
@@ -2834,6 +2836,11 @@ static inline void btree_path_list_add(struct btree_trans *trans,
path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
+ if (trans->in_traverse_all &&
+ trans->traverse_all_idx != U8_MAX &&
+ trans->traverse_all_idx >= path->sorted_idx)
+ trans->traverse_all_idx++;
+
array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index ee89b650f6a4..b1b7a30417bc 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -309,7 +309,7 @@ retry:
if (!ck)
goto retry;
- mark_btree_node_locked(path, 0, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
path->locks_want = 1;
} else {
enum six_lock_type lock_want = __btree_lock_want(path, 0);
@@ -330,7 +330,7 @@ retry:
goto retry;
}
- mark_btree_node_locked(path, 0, lock_want);
+ mark_btree_node_locked(trans, path, 0, lock_want);
}
path->l[0].lock_seq = ck->c.lock.state.seq;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index b4434eca0746..67c970d727ac 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -58,7 +58,8 @@ static inline void mark_btree_node_unlocked(struct btree_path *path,
path->nodes_intent_locked &= ~(1 << level);
}
-static inline void mark_btree_node_locked(struct btree_path *path,
+static inline void mark_btree_node_locked(struct btree_trans *trans,
+ struct btree_path *path,
unsigned level,
enum six_lock_type type)
{
@@ -66,14 +67,17 @@ static inline void mark_btree_node_locked(struct btree_path *path,
BUILD_BUG_ON(SIX_LOCK_read != 0);
BUILD_BUG_ON(SIX_LOCK_intent != 1);
+ BUG_ON(trans->in_traverse_all && path->sorted_idx > trans->traverse_all_idx);
+
path->nodes_locked |= 1 << level;
path->nodes_intent_locked |= type << level;
}
-static inline void mark_btree_node_intent_locked(struct btree_path *path,
+static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
+ struct btree_path *path,
unsigned level)
{
- mark_btree_node_locked(path, level, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path, level, SIX_LOCK_intent);
}
static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index e6deb3a4494b..575635b5fa10 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -392,6 +392,7 @@ struct btree_trans {
u8 nr_sorted;
u8 nr_updates;
+ u8 traverse_all_idx;
bool used_mempool:1;
bool in_traverse_all:1;
bool restarted:1;