author     Kent Overstreet <kent.overstreet@gmail.com>    2022-02-15 22:01:33 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>    2022-02-17 02:20:00 -0500
commit     161ce5dd3b33f4d91a6fb093a9378474d14b1a2c (patch)
tree       5916aab96a19c3382f0662a9db33d88a0ebfd114
parent     0da5072da9217ad94ef9314730fa7b9e0dcd8b07 (diff)
bcachefs: Fix __bch2_btree_node_lock
__bch2_btree_node_lock() was implementing the wrong lock ordering for cached
vs. non-cached paths - this fixes it to match the btree path sort order as
defined by __btree_path_cmp(), and also simplifies the code some.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--   fs/bcachefs/btree_iter.c   63
1 file changed, 32 insertions(+), 31 deletions(-)
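
The ordering the commit message refers to is the one __btree_path_cmp()
defines: paths sort by btree_id, then by the cached flag (so within one btree,
non-cached paths come before cached paths), then by key position, then by
level, and locks must be taken in that same order. A minimal standalone sketch
of that chained comparison follows; the struct layout, pos_cmp(), and the
level tie-break are illustrative stand-ins, not the actual bcachefs types:

#include <stdio.h>

struct pos { unsigned long long inode, offset; };

struct path {
	unsigned	btree_id;
	int		cached;		/* 0 = btree node path, 1 = key cache path */
	struct pos	pos;
	unsigned	level;
};

static int cmp_int(long long l, long long r)
{
	return (l > r) - (l < r);
}

static int pos_cmp(struct pos l, struct pos r)
{
	return cmp_int(l.inode, r.inode) ?: cmp_int(l.offset, r.offset);
}

/* Same shape as __btree_path_cmp(): the first non-zero comparison wins. */
static int path_cmp(const struct path *l, const struct path *r)
{
	return cmp_int(l->btree_id, r->btree_id) ?:
	       cmp_int(l->cached, r->cached) ?:
	       pos_cmp(l->pos, r->pos) ?:
	       -cmp_int(l->level, r->level);
}

int main(void)
{
	struct path a = { .btree_id = 1, .cached = 0, .pos = { 10, 0 }, .level = 0 };
	struct path b = { .btree_id = 1, .cached = 1, .pos = {  5, 0 }, .level = 0 };

	/* Negative: a (non-cached) sorts, and must be locked, before b (cached) */
	printf("path_cmp(a, b) = %d\n", path_cmp(a, b));
	return 0;
}

Built with gcc or clang (the sketch uses the GNU "?:" conditional, as the
kernel source does), path_cmp(a, b) is negative: the non-cached path must be
locked before the cached one even though its position is larger.
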
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 4b7833511c26..f8ce9d3dfe94 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -58,6 +58,9 @@ static inline int __btree_path_cmp(const struct btree_path *l,
struct bpos r_pos,
unsigned r_level)
{
+ /*
+ * Must match lock ordering as defined by __bch2_btree_node_lock:
+ */
return cmp_int(l->btree_id, r_btree_id) ?:
cmp_int((int) l->cached, (int) r_cached) ?:
bpos_cmp(l->pos, r_pos) ?:
@@ -300,8 +303,8 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
{
- struct btree_path *linked, *deadlock_path = NULL;
- unsigned reason = 9;
+ struct btree_path *linked;
+ unsigned reason;
/* Check if it's safe to block: */
trans_for_each_path(trans, linked) {
@@ -322,28 +325,28 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- deadlock_path = linked;
reason = 1;
+ goto deadlock;
}
if (linked->btree_id != path->btree_id) {
- if (linked->btree_id > path->btree_id) {
- deadlock_path = linked;
- reason = 3;
- }
- continue;
+ if (linked->btree_id < path->btree_id)
+ continue;
+
+ reason = 3;
+ goto deadlock;
}
/*
- * Within the same btree, cached paths come before non
- * cached paths:
+ * Within the same btree, non-cached paths come before cached
+ * paths:
*/
if (linked->cached != path->cached) {
- if (path->cached) {
- deadlock_path = linked;
- reason = 4;
- }
- continue;
+ if (!linked->cached)
+ continue;
+
+ reason = 4;
+ goto deadlock;
}
/*
@@ -352,35 +355,33 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
* we're about to lock, it must have the ancestors locked too:
*/
if (level > __fls(linked->nodes_locked)) {
- deadlock_path = linked;
reason = 5;
+ goto deadlock;
}
/* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) &&
bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
linked->cached)) <= 0) {
- deadlock_path = linked;
- reason = 7;
BUG_ON(trans->in_traverse_all);
+ reason = 7;
+ goto deadlock;
}
}
- if (unlikely(deadlock_path)) {
- trace_trans_restart_would_deadlock(trans->fn, ip,
- trans->in_traverse_all, reason,
- deadlock_path->btree_id,
- deadlock_path->cached,
- &deadlock_path->pos,
- path->btree_id,
- path->cached,
- &pos);
- btree_trans_restart(trans);
- return false;
- }
-
return btree_node_lock_type(trans, path, b, pos, level,
type, should_sleep_fn, p);
+deadlock:
+ trace_trans_restart_would_deadlock(trans->fn, ip,
+ trans->in_traverse_all, reason,
+ linked->btree_id,
+ linked->cached,
+ &linked->pos,
+ path->btree_id,
+ path->cached,
+ &pos);
+ btree_trans_restart(trans);
+ return false;
}
/* Btree iterator locking: */
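
For readability, here is a standalone sketch of the control-flow shape the
patch gives __bch2_btree_node_lock(): every lock the transaction already holds
is checked against the ordering rules, and the first violation jumps to a
single deadlock label that reports a reason code and fails the attempt. The
types, names, and the simplified position check are illustrative only, not the
actual bcachefs code:

#include <stdbool.h>
#include <stdio.h>

struct held_lock {
	unsigned	btree_id;
	bool		cached;
	long long	pos;
};

static bool may_take_lock(const struct held_lock *held, unsigned nr,
			  unsigned btree_id, bool cached, long long pos)
{
	unsigned i, reason;

	for (i = 0; i < nr; i++) {
		const struct held_lock *linked = &held[i];

		/* Locks must be taken in increasing btree_id order: */
		if (linked->btree_id != btree_id) {
			if (linked->btree_id < btree_id)
				continue;

			reason = 3;
			goto deadlock;
		}

		/* Within the same btree, non-cached locks come before cached: */
		if (linked->cached != cached) {
			if (!linked->cached)
				continue;

			reason = 4;
			goto deadlock;
		}

		/* Within the same btree and kind, lock in key order: */
		if (pos <= linked->pos) {
			reason = 7;
			goto deadlock;
		}
	}

	return true;
deadlock:
	fprintf(stderr, "would deadlock (reason %u), restart transaction\n", reason);
	return false;
}

int main(void)
{
	struct held_lock held[] = {
		{ .btree_id = 1, .cached = true, .pos = 5 },
	};

	/* Taking a non-cached lock after a cached one on the same btree fails: */
	printf("%d\n", may_take_lock(held, 1, 1, false, 10));
	return 0;
}
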