author     Kent Overstreet <kent.overstreet@linux.dev>   2022-08-26 14:55:00 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2022-10-03 23:53:40 -0400
commit     030f15398e699f70135d8fb2e9784a6392ad51a3
tree       81616f8f163861d202d39a676d847080e06abef4
parent     86d3e99fe9df41df9f1d1168c16613b382476019
bcachefs: Mark write locks before taking lock
six locks are unfair: while a thread is blocked trying to take a write
lock, new read locks will fail. The new deadlock cycle detector makes
use of our existing lock tracing, so for it to work correctly we need
to tell it we're holding a write lock before we actually take the lock.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
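
For illustration, the unfairness the message relies on can be modelled in a
few lines of standalone userspace C (a deliberately simplified sketch, not
the real six-lock code; struct model_lock and model_trylock_read are
hypothetical names, and this single-threaded demo ignores the real code's
memory-ordering concerns): once a writer is queued, new read-lock attempts
fail, which is why a waiting writer has to be visible to the cycle detector
from the moment it starts waiting.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_lock {
        atomic_uint     readers;        /* current read holders */
        atomic_bool     write_waiting;  /* a writer is queued on the lock */
};

static bool model_trylock_read(struct model_lock *l)
{
        /* the unfair part: refuse new readers as soon as a writer waits */
        if (atomic_load(&l->write_waiting))
                return false;
        atomic_fetch_add(&l->readers, 1);
        return true;
}

int main(void)
{
        struct model_lock l;

        atomic_init(&l.readers, 0);
        atomic_init(&l.write_waiting, false);

        printf("read lock before a writer waits: %s\n",
               model_trylock_read(&l) ? "taken" : "refused");

        atomic_store(&l.write_waiting, true);  /* a writer blocks on the lock */

        printf("read lock while a writer waits:  %s\n",
               model_trylock_read(&l) ? "taken" : "refused");
        return 0;
}

If the blocked writer already holds locks that the refused readers need, the
system can deadlock, and the cycle detector can only see that if the writer's
pending lock is already marked.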
-rw-r--r--  fs/bcachefs/btree_locking.h      |  9
-rw-r--r--  fs/bcachefs/btree_update_leaf.c  | 11
2 files changed, 16 insertions, 4 deletions
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 54da397eb4e5..3c136f22bfdb 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -272,10 +272,15 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
         EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
         EBUG_ON(!btree_node_intent_locked(path, b->c.level));
 
+        /*
+         * six locks are unfair, and read locks block while a thread wants a
+         * write lock: thus, we need to tell the cycle detector we have a write
+         * lock _before_ taking the lock:
+         */
+        mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+
         if (unlikely(!six_trylock_write(&b->c.lock)))
                 __bch2_btree_node_lock_write(trans, b);
-
-        mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
 }
 
 /* relock: */
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index d7bc25e473e8..c1f9250b674f 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -811,6 +811,13 @@ static inline int trans_lock_write(struct btree_trans *trans)
                 if (same_leaf_as_prev(trans, i))
                         continue;
 
+                /*
+                 * six locks are unfair, and read locks block while a thread
+                 * wants a write lock: thus, we need to tell the cycle detector
+                 * we have a write lock _before_ taking the lock:
+                 */
+                mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
+
                 if (!six_trylock_write(&insert_l(i)->b->c.lock)) {
                         if (have_conflicting_read_lock(trans, i->path))
                                 goto fail;
@@ -822,13 +829,13 @@
                         BUG_ON(ret);
                 }
 
-                mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
-
                 bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
         }
 
         return 0;
 fail:
+        mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_intent);
+
         while (--i >= trans->updates) {
                 if (same_leaf_as_prev(trans, i))
                         continue;
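
One subtlety the diff handles but the message doesn't spell out: the
write-lock mark is now taken optimistically, before the lock is actually
held, so any path that gives up has to roll it back. That's why
trans_lock_write()'s fail path re-marks the node as SIX_LOCK_intent, the
lock the path does still hold. The shape of the pattern, condensed from the
diff above (elisions marked):

        /* publish the write lock to the cycle detector before blocking: */
        mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);

        if (!six_trylock_write(&insert_l(i)->b->c.lock)) {
                if (have_conflicting_read_lock(trans, i->path))
                        goto fail;
                /* ... wait for the write lock ... */
        }
        /* ... */
fail:
        /* the write lock was never taken: restore the intent-lock mark */
        mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_intent);

bch2_btree_node_lock_write() has no equivalent rollback, which suggests
__bch2_btree_node_lock_write() doesn't return until the lock is taken, so
the optimistic mark always ends up true on that path.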