diff options
author | Kent Overstreet <kent.overstreet@linux.dev> | 2022-09-03 21:09:54 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2022-10-03 23:53:52 -0400 |
commit | c32bb2823a81c99d75ba0342ce1c01f98d9fcdf2 (patch) | |
tree | 981835862487fc63d43b996222d80c53d6e553fb | |
parent | 7e66c211571632a0abb0f96a46e8382ef1f19eb6 (diff) |
bcachefs: Convert more locking code to btree_bkey_cached_common
Ideally, all the code in btree_locking.c should be converted, but then
we'd want to convert btree_path to point to btree_bkey_cached_common too,
and then we'd be in for a much bigger cleanup - but a bit of incremental
cleanup will still be helpful for the next patches.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r-- | fs/bcachefs/btree_key_cache.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/btree_locking.c | 11 | ||||
-rw-r--r-- | fs/bcachefs/btree_locking.h | 16 | ||||
-rw-r--r-- | fs/bcachefs/btree_update_interior.c | 4 | ||||
-rw-r--r-- | fs/bcachefs/btree_update_leaf.c | 2 |
5 files changed, 18 insertions, 17 deletions
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index c7843c836049..53c58d7dc97f 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -343,7 +343,7 @@ static int btree_key_cache_fill(struct btree_trans *trans, } } - ret = bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b); + ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c); if (ret) { kfree(new_k); goto err; diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index c8c90b4fca58..19e19192a08b 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -52,9 +52,10 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans, /* lock */ -void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b) +void __bch2_btree_node_lock_write(struct btree_trans *trans, + struct btree_bkey_cached_common *b) { - int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read]; + int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read]; /* * Must drop our read locks before calling six_lock_write() - @@ -62,9 +63,9 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b) * goes to 0, and it's safe because we have the node intent * locked: */ - six_lock_readers_add(&b->c.lock, -readers); - btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); - six_lock_readers_add(&b->c.lock, readers); + six_lock_readers_add(&b->lock, -readers); + btree_node_lock_nopath_nofail(trans, b, SIX_LOCK_write); + six_lock_readers_add(&b->lock, readers); } static inline bool path_has_read_locks(struct btree_path *path) diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h index 1869b993a4f7..3bc3301b7b1b 100644 --- a/fs/bcachefs/btree_locking.h +++ b/fs/bcachefs/btree_locking.h @@ -280,31 +280,31 @@ static inline int btree_node_lock(struct btree_trans *trans, return ret; } -void __bch2_btree_node_lock_write(struct 
btree_trans *, struct btree *); +void __bch2_btree_node_lock_write(struct btree_trans *, struct btree_bkey_cached_common *); static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans, struct btree_path *path, - struct btree *b) + struct btree_bkey_cached_common *b) { - EBUG_ON(path->l[b->c.level].b != b); - EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq); - EBUG_ON(!btree_node_intent_locked(path, b->c.level)); + EBUG_ON(&path->l[b->level].b->c != b); + EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq); + EBUG_ON(!btree_node_intent_locked(path, b->level)); /* * six locks are unfair, and read locks block while a thread wants a * write lock: thus, we need to tell the cycle detector we have a write * lock _before_ taking the lock: */ - mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write); + mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write); - if (unlikely(!six_trylock_write(&b->c.lock))) + if (unlikely(!six_trylock_write(&b->lock))) __bch2_btree_node_lock_write(trans, b); } static inline int __must_check bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path, - struct btree *b) + struct btree_bkey_cached_common *b) { bch2_btree_node_lock_write_nofail(trans, path, b); return 0; diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 2a2ab3578ff4..0abb79ef8455 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -1162,7 +1162,7 @@ static void bch2_btree_set_root(struct btree_update *as, * Ensure no one is using the old root while we switch to the * new root: */ - bch2_btree_node_lock_write_nofail(trans, path, old); + bch2_btree_node_lock_write_nofail(trans, path, &old->c); bch2_btree_set_root_inmem(c, b); @@ -2002,7 +2002,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, if (ret) goto err; - bch2_btree_node_lock_write_nofail(trans, iter->path, b); + 
bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c); if (new_hash) { mutex_lock(&c->btree_cache.lock); diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c index 46a46dc6348a..e9518fbc92a4 100644 --- a/fs/bcachefs/btree_update_leaf.c +++ b/fs/bcachefs/btree_update_leaf.c @@ -81,7 +81,7 @@ void bch2_btree_node_lock_for_insert(struct btree_trans *trans, struct btree_path *path, struct btree *b) { - bch2_btree_node_lock_write_nofail(trans, path, b); + bch2_btree_node_lock_write_nofail(trans, path, &b->c); bch2_btree_node_prep_for_write(trans, path, b); } |