author    Daniel Hill <daniel@gluo.nz>                  2022-07-14 18:58:23 +1200
committer Kent Overstreet <kent.overstreet@linux.dev>  2022-10-03 22:52:25 -0400
commit    fdb541db181f60b6677007a5825f9ac155c2a94d (patch)
tree      9b7581206508fa9f5a71c3d8751c5e5303557990
parent    f404831e85bb73289a46a9ed474a3923aa2fd5a0 (diff)
bcachefs: lock time stats prep work.
We need the caller name and a place to store our results; btree_trans provides both.

Signed-off-by: Daniel Hill <daniel@gluo.nz>
-rw-r--r--  fs/bcachefs/btree_cache.c            |  2
-rw-r--r--  fs/bcachefs/btree_iter.c             | 58
-rw-r--r--  fs/bcachefs/btree_iter.h             |  7
-rw-r--r--  fs/bcachefs/btree_key_cache.c        |  2
-rw-r--r--  fs/bcachefs/btree_locking.h          |  8
-rw-r--r--  fs/bcachefs/btree_update_interior.c  |  4
6 files changed, 43 insertions, 38 deletions
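The change below is mechanical (every unlock helper gains a struct btree_trans * argument), but it is the hook the rest of the series needs: btree_path has no back pointer to its transaction, so trans must be threaded through explicitly, and btree_trans already carries the caller name in trans->fn and is the natural place to accumulate lock hold times. A minimal self-contained sketch of the pattern being prepared for — the stats and locked_since fields and the ns_since() helper here are illustrative stand-ins, not what the later patches actually add:

#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins for the real bcachefs types. */
struct lock_time_stats {
	unsigned long	count;
	unsigned long	total_ns;
};

struct btree_trans {
	const char		*fn;	/* caller name, as in the real struct */
	struct lock_time_stats	stats;	/* hypothetical per-trans accumulator */
};

struct btree_path {
	unsigned	nodes_locked;		/* bitmask of locked levels */
	struct timespec	locked_since[4];	/* small fixed depth for the sketch */
};

static unsigned long ns_since(const struct timespec *t)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - t->tv_sec) * 1000000000UL +
	       (now.tv_nsec - t->tv_nsec);
}

/*
 * The shape of the change in this patch: unlock takes the transaction,
 * so a follow-up can charge the lock hold time to trans->stats and
 * report it against trans->fn.
 */
static void btree_node_unlock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level)
{
	if (path->nodes_locked & (1U << level)) {
		trans->stats.count++;
		trans->stats.total_ns += ns_since(&path->locked_since[level]);
		path->nodes_locked &= ~(1U << level);
	}
}

int main(void)
{
	struct btree_trans trans = { .fn = __func__ };
	struct btree_path path = { .nodes_locked = 1U << 0 };

	clock_gettime(CLOCK_MONOTONIC, &path.locked_since[0]);
	btree_node_unlock(&trans, &path, 0);

	printf("%s: %lu unlocks, %lu ns held\n",
	       trans.fn, trans.stats.count, trans.stats.total_ns);
	return 0;
}

Since the pointer cannot be recovered from the path, threading it through is why this patch has to touch every unlock call site at once.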
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 7618ecd5ed9d..a7c99f7ec81b 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -893,7 +893,7 @@ lock_node:
* was removed - and we'll bail out:
*/
if (btree_node_read_locked(path, level + 1))
- btree_node_unlock(path, level + 1);
+ btree_node_unlock(trans, path, level + 1);
if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip)) {
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 923381d87cc6..e94b19409096 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -236,7 +236,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
if (btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
- btree_node_unlock(path, level);
+ btree_node_unlock(trans, path, level);
goto success;
}
@@ -271,7 +271,7 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
* the node that we failed to relock:
*/
if (fail_idx >= 0) {
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
do {
@@ -429,7 +429,7 @@ bool bch2_btree_path_relock_intent(struct btree_trans *trans,
l < path->locks_want && btree_path_node(path, l);
l++) {
if (!bch2_btree_node_relock(trans, path, l)) {
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
path->btree_id, &path->pos);
@@ -500,7 +500,8 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
return false;
}
-void __bch2_btree_path_downgrade(struct btree_path *path,
+void __bch2_btree_path_downgrade(struct btree_trans *trans,
+ struct btree_path *path,
unsigned new_locks_want)
{
unsigned l;
@@ -512,7 +513,7 @@ void __bch2_btree_path_downgrade(struct btree_path *path,
while (path->nodes_locked &&
(l = __fls(path->nodes_locked)) >= path->locks_want) {
if (l > path->level) {
- btree_node_unlock(path, l);
+ btree_node_unlock(trans, path, l);
} else {
if (btree_node_intent_locked(path, l)) {
six_lock_downgrade(&path->l[l].b->c.lock);
@@ -530,7 +531,7 @@ void bch2_trans_downgrade(struct btree_trans *trans)
struct btree_path *path;
trans_for_each_path(trans, path)
- bch2_btree_path_downgrade(path);
+ bch2_btree_path_downgrade(trans, path);
}
/* Btree transaction locking: */
@@ -558,7 +559,7 @@ void bch2_trans_unlock(struct btree_trans *trans)
struct btree_path *path;
trans_for_each_path(trans, path)
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
/*
* bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
@@ -586,7 +587,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
bkey_cmp(ck->key.pos, path->pos));
if (!locked)
- btree_node_unlock(path, 0);
+ btree_node_unlock(trans, path, 0);
}
static void bch2_btree_path_verify_level(struct btree_trans *trans,
@@ -643,7 +644,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
}
if (!locked)
- btree_node_unlock(path, level);
+ btree_node_unlock(trans, path, level);
return;
err:
bch2_bpos_to_text(&buf1, path->pos);
@@ -1115,7 +1116,7 @@ static void btree_path_verify_new_node(struct btree_trans *trans,
}
if (!parent_locked)
- btree_node_unlock(path, plevel);
+ btree_node_unlock(trans, path, plevel);
}
static inline void __btree_path_level_init(struct btree_path *path,
@@ -1167,7 +1168,7 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
if (path->nodes_locked &&
t != BTREE_NODE_UNLOCKED) {
- btree_node_unlock(path, b->c.level);
+ btree_node_unlock(trans, path, b->c.level);
six_lock_increment(&b->c.lock, t);
mark_btree_node_locked(trans, path, b->c.level, t);
}
@@ -1286,7 +1287,7 @@ static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *pat
}
if (!was_locked)
- btree_node_unlock(path, path->level);
+ btree_node_unlock(trans, path, path->level);
bch2_bkey_buf_exit(&tmp, c);
return ret;
@@ -1321,7 +1322,7 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p
}
if (!was_locked)
- btree_node_unlock(path, path->level);
+ btree_node_unlock(trans, path, path->level);
bch2_bkey_buf_exit(&tmp, c);
return ret;
@@ -1346,7 +1347,7 @@ static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
bp->mem_ptr = (unsigned long)b;
if (!locked)
- btree_node_unlock(path, plevel);
+ btree_node_unlock(trans, path, plevel);
}
static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
@@ -1419,7 +1420,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
btree_node_mem_ptr_set(trans, path, level + 1, b);
if (btree_node_read_locked(path, level + 1))
- btree_node_unlock(path, level + 1);
+ btree_node_unlock(trans, path, level + 1);
path->level = level;
bch2_btree_path_verify_locks(path);
@@ -1528,9 +1529,10 @@ static inline bool btree_path_good_node(struct btree_trans *trans,
return true;
}
-static void btree_path_set_level_up(struct btree_path *path)
+static void btree_path_set_level_up(struct btree_trans *trans,
+ struct btree_path *path)
{
- btree_node_unlock(path, path->level);
+ btree_node_unlock(trans, path, path->level);
path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
path->level++;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
@@ -1546,7 +1548,7 @@ static void btree_path_set_level_down(struct btree_trans *trans,
for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
- btree_node_unlock(path, l);
+ btree_node_unlock(trans, path, l);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
bch2_btree_path_verify(trans, path);
@@ -1560,7 +1562,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
while (btree_path_node(path, l) &&
!btree_path_good_node(trans, path, l, check_pos)) {
- btree_node_unlock(path, l);
+ btree_node_unlock(trans, path, l);
path->l[l].b = BTREE_ITER_NO_NODE_UP;
l++;
}
@@ -1571,7 +1573,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
i++)
if (!bch2_btree_node_relock(trans, path, i))
while (l <= i) {
- btree_node_unlock(path, l);
+ btree_node_unlock(trans, path, l);
path->l[l].b = BTREE_ITER_NO_NODE_UP;
l++;
}
@@ -1640,7 +1642,7 @@ static int btree_path_traverse_one(struct btree_trans *trans,
goto out;
}
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
path->level = depth_want;
if (ret == -EIO)
@@ -1737,7 +1739,7 @@ bch2_btree_path_set_pos(struct btree_trans *trans,
bch2_btree_path_check_sort(trans, path, cmp);
if (unlikely(path->cached)) {
- btree_node_unlock(path, 0);
+ btree_node_unlock(trans, path, 0);
path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
goto out;
@@ -1760,7 +1762,7 @@ bch2_btree_path_set_pos(struct btree_trans *trans,
if (l != path->level) {
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
}
out:
bch2_btree_path_verify(trans, path);
@@ -1801,7 +1803,7 @@ static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btr
static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
{
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
btree_path_list_remove(trans, path);
trans->paths_allocated &= ~(1ULL << path->idx);
}
@@ -2139,12 +2141,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
/* got to end? */
if (!btree_path_node(path, path->level + 1)) {
- btree_path_set_level_up(path);
+ btree_path_set_level_up(trans, path);
return NULL;
}
if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
- __bch2_btree_path_unlock(path);
+ __bch2_btree_path_unlock(trans, path);
path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
@@ -2158,7 +2160,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
b = btree_path_node(path, path->level + 1);
if (!bpos_cmp(iter->pos, b->key.k.p)) {
- btree_node_unlock(path, path->level);
+ btree_node_unlock(trans, path, path->level);
path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
path->level++;
} else {
@@ -2595,7 +2597,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
(iter->advanced &&
!bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
iter->pos = path_l(iter->path)->b->key.k.p;
- btree_path_set_level_up(iter->path);
+ btree_path_set_level_up(trans, iter->path);
iter->advanced = false;
continue;
}
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 4b9d03b875ef..74982348910d 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -191,14 +191,15 @@ static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
: path->uptodate == BTREE_ITER_UPTODATE;
}
-void __bch2_btree_path_downgrade(struct btree_path *, unsigned);
+void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);
-static inline void bch2_btree_path_downgrade(struct btree_path *path)
+static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
+ struct btree_path *path)
{
unsigned new_locks_want = path->level + !!path->intent_ref;
if (path->locks_want > new_locks_want)
- __bch2_btree_path_downgrade(path, new_locks_want);
+ __bch2_btree_path_downgrade(trans, path, new_locks_want);
}
void bch2_trans_downgrade(struct btree_trans *);
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 7afa58647f14..0bbab86c0138 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -431,7 +431,7 @@ fill:
return ret;
err:
if (ret != -EINTR) {
- btree_node_unlock(path, 0);
+ btree_node_unlock(trans, path, 0);
path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
}
return ret;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 67c970d727ac..4a3ed247d8ce 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -99,7 +99,8 @@ btree_lock_want(struct btree_path *path, int level)
return BTREE_NODE_UNLOCKED;
}
-static inline void btree_node_unlock(struct btree_path *path, unsigned level)
+static inline void btree_node_unlock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
{
int lock_type = btree_node_locked_type(path, level);
@@ -110,12 +111,13 @@ static inline void btree_node_unlock(struct btree_path *path, unsigned level)
mark_btree_node_unlocked(path, level);
}
-static inline void __bch2_btree_path_unlock(struct btree_path *path)
+static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
+ struct btree_path *path)
{
btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
while (path->nodes_locked)
- btree_node_unlock(path, __ffs(path->nodes_locked));
+ btree_node_unlock(trans, path, __ffs(path->nodes_locked));
}
static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index c3ef2387ddad..d7f271d67703 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1830,7 +1830,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_update_done(as);
out:
- bch2_btree_path_downgrade(iter->path);
+ bch2_btree_path_downgrade(trans, iter->path);
return ret;
}
@@ -1943,7 +1943,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
BUG_ON(iter2.path->level != b->c.level);
BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
- btree_node_unlock(iter2.path, iter2.path->level);
+ btree_node_unlock(trans, iter2.path, iter2.path->level);
path_l(iter2.path)->b = BTREE_ITER_NO_NODE_UP;
iter2.path->level++;
btree_path_set_dirty(iter2.path, BTREE_ITER_NEED_TRAVERSE);