author     Kent Overstreet <kent.overstreet@linux.dev>   2022-10-12 11:04:28 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-01-06 19:47:53 -0500
commit     2a2d5725251cf10c4ccb7a4e8607341dd0d02a74
tree       c58dad48a8099315e91d94db7aaa8d9dedd81f1b
parent     7a4f20138a85ce3ceac52c8a297095dc822acbf3
bcachefs: Call bch2_btree_update_add_new_node() before dropping write lock
btree nodes can be written by other threads (shrinker, journal reclaim) with
only a read lock, but brand new nodes should only be written by the thread
doing the split/interior update.

bch2_btree_update_add_new_node() sets btree node flags to indicate that this
is a new node and should not be written out by other threads, thus we need to
call it before dropping our write lock.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
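
To illustrate the ordering rule this patch enforces, here is a minimal
standalone sketch (plain pthreads, not the bcachefs six-lock or btree code;
struct node, NODE_FLAG_NEW, mark_new_node() and maybe_write_node() are
invented names for illustration): the "this is a new node" state has to be
published while the write lock is still held, so a thread doing writeback
under only a read lock can never observe the node without that flag set.

/*
 * Simplified sketch only, under the assumptions stated above.
 * The flag must be set while the write lock is still held -- if we
 * unlock first, a reader-side writeback thread can observe the node
 * without the flag and write it out prematurely.
 */
#include <pthread.h>
#include <stdio.h>

#define NODE_FLAG_NEW (1u << 0)

struct node {
	pthread_rwlock_t lock;
	unsigned	 flags;
};

/* Called by the thread that created the node, write lock held. */
static void mark_new_node(struct node *n)
{
	n->flags |= NODE_FLAG_NEW;
}

/* Writeback path: may run from another thread with only a read lock. */
static void maybe_write_node(struct node *n)
{
	pthread_rwlock_rdlock(&n->lock);
	if (n->flags & NODE_FLAG_NEW)
		printf("skipping write: new node, owner will write it\n");
	else
		printf("writing node\n");
	pthread_rwlock_unlock(&n->lock);
}

int main(void)
{
	struct node n = { .flags = 0 };

	pthread_rwlock_init(&n.lock, NULL);

	pthread_rwlock_wrlock(&n.lock);
	/* ... build the new node ... */
	mark_new_node(&n);		/* correct: flag set before unlock */
	pthread_rwlock_unlock(&n.lock);

	/* A concurrent thread taking only the read lock now sees the flag. */
	maybe_write_node(&n);

	pthread_rwlock_destroy(&n.lock);
	return 0;
}

The hunks below apply exactly this reordering: each call to
bch2_btree_update_add_new_node() is moved so that it happens before the
corresponding six_unlock_write().
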
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 1c52e7a2fb40..634b3030f2ad 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -428,7 +428,6 @@ static struct btree *__btree_root_alloc(struct btree_update *as,
btree_node_set_format(b, b->data->format);
bch2_btree_build_aux_trees(b);
- six_unlock_write(&b->c.lock);
return b;
}
@@ -1526,6 +1525,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1);
+
+ bch2_btree_update_add_new_node(as, n1);
+ bch2_btree_update_add_new_node(as, n2);
six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->c.lock);
@@ -1539,9 +1541,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
mark_btree_node_locked(trans, path2, n2->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, path2, n2);
- bch2_btree_update_add_new_node(as, n1);
- bch2_btree_update_add_new_node(as, n2);
-
/*
* Note that on recursive parent_keys == keys, so we
* can't start adding new keys to parent_keys before emptying it
@@ -1554,6 +1553,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
/* Depth increases, make a new root */
n3 = __btree_root_alloc(as, trans, b->c.level + 1);
+ bch2_btree_update_add_new_node(as, n3);
+ six_unlock_write(&n3->c.lock);
+
path2->locks_want++;
BUG_ON(btree_node_locked(path2, n3->c.level));
six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
@@ -1563,14 +1565,13 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX;
- bch2_btree_update_add_new_node(as, n3);
-
btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
}
} else {
trace_and_count(c, btree_node_compact, c, b);
bch2_btree_build_aux_trees(n1);
+ bch2_btree_update_add_new_node(as, n1);
six_unlock_write(&n1->c.lock);
path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p);
@@ -1578,8 +1579,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
mark_btree_node_locked(trans, path1, n1->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, path1, n1);
- bch2_btree_update_add_new_node(as, n1);
-
if (parent)
bch2_keylist_add(&as->parent_keys, &n1->key);
}
@@ -1902,9 +1901,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
bch2_btree_sort_into(c, n, next);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->c.lock);
-
bch2_btree_update_add_new_node(as, n);
+ six_unlock_write(&n->c.lock);
new_path = get_unlocked_mut_path(trans, path->btree_id, n->c.level, n->key.k.p);
six_lock_increment(&n->c.lock, SIX_LOCK_intent);
@@ -1978,9 +1976,9 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_interior_update_will_free_node(as, b);
n = bch2_btree_node_alloc_replacement(as, trans, b);
- bch2_btree_update_add_new_node(as, n);
bch2_btree_build_aux_trees(n);
+ bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);
new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);