-rw-r--r--  fs/bcachefs/btree_cache.c  47
-rw-r--r--  fs/bcachefs/btree_cache.h   4
-rw-r--r--  fs/bcachefs/btree_iter.c    5
3 files changed, 22 insertions, 34 deletions
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 798f7be00cec..e347368e7afc 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -577,10 +577,11 @@ err:
/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
- struct btree_iter *iter,
- const struct bkey_i *k,
- unsigned level,
- enum six_lock_type lock_type)
+ struct btree_iter *iter,
+ const struct bkey_i *k,
+ unsigned level,
+ enum six_lock_type lock_type,
+ bool sync)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
@@ -590,6 +591,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
* been freed:
*/
BUG_ON(!btree_node_locked(iter, level + 1));
+ BUG_ON(level >= BTREE_MAX_DEPTH);
b = bch2_btree_node_mem_alloc(c);
if (IS_ERR(b))
@@ -623,9 +625,15 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
- bch2_btree_node_read(c, b, true);
+ bch2_btree_node_read(c, b, sync);
+
six_unlock_write(&b->lock);
+ if (!sync) {
+ six_unlock_intent(&b->lock);
+ return NULL;
+ }
+
if (lock_type == SIX_LOCK_read)
six_lock_downgrade(&b->lock);
@@ -670,7 +678,7 @@ retry:
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, iter, k, level, lock_type);
+ b = bch2_btree_node_fill(c, iter, k, level, lock_type, true);
/* We raced and found the btree node in the cache */
if (!b)
@@ -855,12 +863,13 @@ out_upgrade:
goto out;
}
-void bch2_btree_node_prefetch(struct bch_fs *c, const struct bkey_i *k,
- unsigned level, enum btree_id btree_id)
+void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
+ const struct bkey_i *k, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
+ BUG_ON(!btree_node_locked(iter, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
rcu_read_lock();
@@ -870,27 +879,7 @@ void bch2_btree_node_prefetch(struct bch_fs *c, const struct bkey_i *k,
if (b)
return;
- b = bch2_btree_node_mem_alloc(c);
- if (IS_ERR(b))
- return;
-
- bkey_copy(&b->key, k);
- if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
- /* raced with another fill: */
-
- /* mark as unhashed... */
- bkey_i_to_extent(&b->key)->v._data[0] = 0;
-
- mutex_lock(&bc->lock);
- list_add(&b->list, &bc->freeable);
- mutex_unlock(&bc->lock);
- goto out;
- }
-
- bch2_btree_node_read(c, b, false);
-out:
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ bch2_btree_node_fill(c, iter, k, level, SIX_LOCK_read, false);
}
int bch2_print_btree_node(struct bch_fs *c, struct btree *b,
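For readability, here is a sketch of how the tail of bch2_btree_node_fill() reads once the sync flag from the hunks above is applied. It is reconstructed only from the lines shown here, with the earlier allocation and hash-insert steps elided:

	if (btree_node_read_locked(iter, level + 1))
		btree_node_unlock(iter, level + 1);

	/* Issue the read; blocks only when the caller asked for a synchronous fill. */
	bch2_btree_node_read(c, b, sync);

	six_unlock_write(&b->lock);

	/*
	 * Asynchronous (prefetch) fill: drop the intent lock as well and
	 * return NULL, since the caller does not wait for the node contents.
	 */
	if (!sync) {
		six_unlock_intent(&b->lock);
		return NULL;
	}

	if (lock_type == SIX_LOCK_read)
		six_lock_downgrade(&b->lock);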
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 43109d086479..a89183996a4f 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -29,8 +29,8 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *, struct btree_iter *,
struct btree *, bool,
enum btree_node_sibling);
-void bch2_btree_node_prefetch(struct bch_fs *, const struct bkey_i *,
- unsigned, enum btree_id);
+void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
+ const struct bkey_i *, unsigned);
void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 09f360712a23..3ad9f8f275f5 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -834,9 +834,8 @@ static void btree_iter_prefetch(struct btree_iter *iter)
break;
bch2_bkey_unpack(l->b, &tmp.k, k);
- bch2_btree_node_prefetch(iter->c, &tmp.k,
- iter->level - 1,
- iter->btree_id);
+ bch2_btree_node_prefetch(iter->c, iter, &tmp.k,
+ iter->level - 1);
}
if (!was_locked)
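Taken together, the prefetch path collapses to a cache lookup plus an asynchronous fill. A sketch of bch2_btree_node_prefetch() after the patch, assuming a hash-lookup helper (named btree_cache_find() here purely for illustration, since that line sits outside the hunks above):

	void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
				      const struct bkey_i *k, unsigned level)
	{
		struct btree_cache *bc = &c->btree_cache;
		struct btree *b;

		BUG_ON(!btree_node_locked(iter, level + 1));
		BUG_ON(level >= BTREE_MAX_DEPTH);

		/* Lookup helper name is illustrative; the real call is not in the hunk. */
		rcu_read_lock();
		b = btree_cache_find(bc, k);
		rcu_read_unlock();

		if (b)
			return;

		/* sync == false: kick off the read and return without waiting. */
		bch2_btree_node_fill(c, iter, k, level, SIX_LOCK_read, false);
	}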