summaryrefslogtreecommitdiff
path: root/fs/bcachefs/btree_key_cache.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2022-02-06 23:15:12 -0500
committerKent Overstreet <kent.overstreet@gmail.com>2022-05-30 18:17:04 -0400
commitaadb9c6f8ddbbbc9f7906b40868562b6f9620267 (patch)
treea3a59cd0e881a6dfd4e57cd2638f136a1d1affc8 /fs/bcachefs/btree_key_cache.c
parentdfb8cab03bb1783d4c86a2e3ed8c775101476397 (diff)
bcachefs: BTREE_ITER_WITH_KEY_CACHE
This is the start of cache coherency with the btree key cache - this adds a btree iterator flag that causes lookups to also check the key cache when we're iterating over the btree (not iterating over the key cache). Note that we could still race with another thread creating at item in the key cache and updating it, since we aren't holding the key cache locked if it wasn't found. The next patch for the update path will address this by causing the transaction to restart if the key cache is found to be dirty. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs/bcachefs/btree_key_cache.c')
-rw-r--r--fs/bcachefs/btree_key_cache.c18
1 file changed, 11 insertions, 7 deletions
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index f43153bcbf2f..8bfdbbdbf7c8 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -208,19 +208,21 @@ static int btree_key_cache_fill(struct btree_trans *trans,
struct btree_path *ck_path,
struct bkey_cached *ck)
{
- struct btree_iter iter;
+ struct btree_path *path;
struct bkey_s_c k;
unsigned new_u64s = 0;
struct bkey_i *new_k = NULL;
+ struct bkey u;
int ret;
- bch2_trans_iter_init(trans, &iter, ck->key.btree_id,
- ck->key.pos, BTREE_ITER_SLOTS);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
+ path = bch2_path_get(trans, ck->key.btree_id,
+ ck->key.pos, 0, 0, 0, _THIS_IP_);
+ ret = bch2_btree_path_traverse(trans, path, 0);
if (ret)
goto err;
+ k = bch2_btree_path_peek_slot(path, &u);
+
if (!bch2_btree_node_relock(trans, ck_path, 0)) {
trace_trans_restart_relock_key_cache_fill(trans->fn,
_THIS_IP_, ck_path->btree_id, &ck_path->pos);
@@ -261,9 +263,9 @@ static int btree_key_cache_fill(struct btree_trans *trans,
bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
/* We're not likely to need this iterator again: */
- set_btree_iter_dontneed(&iter);
+ path->preserve = false;
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_path_put(trans, path, 0);
return ret;
}
@@ -384,6 +386,8 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_CACHED_NOCREATE|
BTREE_ITER_INTENT);
+ b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+
ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
goto out;