author     Kent Overstreet <kent.overstreet@gmail.com>    2022-05-28 16:43:46 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>    2022-06-05 17:41:01 -0400
commit     1ad42f37543f403a7fc6f1ebff30d67f02be7e73 (patch)
tree       808c9697020a10153f636d519419296e07887095
parent     61a64d2944fe690a7a992eac9328a82fb0011f2f (diff)
bcachefs: Btree key cache coherency
This is the last piece for btree key cache coherency.

We already have:
 - btree iterator code checks the key cache when iterating over a
   cached btree
 - update path ensures that updates go to the key cache when updating
   a cached btree

But for iterating over a cached btree to work, we need to ensure that
if a key exists in the key cache, it also exists in the btree -
otherwise the iterator code will skip past it and not check the key
cache.

This patch implements that last piece: on a key cache update, if
creating a new key, we now also update the underlying btree.

This fixes a device removal bug, where deleting alloc info wasn't
correctly deleting all keys associated with a given device. It also
means we should be able to re-enable the key cache for inodes.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
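[Editor's note] To illustrate the failure mode the message describes, here is a
minimal standalone C toy (made-up arrays and names, not bcachefs code): the
iterator walks btree positions and overlays the key cache at each one, so a key
present only in the cache is silently skipped.

    #include <stdio.h>
    #include <stddef.h>

    /* Toy model: a "btree" and a "key cache", each just sorted positions. */
    static int btree_keys[] = { 10, 30 };       /* key 20 missing from btree */
    static int cache_keys[] = { 10, 20, 30 };   /* ...but present in cache */

    static int btree_next(int pos)              /* next btree key at/after pos */
    {
            for (size_t i = 0; i < sizeof(btree_keys) / sizeof(btree_keys[0]); i++)
                    if (btree_keys[i] >= pos)
                            return btree_keys[i];
            return -1;
    }

    static int cache_has(int pos)
    {
            for (size_t i = 0; i < sizeof(cache_keys) / sizeof(cache_keys[0]); i++)
                    if (cache_keys[i] == pos)
                            return 1;
            return 0;
    }

    int main(void)
    {
            /*
             * The iterator advances by btree position and only consults the
             * cache at positions the btree produced - cache-only key 20 is
             * never seen, which is exactly the incoherency this patch fixes:
             */
            for (int pos = 0, k; (k = btree_next(pos)) >= 0; pos = k + 1)
                    printf("iterator saw key %d (cached: %d)\n", k, cache_has(k));
            return 0;
    }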
-rw-r--r--   fs/bcachefs/btree_iter.c          23
-rw-r--r--   fs/bcachefs/btree_key_cache.c     10
-rw-r--r--   fs/bcachefs/btree_key_cache.h      2
-rw-r--r--   fs/bcachefs/btree_types.h          1
-rw-r--r--   fs/bcachefs/btree_update_leaf.c   57

5 files changed, 79 insertions, 14 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 05156258c565..761b09bfa7f7 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1850,8 +1850,9 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
trans_for_each_update(trans, i) {
struct bkey_s_c old = { &i->old_k, i->old_v };
- pr_buf(buf, "update: btree %s %pS",
+ pr_buf(buf, "update: btree=%s cached=%u %pS",
bch2_btree_ids[i->btree_id],
+ i->cached,
(void *) i->ip_allocated);
pr_newline(buf);
@@ -2237,16 +2238,22 @@ static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
struct bpos pos)
{
struct btree_insert_entry *i;
+ struct bkey_i *ret = NULL;
- trans_for_each_update(trans, i)
- if ((cmp_int(btree_id, i->btree_id) ?:
- bpos_cmp(pos, i->k->k.p)) <= 0) {
- if (btree_id == i->btree_id)
- return i->k;
+ trans_for_each_update(trans, i) {
+ if (i->btree_id < btree_id)
+ continue;
+ if (i->btree_id > btree_id)
break;
- }
+ if (bpos_cmp(i->k->k.p, pos) < 0)
+ continue;
+ if (i->key_cache_already_flushed)
+ continue;
+ if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
+ ret = i->k;
+ }
- return NULL;
+ return ret;
}
struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
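[Editor's note] The rewritten btree_trans_peek_updates() above no longer returns
the first matching entry: once updates are also sorted by the cached flag (see
the comparator change in btree_update_leaf.c below), key positions are no longer
monotonic within one btree, so the scan must keep the smallest position >= the
search position. A standalone restatement of that logic, with plain ints standing
in for struct bpos and struct bkey_i (a sketch, not the bcachefs API):

    #include <stdio.h>
    #include <stddef.h>

    /* one queued update; ints stand in for the kernel types */
    struct update { int btree_id; int pos; int already_flushed; };

    static const struct update *peek_updates(const struct update *u, size_t n,
                                             int btree_id, int pos)
    {
            const struct update *ret = NULL;

            for (size_t i = 0; i < n; i++) {
                    if (u[i].btree_id < btree_id)
                            continue;
                    if (u[i].btree_id > btree_id)
                            break;  /* updates are sorted by btree_id first */
                    if (u[i].pos < pos || u[i].already_flushed)
                            continue;
                    /* positions are no longer monotonic within one btree,
                     * so keep scanning for the smallest match: */
                    if (!ret || u[i].pos < ret->pos)
                            ret = &u[i];
            }
            return ret;
    }

    int main(void)
    {
            /* cached updates sort after btree updates to the same btree: */
            const struct update u[] = {
                    { .btree_id = 1, .pos = 7 },    /* btree update */
                    { .btree_id = 1, .pos = 5 },    /* cached update */
            };

            /* a first-match scan would return pos 7; the minimum is pos 5 */
            printf("next update at pos %d\n", peek_updates(u, 2, 1, 4)->pos);
            return 0;
    }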
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index a575189f358c..5785efc8c276 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -562,6 +562,16 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
return true;
}
+void bch2_btree_key_cache_drop(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ struct bkey_cached *ck = (void *) path->l[0].b;
+
+ ck->valid = false;
+
+ BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
+}
+
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
index fd29c14c5626..670746e72dab 100644
--- a/fs/bcachefs/btree_key_cache.h
+++ b/fs/bcachefs/btree_key_cache.h
@@ -32,6 +32,8 @@ bool bch2_btree_insert_key_cached(struct btree_trans *,
struct btree_path *, struct bkey_i *);
int bch2_btree_key_cache_flush(struct btree_trans *,
enum btree_id, struct bpos);
+void bch2_btree_key_cache_drop(struct btree_trans *,
+ struct btree_path *);
void bch2_fs_btree_key_cache_exit(struct btree_key_cache *);
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *);
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index a1c0441940a5..d8f92cc96c4f 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -348,6 +348,7 @@ struct btree_insert_entry {
bool cached:1;
bool insert_trigger_run:1;
bool overwrite_trigger_run:1;
+ bool key_cache_already_flushed:1;
/*
* @old_k may be a key from the journal; @old_btree_u64s always refers
* to the size of the key being overwritten in the btree:
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 7a26c7938c71..1640ef9b391b 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -31,6 +31,7 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
const struct btree_insert_entry *r)
{
return cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->cached, r->cached) ?:
-cmp_int(l->level, r->level) ?:
bpos_cmp(l->k->k.p, r->k->k.p);
}
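[Editor's note] The new cmp_int(l->cached, r->cached) term matters because one
transaction can now carry both a cached and a non-cached update for the same
position; without it the two would compare equal and collapse into one entry. A
simplified sketch of the comparator (plain ints in place of the kernel types;
cmp3() stands in for the kernel's cmp_int() macro, and `?:` is the GNU extension
the kernel itself uses):

    #include <stdio.h>

    struct entry { int btree_id; int cached; int level; int pos; };

    static int cmp3(int l, int r) { return (l > r) - (l < r); }

    static int entry_cmp(const struct entry *l, const struct entry *r)
    {
            /* the 'cached' term keeps a cached and a btree update to the
             * same position as two distinct, deterministically ordered
             * entries instead of apparent duplicates: */
            return cmp3(l->btree_id, r->btree_id) ?:
                    cmp3(l->cached, r->cached) ?:
                    -cmp3(l->level, r->level) ?:
                    cmp3(l->pos, r->pos);
    }

    int main(void)
    {
            struct entry btree_copy  = { .btree_id = 1, .pos = 5 };
            struct entry cached_copy = { .btree_id = 1, .cached = 1, .pos = 5 };

            /* negative: the btree copy sorts before the cached copy */
            printf("%d\n", entry_cmp(&btree_copy, &cached_copy));
            return 0;
    }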
@@ -406,8 +407,12 @@ static inline void do_btree_insert_one(struct btree_trans *trans,
if (!i->cached)
btree_insert_key_leaf(trans, i);
- else
+ else if (!i->key_cache_already_flushed)
bch2_btree_insert_key_cached(trans, i->path, i->k);
+ else {
+ bch2_btree_key_cache_drop(trans, i->path);
+ return;
+ }
if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
bch2_journal_add_keys(j, &trans->journal_res,
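[Editor's note] do_btree_insert_one() now dispatches three ways: plain btree
updates go to the leaf, live cached updates go to the key cache, and cached
updates whose content already reached the btree just invalidate their cache
entry. A toy demonstration of that dispatch, with printf stubs in place of the
real insert paths (illustrative only):

    #include <stdio.h>
    #include <stdbool.h>

    /* minimal model of a transaction's update entry */
    struct entry { bool cached; bool key_cache_already_flushed; int pos; };

    static void insert_one(const struct entry *i)
    {
            if (!i->cached)
                    printf("pos %d: insert into btree leaf\n", i->pos);
            else if (!i->key_cache_already_flushed)
                    printf("pos %d: insert into key cache\n", i->pos);
            else
                    /* content already reached the btree via a mirrored
                     * update - just invalidate the clean cache entry */
                    printf("pos %d: drop key cache entry\n", i->pos);
    }

    int main(void)
    {
            const struct entry e[] = {
                    { .cached = false, .pos = 1 },
                    { .cached = true,  .pos = 2 },
                    { .cached = true,  .key_cache_already_flushed = true, .pos = 3 },
            };

            for (unsigned j = 0; j < sizeof(e) / sizeof(e[0]); j++)
                    insert_one(&e[j]);
            return 0;
    }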
@@ -1473,11 +1478,13 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
}
static int __must_check
-bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
- struct bkey_i *k, enum btree_update_flags flags)
+bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
+ struct bkey_i *k, enum btree_update_flags flags,
+ unsigned long ip)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i, n;
+ int ret = 0;
BUG_ON(!path->should_be_locked);
@@ -1492,7 +1499,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
.cached = path->cached,
.path = path,
.k = k,
- .ip_allocated = _RET_IP_,
+ .ip_allocated = ip,
};
#ifdef CONFIG_BCACHEFS_DEBUG
@@ -1537,8 +1544,43 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
}
}
- __btree_path_get(n.path, true);
- return 0;
+ __btree_path_get(i->path, true);
+
+ /*
+ * If a key is present in the key cache, it must also exist in the
+ * btree - this is necessary for cache coherency. When iterating over
+ * a btree that's cached in the key cache, the btree iter code checks
+ * the key cache - but the key has to exist in the btree for that to
+ * work:
+ */
+ if (path->cached &&
+ bkey_deleted(&i->old_k)) {
+ struct btree_path *btree_path;
+
+ i->key_cache_already_flushed = true;
+ i->flags |= BTREE_TRIGGER_NORUN;
+
+ btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+ BTREE_ITER_INTENT, _THIS_IP_);
+
+ ret = bch2_btree_path_traverse(trans, btree_path, 0);
+ if (ret)
+ goto err;
+
+ btree_path->should_be_locked = true;
+ ret = bch2_trans_update_by_path_trace(trans, btree_path, k, flags, ip);
+err:
+ bch2_path_put(trans, btree_path, true);
+ }
+
+ return ret;
+}
+
+static int __must_check
+bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
+ struct bkey_i *k, enum btree_update_flags flags)
+{
+ return bch2_trans_update_by_path_trace(trans, path, k, flags, _RET_IP_);
}
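[Editor's note] Putting it together: when a cached update targets a key whose
btree copy is deleted, bch2_trans_update_by_path_trace() marks the cached entry
as already flushed, suppresses its triggers, and queues a mirrored non-cached
update that actually inserts the key into the btree. A self-contained toy
walk-through of that recursion (made-up types and flag names, not the real API):

    #include <stdio.h>
    #include <stdbool.h>

    /* toy update entry; 'flushed' models key_cache_already_flushed */
    struct update { bool cached; bool no_triggers; bool flushed; int pos; };

    static struct update updates[8];
    static int nr_updates;

    /* toy stand-in for bkey_deleted(&i->old_k): assume the key is new */
    static bool old_key_deleted(int pos) { (void) pos; return true; }

    static void trans_update(bool cached, int pos)
    {
            struct update *u = &updates[nr_updates++];

            *u = (struct update) { .cached = cached, .pos = pos };

            if (cached && old_key_deleted(pos)) {
                    /* the cached copy only invalidates the cache entry at
                     * commit time and must not run triggers twice... */
                    u->flushed = true;
                    u->no_triggers = true;
                    /* ...while the mirrored btree update carries the key
                     * into the btree proper and runs the triggers: */
                    trans_update(false, pos);
            }
    }

    int main(void)
    {
            trans_update(true, 42);         /* new key, via the key cache */

            for (int i = 0; i < nr_updates; i++)
                    printf("pos=%d cached=%d flushed=%d no_triggers=%d\n",
                           updates[i].pos, updates[i].cached,
                           updates[i].flushed, updates[i].no_triggers);
            return 0;
    }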
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
@@ -1562,6 +1604,9 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
k->k.type = KEY_TYPE_whiteout;
}
+ /*
+ * Ensure that updates to cached btrees go to the key cache:
+ */
if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
!path->cached &&
!path->level &&