author      Kent Overstreet <kent.overstreet@linux.dev>    2022-11-25 16:04:42 -0500
committer   Kent Overstreet <kent.overstreet@linux.dev>    2022-11-30 12:30:50 -0500
commit      fd00fc3adc7e0adaf7de55d2e100a419cf278bbf
tree        19f8c62a852ac349c21eb50ca096d71d044555b4
parent      479b0f33455a8ce2cc77ab26ed6d9618f004ab83
bcachefs: Don't set accessed bit on btree node fill
Btree nodes shouldn't have their accessed bit set when they first enter the
btree cache by being read in from disk; this fixes linear scans thrashing
the cache.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
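
To see why the accessed bit matters here, consider how clock-style ("second chance") reclaim typically works: a node whose accessed bit is set is spared for one pass and has the bit cleared, while a node without it is evicted. If every node filled from disk entered the cache with the bit already set, a one-off linear scan would carry each of its nodes through a full extra reclaim pass at the expense of genuinely hot nodes. The sketch below is a simplified user-space illustration of that mechanism, not the bcachefs shrinker; `struct node`, `reclaim_one`, and the list layout are invented for the example.

```c
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int	     id;
	bool	     accessed;	/* set on cache hits, NOT on fill */
};

/* Evict one node, giving accessed nodes a second chance. */
static struct node *reclaim_one(struct node **head)
{
	struct node **pp = head;

	while (*pp) {
		struct node *n = *pp;

		if (n->accessed) {
			/* Recently used: clear the bit, spare it this pass. */
			n->accessed = false;
			pp = &n->next;
			continue;
		}
		*pp = n->next;	/* unlink and evict */
		return n;
	}
	return NULL;
}

int main(void)
{
	struct node hot  = { .id = 1, .accessed = true };  /* hit by lookups */
	struct node cold = { .id = 2, .accessed = false }; /* fresh fill */

	hot.next = &cold;
	struct node *head = &hot;

	/* The never-accessed fill is evicted first: prints "evicted 2". */
	struct node *victim = reclaim_one(&head);
	printf("evicted %d\n", victim ? victim->id : -1);
	return 0;
}
```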
-rw-r--r--  fs/bcachefs/btree_cache.c | 14
1 file changed, 10 insertions(+), 4 deletions(-)
```diff
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 2b48db53f9d0..d24827fb0164 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -753,6 +753,12 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (IS_ERR(b))
 		return b;
 
+	/*
+	 * Btree nodes read in from disk should not have the accessed bit set
+	 * initially, so that linear scans don't thrash the cache:
+	 */
+	clear_btree_node_accessed(b);
+
 	bkey_copy(&b->key, k);
 	if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
 		/* raced with another fill: */
@@ -889,6 +895,10 @@ retry:
 			trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
 			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
+
+		/* avoid atomic set bit if it's not needed: */
+		if (!btree_node_accessed(b))
+			set_btree_node_accessed(b);
 	}
 
 	if (unlikely(btree_node_read_in_flight(b))) {
@@ -926,10 +936,6 @@ retry:
 		prefetch(p + L1_CACHE_BYTES * 2);
 	}
 
-	/* avoid atomic set bit if it's not needed: */
-	if (!btree_node_accessed(b))
-		set_btree_node_accessed(b);
-
 	if (unlikely(btree_node_read_error(b))) {
 		six_unlock_type(&b->c.lock, lock_type);
 		return ERR_PTR(-EIO);
```
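
The second hunk also preserves a small micro-optimization worth noting: per the patch's own comment, setting the accessed bit is an atomic bit operation, and doing it unconditionally on every lookup would dirty the node's cache line even when the bit is already set. Checking with a plain read first keeps the common already-set path read-only. Below is a hedged sketch of the same pattern using C11 atomics in user space rather than the kernel's bit helpers; the `NODE_ACCESSED` flag and `mark_accessed()` helper are invented names, not bcachefs API.

```c
#include <stdatomic.h>
#include <stdint.h>

#define NODE_ACCESSED	(1u << 0)

struct node {
	_Atomic uint32_t flags;
};

static void mark_accessed(struct node *n)
{
	/* Cheap relaxed load first; most lookups find the bit already set. */
	if (!(atomic_load_explicit(&n->flags, memory_order_relaxed) & NODE_ACCESSED))
		atomic_fetch_or_explicit(&n->flags, NODE_ACCESSED,
					 memory_order_relaxed);
}

int main(void)
{
	struct node n = { .flags = 0 };

	mark_accessed(&n);	/* first call performs the atomic OR */
	mark_accessed(&n);	/* second call sees the bit and skips it */
	return 0;
}
```

On x86, for instance, the atomic fetch-or compiles to a `lock or` that forces exclusive ownership of the cache line, while the relaxed load leaves it in shared state, which is why the check-first form wins when many CPUs repeatedly touch the same hot node.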