-rw-r--r--	drivers/md/bcache/btree.h   | 9 ++++++---
-rw-r--r--	drivers/md/bcache/buckets.c | 2 +-
-rw-r--r--	drivers/md/bcache/extents.c | 6 ++++--
3 files changed, 11 insertions, 6 deletions
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index ce6cd86ad8ce..4eb60cd5baff 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -284,10 +284,12 @@ int bch_btree_map_keys(struct btree_op *, struct cache_set *, struct bkey *,
  * seqlock.
  */
 static inline bool __gc_will_visit_key(struct cache_set *c,
+				       enum btree_id id,
 				       const struct bkey *k)
 {
-	return (c->gc_cur_btree == BTREE_ID_EXTENTS &&
-		bkey_cmp(&c->gc_cur_key, k) < 0);
+	return c->gc_cur_btree != id
+		? c->gc_cur_btree < id
+		: bkey_cmp(&c->gc_cur_key, k) < 0;
 }
 
 /**
@@ -297,6 +299,7 @@ static inline bool __gc_will_visit_key(struct cache_set *c,
 * into -- the GC will do it before the current pass ends.
 */
 static inline bool gc_will_visit_key(struct cache_set *c,
+				     enum btree_id id,
				     const struct bkey *k)
 {
	unsigned seq;
@@ -304,7 +307,7 @@ static inline bool gc_will_visit_key(struct cache_set *c,
 
	do {
		seq = read_seqbegin(&c->gc_cur_lock);
-		ret = __gc_will_visit_key(c, k);
+		ret = __gc_will_visit_key(c, id, k);
	} while (read_seqretry(&c->gc_cur_lock, seq));
 
	return ret;
diff --git a/drivers/md/bcache/buckets.c b/drivers/md/bcache/buckets.c
index 4c6a2c65087a..c9975ae000e2 100644
--- a/drivers/md/bcache/buckets.c
+++ b/drivers/md/bcache/buckets.c
@@ -196,7 +196,7 @@ u8 bch_mark_data_bucket(struct cache_set *c, struct cache *ca, struct bkey *k,
	 * GC starting between when we check gc_cur_key and when
	 * the GC zeroes out marks
	 */
-	if (!gc && gc_will_visit_key(c, k))
+	if (!gc && gc_will_visit_key(c, BTREE_ID_EXTENTS, k))
		return 0;
 
	/*
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 99b842d12a0d..2dbd272218ad 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -319,7 +319,8 @@ static bool btree_ptr_debug_invalid(struct btree_keys *bk, struct bkey *k)
 
	do {
		seq = read_seqbegin(&c->gc_cur_lock);
-		bad = (!__gc_will_visit_key(c, k) &&
+		bad = (!__gc_will_visit_key(c, b->btree_id,
+					    k) &&
		       !g->mark.is_metadata);
	} while (read_seqretry(&c->gc_cur_lock, seq));
 
@@ -889,7 +890,8 @@ static bool bch_extent_debug_invalid(struct btree_keys *bk, struct bkey *k)
				      stale);
 
			bad = (!stale &&
-			       !__gc_will_visit_key(c, k) &&
+			       !__gc_will_visit_key(c, b->btree_id,
+						    k) &&
			       (mark.is_metadata ||
				(!mark.dirty_sectors &&
				 !mark.owned_by_allocator &&
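
The heart of the change is in __gc_will_visit_key(): instead of assuming GC only ever walks the extents btree, GC progress is compared as the pair (btree id, current key), so a key will still be visited if its btree comes after the one GC is currently in, or if it is in the same btree and sorts after gc_cur_key. The standalone sketch below illustrates that ordering; struct cache_set, struct bkey, bkey_cmp() and the gc_cur_lock seqlock are replaced with simplified stand-ins, so this is an illustration of the comparison, not the kernel code itself.

/*
 * Standalone illustration of the generalized "will GC still visit this
 * key?" check from the patch above.  Kernel types are replaced with
 * minimal stand-ins; only the comparison logic mirrors the diff.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in btree ids; the real enum lives in the bcache headers. */
enum btree_id { BTREE_ID_EXTENTS, BTREE_ID_INODES, BTREE_ID_DIRENTS };

/* Simplified key: the real struct bkey packs more than this. */
struct bkey { uint64_t inode, offset; };

/* Simplified total order on keys, standing in for bkey_cmp(). */
static int bkey_cmp(const struct bkey *l, const struct bkey *r)
{
	if (l->inode != r->inode)
		return l->inode < r->inode ? -1 : 1;
	if (l->offset != r->offset)
		return l->offset < r->offset ? -1 : 1;
	return 0;
}

/* GC cursor: which btree GC is in, and how far into it. */
struct gc_pos { enum btree_id cur_btree; struct bkey cur_key; };

/*
 * A key is still ahead of GC if it lives in a btree GC has not reached
 * yet, or in the current btree but past the current key -- the same
 * (btree id, key) lexicographic comparison as the new __gc_will_visit_key().
 */
static bool gc_will_visit_key(const struct gc_pos *gc,
			      enum btree_id id, const struct bkey *k)
{
	return gc->cur_btree != id
		? gc->cur_btree < id
		: bkey_cmp(&gc->cur_key, k) < 0;
}

int main(void)
{
	struct gc_pos gc = {
		.cur_btree = BTREE_ID_INODES,
		.cur_key   = { .inode = 100, .offset = 0 },
	};
	struct bkey extent = { .inode = 200, .offset = 8 };
	struct bkey inode  = { .inode = 150, .offset = 0 };

	/* Extents btree already finished this pass: prints 0. */
	printf("extent: %d\n", gc_will_visit_key(&gc, BTREE_ID_EXTENTS, &extent));
	/* Same btree, key after the GC cursor: prints 1. */
	printf("inode:  %d\n", gc_will_visit_key(&gc, BTREE_ID_INODES, &inode));
	return 0;
}

Consistent with this, the callers in the patch supply the btree id explicitly: bch_mark_data_bucket() in buckets.c passes BTREE_ID_EXTENTS, while the debug checks in extents.c pass b->btree_id of the node being validated.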