| author | Kent Overstreet <kent.overstreet@gmail.com> | 2015-08-17 15:40:41 -0800 |
|---|---|---|
| committer | Kent Overstreet <kent.overstreet@gmail.com> | 2016-10-07 12:34:44 -0800 |
| commit | 50082bdfdd423a50e864df1b6b1691941c9e5c34 | |
| tree | 640fcc8e724ce92a5ad1de17c8f33caddf6863c7 | |
| parent | 3b4484c3888dd4132fffd589be059c66122dbc58 | |
bcache: gc_pos
Clean up the gc position code and make it more explicit. Also fix a race in
bch_pending_btree_node_free_insert_done() -> bch_mark_pointers(): the gc pos
check wasn't being done in bch_mark_pointers() under the seqlock, so it could
race with the start of gc. Making the position explicit helps fix that.
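
The idea, in brief: GC's progress is captured in a single `struct gc_pos`, totally ordered by phase, then btree position, then level; an updater only marks buckets itself when GC has already passed the reference it is touching. Below is a minimal, self-contained userspace sketch of that ordering. The names mirror the patch (`gc_pos_cmp()`, `gc_will_visit()`), but the enum values are illustrative, the btree position is collapsed to a plain integer instead of `struct bpos`/`bkey_cmp()`, and the seqlock read side is left out here.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative phases only; in the kernel the low values are the btree ids */
enum gc_phase { GC_PHASE_BTREE, GC_PHASE_PENDING_DELETE, GC_PHASE_DONE };

struct gc_pos {
        enum gc_phase   phase;
        uint64_t        pos;    /* stand-in for struct bpos */
        unsigned        level;
};

/* Total ordering over everything GC walks: phase, then key, then level */
static int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
        if (l.phase != r.phase)
                return l.phase < r.phase ? -1 : 1;
        if (l.pos != r.pos)
                return l.pos < r.pos ? -1 : 1;
        if (l.level != r.level)
                return l.level < r.level ? -1 : 1;
        return 0;
}

/*
 * GC will still visit a reference iff its current position is strictly
 * before the reference's position; in that case the updater can skip
 * marking, because the running GC pass will account for it.
 */
static int gc_will_visit(struct gc_pos gc_cur, struct gc_pos ref)
{
        return gc_pos_cmp(gc_cur, ref) < 0;
}

int main(void)
{
        struct gc_pos gc_cur = { GC_PHASE_BTREE, 100, 0 };
        struct gc_pos ref    = { GC_PHASE_BTREE, 200, 0 };

        printf("gc will visit ref: %d\n", gc_will_visit(gc_cur, ref)); /* 1 */
        return 0;
}
```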
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/md/bcache/bcache.h | 14 |
| -rw-r--r-- | drivers/md/bcache/btree_gc.c | 57 |
| -rw-r--r-- | drivers/md/bcache/btree_gc.h | 105 |
| -rw-r--r-- | drivers/md/bcache/btree_update.c | 40 |
| -rw-r--r-- | drivers/md/bcache/buckets.c | 38 |
| -rw-r--r-- | drivers/md/bcache/buckets.h | 4 |
| -rw-r--r-- | drivers/md/bcache/extents.c | 32 |
| -rw-r--r-- | drivers/md/bcache/journal.c | 5 |
| -rw-r--r-- | drivers/md/bcache/super.c | 2 |
| -rw-r--r-- | drivers/md/bcache/sysfs.c | 2 |
10 files changed, 166 insertions, 133 deletions
```diff
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d2c99c1f42e7..0a7cdb39a632 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -268,6 +268,12 @@ enum gc_phase {
 	GC_PHASE_DONE
 };
 
+struct gc_pos {
+	enum gc_phase	phase;
+	struct bpos	pos;
+	unsigned	level;
+};
+
 struct cache_group {
 	seqcount_t		lock;
 	unsigned		nr_devices;
@@ -591,13 +597,11 @@ struct cache_set {
 	 * gc_cur_phase == GC_PHASE_DONE indicates that gc is finished/not
 	 * currently running, and gc marks are currently valid
 	 *
-	 * Protected by gc_cur_lock. Only written to by GC thread, so GC thread
+	 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
 	 * can read without a lock.
 	 */
-	seqcount_t		gc_cur_lock;
-	enum gc_phase		gc_cur_phase;
-	unsigned		gc_cur_level;
-	struct bpos		gc_cur_pos;
+	seqcount_t		gc_pos_lock;
+	struct gc_pos		gc_pos;
 
 	/*
 	 * The allocation code needs gc_mark in struct bucket to be correct, but
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 97d204eb06b0..cfe78a20dbcd 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -111,9 +111,10 @@ void __bch_btree_mark_key(struct cache_set *c, int level, struct bkey_s_c k)
 	if (bkey_extent_is_data(k.k)) {
 		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 
-		bch_mark_pointers(c, NULL, e, level
+		bch_mark_pointers(c, e, level
 			? CACHE_BTREE_NODE_SIZE(&c->sb)
-			: e.k->size, false, level != 0, true);
+			: e.k->size, false, level != 0,
+			true, GC_POS_MIN);
 	}
 }
@@ -165,22 +166,17 @@ static bool btree_gc_mark_node(struct cache_set *c, struct btree *b)
 	return false;
 }
 
-static inline void __gc_set_pos(struct cache_set *c, enum gc_phase phase,
-				struct bpos pos, unsigned level)
+static inline void __gc_pos_set(struct cache_set *c, struct gc_pos new_pos)
 {
-	write_seqcount_begin(&c->gc_cur_lock);
-	c->gc_cur_phase	= phase;
-	c->gc_cur_pos	= pos;
-	c->gc_cur_level	= level;
-	write_seqcount_end(&c->gc_cur_lock);
+	write_seqcount_begin(&c->gc_pos_lock);
+	c->gc_pos = new_pos;
+	write_seqcount_end(&c->gc_pos_lock);
 }
 
-static inline void gc_set_pos(struct cache_set *c, enum gc_phase phase,
-			      struct bpos pos, unsigned level)
+static inline void gc_pos_set(struct cache_set *c, struct gc_pos new_pos)
 {
-	BUG_ON(!__gc_will_visit(c, phase, pos, level));
-
-	__gc_set_pos(c, phase, pos, level);
+	BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
+	__gc_pos_set(c, new_pos);
 }
 
 static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
@@ -199,12 +195,7 @@ static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
 
 		should_rewrite = btree_gc_mark_node(c, b);
 
-		BUG_ON(bkey_cmp(c->gc_cur_pos, b->key.k.p) > 0);
-		BUG_ON(!gc_will_visit_node(c, b));
-
-		gc_set_pos(c, b->btree_id, b->key.k.p, b->level);
-
-		BUG_ON(gc_will_visit_node(c, b));
+		gc_pos_set(c, gc_pos_btree_node(b));
 
 		if (should_rewrite)
 			bch_btree_node_rewrite(b, &iter, false);
@@ -217,7 +208,7 @@ static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
 	b = c->btree_roots[btree_id];
 	__bch_btree_mark_key(c, b->level + 1, bkey_i_to_s_c(&b->key));
-	gc_set_pos(c, b->btree_id, POS_MAX, U8_MAX);
+	gc_pos_set(c, gc_pos_btree_root(b->btree_id));
 	spin_unlock(&c->btree_root_lock);
 
 	return 0;
@@ -289,14 +280,14 @@ static void bch_mark_pending_btree_node_frees(struct cache_set *c)
 	struct pending_btree_node_free *d;
 
 	mutex_lock(&c->btree_node_pending_free_lock);
-	gc_set_pos(c, GC_PHASE_PENDING_DELETE, POS_MIN, 0);
+	gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
 
 	list_for_each_entry(d, &c->btree_node_pending_free, list)
 		if (d->index_update_done)
-			bch_mark_pointers(c, NULL,
-					  bkey_i_to_s_c_extent(&d->key),
+			bch_mark_pointers(c, bkey_i_to_s_c_extent(&d->key),
 					  CACHE_BTREE_NODE_SIZE(&c->sb),
-					  false, true, true);
+					  false, true,
+					  true, GC_POS_MIN);
 
 	mutex_unlock(&c->btree_node_pending_free_lock);
 }
@@ -359,7 +350,7 @@ void bch_gc(struct cache_set *c)
 		ca->bucket_stats_cached = __bucket_stats_read(ca);
 
 	/* Indicates to buckets code that gc is now in progress: */
-	__gc_set_pos(c, 0, POS_MIN, 0);
+	__gc_pos_set(c, GC_POS_MIN);
 
 	/* Clear bucket marks: */
 	for_each_cache(ca, c, i)
@@ -372,9 +363,9 @@ void bch_gc(struct cache_set *c)
 	bch_mark_allocator_buckets(c);
 
 	/* Walk btree: */
-	while (c->gc_cur_phase < (int) BTREE_ID_NR) {
-		int ret = c->btree_roots[c->gc_cur_phase]
-			? bch_gc_btree(c, c->gc_cur_phase)
+	while (c->gc_pos.phase < (int) BTREE_ID_NR) {
+		int ret = c->btree_roots[c->gc_pos.phase]
+			? bch_gc_btree(c, c->gc_pos.phase)
 			: 0;
 
 		if (ret) {
@@ -384,7 +375,7 @@ void bch_gc(struct cache_set *c)
 			return;
 		}
 
-		gc_set_pos(c, c->gc_cur_phase + 1, POS_MIN, 0);
+		gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
 	}
 
 	bch_mark_metadata(c);
@@ -397,8 +388,8 @@ void bch_gc(struct cache_set *c)
 		ca->inc_gen_needs_gc = 0;
 	}
 
-	/* Indicate to buckets code that gc is no longer in progress: */
-	gc_set_pos(c, GC_PHASE_DONE, POS_MIN, 0);
+	/* Indicates that gc is no longer in progress: */
+	gc_pos_set(c, gc_phase(GC_PHASE_DONE));
 	up_write(&c->gc_lock);
 
 	trace_bcache_gc_end(c);
@@ -820,7 +811,7 @@ int bch_initial_gc(struct cache_set *c, struct list_head *journal)
 	}
 
 	bch_mark_metadata(c);
-	gc_set_pos(c, GC_PHASE_DONE, POS_MIN, 0);
+	gc_pos_set(c, gc_phase(GC_PHASE_DONE));
 	set_bit(CACHE_SET_INITIAL_GC_DONE, &c->flags);
 
 	return 0;
diff --git a/drivers/md/bcache/btree_gc.h b/drivers/md/bcache/btree_gc.h
index 9a6f4dd72c9d..0b7559422192 100644
--- a/drivers/md/bcache/btree_gc.h
+++ b/drivers/md/bcache/btree_gc.h
@@ -10,57 +10,90 @@ int bch_initial_gc(struct cache_set *, struct list_head *);
 u8 bch_btree_key_recalc_oldest_gen(struct cache_set *, struct bkey_s_c);
 void __bch_btree_mark_key(struct cache_set *, int, struct bkey_s_c);
 
-static inline bool __gc_will_visit(struct cache_set *c, enum gc_phase phase,
-				   struct bpos pos, unsigned level)
-{
-	return phase != c->gc_cur_phase
-		? phase > c->gc_cur_phase
-		: bkey_cmp(pos, c->gc_cur_pos)
-		? bkey_cmp(pos, c->gc_cur_pos) > 0
-		: level > c->gc_cur_level;
-}
+/*
+ * For concurrent mark and sweep (with other index updates), we define a total
+ * ordering of _all_ references GC walks:
+ *
+ * Note that some references will have the same GC position as others - e.g.
+ * everything within the same btree node; in those cases we're relying on
+ * whatever locking exists for where those references live, i.e. the write lock
+ * on a btree node.
+ *
+ * That locking is also required to ensure GC doesn't pass the updater in
+ * between the updater adding/removing the reference and updating the GC marks;
+ * without that, we would at best double count sometimes.
+ *
+ * That part is important - whenever calling bch_mark_pointers(), a lock _must_
+ * be held that prevents GC from passing the position the updater is at.
+ *
+ * (What about the start of gc, when we're clearing all the marks? GC clears the
+ * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
+ * position inside its cmpxchg loop, so crap magically works).
+ */
 
-static inline bool gc_will_visit(struct cache_set *c, enum gc_phase phase,
-				 struct bpos pos, unsigned level)
+/* Position of (the start of) a gc phase: */
+static inline struct gc_pos gc_phase(enum gc_phase phase)
 {
-	unsigned seq;
-	bool ret;
+	return (struct gc_pos) {
+		.phase	= phase,
+		.pos	= POS_MIN,
+		.level	= 0,
+	};
+}
 
-	do {
-		seq = read_seqcount_begin(&c->gc_cur_lock);
-		ret = __gc_will_visit(c, phase, pos, level);
-	} while (read_seqcount_retry(&c->gc_cur_lock, seq));
+#define GC_POS_MIN	gc_phase(0)
 
-	return ret;
+static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
+{
+	if (l.phase != r.phase)
+		return l.phase < r.phase ? -1 : 1;
+	if (bkey_cmp(l.pos, r.pos))
+		return bkey_cmp(l.pos, r.pos);
+	if (l.level != r.level)
+		return l.level < r.level ? -1 : 1;
+	return 0;
 }
 
-/**
- * __gc_will_visit_node - for checking GC marks while holding a btree read lock
- *
- * Since btree GC takes intent locks, it might advance the current key, so in
- * this case the entire reading of the mark has to be surrounded with the
- * seqlock.
+/*
+ * GC position of the pointers within a btree node: note, _not_ for &b->key
+ * itself, that lives in the parent node:
  */
-static inline bool __gc_will_visit_node(struct cache_set *c, struct btree *b)
+static inline struct gc_pos gc_pos_btree_node(struct btree *b)
 {
-	return __gc_will_visit(c, b->btree_id, b->key.k.p, b->level);
+	return (struct gc_pos) {
+		.phase	= b->btree_id,
+		.pos	= b->key.k.p,
+		.level	= b->level,
+	};
 }
 
-/**
- * gc_will_visit_key - is the currently-running GC pass going to visit the given
- * btree node?
- *
- * If so, we don't have to update reference counts for buckets this key points
- * into -- the GC will do it before the current pass ends.
+/*
+ * GC position of the pointer to a btree root: we don't use
+ * gc_pos_pointer_to_btree_node() here to avoid a potential race with
+ * btree_split() increasing the tree depth - the new root will have level > the
+ * old root and thus have a greater gc position than the old root, but that
+ * would be incorrect since once gc has marked the root it's not coming back.
  */
-static inline bool gc_will_visit_node(struct cache_set *c, struct btree *b)
+static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
 {
-	return gc_will_visit(c, b->btree_id, b->key.k.p, b->level);
+	return (struct gc_pos) {
+		.phase	= id,
+		.pos	= POS_MAX,
+		.level	= U8_MAX,
+	};
 }
 
-static inline bool gc_will_visit_root(struct cache_set *c, enum btree_id id)
+static inline bool gc_will_visit(struct cache_set *c, struct gc_pos pos)
 {
-	return gc_will_visit(c, (int) id, POS_MAX, U8_MAX);
+	unsigned seq;
+	bool ret;
+
+	do {
+		seq = read_seqcount_begin(&c->gc_pos_lock);
+		ret = gc_pos_cmp(c->gc_pos, pos) < 0;
+	} while (read_seqcount_retry(&c->gc_pos_lock, seq));
+
+	return ret;
 }
 
 #endif
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index c6feb4c33eb8..ee4795e528e3 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -140,14 +140,12 @@ found:
 	 * cancel out one of mark and sweep's markings if necessary:
 	 */
 
-	if ((b
-	     ? !gc_will_visit_node(c, b)
-	     : !gc_will_visit_root(c, id)) &&
-	    gc_will_visit(c, GC_PHASE_PENDING_DELETE, POS_MIN, 0))
-		bch_mark_pointers(c, NULL,
-				  bkey_i_to_s_c_extent(&d->key),
+	if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
+		bch_mark_pointers(c, bkey_i_to_s_c_extent(&d->key),
 				  -CACHE_BTREE_NODE_SIZE(&c->sb),
-				  false, true, true);
+				  false, true, false, b
+				  ? gc_pos_btree_node(b)
+				  : gc_pos_btree_root(id));
 
 	mutex_unlock(&c->btree_node_pending_free_lock);
 }
@@ -208,10 +206,9 @@ static void bch_btree_node_free_ondisk(struct cache_set *c,
 	mutex_lock(&c->btree_node_pending_free_lock);
 	list_del(&pending->list);
 
-	if (!gc_will_visit(c, GC_PHASE_PENDING_DELETE, POS_MIN, 0))
-		bch_mark_pointers(c, NULL, bkey_i_to_s_c_extent(&pending->key),
-				  -CACHE_BTREE_NODE_SIZE(&c->sb),
-				  false, true, true);
+	bch_mark_pointers(c, bkey_i_to_s_c_extent(&pending->key),
+			  -CACHE_BTREE_NODE_SIZE(&c->sb), false, true,
+			  false, gc_phase(GC_PHASE_PENDING_DELETE));
 
 	mutex_unlock(&c->btree_node_pending_free_lock);
 }
@@ -324,6 +321,8 @@ struct btree *btree_node_alloc_replacement(struct cache_set *c,
 
 static void __bch_btree_set_root(struct cache_set *c, struct btree *b)
 {
+	bool stale;
+
 	/* Root nodes cannot be reaped */
 	mutex_lock(&c->btree_cache_lock);
 	list_del_init(&b->list);
@@ -332,16 +331,10 @@ static void __bch_btree_set_root(struct cache_set *c, struct btree *b)
 	spin_lock(&c->btree_root_lock);
 	btree_node_root(b) = b;
 
-	if (b->btree_id != c->gc_cur_phase
-	    ? b->btree_id < c->gc_cur_phase
-	    : b->level <= c->gc_cur_level) {
-		bool stale = bch_mark_pointers(c, NULL,
-					bkey_i_to_s_c_extent(&b->key),
-					CACHE_BTREE_NODE_SIZE(&c->sb),
-					true, true, false);
-
-		BUG_ON(stale);
-	}
+	stale = bch_mark_pointers(c, bkey_i_to_s_c_extent(&b->key),
+				  CACHE_BTREE_NODE_SIZE(&c->sb), true, true,
+				  false, gc_pos_btree_root(b->btree_id));
+	BUG_ON(stale);
 
 	spin_unlock(&c->btree_root_lock);
 
 	bch_recalc_btree_reserve(c);
@@ -552,9 +545,10 @@ static bool bch_insert_fixup_btree_ptr(struct btree_iter *iter,
 	if (bkey_extent_is_data(&insert->k)) {
 		bool stale;
 
-		stale = bch_mark_pointers(c, b, bkey_i_to_s_c_extent(insert),
+		stale = bch_mark_pointers(c, bkey_i_to_s_c_extent(insert),
 					  CACHE_BTREE_NODE_SIZE(&c->sb),
-					  true, true, false);
+					  true, true, false,
+					  gc_pos_btree_node(b));
 		BUG_ON(stale);
 	}
diff --git a/drivers/md/bcache/buckets.c b/drivers/md/bcache/buckets.c
index 44a974b5fe34..4b483e94e748 100644
--- a/drivers/md/bcache/buckets.c
+++ b/drivers/md/bcache/buckets.c
@@ -76,11 +76,11 @@ struct bucket_stats bch_bucket_stats_read(struct cache *ca)
 	unsigned seq;
 
 	do {
-		seq = read_seqcount_begin(&c->gc_cur_lock);
-		ret = c->gc_cur_phase == GC_PHASE_DONE
+		seq = read_seqcount_begin(&c->gc_pos_lock);
+		ret = c->gc_pos.phase == GC_PHASE_DONE
 			? __bucket_stats_read(ca)
 			: ca->bucket_stats_cached;
-	} while (read_seqcount_retry(&c->gc_cur_lock, seq));
+	} while (read_seqcount_retry(&c->gc_pos_lock, seq));
 
 	return ret;
 }
@@ -110,7 +110,7 @@ static void bucket_stats_update(struct cache *ca,
 	BUG_ON(!may_make_unavailable &&
 	       is_available_bucket(old) &&
 	       !is_available_bucket(new) &&
-	       ca->set->gc_cur_phase == GC_PHASE_DONE);
+	       ca->set->gc_pos.phase == GC_PHASE_DONE);
 
 	preempt_disable();
 	stats = this_cpu_ptr(ca->bucket_stats_percpu);
@@ -209,9 +209,17 @@ do {								\
 	}							\
 } while (0)
 
+/*
+ * If is_gc is false, marks iff gc's position is _after_ gc_pos
+ *
+ * Checking against gc's position has to be done here, inside the cmpxchg()
+ * loop, to avoid racing with the start of gc clearing all the marks - GC does
+ * that with the gc pos seqlock held.
+ */
 static u8 bch_mark_bucket(struct cache_set *c, struct cache *ca,
-			  struct btree *b, const struct bch_extent_ptr *ptr,
-			  int sectors, bool dirty, bool metadata, bool is_gc)
+			  const struct bch_extent_ptr *ptr, int sectors,
+			  bool dirty, bool metadata, bool is_gc,
+			  struct gc_pos gc_pos)
 {
 	struct bucket_mark old, new;
 	unsigned long bucket_nr = PTR_BUCKET_NR(ca, ptr);
@@ -243,9 +251,7 @@ static u8 bch_mark_bucket(struct cache_set *c, struct cache *ca,
 		 * GC starting between when we check gc_cur_key and when
 		 * the GC zeroes out marks
 		 */
-		if (b
-		    ? gc_will_visit_node(c, b)
-		    : gc_will_visit_root(c, BTREE_ID_EXTENTS))
+		if (gc_will_visit(c, gc_pos))
 			return 0;
 
 		/*
@@ -297,9 +303,9 @@ static u8 bch_mark_bucket(struct cache_set *c, struct cache *ca,
 /*
  * Returns 0 on success, -1 on failure (pointer was stale)
  */
-int bch_mark_pointers(struct cache_set *c, struct btree *b,
-		      struct bkey_s_c_extent e, int sectors,
-		      bool fail_if_stale, bool metadata, bool is_gc)
+int bch_mark_pointers(struct cache_set *c, struct bkey_s_c_extent e,
+		      int sectors, bool fail_if_stale, bool metadata,
+		      bool is_gc, struct gc_pos pos)
 {
 	const struct bch_extent_ptr *ptr, *ptr2;
 	struct cache *ca;
@@ -345,8 +351,8 @@ int bch_mark_pointers(struct cache_set *c, struct btree *b,
 		 *
 		 * Fuck me, I hate my life.
 		 */
-		stale = bch_mark_bucket(c, ca, b, ptr, sectors,
-					dirty, metadata, is_gc);
+		stale = bch_mark_bucket(c, ca, ptr, sectors, dirty,
+					metadata, is_gc, pos);
 		if (stale && dirty && fail_if_stale)
 			goto stale;
 	}
@@ -358,9 +364,9 @@ stale:
 		if (ptr2 == ptr)
 			break;
 
-		bch_mark_bucket(c, ca, b, ptr, -sectors,
+		bch_mark_bucket(c, ca, ptr, -sectors,
 				bch_extent_ptr_is_dirty(c, e, ptr),
-				metadata, is_gc);
+				metadata, is_gc, pos);
 	}
 
 	rcu_read_unlock();
diff --git a/drivers/md/bcache/buckets.h b/drivers/md/bcache/buckets.h
index 73c811985da2..b4fa92d74897 100644
--- a/drivers/md/bcache/buckets.h
+++ b/drivers/md/bcache/buckets.h
@@ -272,7 +272,7 @@ void bch_mark_alloc_bucket(struct cache *, struct bucket *);
 void bch_mark_metadata_bucket(struct cache *, struct bucket *, bool);
 void bch_unmark_open_bucket(struct cache *, struct bucket *);
 
-int bch_mark_pointers(struct cache_set *, struct btree *,
-		      struct bkey_s_c_extent, int, bool, bool, bool);
+int bch_mark_pointers(struct cache_set *, struct bkey_s_c_extent,
+		      int, bool, bool, bool, struct gc_pos);
 
 #endif /* _BUCKETS_H */
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 36f1c38184ba..49592ae70e27 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -399,10 +399,10 @@ static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b,
 			goto err;
 
 		do {
-			seq = read_seqcount_begin(&c->gc_cur_lock);
-			bad = (!__gc_will_visit_node(c, b) &&
-			       !g->mark.is_metadata);
-		} while (read_seqcount_retry(&c->gc_cur_lock, seq));
+			seq = read_seqcount_begin(&c->gc_pos_lock);
+			bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
+				!g->mark.is_metadata;
+		} while (read_seqcount_retry(&c->gc_pos_lock, seq));
 
 		err = "inconsistent";
 		if (bad)
@@ -775,8 +775,9 @@ static int bch_add_sectors(struct cache_set *c, struct btree *b,
 		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 		int ret;
 
-		ret = bch_mark_pointers(c, b, e, sectors, fail_if_stale,
-					false, false);
+		ret = bch_mark_pointers(c, e, sectors, fail_if_stale,
+					false, false,
+					gc_pos_btree_node(b));
 		if (ret)
 			return ret;
@@ -1401,7 +1402,7 @@ static void bch_extent_debugcheck(struct cache_set *c, struct btree *b,
 		do {
 			struct bucket_mark mark;
 
-			seq = read_seqcount_begin(&c->gc_cur_lock);
+			seq = read_seqcount_begin(&c->gc_pos_lock);
 			mark = READ_ONCE(g->mark);
 
 			/* between mark and bucket gen */
@@ -1416,13 +1417,16 @@ static void bch_extent_debugcheck(struct cache_set *c, struct btree *b,
 					 "key too stale: %i",
 					 stale);
 
-			bad = (!stale &&
-			       !__gc_will_visit_node(c, b) &&
-			       (mark.is_metadata ||
-				(!mark.dirty_sectors &&
-				 !mark.owned_by_allocator &&
-				 dirty)));
-		} while (read_seqcount_retry(&c->gc_cur_lock, seq));
+			if (stale)
+				break;
+
+			bad = (mark.is_metadata ||
+			       (gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
+				!mark.owned_by_allocator &&
+				!(dirty
+				  ? mark.dirty_sectors
+				  : mark.cached_sectors)));
+		} while (read_seqcount_retry(&c->gc_pos_lock, seq));
 
 		if (bad)
 			goto bad_ptr;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 3ee286b72bbb..51b5515277e0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -940,8 +940,9 @@ static int bch_journal_replay_key(struct cache_set *c, enum btree_id id,
 	 * them again
 	 */
 	if (do_subtract)
-		bch_mark_pointers(c, NULL, bkey_i_to_s_c_extent(&temp.key),
-				  -temp.key.k.size, false, false, true);
+		bch_mark_pointers(c, bkey_i_to_s_c_extent(&temp.key),
+				  -temp.key.k.size, false, false,
+				  true, GC_POS_MIN);
 
 	return 0;
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fc76ec9737f4..b9f7b3f36ee5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1054,7 +1054,7 @@ static struct cache_set *bch_cache_set_alloc(struct cache_sb *sb,
 	spin_lock_init(&c->read_race_lock);
 	INIT_WORK(&c->read_race_work, bch_read_race_work);
 
-	seqcount_init(&c->gc_cur_lock);
+	seqcount_init(&c->gc_pos_lock);
 
 	c->prio_clock[READ].hand = 1;
 	c->prio_clock[READ].min_prio = 0;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4a0cf0ad8c78..493e14d61491 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -602,7 +602,7 @@ SHOW(bch_cache_set)
 	sysfs_hprint(btree_cache_size, bch_cache_size(c));
 	sysfs_print(cache_available_percent, bch_cache_available_percent(c));
 
-	sysfs_print(btree_gc_running, c->gc_cur_phase != GC_PHASE_DONE);
+	sysfs_print(btree_gc_running, c->gc_pos.phase != GC_PHASE_DONE);
 
 	sysfs_print_time_stats(&c->mca_alloc_time, mca_alloc, sec, us);
 	sysfs_print_time_stats(&c->mca_scan_time, mca_scan, sec, ms);
```
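
For completeness, here is a rough model of the seqlock protocol around `gc_pos` that the race fix relies on: GC publishes its position (and clears marks) under the write side, and updaters re-read until they get a stable snapshot, so a `gc_pos` check done inside `bch_mark_bucket()`'s cmpxchg loop can't observe a torn position or miss GC starting. This is a hypothetical userspace approximation using C11 atomics; the kernel code uses `seqcount_t` with `write_seqcount_begin()/end()` and `read_seqcount_begin()/retry()`, and the plain read of the shared struct below is a simplification of what those primitives guarantee.

```c
#include <stdatomic.h>
#include <stdbool.h>

struct gc_pos { int phase; unsigned long pos; unsigned level; };

static atomic_uint   gc_pos_seq;   /* even = stable, odd = write in progress */
static struct gc_pos gc_pos;       /* protected by gc_pos_seq */

/* Write side - only the GC thread does this (mirrors __gc_pos_set()) */
static void gc_pos_set(struct gc_pos new_pos)
{
        atomic_fetch_add_explicit(&gc_pos_seq, 1, memory_order_acq_rel); /* -> odd */
        gc_pos = new_pos;          /* ...GC would also clear bucket marks here... */
        atomic_fetch_add_explicit(&gc_pos_seq, 1, memory_order_acq_rel); /* -> even */
}

static int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
        if (l.phase != r.phase) return l.phase < r.phase ? -1 : 1;
        if (l.pos   != r.pos)   return l.pos   < r.pos   ? -1 : 1;
        if (l.level != r.level) return l.level < r.level ? -1 : 1;
        return 0;
}

/* Read side - mirrors gc_will_visit(): retry until the snapshot is stable */
static bool gc_will_visit(struct gc_pos pos)
{
        unsigned seq;
        bool ret;

        do {
                do {    /* wait out an in-progress write (odd sequence) */
                        seq = atomic_load_explicit(&gc_pos_seq, memory_order_acquire);
                } while (seq & 1);

                ret = gc_pos_cmp(gc_pos, pos) < 0;
        } while (atomic_load_explicit(&gc_pos_seq, memory_order_acquire) != seq);

        return ret;
}

int main(void)
{
        gc_pos_set((struct gc_pos){ .phase = 0, .pos = 100, .level = 0 });
        /* GC is at pos 100, so it will still visit a reference at pos 200 */
        return !gc_will_visit((struct gc_pos){ .phase = 0, .pos = 200, .level = 0 });
}
```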