author    Slava Pestov <sviatoslavpestov@gmail.com>   2015-02-14 18:30:35 -0800
committer Kent Overstreet <kent.overstreet@gmail.com> 2016-10-07 12:33:34 -0800
commit    e7dace05f7e05bb16809c86b685b9d19807a1c01 (patch)
tree      4e754a879d96578369a1100afb98d810fb70aa1f
parent    cf963e9043aa1e70d82e41c10079a89fffa99478 (diff)
bcache: use group_for_each_cache_rcu() in journal code
This way we only look at devices that are active, i.e. that have
completed bch_cache_read_write(). Previously we iterated over all
cache devices and skipped those that were inactive or not in tier 0;
but newly added devices are initialized after they are already
marked active, which could race with journal_reclaim().
Also, move group_for_each_cache_rcu() from alloc.c to super.h.
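
As a minimal sketch of the iteration pattern (userspace, for
illustration only: rcu_dereference() is stubbed out and the structs
are simplified stand-ins rather than the kernel definitions; the
bodies of cache_group_next() and group_for_each_cache_rcu() are the
ones added to super.h below):

  #include <stddef.h>
  #include <stdio.h>

  #define rcu_dereference(p) (p)	/* stub: the real one orders the load */

  struct cache { int nr_this_dev; };

  struct cache_group {
  	unsigned nr_devices;
  	struct cache *devices[8];	/* RCU-protected slots, NULL when empty */
  };

  /* Advance *iter past NULL slots; return the next live device or NULL. */
  static inline struct cache *cache_group_next(struct cache_group *devs,
  					     unsigned *iter)
  {
  	struct cache *ret = NULL;

  	while (*iter < devs->nr_devices &&
  	       !(ret = rcu_dereference(devs->devices[*iter])))
  		(*iter)++;

  	return ret;
  }

  #define group_for_each_cache_rcu(ca, devs, iter)			\
  	for ((iter) = 0; ((ca) = cache_group_next((devs), &(iter))); (iter)++)

  int main(void)
  {
  	struct cache a = { .nr_this_dev = 0 }, b = { .nr_this_dev = 2 };
  	struct cache_group tier0 = {
  		.nr_devices = 3,
  		.devices = { &a, NULL, &b },	/* slot 1 is empty */
  	};
  	struct cache *ca;
  	unsigned iter;

  	/* Only devices already in the group are visited: no tier or
  	 * state checks needed, so a device that is active but not yet
  	 * initialized can never be seen here. */
  	group_for_each_cache_rcu(ca, &tier0, iter)
  		printf("visiting device %d\n", ca->nr_this_dev);

  	return 0;
  }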
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--  drivers/md/bcache/alloc.c   | 15 ---------------
-rw-r--r--  drivers/md/bcache/journal.c | 18 ++++--------------
-rw-r--r--  drivers/md/bcache/super.h   | 17 +++++++++++++++++
3 files changed, 21 insertions(+), 29 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 207e7495d97c..9b8b068e3e56 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -97,21 +97,6 @@ void bch_cache_group_add_cache(struct cache_group *grp, struct cache *ca)
 	write_seqcount_end(&grp->lock);
 }
 
-static inline struct cache *cache_group_next(struct cache_group *devs,
-					     unsigned *iter)
-{
-	struct cache *ret = NULL;
-
-	while (*iter < devs->nr_devices &&
-	       !(ret = rcu_dereference(devs->devices[*iter])))
-		(*iter)++;
-
-	return ret;
-}
-
-#define group_for_each_cache_rcu(ca, devs, iter)			\
-	for ((iter) = 0; ((ca) = cache_group_next((devs), &(iter))); (iter)++)
-
 /* Ratelimiting/PD controllers */
 
 static void pd_controllers_update(struct work_struct *work)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 3ee0d45619e0..e9a39c71cef1 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -773,15 +773,11 @@ static void journal_reclaim_work(struct work_struct *work)
 
 	rcu_read_lock();
 
-	for_each_cache_rcu(ca, c, iter) {
+	group_for_each_cache_rcu(ca, &c->cache_tiers[0], iter) {
 		struct journal_device *ja = &ca->journal;
 		unsigned nr = bch_nr_journal_buckets(&ca->sb);
 		unsigned next = (ja->cur_idx + (nr >> 1)) % nr;
 
-		if ((CACHE_TIER(&ca->mi) != 0)
-		    || (CACHE_STATE(&ca->mi) != CACHE_ACTIVE))
-			continue;
-
 		/*
 		 * Write out enough btree nodes to free up 50% journal
 		 * buckets
@@ -853,14 +849,10 @@ static bool journal_reclaim_fast(struct cache_set *c)
 	 * Advance last_idx to point to the oldest journal entry containing
 	 * btree node updates that have not yet been written out
 	 */
-	for_each_cache_rcu(ca, c, iter) {
+	group_for_each_cache_rcu(ca, &c->cache_tiers[0], iter) {
 		struct journal_device *ja = &ca->journal;
 		unsigned nr = bch_nr_journal_buckets(&ca->sb);
 
-		if ((CACHE_TIER(&ca->mi) != 0)
-		    || (CACHE_STATE(&ca->mi) != CACHE_ACTIVE))
-			continue;
-
 		while (ja->last_idx != ja->cur_idx &&
 		       ja->seq[ja->last_idx] < last_seq)
 			ja->last_idx = (ja->last_idx + 1) % nr;
@@ -919,7 +911,7 @@ static void journal_next_bucket(struct cache_set *c)
 	 * Determine location of the next journal write:
 	 * XXX: sort caches by free journal space
 	 */
-	for_each_cache_rcu(ca, c, iter) {
+	group_for_each_cache_rcu(ca, &c->cache_tiers[0], iter) {
 		struct journal_device *ja = &ca->journal;
 		unsigned next = (ja->cur_idx + 1) % bch_nr_journal_buckets(&ca->sb);
 
@@ -931,9 +923,7 @@ static void journal_next_bucket(struct cache_set *c)
 		/*
 		 * Check that we can use this device, and aren't already using
 		 * it:
 		 */
-		if ((CACHE_TIER(&ca->mi) != 0) ||
-		    (CACHE_STATE(&ca->mi) != CACHE_ACTIVE) ||
-		    bch_extent_has_device(extent_s_to_s_c(e),
+		if (bch_extent_has_device(extent_s_to_s_c(e),
 					  ca->sb.nr_this_dev))
 			continue;
 
diff --git a/drivers/md/bcache/super.h b/drivers/md/bcache/super.h
index 317f0f557f9f..9c46b724cff9 100644
--- a/drivers/md/bcache/super.h
+++ b/drivers/md/bcache/super.h
@@ -59,6 +59,23 @@ static inline struct cache *bch_get_next_cache(struct cache_set *c,
 	     (ca = bch_get_next_cache(c, &(iter)));			\
 	     percpu_ref_put(&ca->ref), (iter)++)
 
+static inline struct cache *cache_group_next(struct cache_group *devs,
+					     unsigned *iter)
+{
+	struct cache *ret = NULL;
+
+	while (*iter < devs->nr_devices &&
+	       !(ret = rcu_dereference(devs->devices[*iter])))
+		(*iter)++;
+
+	return ret;
+}
+
+#define group_for_each_cache_rcu(ca, devs, iter)			\
+	for ((iter) = 0;						\
+	     ((ca) = cache_group_next((devs), &(iter)));		\
+	     (iter)++)
+
 u64 bch_checksum_update(unsigned, u64, const void *, size_t);
 u64 bch_checksum(unsigned, const void *, size_t);