author     Kent Overstreet <kent.overstreet@gmail.com>   2016-01-13 23:12:02 -0900
committer  Kent Overstreet <kent.overstreet@gmail.com>   2016-10-07 12:35:19 -0800
commit     d8d59661a97940a0ad63fbda85cff7a5d806d139 (patch)
tree       b798851dd9a25f468b8e143e757c6457d0de89e8
parent     fc59369fb2211615b1fca9b72d7eea4bfa0d9eeb (diff)
bcache: split out c->disk_mi
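
Split the on-disk member info out from the runtime copies: the
little-endian struct cache_member array now lives in c->disk_mi,
protected by bch_register_lock, while a new unpacked struct
cache_member_cpu (native-endian fields, plus a "valid" flag in place
of the zero-uuid check) is cached in the RCU-protected c->members and
in each ca->mi. Hot paths read ca->mi.tier, ca->mi.state etc. directly
instead of going through the CACHE_TIER()/CACHE_STATE() bitfield
accessors; writers update c->disk_mi under bch_register_lock and call
bcache_write_super(), which refreshes the cached copies via
cache_set_mi_update().

The resulting access pattern is roughly the following sketch,
distilled from the hunks below rather than taken verbatim from the
patch (do_discard() is a stand-in for the caller's work):

	/* read side: lockless, native-endian, no bitfield accessors */
	if (ca->mi.state == CACHE_ACTIVE && ca->mi.discard)
		do_discard(ca);

	/*
	 * write side: mutate the on-disk copy under bch_register_lock;
	 * __bcache_write_super() then refreshes every cached
	 * cache_member_cpu copy via cache_set_mi_update()
	 */
	mutex_lock(&bch_register_lock);
	SET_CACHE_STATE(&c->disk_mi[ca->sb.nr_this_dev], CACHE_RO);
	bcache_write_super(c);
	mutex_unlock(&bch_register_lock);
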
-rw-r--r--   drivers/md/bcache/alloc.c          8
-rw-r--r--   drivers/md/bcache/bcache.h        32
-rw-r--r--   drivers/md/bcache/chardev.c       12
-rw-r--r--   drivers/md/bcache/error.c          2
-rw-r--r--   drivers/md/bcache/extents.c       13
-rw-r--r--   drivers/md/bcache/io.c             2
-rw-r--r--   drivers/md/bcache/journal.c        6
-rw-r--r--   drivers/md/bcache/super.c        245
-rw-r--r--   drivers/md/bcache/super.h          8
-rw-r--r--   drivers/md/bcache/sysfs.c         16
-rw-r--r--   drivers/md/bcache/tier.c           8
-rw-r--r--   include/trace/events/bcache.h      2
12 files changed, 197 insertions(+), 157 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 49329a906f19..12df531bdcc9 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -707,7 +707,7 @@ static void invalidate_buckets_random(struct cache *ca)
static void invalidate_buckets(struct cache *ca)
{
- switch (CACHE_REPLACEMENT(&ca->mi)) {
+ switch (ca->mi.replacement) {
case CACHE_REPLACEMENT_LRU:
invalidate_buckets_lru(ca);
break;
@@ -777,7 +777,7 @@ static int bch_allocator_thread(void *arg)
* dropped bucket lock
*/
- if (CACHE_DISCARD(&ca->mi) &&
+ if (ca->mi.discard &&
blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca, bucket),
@@ -1495,7 +1495,7 @@ found:
void bch_cache_allocator_stop(struct cache *ca)
{
struct cache_set *c = ca->set;
- struct cache_group *tier = &c->cache_tiers[CACHE_TIER(&ca->mi)];
+ struct cache_group *tier = &c->cache_tiers[ca->mi.tier];
struct open_bucket *ob;
struct bch_extent_ptr *ptr;
struct task_struct *p;
@@ -1587,7 +1587,7 @@ void bch_cache_allocator_stop(struct cache *ca)
const char *bch_cache_allocator_start(struct cache *ca)
{
struct cache_set *c = ca->set;
- struct cache_group *tier = &c->cache_tiers[CACHE_TIER(&ca->mi)];
+ struct cache_group *tier = &c->cache_tiers[ca->mi.tier];
struct task_struct *k;
k = kthread_create(bch_allocator_thread, ca, "bcache_allocator");
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index ce6521ce85f1..bcb6f39971f1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -310,6 +310,26 @@ struct cache_group {
struct cache __rcu *devices[MAX_CACHES_PER_SET];
};
+struct cache_member_cpu {
+ u64 nbuckets; /* device size */
+ u16 first_bucket; /* index of first bucket used */
+ u16 bucket_size; /* sectors */
+ u8 state;
+ u8 tier;
+ u8 replication_set;
+ u8 has_metadata;
+ u8 has_data;
+ u8 replacement;
+ u8 discard;
+ u8 valid;
+};
+
+struct cache_member_rcu {
+ struct rcu_head rcu;
+ unsigned nr_in_set;
+ struct cache_member_cpu m[];
+};
+
struct cache {
struct percpu_ref ref;
struct rcu_head free_rcu;
@@ -328,7 +348,7 @@ struct cache {
struct {
u8 nr_this_dev;
} sb;
- struct cache_member mi;
+ struct cache_member_cpu mi;
struct bcache_superblock disk_sb;
@@ -461,12 +481,6 @@ enum {
CACHE_SET_BTREE_WRITE_ERROR,
};
-struct cache_member_rcu {
- struct rcu_head rcu;
- unsigned nr_in_set;
- struct cache_member m[];
-};
-
struct btree_debug {
unsigned id;
struct dentry *btree;
@@ -495,9 +509,11 @@ struct cache_set {
struct work_struct read_only_work;
struct cache __rcu *cache[MAX_CACHES_PER_SET];
- struct cache_member_rcu __rcu *members;
unsigned long cache_slots_used[BITS_TO_LONGS(MAX_CACHES_PER_SET)];
+ struct cache_member_rcu __rcu *members;
+ struct cache_member *disk_mi; /* protected by register_lock */
+
struct cache_set_opts opts;
/*
diff --git a/drivers/md/bcache/chardev.c b/drivers/md/bcache/chardev.c
index f9de013a1a23..447c55db4a90 100644
--- a/drivers/md/bcache/chardev.c
+++ b/drivers/md/bcache/chardev.c
@@ -192,14 +192,14 @@ static long bch_ioctl_disk_fail(struct cache_set *c,
static struct cache_member *bch_uuid_lookup(struct cache_set *c, uuid_le uuid)
{
- struct cache_member_rcu *mi =
- rcu_dereference_protected(c->members,
- lockdep_is_held(&bch_register_lock));
+ struct cache_member *mi = c->disk_mi;
unsigned i;
- for (i = 0; i < mi->nr_in_set; i++)
- if (!memcmp(&mi->m[i].uuid, &uuid, sizeof(uuid)))
- return &mi->m[i];
+ lockdep_assert_held(&bch_register_lock);
+
+ for (i = 0; i < c->disk_sb.nr_in_set; i++)
+ if (!memcmp(&mi[i].uuid, &uuid, sizeof(uuid)))
+ return &mi[i];
return NULL;
}
diff --git a/drivers/md/bcache/error.c b/drivers/md/bcache/error.c
index 3a6069b9a253..e550f7fad663 100644
--- a/drivers/md/bcache/error.c
+++ b/drivers/md/bcache/error.c
@@ -118,7 +118,7 @@ void bch_nonfatal_io_error_work(struct work_struct *work)
bch_notify_cache_error(ca, true);
mutex_lock(&bch_register_lock);
- if (CACHE_STATE(&ca->mi) == CACHE_ACTIVE) {
+ if (ca->mi.state == CACHE_ACTIVE) {
printk(KERN_ERR "bcache: too many IO errors on %s, going RO\n",
bdevname(ca->disk_sb.bdev, buf));
bch_cache_read_only(ca);
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 7eab9ae9e590..fa7e477de0b4 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -272,10 +272,9 @@ static const char *extent_ptr_invalid(const struct cache_member_rcu *mi,
const struct bch_extent_ptr *ptr,
unsigned size_ondisk)
{
- const struct cache_member *m = mi->m + ptr->dev;
+ const struct cache_member_cpu *m = mi->m + ptr->dev;
- if (ptr->dev > mi->nr_in_set ||
- bch_is_zero(m->uuid.b, sizeof(uuid_le)))
+ if (ptr->dev > mi->nr_in_set || !m->valid)
return "pointer to invalid device";
if (ptr->offset + size_ondisk > m->bucket_size * m->nbuckets)
@@ -1433,10 +1432,10 @@ static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
continue;
- if (bch_is_zero(mi->m[ptr->dev].uuid.b, sizeof(uuid_le)))
+ if (!mi->m[ptr->dev].valid)
goto bad_device;
- tier = CACHE_TIER(&mi->m[ptr->dev]);
+ tier = mi->m[ptr->dev].tier;
ptrs_per_tier[tier]++;
stale = 0;
@@ -1555,7 +1554,7 @@ static unsigned PTR_TIER(struct cache_member_rcu *mi,
const struct bch_extent_ptr *ptr)
{
return ptr->dev < mi->nr_in_set
- ? CACHE_TIER(&mi->m[ptr->dev])
+ ? mi->m[ptr->dev].tier
: UINT_MAX;
}
@@ -1797,7 +1796,7 @@ static enum merge_result bch_extent_merge(struct btree_keys *bk,
extent_for_each_entry(el, en_l) {
struct bch_extent_ptr *lp, *rp;
- struct cache_member *m;
+ struct cache_member_cpu *m;
en_r = bkey_idx(er.v, (u64 *) en_l - el.v->_data);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index b2dcca806ca6..a4758effcd93 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -1484,7 +1484,7 @@ static void bch_read_extent_iter(struct cache_set *c, struct bio *orig,
bool bounce = false, read_full = false;
/* only promote if we're not reading from the fastest tier: */
- if ((flags & BCH_READ_PROMOTE) && CACHE_TIER(&pick->ca->mi)) {
+ if ((flags & BCH_READ_PROMOTE) && pick->ca->mi.tier) {
promote_op = kmalloc(sizeof(*promote_op), GFP_NOIO);
if (promote_op)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 968387e98980..b8f9cb41eebe 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -1046,7 +1046,7 @@ int bch_cache_journal_alloc(struct cache *ca)
int ret;
unsigned i;
- if (CACHE_TIER(&ca->mi) != 0)
+ if (ca->mi.tier != 0)
return 0;
if (dynamic_fault("bcache:add:journal_alloc"))
@@ -1141,7 +1141,7 @@ static void journal_reclaim_work(struct work_struct *work)
while (ja->last_idx != cur_idx &&
ja->bucket_seq[ja->last_idx] < last_seq) {
- if (CACHE_DISCARD(&ca->mi) &&
+ if (ca->mi.discard &&
blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca,
@@ -1236,7 +1236,7 @@ static void journal_next_bucket(struct cache_set *c)
*/
extent_for_each_ptr_backwards(e, ptr)
if (!(ca = PTR_CACHE(c, ptr)) ||
- CACHE_STATE(&ca->mi) != CACHE_ACTIVE ||
+ ca->mi.state != CACHE_ACTIVE ||
ca->journal.sectors_free <= j->sectors_free)
__bch_extent_drop_ptr(e, ptr);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3854df664351..06f44fa6a9cb 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -185,6 +185,23 @@ static int bch_congested_fn(void *data, int bdi_bits)
/* Superblock */
+static struct cache_member_cpu cache_mi_to_cpu_mi(struct cache_member *mi)
+{
+ return (struct cache_member_cpu) {
+ .nbuckets = le64_to_cpu(mi->nbuckets),
+ .first_bucket = le16_to_cpu(mi->first_bucket),
+ .bucket_size = le16_to_cpu(mi->bucket_size),
+ .state = CACHE_STATE(mi),
+ .tier = CACHE_TIER(mi),
+ .replication_set= CACHE_REPLICATION_SET(mi),
+ .has_metadata = CACHE_HAS_METADATA(mi),
+ .has_data = CACHE_HAS_DATA(mi),
+ .replacement = CACHE_REPLACEMENT(mi),
+ .discard = CACHE_DISCARD(mi),
+ .valid = !bch_is_zero(mi->uuid.b, sizeof(uuid_le)),
+ };
+}
+
static const char *validate_cache_super(struct cache_set *c, struct cache *ca)
{
struct cache_sb *sb = ca->disk_sb.sb;
@@ -258,7 +275,7 @@ static const char *validate_cache_super(struct cache_set *c, struct cache *ca)
if (le16_to_cpu(sb->u64s) < bch_journal_buckets_offset(sb))
return "Invalid superblock: member info area missing";
- ca->mi = sb->members[sb->nr_this_dev];
+ ca->mi = cache_mi_to_cpu_mi(sb->members + sb->nr_this_dev);
if (ca->mi.nbuckets > LONG_MAX)
return "Too many buckets";
@@ -466,21 +483,30 @@ static void bcache_write_super_unlock(struct closure *cl)
up(&c->sb_write_mutex);
}
-static int cache_sb_to_cache_set(struct cache_set *c, struct cache_sb *src)
+/* Update cached mi: */
+static int cache_set_mi_update(struct cache_set *c,
+ struct cache_member *mi,
+ unsigned nr_in_set)
{
struct cache_member_rcu *new, *old;
- struct cache_sb *dst = &c->disk_sb;
- unsigned nr_in_set = le16_to_cpu(src->nr_in_set);
+ struct cache *ca;
+ unsigned i;
new = kzalloc(sizeof(struct cache_member_rcu) +
- sizeof(struct cache_member) * nr_in_set,
+ sizeof(struct cache_member_cpu) * nr_in_set,
GFP_KERNEL);
if (!new)
return -ENOMEM;
new->nr_in_set = nr_in_set;
- memcpy(&new->m, src->members,
- nr_in_set * sizeof(new->m[0]));
+
+ for (i = 0; i < nr_in_set; i++)
+ new->m[i] = cache_mi_to_cpu_mi(&mi[i]);
+
+ rcu_read_lock();
+ for_each_cache(ca, c, i)
+ ca->mi = new->m[i];
+ rcu_read_unlock();
old = rcu_dereference_protected(c->members,
lockdep_is_held(&bch_register_lock));
@@ -489,6 +515,33 @@ static int cache_sb_to_cache_set(struct cache_set *c, struct cache_sb *src)
if (old)
kfree_rcu(old, rcu);
+ return 0;
+}
+
+static int cache_sb_to_cache_set(struct cache_set *c, struct cache_sb *src)
+{
+ struct cache_member *new;
+ struct cache_sb *dst = &c->disk_sb;
+ unsigned nr_in_set = src->nr_in_set;
+
+ lockdep_assert_held(&bch_register_lock);
+
+ new = kzalloc(sizeof(struct cache_member) * nr_in_set,
+ GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ memcpy(new, src->members,
+ nr_in_set * sizeof(struct cache_member));
+
+ if (cache_set_mi_update(c, new, nr_in_set)) {
+ kfree(new);
+ return -ENOMEM;
+ }
+
+ kfree(c->disk_mi);
+ c->disk_mi = new;
+
dst->version = src->version;
dst->seq = src->seq;
dst->user_uuid = src->user_uuid;
@@ -513,7 +566,13 @@ static int cache_sb_to_cache_set(struct cache_set *c, struct cache_sb *src)
static int cache_sb_from_cache_set(struct cache_set *c, struct cache *ca)
{
struct cache_sb *src = &c->disk_sb, *dst = ca->disk_sb.sb;
- struct cache_member_rcu *mi;
+
+ dst->version = BCACHE_SB_VERSION_CDEV;
+ dst->seq = src->seq;
+ dst->user_uuid = src->user_uuid;
+ dst->set_uuid = src->set_uuid;
+ memcpy(dst->label, src->label, SB_LABEL_SIZE);
+ dst->flags = src->flags;
if (src->nr_in_set != dst->nr_in_set) {
/*
@@ -536,20 +595,9 @@ static int cache_sb_from_cache_set(struct cache_set *c, struct cache *ca)
bch_nr_journal_buckets(dst) * sizeof(u64));
}
- mi = cache_member_info_get(c);
- ca->mi = mi->m[ca->sb.nr_this_dev];
-
- memcpy(ca->disk_sb.sb->_data, mi->m,
- mi->nr_in_set * sizeof(mi->m[0]));
- cache_member_info_put();
-
- dst->version = BCACHE_SB_VERSION_CDEV;
- dst->seq = src->seq;
- dst->user_uuid = src->user_uuid;
- dst->set_uuid = src->set_uuid;
- memcpy(dst->label, src->label, SB_LABEL_SIZE);
- dst->nr_in_set = src->nr_in_set;
- dst->flags = src->flags;
+ memcpy(dst->_data,
+ c->disk_mi,
+ src->nr_in_set * sizeof(struct cache_member));
return 0;
}
@@ -560,6 +608,8 @@ static void __bcache_write_super(struct cache_set *c)
struct cache *ca;
unsigned i;
+ cache_set_mi_update(c, c->disk_mi, c->sb.nr_in_set);
+
closure_init(cl, &c->cl);
c->disk_sb.seq = cpu_to_le64(le64_to_cpu(c->disk_sb.seq) + 1);
@@ -607,7 +657,7 @@ void bch_check_mark_super_slowpath(struct cache_set *c, const struct bkey_i *k,
return;
}
- mi = cache_member_info_get(c)->m;
+ mi = c->disk_mi;
extent_for_each_ptr(e, ptr)
if (bch_extent_ptr_is_dirty(c, e, ptr))
@@ -615,8 +665,6 @@ void bch_check_mark_super_slowpath(struct cache_set *c, const struct bkey_i *k,
? SET_CACHE_HAS_METADATA
: SET_CACHE_HAS_DATA)(mi + ptr->dev, true);
- cache_member_info_put();
-
__bcache_write_super(c);
}
@@ -744,7 +792,7 @@ static const char *__bch_cache_set_read_write(struct cache_set *c)
goto err;
for_each_cache(ca, c, i) {
- if (CACHE_STATE(&ca->mi) != CACHE_ACTIVE)
+ if (ca->mi.state != CACHE_ACTIVE)
continue;
err = "error starting moving GC thread";
@@ -782,7 +830,7 @@ const char *bch_cache_set_read_write(struct cache_set *c)
return NULL;
for_each_cache(ca, c, i)
- if (CACHE_STATE(&ca->mi) == CACHE_ACTIVE &&
+ if (ca->mi.state == CACHE_ACTIVE &&
(err = bch_cache_allocator_start(ca))) {
percpu_ref_put(&ca->ref);
goto err;
@@ -829,6 +877,7 @@ static void cache_set_free(struct cache_set *c)
destroy_workqueue(c->wq);
kfree_rcu(rcu_dereference_protected(c->members, 1), rcu); /* shutting down */
+ kfree(c->disk_mi);
kfree(c);
module_put(THIS_MODULE);
}
@@ -935,13 +984,13 @@ void bch_cache_set_unregister(struct cache_set *c)
static unsigned cache_set_nr_devices(struct cache_set *c)
{
unsigned i, nr = 0;
- struct cache_member_rcu *mi = cache_member_info_get(c);
+ struct cache_member *mi = c->disk_mi;
- for (i = 0; i < mi->nr_in_set; i++)
- if (!bch_is_zero(mi->m[i].uuid.b, sizeof(uuid_le)))
- nr++;
+ lockdep_assert_held(&bch_register_lock);
- cache_member_info_put();
+ for (i = 0; i < c->disk_sb.nr_in_set; i++)
+ if (!bch_is_zero(mi[i].uuid.b, sizeof(uuid_le)))
+ nr++;
return nr;
}
@@ -1156,7 +1205,6 @@ static int bch_cache_set_online(struct cache_set *c)
static const char *run_cache_set(struct cache_set *c)
{
const char *err = "cannot allocate memory";
- struct cache_member_rcu *mi;
struct cache *ca;
unsigned i, id;
long now;
@@ -1171,11 +1219,8 @@ static const char *run_cache_set(struct cache_set *c)
* Make sure that each cache object's mi is up to date before
* we start testing it.
*/
-
- mi = cache_member_info_get(c);
for_each_cache(ca, c, i)
- ca->mi = mi->m[ca->sb.nr_this_dev];
- cache_member_info_put();
+ cache_sb_from_cache_set(c, ca);
/*
* CACHE_SYNC is true if the cache set has already been run
@@ -1247,7 +1292,7 @@ static const char *run_cache_set(struct cache_set *c)
bch_journal_start(c);
for_each_cache(ca, c, i)
- if (CACHE_STATE(&ca->mi) == CACHE_ACTIVE &&
+ if (ca->mi.state == CACHE_ACTIVE &&
(err = bch_cache_allocator_start_once(ca))) {
percpu_ref_put(&ca->ref);
goto err;
@@ -1287,7 +1332,7 @@ static const char *run_cache_set(struct cache_set *c)
bch_journal_set_replay_done(&c->journal);
for_each_cache(ca, c, i)
- if (CACHE_STATE(&ca->mi) == CACHE_ACTIVE &&
+ if (ca->mi.state == CACHE_ACTIVE &&
(err = bch_cache_allocator_start_once(ca))) {
percpu_ref_put(&ca->ref);
goto err;
@@ -1334,10 +1379,8 @@ static const char *run_cache_set(struct cache_set *c)
}
now = get_seconds();
- mi = cache_member_info_get(c);
for_each_cache_rcu(ca, c, i)
- mi->m[ca->sb.nr_this_dev].last_mount = now;
- cache_member_info_put();
+ c->disk_mi[ca->sb.nr_this_dev].last_mount = now;
bcache_write_super(c);
@@ -1384,7 +1427,6 @@ static const char *can_add_cache(struct cache_sb *sb,
static const char *can_attach_cache(struct cache_sb *sb, struct cache_set *c)
{
const char *err;
- struct cache_member_rcu *mi;
bool match;
err = can_add_cache(sb, c);
@@ -1395,15 +1437,13 @@ static const char *can_attach_cache(struct cache_sb *sb, struct cache_set *c)
* When attaching an existing device, the cache set superblock must
* already contain member_info with a matching UUID
*/
- mi = cache_member_info_get(c);
-
- match = !(le64_to_cpu(sb->seq) <= le64_to_cpu(c->disk_sb.seq) &&
- (sb->nr_this_dev >= mi->nr_in_set ||
- memcmp(&mi->m[sb->nr_this_dev].uuid,
- &sb->disk_uuid,
- sizeof(uuid_le))));
-
- cache_member_info_put();
+ match = le64_to_cpu(sb->seq) <= le64_to_cpu(c->disk_sb.seq)
+ ? (sb->nr_this_dev < c->disk_sb.nr_in_set &&
+ !memcmp(&c->disk_mi[sb->nr_this_dev].uuid,
+ &sb->disk_uuid, sizeof(uuid_le)))
+ : (sb->nr_this_dev < sb->nr_in_set &&
+ !memcmp(&sb->members[sb->nr_this_dev].uuid,
+ &sb->disk_uuid, sizeof(uuid_le)));
if (!match)
return "cache sb does not match set";
@@ -1413,28 +1453,10 @@ static const char *can_attach_cache(struct cache_sb *sb, struct cache_set *c)
/* Cache device */
-/*
- * Update the cache set's member info and then the various superblocks from one
- * device's member info:
- */
-void bch_cache_member_info_update(struct cache *ca)
-{
- struct cache_set *c = ca->set;
- struct cache_member *mi;
-
- lockdep_assert_held(&bch_register_lock);
-
- mi = cache_member_info_get(c)->m;
- mi[ca->sb.nr_this_dev] = ca->mi;
- cache_member_info_put();
-
- bcache_write_super(c);
-}
-
static bool cache_may_remove(struct cache *ca)
{
struct cache_set *c = ca->set;
- struct cache_group *tier = &c->cache_tiers[CACHE_TIER(&ca->mi)];
+ struct cache_group *tier = &c->cache_tiers[ca->mi.tier];
/*
* Right now, we can't remove the last device from a tier,
@@ -1488,7 +1510,7 @@ void bch_cache_read_only(struct cache *ca)
lockdep_assert_held(&bch_register_lock);
- if (CACHE_STATE(&ca->mi) != CACHE_ACTIVE)
+ if (ca->mi.state != CACHE_ACTIVE)
return;
if (!cache_may_remove(ca)) {
@@ -1505,15 +1527,15 @@ void bch_cache_read_only(struct cache *ca)
pr_notice("%s read only", bdevname(ca->disk_sb.bdev, buf));
bch_notify_cache_read_only(ca);
- SET_CACHE_STATE(&ca->mi, CACHE_RO);
- bch_cache_member_info_update(ca);
+ SET_CACHE_STATE(&c->disk_mi[ca->sb.nr_this_dev], CACHE_RO);
+ bcache_write_super(c);
}
static const char *__bch_cache_read_write(struct cache *ca)
{
const char *err;
- BUG_ON(CACHE_STATE(&ca->mi) != CACHE_ACTIVE);
+ BUG_ON(ca->mi.state != CACHE_ACTIVE);
lockdep_assert_held(&bch_register_lock);
trace_bcache_cache_read_write(ca);
@@ -1543,11 +1565,12 @@ static const char *__bch_cache_read_write(struct cache *ca)
const char *bch_cache_read_write(struct cache *ca)
{
+ struct cache_set *c = ca->set;
const char *err;
lockdep_assert_held(&bch_register_lock);
- if (CACHE_STATE(&ca->mi) == CACHE_ACTIVE)
+ if (ca->mi.state == CACHE_ACTIVE)
return NULL;
if (test_bit(CACHE_DEV_REMOVING, &ca->flags))
@@ -1557,8 +1580,8 @@ const char *bch_cache_read_write(struct cache *ca)
if (err)
return err;
- SET_CACHE_STATE(&ca->mi, CACHE_ACTIVE);
- bch_cache_member_info_update(ca);
+ SET_CACHE_STATE(&c->disk_mi[ca->sb.nr_this_dev], CACHE_ACTIVE);
+ bcache_write_super(c);
return NULL;
}
@@ -1676,7 +1699,6 @@ static void bch_cache_remove_work(struct work_struct *work)
{
struct cache *ca = container_of(work, struct cache, remove_work);
struct cache_set *c = ca->set;
- struct cache_member *mi;
char name[BDEVNAME_SIZE];
bool force = test_bit(CACHE_DEV_FORCE_REMOVE, &ca->flags);
unsigned dev = ca->sb.nr_this_dev;
@@ -1689,16 +1711,20 @@ static void bch_cache_remove_work(struct work_struct *work)
* XXX: locking is sketchy, bch_cache_read_write() has to check
* CACHE_DEV_REMOVING bit
*/
- if (!CACHE_HAS_DATA(&ca->mi)) {
+ if (!ca->mi.has_data) {
/* Nothing to do: */
} else if (!bch_move_data_off_device(ca)) {
- SET_CACHE_HAS_DATA(&ca->mi, false);
- bch_cache_member_info_update(ca);
+ lockdep_assert_held(&bch_register_lock);
+ SET_CACHE_HAS_DATA(&c->disk_mi[ca->sb.nr_this_dev], false);
+
+ bcache_write_super(c);
} else if (force) {
bch_flag_data_bad(ca);
- SET_CACHE_HAS_DATA(&ca->mi, false);
- bch_cache_member_info_update(ca);
+ lockdep_assert_held(&bch_register_lock);
+ SET_CACHE_HAS_DATA(&c->disk_mi[ca->sb.nr_this_dev], false);
+
+ bcache_write_super(c);
} else {
pr_err("Remove of %s failed, unable to migrate data off", name);
clear_bit(CACHE_DEV_REMOVING, &ca->flags);
@@ -1707,11 +1733,13 @@ static void bch_cache_remove_work(struct work_struct *work)
/* Now metadata: */
- if (!CACHE_HAS_METADATA(&ca->mi)) {
+ if (!ca->mi.has_metadata) {
/* Nothing to do: */
} else if (!bch_move_meta_data_off_device(ca)) {
- SET_CACHE_HAS_METADATA(&ca->mi, false);
- bch_cache_member_info_update(ca);
+ lockdep_assert_held(&bch_register_lock);
+ SET_CACHE_HAS_METADATA(&c->disk_mi[ca->sb.nr_this_dev], false);
+
+ bcache_write_super(c);
} else {
pr_err("Remove of %s failed, unable to migrate metadata off",
name);
@@ -1746,9 +1774,8 @@ static void bch_cache_remove_work(struct work_struct *work)
*/
synchronize_rcu();
- mi = cache_member_info_get(c)->m;
- memset(&mi[dev].uuid, 0, sizeof(mi[dev].uuid));
- cache_member_info_put();
+ lockdep_assert_held(&bch_register_lock);
+ memset(&c->disk_mi[dev].uuid, 0, sizeof(c->disk_mi[dev].uuid));
bcache_write_super(c);
mutex_unlock(&bch_register_lock);
@@ -1764,8 +1791,8 @@ bool bch_cache_remove(struct cache *ca, bool force)
return false;
if (!cache_may_remove(ca)) {
- pr_err("Can't remove last device in tier %llu of %pU.",
- CACHE_TIER(&ca->mi), ca->set->disk_sb.set_uuid.b);
+ pr_err("Can't remove last device in tier %u of %pU.",
+ ca->mi.tier, ca->set->disk_sb.set_uuid.b);
bch_notify_cache_remove_failed(ca);
return false;
}
@@ -1997,7 +2024,7 @@ int bch_cache_set_add_cache(struct cache_set *c, const char *path)
struct bcache_superblock sb;
const char *err;
struct cache *ca;
- struct cache_member_rcu *new_mi, *old_mi;
+ struct cache_member *new_mi;
struct cache_member mi;
unsigned nr_this_dev, nr_in_set, u64s;
int ret = -EINVAL;
@@ -2030,7 +2057,7 @@ int bch_cache_set_add_cache(struct cache_set *c, const char *path)
for (nr_this_dev = 0; nr_this_dev < MAX_CACHES_PER_SET; nr_this_dev++)
if (!test_bit(nr_this_dev, c->cache_slots_used) &&
(nr_this_dev >= c->sb.nr_in_set ||
- bch_is_zero(c->members->m[nr_this_dev].uuid.b,
+ bch_is_zero(c->disk_mi[nr_this_dev].uuid.b,
sizeof(uuid_le))))
goto have_slot;
no_slot:
@@ -2054,32 +2081,32 @@ have_slot:
sb.sb->nr_in_set = cpu_to_le16(nr_in_set);
sb.sb->u64s = u64s;
- old_mi = c->members;
- new_mi = (dynamic_fault("bcache:add:member_info_realloc")
- ? NULL
- : kzalloc(sizeof(struct cache_member_rcu) +
- sizeof(struct cache_member) * nr_in_set,
- GFP_KERNEL));
+ new_mi = dynamic_fault("bcache:add:member_info_realloc")
+ ? NULL
+ : kmalloc(sizeof(struct cache_member) * nr_in_set,
+ GFP_KERNEL);
if (!new_mi) {
err = "cannot allocate memory";
ret = -ENOMEM;
goto err_unlock;
}
- new_mi->nr_in_set = nr_in_set;
- memcpy(new_mi->m, old_mi->m,
- c->sb.nr_in_set * sizeof(new_mi->m[0]));
- new_mi->m[nr_this_dev] = mi;
+ memcpy(new_mi, c->disk_mi,
+ sizeof(struct cache_member) * c->sb.nr_in_set);
+ new_mi[nr_this_dev] = mi;
- memcpy(sb.sb->members, new_mi->m,
- nr_in_set * sizeof(new_mi->m[0]));
+ if (cache_set_mi_update(c, new_mi, nr_in_set)) {
+ err = "cannot allocate memory";
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
/* commit new member info */
- rcu_assign_pointer(c->members, new_mi);
+ kfree(c->disk_mi);
+ c->disk_mi = new_mi;
+ c->disk_sb.nr_in_set = nr_in_set;
c->sb.nr_in_set = nr_in_set;
- kfree_rcu(old_mi, rcu);
-
err = cache_alloc(&sb, c, &ca);
if (err)
goto err_unlock;
diff --git a/drivers/md/bcache/super.h b/drivers/md/bcache/super.h
index a2c9e3d273bb..b881cec06c42 100644
--- a/drivers/md/bcache/super.h
+++ b/drivers/md/bcache/super.h
@@ -117,14 +117,14 @@ static inline bool bch_check_super_marked(struct cache_set *c,
{
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
const struct bch_extent_ptr *ptr;
- struct cache_member_rcu *mi = cache_member_info_get(c);
+ struct cache_member_cpu *mi = cache_member_info_get(c)->m;
bool ret = true;
extent_for_each_ptr(e, ptr)
if (bch_extent_ptr_is_dirty(c, e, ptr) &&
!(meta
- ? CACHE_HAS_METADATA
- : CACHE_HAS_DATA)(mi->m + ptr->dev)) {
+ ? mi[ptr->dev].has_metadata
+ : mi[ptr->dev].has_data)) {
ret = false;
break;
}
@@ -148,8 +148,6 @@ int bch_super_realloc(struct bcache_superblock *, unsigned);
void bcache_write_super(struct cache_set *);
void __write_super(struct cache_set *, struct bcache_superblock *);
-void bch_cache_member_info_update(struct cache *);
-
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index afbaf24e7a79..9a0a3195e2b7 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1170,7 +1170,7 @@ SHOW(bch_cache)
sysfs_print(block_size_bytes, block_bytes(c));
sysfs_print(first_bucket, ca->mi.first_bucket);
sysfs_print(nbuckets, ca->mi.nbuckets);
- sysfs_print(discard, CACHE_DISCARD(&ca->mi));
+ sysfs_print(discard, ca->mi.discard);
sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
sysfs_hprint(btree_written,
atomic_long_read(&ca->btree_sectors_written) << 9);
@@ -1191,8 +1191,8 @@ SHOW(bch_cache)
sysfs_print(alloc_buckets, stats.buckets_alloc);
sysfs_print(available_buckets, buckets_available_cache(ca));
sysfs_print(free_buckets, buckets_free_cache(ca, RESERVE_NONE));
- sysfs_print(has_data, CACHE_HAS_DATA(&ca->mi));
- sysfs_print(has_metadata, CACHE_HAS_METADATA(&ca->mi));
+ sysfs_print(has_data, ca->mi.has_data);
+ sysfs_print(has_metadata, ca->mi.has_metadata);
sysfs_pd_controller_show(copy_gc, &ca->moving_gc_pd);
sysfs_queue_show(copy_gc, &ca->moving_gc_queue);
@@ -1203,14 +1203,14 @@ SHOW(bch_cache)
if (attr == &sysfs_cache_replacement_policy)
return bch_snprint_string_list(buf, PAGE_SIZE,
cache_replacement_policies,
- CACHE_REPLACEMENT(&ca->mi));
+ ca->mi.replacement);
- sysfs_print(tier, CACHE_TIER(&ca->mi));
+ sysfs_print(tier, ca->mi.tier);
if (attr == &sysfs_state_rw)
return bch_snprint_string_list(buf, PAGE_SIZE,
bch_cache_state,
- CACHE_STATE(&ca->mi));
+ ca->mi.state);
if (attr == &sysfs_read_priority_stats)
return show_quantiles(ca, buf, bucket_priority_fn, (void *) 0);
@@ -1232,7 +1232,7 @@ STORE(__bch_cache)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
struct cache_set *c = ca->set;
- struct cache_member *mi = &c->members->m[ca->sb.nr_this_dev];
+ struct cache_member *mi = &c->disk_mi[ca->sb.nr_this_dev];
sysfs_pd_controller_store(copy_gc, &ca->moving_gc_pd);
sysfs_queue_store(copy_gc, &ca->moving_gc_queue);
@@ -1269,7 +1269,7 @@ STORE(__bch_cache)
if (v < 0)
return v;
- if (v == CACHE_STATE(&ca->mi))
+ if (v == ca->mi.state)
return size;
switch (v) {
diff --git a/drivers/md/bcache/tier.c b/drivers/md/bcache/tier.c
index b0c0b970ee05..c147f55f9545 100644
--- a/drivers/md/bcache/tier.c
+++ b/drivers/md/bcache/tier.c
@@ -37,7 +37,7 @@ static bool tiering_pred(struct scan_keylist *kl, struct bkey_s_c k)
mi = cache_member_info_get(c);
extent_for_each_ptr(e, ptr)
if (ptr->dev < mi->nr_in_set &&
- CACHE_TIER(&mi->m[ptr->dev]))
+ mi->m[ptr->dev].tier)
replicas++;
cache_member_info_put();
@@ -276,9 +276,9 @@ static int tiering_next_cache(struct cache_set *c,
}
ca = rcu_dereference(tier->devices[*cache_iter]);
- if (ca == NULL
- || CACHE_STATE(&ca->mi) != CACHE_ACTIVE
- || ca->tiering_queue.stopped) {
+ if (ca == NULL ||
+ ca->mi.state != CACHE_ACTIVE ||
+ ca->tiering_queue.stopped) {
rcu_read_unlock();
(*cache_iter)++;
continue;
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index eeaa1a4ccb12..f6081ad0bd6f 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(cache,
TP_fast_assign(
memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
- __entry->tier = CACHE_TIER(&ca->mi);
+ __entry->tier = ca->mi.tier;
),
TP_printk("%pU tier %u", __entry->uuid, __entry->tier)