author     Kent Overstreet <kent.overstreet@gmail.com>   2016-01-11 22:55:30 -0900
committer  Kent Overstreet <kent.overstreet@gmail.com>   2016-10-07 12:35:18 -0800
commit     fc59369fb2211615b1fca9b72d7eea4bfa0d9eeb (patch)
tree       e4398364895b79d96145a2f9047ff30f13575f76
parent     81cab3bb9f95290dcea18b583a2f707e76133516 (diff)
bcache: Move c->sb to c->disk_sb
More endianness refactoring: do what the previous patch did, this time for struct cache_set.
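
The shape of the change, in brief: struct cache_set keeps the raw superblock, in on-disk (little-endian) byte order, in a new c->disk_sb field, and caches the handful of frequently used fields in native endianness in a small anonymous struct c->sb, filled in by cache_sb_to_cache_set(). Below is a minimal userspace sketch of that pattern with simplified types; the field names follow the diff, but the helper bodies are illustrative only (the real cache_sb_to_cache_set() copies members individually and validates them), and le16toh() stands in for the kernel's le16_to_cpu().

#include <stdint.h>
#include <endian.h>

/* On-disk superblock: fields stay in little-endian byte order (__le16/__le64 in the kernel). */
struct cache_sb {
	uint16_t block_size;	/* in 512-byte sectors */
	uint64_t seq;
	/* ... remaining on-disk fields ... */
};

struct cache_set {
	/* Cached copy in native endianness, set once when the superblock is read. */
	struct {
		uint16_t block_size;
		uint16_t btree_node_size;
		uint8_t  nr_in_set;
		uint8_t  meta_replicas_have;
		uint8_t  data_replicas_have;
	} sb;

	struct cache_sb disk_sb;	/* raw superblock, on-disk byte order */
};

/* Illustrative stand-in for cache_sb_to_cache_set(): convert hot fields once. */
static void cache_sb_to_cache_set(struct cache_set *c, const struct cache_sb *src)
{
	c->sb.block_size = le16toh(src->block_size);
	/* btree_node_size, nr_in_set and the replica counts follow the same pattern */
}

/* Accessors such as block_bytes() then read the cached fields with no byte swapping. */
static inline unsigned block_bytes(const struct cache_set *c)
{
	return c->sb.block_size << 9;
}
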
-rw-r--r--  drivers/md/bcache/alloc.c        |  4
-rw-r--r--  drivers/md/bcache/bcache.h       | 21
-rw-r--r--  drivers/md/bcache/blockdev.c     |  8
-rw-r--r--  drivers/md/bcache/btree_cache.h  |  6
-rw-r--r--  drivers/md/bcache/btree_gc.c     |  4
-rw-r--r--  drivers/md/bcache/btree_io.c     |  4
-rw-r--r--  drivers/md/bcache/btree_update.c | 14
-rw-r--r--  drivers/md/bcache/debug.c        |  2
-rw-r--r--  drivers/md/bcache/error.c        |  2
-rw-r--r--  drivers/md/bcache/extents.c      | 14
-rw-r--r--  drivers/md/bcache/journal.c      |  4
-rw-r--r--  drivers/md/bcache/migrate.c      |  2
-rw-r--r--  drivers/md/bcache/notify.c       |  2
-rw-r--r--  drivers/md/bcache/super.c        | 36
-rw-r--r--  drivers/md/bcache/sysfs.c        | 14
-rw-r--r--  include/trace/events/bcache.h    | 32
16 files changed, 98 insertions(+), 71 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 96ba7cfbe723..49329a906f19 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -264,7 +264,7 @@ static int bch_prio_write(struct cache *ca)
 	}
 	p->next_bucket = ca->prio_buckets[i + 1];
-	p->magic = pset_magic(&c->sb);
+	p->magic = pset_magic(&c->disk_sb);
 	SET_PSET_CSUM_TYPE(p, c->opts.metadata_checksum);
 	p->csum = bch_checksum(PSET_CSUM_TYPE(p),
@@ -363,7 +363,7 @@ int bch_prio_read(struct cache *ca)
 		return -EIO;
 	got = p->magic;
-	expect = pset_magic(&c->sb);
+	expect = pset_magic(&c->disk_sb);
 	if (cache_inconsistent_on(got != expect, ca,
 			"bad magic (got %llu expect %llu) while reading prios from bucket %llu",
 			got, expect, bucket))
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 8469b4bde672..ce6521ce85f1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -500,7 +500,21 @@ struct cache_set {
 	struct cache_set_opts opts;
-	struct cache_sb sb;
+	/*
+	 * Cached copy in native endianness:
+	 * Set by cache_sb_to_cache_set:
+	 */
+	struct {
+		u16 block_size;
+		u16 btree_node_size;
+
+		u8 nr_in_set;
+
+		u8 meta_replicas_have;
+		u8 data_replicas_have;
+	} sb;
+
+	struct cache_sb disk_sb;
 	unsigned short block_bits; /* ilog2(block_size) */
 	struct closure sb_write;
@@ -771,7 +785,10 @@ static inline unsigned bucket_bytes(const struct cache *ca)
 	return ca->mi.bucket_size << 9;
 }
-#define block_bytes(c) ((c)->sb.block_size << 9)
+static inline unsigned block_bytes(struct cache_set *c)
+{
+	return c->sb.block_size << 9;
+}
 #define prios_per_bucket(ca) \
 	((bucket_bytes(ca) - sizeof(struct prio_set)) / \
diff --git a/drivers/md/bcache/blockdev.c b/drivers/md/bcache/blockdev.c
index d0666bafa943..6f618727ded2 100644
--- a/drivers/md/bcache/blockdev.c
+++ b/drivers/md/bcache/blockdev.c
@@ -382,7 +382,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	bdevname(dc->disk_sb.bdev, buf);
-	if (memcmp(&dc->sb.set_uuid, &c->sb.set_uuid, sizeof(c->sb.set_uuid)))
+	if (memcmp(&dc->sb.set_uuid,
+		   &c->disk_sb.set_uuid,
+		   sizeof(c->disk_sb.set_uuid)))
 		return -ENOENT;
 	if (dc->disk.c) {
@@ -446,7 +448,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	pr_info("attached inode %llu", bcache_dev_inum(&dc->disk));
-	dc->sb.set_uuid = c->sb.set_uuid;
+	dc->sb.set_uuid = c->disk_sb.set_uuid;
 	SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
 	bch_write_bdev_super(dc, &cl);
@@ -487,7 +489,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 	pr_info("Caching %s as %s on set %pU",
 		bdevname(dc->disk_sb.bdev, buf),
 		dc->disk.disk->disk_name,
-		dc->disk.c->sb.set_uuid.b);
+		dc->disk.c->disk_sb.set_uuid.b);
 	return 0;
 }
diff --git a/drivers/md/bcache/btree_cache.h b/drivers/md/bcache/btree_cache.h
index 83526381e991..03a1cf5f30df 100644
--- a/drivers/md/bcache/btree_cache.h
+++ b/drivers/md/bcache/btree_cache.h
@@ -33,17 +33,17 @@ int bch_btree_cache_alloc(struct cache_set *);
 static inline size_t btree_bytes(struct cache_set *c)
 {
-	return CACHE_BTREE_NODE_SIZE(&c->sb) << 9;
+	return CACHE_BTREE_NODE_SIZE(&c->disk_sb) << 9;
 }
 static inline size_t btree_pages(struct cache_set *c)
 {
-	return CACHE_BTREE_NODE_SIZE(&c->sb) >> (PAGE_SHIFT - 9);
+	return CACHE_BTREE_NODE_SIZE(&c->disk_sb) >> (PAGE_SHIFT - 9);
 }
 static inline unsigned btree_blocks(struct cache_set *c)
 {
-	return CACHE_BTREE_NODE_SIZE(&c->sb) >> c->block_bits;
+	return CACHE_BTREE_NODE_SIZE(&c->disk_sb) >> c->block_bits;
 }
 #define btree_node_root(_b) ((_b)->c->btree_roots[(_b)->btree_id])
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 4eae4e2ade74..e6015aa00eab 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -132,7 +132,7 @@ static u8 __bch_btree_mark_key(struct cache_set *c, enum bkey_type type,
 	bch_mark_pointers(c, e,
 			  type == BKEY_TYPE_BTREE
-			  ? CACHE_BTREE_NODE_SIZE(&c->sb)
+			  ? c->sb.btree_node_size
 			  : e.k->size,
 			  false, type == BKEY_TYPE_BTREE,
 			  true, GC_POS_MIN);
@@ -328,7 +328,7 @@ static void bch_mark_pending_btree_node_frees(struct cache_set *c)
 	list_for_each_entry(d, &c->btree_node_pending_free, list)
 		if (d->index_update_done)
 			bch_mark_pointers(c, bkey_i_to_s_c_extent(&d->key),
-					  CACHE_BTREE_NODE_SIZE(&c->sb),
+					  c->sb.btree_node_size,
 					  false, true, true, GC_POS_MIN);
 	mutex_unlock(&c->btree_node_pending_free_lock);
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index d0a44eed2a87..b135b4ea505c 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -262,7 +262,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
 		goto err;
 	err = "bad magic";
-	if (b->data->magic != bset_magic(&c->sb))
+	if (b->data->magic != bset_magic(&c->disk_sb))
 		goto err;
 	err = "bad btree header";
@@ -531,7 +531,7 @@ static void do_btree_node_write(struct closure *cl)
 	SET_BSET_CSUM_TYPE(i, c->opts.metadata_checksum);
 	if (!b->written) {
-		BUG_ON(b->data->magic != bset_magic(&c->sb));
+		BUG_ON(b->data->magic != bset_magic(&c->disk_sb));
 		b->data->format = b->keys.format;
 		data = b->data;
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index b3a704ad8232..490f962d8d1e 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -142,7 +142,7 @@ found:
 	if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
 		bch_mark_pointers(c, bkey_i_to_s_c_extent(&d->key),
-				  -CACHE_BTREE_NODE_SIZE(&c->sb),
+				  -c->sb.btree_node_size,
 				  false, true, false,
 				  b ? gc_pos_btree_node(b) : gc_pos_btree_root(id));
@@ -215,7 +215,7 @@ static void bch_btree_node_free_ondisk(struct cache_set *c,
 	list_del(&pending->list);
 	bch_mark_pointers(c, bkey_i_to_s_c_extent(&pending->key),
-			  -CACHE_BTREE_NODE_SIZE(&c->sb), false, true,
+			  -c->sb.btree_node_size, false, true,
 			  false, gc_phase(GC_PHASE_PENDING_DELETE));
 	mutex_unlock(&c->btree_node_pending_free_lock);
@@ -250,14 +250,14 @@ static struct btree *__bch_btree_node_alloc(struct cache_set *c,
 retry:
 	/* alloc_sectors is weird, I suppose */
 	bkey_extent_init(&tmp.k);
-	tmp.k.k.size = CACHE_BTREE_NODE_SIZE(&c->sb),
+	tmp.k.k.size = c->sb.btree_node_size,
 	ob = bch_alloc_sectors(c, &c->btree_write_point, &tmp.k,
 			       check_enospc, cl);
 	if (IS_ERR(ob))
 		return ERR_CAST(ob);
-	if (tmp.k.k.size < CACHE_BTREE_NODE_SIZE(&c->sb)) {
+	if (tmp.k.k.size < c->sb.btree_node_size) {
 		bch_open_bucket_put(c, ob);
 		goto retry;
 	}
@@ -291,7 +291,7 @@ static struct btree *bch_btree_node_alloc(struct cache_set *c,
 	set_btree_node_dirty(b);
 	bch_bset_init_first(&b->keys, &b->data->keys);
-	b->data->magic = bset_magic(&c->sb);
+	b->data->magic = bset_magic(&c->disk_sb);
 	SET_BSET_BTREE_LEVEL(&b->data->keys, level);
 	bch_check_mark_super(c, &b->key, true);
@@ -353,7 +353,7 @@ static void __bch_btree_set_root(struct cache_set *c, struct btree *b)
 	btree_node_root(b) = b;
 	stale = bch_mark_pointers(c, bkey_i_to_s_c_extent(&b->key),
-				  CACHE_BTREE_NODE_SIZE(&c->sb), true, true,
+				  c->sb.btree_node_size, true, true,
 				  false, gc_pos_btree_root(b->btree_id));
 	BUG_ON(stale);
 	spin_unlock(&c->btree_root_lock);
@@ -586,7 +586,7 @@ static bool bch_insert_fixup_btree_ptr(struct btree_iter *iter,
 	bool stale;
 	stale = bch_mark_pointers(c, bkey_i_to_s_c_extent(insert),
-				  CACHE_BTREE_NODE_SIZE(&c->sb),
+				  c->sb.btree_node_size,
 				  true, true, false, gc_pos_btree_node(b));
 	BUG_ON(stale);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 102aca606dbb..a5386acaca86 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -478,7 +478,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
 	if (IS_ERR_OR_NULL(bch_debug))
 		return;
-	snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
+	snprintf(name, sizeof(name), "%pU", c->disk_sb.user_uuid.b);
 	c->debug = debugfs_create_dir(name, bch_debug);
 	if (IS_ERR_OR_NULL(c->debug))
 		return;
diff --git a/drivers/md/bcache/error.c b/drivers/md/bcache/error.c
index 0a316ffd6966..3a6069b9a253 100644
--- a/drivers/md/bcache/error.c
+++ b/drivers/md/bcache/error.c
@@ -30,7 +30,7 @@ void bch_fatal_error(struct cache_set *c)
 {
 	if (bch_cache_set_read_only(c))
 		printk(KERN_ERR "bcache: %pU emergency read only\n",
-		       c->sb.set_uuid.b);
+		       c->disk_sb.user_uuid.b);
 }
 /* Nonfatal IO errors, IO error/latency accounting: */
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 56ec8c660a9d..7eab9ae9e590 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -365,7 +365,7 @@ static const char *bch_btree_ptr_invalid(const struct cache_set *c,
 	extent_for_each_ptr_crc(e, ptr, crc) {
 		reason = extent_ptr_invalid(mi, ptr,
-					    CACHE_BTREE_NODE_SIZE(&c->sb));
+					    c->sb.btree_node_size);
 		if (reason) {
 			cache_member_info_put();
@@ -425,12 +425,12 @@ static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b,
 	rcu_read_unlock();
-	if (replicas < CACHE_SET_META_REPLICAS_HAVE(&c->sb)) {
+	if (replicas < c->sb.meta_replicas_have) {
 		bch_bkey_val_to_text(c, btree_node_type(b), buf,
 				     sizeof(buf), k);
 		cache_set_bug(c,
-			"btree key bad (too few replicas, %u < %llu): %s",
-			replicas, CACHE_SET_META_REPLICAS_HAVE(&c->sb), buf);
+			"btree key bad (too few replicas, %u < %u): %s",
+			replicas, c->sb.meta_replicas_have, buf);
 		return;
 	}
@@ -1489,12 +1489,12 @@ static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
 	}
 	if (!bkey_extent_is_cached(e.k) &&
-	    replicas < CACHE_SET_DATA_REPLICAS_HAVE(&c->sb)) {
+	    replicas < c->sb.data_replicas_have) {
 		bch_bkey_val_to_text(c, btree_node_type(b), buf,
 				     sizeof(buf), e.s_c);
 		cache_set_bug(c,
-			"extent key bad (too few replicas, %u < %llu): %s",
-			replicas, CACHE_SET_DATA_REPLICAS_HAVE(&c->sb), buf);
+			"extent key bad (too few replicas, %u < %u): %s",
+			replicas, c->sb.data_replicas_have, buf);
 		return;
 	}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 41f9a70f2a7c..968387e98980 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -353,7 +353,7 @@ static enum {
 	if (bch_meta_read_fault("journal"))
 		return JOURNAL_ENTRY_BAD;
-	if (j->magic != jset_magic(&ca->set->sb)) {
+	if (j->magic != jset_magic(&ca->set->disk_sb)) {
 		pr_debug("bad magic while reading journal from %llu", sector);
 		return JOURNAL_ENTRY_BAD;
 	}
@@ -1452,7 +1452,7 @@ static void journal_write_locked(struct closure *cl)
 	w->data->read_clock = c->prio_clock[READ].hand;
 	w->data->write_clock = c->prio_clock[WRITE].hand;
-	w->data->magic = jset_magic(&c->sb);
+	w->data->magic = jset_magic(&c->disk_sb);
 	w->data->version = BCACHE_JSET_VERSION;
 	w->data->last_seq = last_seq(j);
diff --git a/drivers/md/bcache/migrate.c b/drivers/md/bcache/migrate.c
index 385803b580ee..cff129a74ce3 100644
--- a/drivers/md/bcache/migrate.c
+++ b/drivers/md/bcache/migrate.c
@@ -340,7 +340,7 @@ int bch_move_meta_data_off_device(struct cache *ca)
 	if (bch_journal_move(ca) != 0) {
 		pr_err("Unable to move the journal off in %pU.",
-		       ca->set->sb.set_uuid.b);
+		       ca->set->disk_sb.user_uuid.b);
 		ret = 1; /* Failure */
 	}
diff --git a/drivers/md/bcache/notify.c b/drivers/md/bcache/notify.c
index 17180f9af486..e9b5568c95e3 100644
--- a/drivers/md/bcache/notify.c
+++ b/drivers/md/bcache/notify.c
@@ -25,7 +25,7 @@ static void notify_get(struct cache_set *c)
 	env->envp_idx = 0;
 	env->buflen = 0;
-	notify_var(c, "SET_UUID=%pU", c->sb.set_uuid.b);
+	notify_var(c, "SET_UUID=%pU", c->disk_sb.user_uuid.b);
 }
 static void notify_get_cache(struct cache *ca)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1e7a67d22e8a..3854df664351 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -469,7 +469,7 @@ static void bcache_write_super_unlock(struct closure *cl)
 static int cache_sb_to_cache_set(struct cache_set *c, struct cache_sb *src)
 {
 	struct cache_member_rcu *new, *old;
-	struct cache_sb *dst = &c->sb;
+	struct cache_sb *dst = &c->disk_sb;
 	unsigned nr_in_set = le16_to_cpu(src->nr_in_set);
 	new = kzalloc(sizeof(struct cache_member_rcu) +
@@ -498,13 +498,21 @@ static int cache_sb_to_cache_set(struct cache_set *c, struct cache_sb *src)
 	dst->flags = src->flags;
 	dst->block_size = src->block_size;
-	pr_debug("set version = %llu", le64_to_cpu(c->sb.version));
+	c->sb.block_size = le16_to_cpu(src->block_size);
+	c->sb.btree_node_size = CACHE_BTREE_NODE_SIZE(src);
+
+	c->sb.nr_in_set = src->nr_in_set;
+
+	c->sb.meta_replicas_have= CACHE_SET_META_REPLICAS_HAVE(src);
+	c->sb.data_replicas_have= CACHE_SET_DATA_REPLICAS_HAVE(src);
+
+	pr_debug("set version = %llu", le64_to_cpu(dst->version));
 	return 0;
 }
 static int cache_sb_from_cache_set(struct cache_set *c, struct cache *ca)
 {
-	struct cache_sb *src = &c->sb, *dst = ca->disk_sb.sb;
+	struct cache_sb *src = &c->disk_sb, *dst = ca->disk_sb.sb;
 	struct cache_member_rcu *mi;
 	if (src->nr_in_set != dst->nr_in_set) {
@@ -554,7 +562,7 @@ static void __bcache_write_super(struct cache_set *c)
 	closure_init(cl, &c->cl);
-	c->sb.seq++;
+	c->disk_sb.seq = cpu_to_le64(le64_to_cpu(c->disk_sb.seq) + 1);
 	for_each_cache(ca, c, i) {
 		struct cache_sb *sb = ca->disk_sb.sb;
@@ -842,7 +850,7 @@ void bch_cache_set_release(struct kobject *kobj)
 	complete(c->stop_completion);
 	bch_notify_cache_set_stopped(c);
-	pr_info("Cache set %pU unregistered", c->sb.set_uuid.b);
+	pr_info("Cache set %pU unregistered", c->disk_sb.set_uuid.b);
 	cache_set_free(c);
 }
@@ -1034,7 +1042,7 @@ static struct cache_set *bch_cache_set_alloc(struct cache_sb *sb,
 	if (cache_sb_to_cache_set(c, sb))
 		goto err;
-	scnprintf(c->uuid, sizeof(c->uuid), "%pU", &c->sb.user_uuid);
+	scnprintf(c->uuid, sizeof(c->uuid), "%pU", &c->disk_sb.user_uuid);
 	c->opts = cache_superblock_opts(sb);
 	cache_set_opts_apply(&c->opts, opts);
@@ -1128,7 +1136,7 @@ static int bch_cache_set_online(struct cache_set *c)
 	if (IS_ERR(c->chardev))
 		return PTR_ERR(c->chardev);
-	if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
+	if (kobject_add(&c->kobj, NULL, "%pU", c->disk_sb.user_uuid.b) ||
 	    kobject_add(&c->internal, &c->kobj, "internal") ||
 	    kobject_add(&c->opts_dir, &c->kobj, "options") ||
 	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ||
@@ -1175,7 +1183,7 @@ static const char *run_cache_set(struct cache_set *c)
 	 * It is false if it is the first time it is run.
 	 */
-	if (CACHE_SYNC(&c->sb)) {
+	if (CACHE_SYNC(&c->disk_sb)) {
 		LIST_HEAD(journal);
 		struct jset *j;
@@ -1311,7 +1319,7 @@ static const char *run_cache_set(struct cache_set *c)
 			goto err;
 		/* Mark cache set as initialized: */
-		SET_CACHE_SYNC(&c->sb, true);
+		SET_CACHE_SYNC(&c->disk_sb, true);
 	}
 	bch_prio_timer_start(c, READ);
@@ -1367,7 +1375,7 @@ static const char *can_add_cache(struct cache_sb *sb,
 		return "mismatched block size";
 	if (sb->members[le16_to_cpu(sb->nr_this_dev)].bucket_size <
-	    CACHE_BTREE_NODE_SIZE(&c->sb))
+	    CACHE_BTREE_NODE_SIZE(&c->disk_sb))
 		return "new cache bucket_size is too small";
 	return NULL;
@@ -1389,7 +1397,7 @@ static const char *can_attach_cache(struct cache_sb *sb, struct cache_set *c)
 	 */
 	mi = cache_member_info_get(c);
-	match = !(sb->seq <= c->sb.seq &&
+	match = !(le64_to_cpu(sb->seq) <= le64_to_cpu(c->disk_sb.seq) &&
 		  (sb->nr_this_dev >= mi->nr_in_set ||
 		   memcmp(&mi->m[sb->nr_this_dev].uuid,
 			  &sb->disk_uuid,
@@ -1757,7 +1765,7 @@ bool bch_cache_remove(struct cache *ca, bool force)
 	if (!cache_may_remove(ca)) {
 		pr_err("Can't remove last device in tier %llu of %pU.",
-		       CACHE_TIER(&ca->mi), ca->set->sb.set_uuid.b);
+		       CACHE_TIER(&ca->mi), ca->set->disk_sb.set_uuid.b);
 		bch_notify_cache_remove_failed(ca);
 		return false;
 	}
@@ -1904,7 +1912,7 @@ static const char *cache_alloc(struct bcache_superblock *sb,
 	bch_moving_init_cache(ca);
 	bch_tiering_init_cache(ca);
-	if (le64_to_cpu(ca->disk_sb.sb->seq) > le64_to_cpu(c->sb.seq))
+	if (le64_to_cpu(ca->disk_sb.sb->seq) > le64_to_cpu(c->disk_sb.seq))
 		cache_sb_to_cache_set(c, ca->disk_sb.sb);
 	err = "error creating kobject";
@@ -1929,7 +1937,7 @@ static struct cache_set *cache_set_lookup(uuid_le uuid)
 	lockdep_assert_held(&bch_register_lock);
 	list_for_each_entry(c, &bch_cache_sets, list)
-		if (!memcmp(&c->sb.set_uuid, &uuid, sizeof(uuid_le)))
+		if (!memcmp(&c->disk_sb.set_uuid, &uuid, sizeof(uuid_le)))
 			return c;
 	return NULL;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c9a9a86d98b8..afbaf24e7a79 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -652,9 +652,9 @@ SHOW(bch_cache_set)
 	sysfs_hprint(block_size, block_bytes(c));
 	sysfs_print(block_size_bytes, block_bytes(c));
 	sysfs_hprint(btree_node_size,
-		     CACHE_BTREE_NODE_SIZE(&c->sb) << 9);
+		     CACHE_BTREE_NODE_SIZE(&c->disk_sb) << 9);
 	sysfs_print(btree_node_size_bytes,
-		    CACHE_BTREE_NODE_SIZE(&c->sb) << 9);
+		    CACHE_BTREE_NODE_SIZE(&c->disk_sb) << 9);
 	sysfs_hprint(btree_cache_size, bch_cache_size(c));
 	sysfs_print(cache_available_percent, bch_cache_available_percent(c));
@@ -704,9 +704,9 @@ SHOW(bch_cache_set)
 	sysfs_print(btree_flush_delay, c->btree_flush_delay);
 	sysfs_printf(meta_replicas_have, "%llu",
-		     CACHE_SET_META_REPLICAS_HAVE(&c->sb));
+		     CACHE_SET_META_REPLICAS_HAVE(&c->disk_sb));
 	sysfs_printf(data_replicas_have, "%llu",
-		     CACHE_SET_DATA_REPLICAS_HAVE(&c->sb));
+		     CACHE_SET_DATA_REPLICAS_HAVE(&c->disk_sb));
 	/* Debugging: */
@@ -731,7 +731,7 @@ SHOW(bch_cache_set)
 	if (attr == &sysfs_compression_stats)
 		return bch_compression_stats(c, buf);
-	sysfs_printf(internal_uuid, "%pU", c->sb.set_uuid.b);
+	sysfs_printf(internal_uuid, "%pU", c->disk_sb.set_uuid.b);
 	return 0;
 }
@@ -994,8 +994,8 @@ STORE(bch_cache_set_opts_dir)
 	\
 	c->opts._name = v; \
 	\
-	if (_sb_opt##_BITS && v != _sb_opt(&c->sb)) { \
-		SET_##_sb_opt(&c->sb, v); \
+	if (_sb_opt##_BITS && v != _sb_opt(&c->disk_sb)) { \
+		SET_##_sb_opt(&c->disk_sb, v); \
 		bcache_write_super(c); \
 	} \
 	\
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index fc2be7639032..eeaa1a4ccb12 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -185,7 +185,7 @@ TRACE_EVENT(bcache_write,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->inode = inode;
 		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
@@ -215,7 +215,7 @@ TRACE_EVENT(bcache_write_throttle,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->inode = inode;
 		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
@@ -250,7 +250,7 @@ DECLARE_EVENT_CLASS(page_alloc_fail,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->size = size;
 	),
@@ -268,7 +268,7 @@ DECLARE_EVENT_CLASS(cache_set,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 	),
 	TP_printk("%pU", __entry->uuid)
@@ -309,7 +309,7 @@ TRACE_EVENT(bcache_journal_write_oldest,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->seq = seq;
 	),
@@ -327,7 +327,7 @@ TRACE_EVENT(bcache_journal_write_oldest_done,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->seq = seq;
 		__entry->written = written;
 	),
@@ -428,7 +428,7 @@ DECLARE_EVENT_CLASS(btree_node,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, b->c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, b->c->disk_sb.user_uuid.b, 16);
 		__entry->bucket = PTR_BUCKET_NR_TRACE(b->c, &b->key, 0);
 		__entry->level = b->level;
 		__entry->id = b->btree_id;
@@ -486,7 +486,7 @@ TRACE_EVENT(bcache_btree_node_alloc_fail,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->id = id;
 	),
@@ -529,7 +529,7 @@ TRACE_EVENT(bcache_mca_scan,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->touched = touched;
 		__entry->freed = freed;
 		__entry->can_free = can_free;
@@ -551,7 +551,7 @@ DECLARE_EVENT_CLASS(mca_cannibalize_lock,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->cl = cl;
 	),
@@ -591,7 +591,7 @@ DECLARE_EVENT_CLASS(btree_node_op,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, b->c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, b->c->disk_sb.user_uuid.b, 16);
 		__entry->bucket = PTR_BUCKET_NR_TRACE(b->c, &b->key, 0);
 		__entry->level = b->level;
 		__entry->id = b->btree_id;
@@ -747,7 +747,7 @@ TRACE_EVENT(bcache_btree_node_alloc_replacement,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, b->c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, b->c->disk_sb.user_uuid.b, 16);
 		__entry->old_bucket = PTR_BUCKET_NR_TRACE(old->c, &old->key, 0);
 		__entry->bucket = PTR_BUCKET_NR_TRACE(b->c, &b->key, 0);
@@ -875,7 +875,7 @@ TRACE_EVENT(bcache_btree_reserve_get_fail,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->required = required;
 		__entry->cl = cl;
 	),
@@ -959,7 +959,7 @@ DECLARE_EVENT_CLASS(cache_set_bucket_alloc,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->reserve = reserve;
 		__entry->cl = cl;
 	),
@@ -990,7 +990,7 @@ DECLARE_EVENT_CLASS(open_bucket_alloc,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->cl = cl;
 	),
@@ -1213,7 +1213,7 @@ TRACE_EVENT(bcache_tiering_end,
 	),
 	TP_fast_assign(
-		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
 		__entry->sectors_moved = sectors_moved;
 		__entry->keys_moved = keys_moved;
 	),
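
One consequence visible in the super.c hunk above: because c->disk_sb now stays in on-disk byte order, even bumping the superblock sequence number in __bcache_write_super() has to round-trip through an explicit endianness conversion instead of a plain c->sb.seq++. A standalone sketch of that idiom follows (userspace C, with glibc's le64toh()/htole64() standing in for the kernel's le64_to_cpu()/cpu_to_le64(); the helper name is made up for illustration):

#include <stdint.h>
#include <endian.h>

/* Increment a sequence number that is stored little-endian, as on disk. */
static inline void superblock_seq_bump(uint64_t *seq_le)
{
	*seq_le = htole64(le64toh(*seq_le) + 1);
}
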