author    Kent Overstreet <kent.overstreet@gmail.com>   2016-01-13 22:50:15 -0900
committer Kent Overstreet <kent.overstreet@gmail.com>   2016-10-07 12:35:20 -0800
commit    02a1f97a5588b1635a1cabd82976b625966d429d
tree      f8cd37eb52db263d39e59df9e63080c1a68065a4
parent    97cf3167901f976476b460f1c72eebae616c6619
bcache: Endianness fixes
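Every multi-byte integer bcache writes to disk is stored little-endian; this commit annotates those on-disk fields with __le16/__le32/__le64 types and converts at each access point with the cpu_to_leXX()/leXX_to_cpu() helpers, so the raw field is never used directly in CPU byte order. The following is a minimal standalone sketch of that pattern, not bcache's actual layout: the struct and names are illustrative, and glibc's htole64()/le64toh() stand in for the kernel's cpu_to_le64()/le64_to_cpu().

```c
/*
 * Userspace sketch of the access pattern this commit applies throughout
 * bcache: on-disk fields are converted exactly once, at the point where
 * they cross the memory/disk boundary.  Struct and names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct prio_set_sketch {
	uint64_t magic;        /* stored little-endian on disk */
	uint64_t next_bucket;  /* stored little-endian on disk */
};

static void pack(struct prio_set_sketch *p, uint64_t magic, uint64_t next)
{
	/* CPU-native values are converted on the way out to disk... */
	p->magic       = htole64(magic);
	p->next_bucket = htole64(next);
}

static uint64_t read_next_bucket(const struct prio_set_sketch *p)
{
	/* ...and converted back on the way in, never used raw */
	return le64toh(p->next_bucket);
}

int main(void)
{
	struct prio_set_sketch p;

	pack(&p, 0x1badcafe, 42);
	printf("next bucket: %llu\n",
	       (unsigned long long) read_next_bucket(&p));
	return 0;
}
```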
-rw-r--r--  drivers/md/bcache/alloc.c          |  26
-rw-r--r--  drivers/md/bcache/bkey.c           |  32
-rw-r--r--  drivers/md/bcache/blockdev.c       |  10
-rw-r--r--  drivers/md/bcache/bset.c           |  17
-rw-r--r--  drivers/md/bcache/bset.h           |   2
-rw-r--r--  drivers/md/bcache/btree_gc.c       |  14
-rw-r--r--  drivers/md/bcache/btree_io.c       |  46
-rw-r--r--  drivers/md/bcache/btree_update.c   |  24
-rw-r--r--  drivers/md/bcache/btree_update.h   |   4
-rw-r--r--  drivers/md/bcache/debug.c          |  34
-rw-r--r--  drivers/md/bcache/dirent.c         |   7
-rw-r--r--  drivers/md/bcache/extents.c        |   4
-rw-r--r--  drivers/md/bcache/fs-gc.c          |  36
-rw-r--r--  drivers/md/bcache/fs.c             |  78
-rw-r--r--  drivers/md/bcache/journal.c        | 119
-rw-r--r--  drivers/md/bcache/journal.h        |   4
-rw-r--r--  drivers/md/bcache/journal_types.h  |   2
-rw-r--r--  drivers/md/bcache/opts.h           |   2
-rw-r--r--  drivers/md/bcache/super.c          |  63
-rw-r--r--  drivers/md/bcache/super.h          |   6
-rw-r--r--  drivers/md/bcache/sysfs.c          |   8
-rw-r--r--  drivers/md/bcache/xattr.c          |  13
-rw-r--r--  include/trace/events/bcache.h      |   2
-rw-r--r--  include/uapi/linux/bcache.h        | 255
24 files changed, 445 insertions(+), 363 deletions(-)
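The largest single change is in include/uapi/linux/bcache.h, where the old BITMASK() accessor generator is replaced by LE32_BITMASK()/LE64_BITMASK() variants that byte-swap around every read and read-modify-write of the underlying field. Below is a hedged userspace sketch of the 64-bit variant; glibc's le64toh()/htole64() stand in for the kernel's __le64_to_cpu()/__cpu_to_le64(), and the struct and field names are made up for illustration.

```c
/*
 * Sketch of the LE64_BITMASK() pattern from this commit: the getter
 * converts the little-endian field to host order before shifting and
 * masking, and the setter does a convert - modify - convert-back cycle.
 */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct sb_sketch {
	uint64_t flags;	/* stored little-endian on disk */
};

#define LE64_BITMASK(name, type, field, offset, end)			\
static inline uint64_t name(const type *k)				\
{									\
	return (le64toh(k->field) >> (offset)) &			\
		~(~0ULL << ((end) - (offset)));				\
}									\
									\
static inline void SET_##name(type *k, uint64_t v)			\
{									\
	uint64_t new = le64toh(k->field);				\
									\
	new &= ~(~(~0ULL << ((end) - (offset))) << (offset));		\
	new |= (v & ~(~0ULL << ((end) - (offset)))) << (offset);	\
	k->field = htole64(new);					\
}

/* e.g. a 3-bit field occupying bits 1..3 of the flags word */
LE64_BITMASK(SB_ERROR_ACTION, struct sb_sketch, flags, 1, 4)

int main(void)
{
	struct sb_sketch sb = { .flags = 0 };

	SET_SB_ERROR_ACTION(&sb, 2);
	printf("error action: %llu\n",
	       (unsigned long long) SB_ERROR_ACTION(&sb));
	return 0;
}
```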
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 12df531bdcc9..79060ca4eea7 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -263,13 +263,13 @@ static int bch_prio_write(struct cache *ca) d->gen = ca->bucket_gens[r]; } - p->next_bucket = ca->prio_buckets[i + 1]; - p->magic = pset_magic(&c->disk_sb); + p->next_bucket = cpu_to_le64(ca->prio_buckets[i + 1]); + p->magic = cpu_to_le64(pset_magic(&c->disk_sb)); SET_PSET_CSUM_TYPE(p, c->opts.metadata_checksum); - p->csum = bch_checksum(PSET_CSUM_TYPE(p), - &p->magic, - bucket_bytes(ca) - 8); + p->csum = cpu_to_le64(bch_checksum(PSET_CSUM_TYPE(p), + &p->magic, + bucket_bytes(ca) - 8)); spin_lock(&ca->prio_buckets_lock); r = bch_bucket_alloc(ca, RESERVE_PRIO); @@ -291,7 +291,7 @@ static int bch_prio_write(struct cache *ca) } spin_lock(&c->journal.lock); - c->journal.prio_buckets[ca->sb.nr_this_dev] = ca->prio_buckets[0]; + c->journal.prio_buckets[ca->sb.nr_this_dev] = cpu_to_le64(ca->prio_buckets[0]); c->journal.nr_prio_buckets = max_t(unsigned, ca->sb.nr_this_dev + 1, c->journal.nr_prio_buckets); @@ -332,7 +332,9 @@ int bch_prio_read(struct cache *ca) size_t b; int ret; - bucket = c->journal.prio_buckets[ca->sb.nr_this_dev]; + spin_lock(&c->journal.lock); + bucket = le64_to_cpu(c->journal.prio_buckets[ca->sb.nr_this_dev]); + spin_unlock(&c->journal.lock); /* * If the device hasn't been used yet, there won't be a prio bucket ptr @@ -346,10 +348,6 @@ int bch_prio_read(struct cache *ca) return -EIO; } - spin_lock(&c->journal.lock); - c->journal.prio_buckets[ca->sb.nr_this_dev] = bucket; - spin_unlock(&c->journal.lock); - for (b = 0; b < ca->mi.nbuckets; b++, d++) { if (d == end) { ca->prio_last_buckets[bucket_nr] = bucket; @@ -362,14 +360,14 @@ int bch_prio_read(struct cache *ca) bch_meta_read_fault("prio")) return -EIO; - got = p->magic; + got = le64_to_cpu(p->magic); expect = pset_magic(&c->disk_sb); if (cache_inconsistent_on(got != expect, ca, "bad magic (got %llu expect %llu) while reading prios from bucket %llu", got, expect, bucket)) return -EIO; - got = p->csum; + got = le64_to_cpu(p->csum); expect = bch_checksum(PSET_CSUM_TYPE(p), &p->magic, bucket_bytes(ca) - 8); @@ -378,7 +376,7 @@ int bch_prio_read(struct cache *ca) got, expect, bucket)) return -EIO; - bucket = p->next_bucket; + bucket = le64_to_cpu(p->next_bucket); d = p->data; } diff --git a/drivers/md/bcache/bkey.c b/drivers/md/bcache/bkey.c index 82cae7564fc6..0920ace0803a 100644 --- a/drivers/md/bcache/bkey.c +++ b/drivers/md/bcache/bkey.c @@ -155,7 +155,7 @@ __always_inline static u64 get_inc_field(struct unpack_state *state, unsigned field) { unsigned bits = state->format->bits_per_field[field]; - u64 v = 0, offset = state->format->field_offset[field]; + u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]); if (bits >= state->bits) { v = state->w >> (64 - bits); @@ -180,7 +180,7 @@ __always_inline static bool set_inc_field(struct pack_state *state, unsigned field, u64 v) { unsigned bits = state->format->bits_per_field[field]; - u64 offset = state->format->field_offset[field]; + u64 offset = le64_to_cpu(state->format->field_offset[field]); if (v < offset) return false; @@ -403,7 +403,7 @@ __always_inline static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v) { unsigned bits = state->format->bits_per_field[field]; - u64 offset = state->format->field_offset[field]; + u64 offset = le64_to_cpu(state->format->field_offset[field]); bool ret = true; EBUG_ON(v < offset); @@ -493,7 +493,7 @@ enum 
bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out, #endif bool exact = true; - if (unlikely(in.snapshot < format->field_offset[BKEY_FIELD_SNAPSHOT])) { + if (unlikely(in.snapshot < le64_to_cpu(format->field_offset[BKEY_FIELD_SNAPSHOT]))) { if (!in.offset-- && !in.inode--) return BKEY_PACK_POS_FAIL; @@ -501,7 +501,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out, exact = false; } - if (unlikely(in.offset < format->field_offset[BKEY_FIELD_OFFSET])) { + if (unlikely(in.offset < le64_to_cpu(format->field_offset[BKEY_FIELD_OFFSET]))) { if (!in.inode--) return BKEY_PACK_POS_FAIL; in.offset = KEY_OFFSET_MAX; @@ -509,7 +509,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out, exact = false; } - if (unlikely(in.inode < format->field_offset[BKEY_FIELD_INODE])) + if (unlikely(in.inode < le64_to_cpu(format->field_offset[BKEY_FIELD_INODE]))) return BKEY_PACK_POS_FAIL; if (!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode)) { @@ -596,9 +596,8 @@ struct bkey_format bch_bkey_format_done(struct bkey_format_state *s) }; for (i = 0; i < ARRAY_SIZE(s->field_min); i++) { - ret.field_offset[i] = min(s->field_min[i], s->field_max[i]); - ret.bits_per_field[i] = fls64(s->field_max[i] - - ret.field_offset[i]); + u64 field_offset = min(s->field_min[i], s->field_max[i]); + ret.bits_per_field[i] = fls64(s->field_max[i] - field_offset); /* * We don't want it to be possible for the packed format to @@ -606,10 +605,11 @@ struct bkey_format bch_bkey_format_done(struct bkey_format_state *s) * confusion and issues (like with bkey_packed_successor()) */ - ret.field_offset[i] = ret.bits_per_field[i] != 64 - ? min(ret.field_offset[i], U64_MAX - + field_offset = ret.bits_per_field[i] != 64 + ? min(field_offset, U64_MAX - ((1ULL << ret.bits_per_field[i]) - 1)) : 0; + ret.field_offset[i] = cpu_to_le64(field_offset); bits += ret.bits_per_field[i]; } @@ -627,14 +627,14 @@ const char *bch_bkey_format_validate(struct bkey_format *f) return "invalid format: incorrect number of fields"; for (i = 0; i < f->nr_fields; i++) { + u64 field_offset = le64_to_cpu(f->field_offset[i]); + if (f->bits_per_field[i] > 64) return "invalid format: field too large"; - if ((f->bits_per_field[i] == 64 && - f->field_offset[i]) || - (f->field_offset[i] + - ((1ULL << f->bits_per_field[i]) - 1) < - f->field_offset[i])) + if ((f->bits_per_field[i] == 64 && field_offset) || + (field_offset + ((1ULL << f->bits_per_field[i]) - 1) < + field_offset)) return "invalid format: offset + bits overflow"; bits += f->bits_per_field[i]; diff --git a/drivers/md/bcache/blockdev.c b/drivers/md/bcache/blockdev.c index c67e83f611a6..3cf6b15ce1fc 100644 --- a/drivers/md/bcache/blockdev.c +++ b/drivers/md/bcache/blockdev.c @@ -47,7 +47,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) closure_get(cl); - sb->csum = csum_set(sb, BCH_CSUM_CRC64); + sb->csum = cpu_to_le64(__csum_set(sb, 0, BCH_CSUM_CRC64)); __write_super(dc->disk.c, (void *) &dc->disk_sb); closure_return_with_destructor(cl, bch_write_bdev_super_unlock); @@ -362,7 +362,7 @@ void bch_cached_dev_detach(struct cached_dev *dc) int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) { struct timespec ts = CURRENT_TIME; - s64 rtime = timespec_to_ns(&ts); + __le64 rtime = cpu_to_le64(timespec_to_ns(&ts)); char buf[BDEVNAME_SIZE]; bool found; int ret; @@ -706,7 +706,7 @@ static int blockdev_volume_run(struct cache_set *c, kobject_init(&d->kobj, &bch_blockdev_volume_ktype); ret = bcache_device_init(d, block_bytes(c), - 
inode.v->i_inode.i_size >> 9); + le64_to_cpu(inode.v->i_inode.i_size) >> 9); if (ret) goto err; @@ -759,7 +759,7 @@ int bch_blockdev_volumes_start(struct cache_set *c) int bch_blockdev_volume_create(struct cache_set *c, u64 size) { struct timespec ts = CURRENT_TIME; - s64 rtime = timespec_to_ns(&ts); + __le64 rtime = cpu_to_le64(timespec_to_ns(&ts)); struct bkey_i_inode_blockdev inode; int ret; @@ -767,7 +767,7 @@ int bch_blockdev_volume_create(struct cache_set *c, u64 size) get_random_bytes(&inode.v.i_uuid, sizeof(inode.v.i_uuid)); inode.v.i_inode.i_ctime = rtime; inode.v.i_inode.i_mtime = rtime; - inode.v.i_inode.i_size = size; + inode.v.i_inode.i_size = cpu_to_le64(size); ret = bch_inode_create(c, &inode.k_i, 0, BLOCKDEV_INODE_MAX, &c->unused_inode_hint); diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 646f2347ae07..e163b7becd9c 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -481,7 +481,8 @@ static void make_bfloat(struct bkey_format *format, : tree_to_prev_bkey(t, j >> ffs(j)); struct bkey_packed *r = is_power_of_2(j + 1) - ? bset_bkey_idx(t->data, t->data->u64s - t->end.u64s) + ? bset_bkey_idx(t->data, + le16_to_cpu(t->data->u64s) - t->end.u64s) : tree_to_bkey(t, j >> (ffz(j) + 1)); int shift, exponent; @@ -955,7 +956,7 @@ struct bkey_packed *bch_bset_insert(struct btree_keys *b, bkeyp_key_bytes(f, src)); memcpy(bkeyp_val(f, where), &insert->v, bkeyp_val_bytes(f, src)); - i->u64s += src->u64s; + le16_add_cpu(&i->u64s, src->u64s); if (!bkey_deleted(src)) btree_keys_account_key_add(&b->nr, src); @@ -1537,7 +1538,7 @@ static struct btree_nr_keys btree_mergesort_simple(struct btree_keys *b, } } - bset->u64s = (u64 *) out - bset->_data; + bset->u64s = cpu_to_le16((u64 *) out - bset->_data); return b->nr; } @@ -1577,7 +1578,7 @@ static struct btree_nr_keys btree_mergesort(struct btree_keys *dst, (void *) dst_set + (PAGE_SIZE << dst->page_order)); } - dst_set->u64s = (u64 *) out - dst_set->_data; + dst_set->u64s = cpu_to_le16((u64 *) out - dst_set->_data); return nr; } @@ -1647,7 +1648,7 @@ static struct btree_nr_keys btree_mergesort_extents(struct btree_keys *dst, out = dst_set->start; } - dst_set->u64s = (u64 *) out - dst_set->_data; + dst_set->u64s = cpu_to_le16((u64 *) out - dst_set->_data); return nr; } @@ -1714,7 +1715,9 @@ void bch_btree_sort_into(struct btree_keys *dst, nr = btree_mergesort_extents(dst, dst->set->data, src, &iter, filter); - BUG_ON(set_bytes(dst->set->data) > (PAGE_SIZE << dst->page_order)); + BUG_ON(__set_bytes(dst->set->data, + le16_to_cpu(dst->set->data->u64s)) > + (PAGE_SIZE << dst->page_order)); bch_time_stats_update(state->time, start_time); @@ -1737,7 +1740,7 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats) size_t j; stats->sets[type].nr++; - stats->sets[type].bytes += t->data->u64s * sizeof(u64); + stats->sets[type].bytes += le16_to_cpu(t->data->u64s) * sizeof(u64); if (bset_written(t)) { stats->floats += t->size - 1; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index a647cb3294b2..0ca13c51e3be 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -379,7 +379,7 @@ static inline void btree_keys_account_key(struct btree_nr_keys *n, __bkey_idx((_set), (_set)->u64s) #define bset_bkey_last(_set) \ - bkey_idx((_set), (_set)->u64s) + bkey_idx((_set), le16_to_cpu((_set)->u64s)) static inline struct bkey_packed *bset_bkey_idx(struct bset *i, unsigned idx) { diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c index 
e6015aa00eab..a0fce4a155d5 100644 --- a/drivers/md/bcache/btree_gc.c +++ b/drivers/md/bcache/btree_gc.c @@ -538,21 +538,21 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], for (k = s2->start; k < bset_bkey_last(s2) && - __set_blocks(n1->data, s1->u64s + u64s + k->u64s, + __set_blocks(n1->data, le16_to_cpu(s1->u64s) + u64s + k->u64s, block_bytes(c)) <= blocks; k = bkey_next(k)) { last = k; u64s += k->u64s; } - if (u64s == s2->u64s) { + if (u64s == le16_to_cpu(s2->u64s)) { /* n2 fits entirely in n1 */ n1->key.k.p = n1->data->max_key = n2->data->max_key; memcpy(bset_bkey_last(s1), s2->start, - s2->u64s * sizeof(u64)); - s1->u64s += s2->u64s; + le16_to_cpu(s2->u64s) * sizeof(u64)); + le16_add_cpu(&s1->u64s, le16_to_cpu(s2->u64s)); six_unlock_write(&n2->lock); bch_btree_node_free_never_inserted(c, n2); @@ -574,12 +574,12 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], memcpy(bset_bkey_last(s1), s2->start, u64s * sizeof(u64)); - s1->u64s += u64s; + le16_add_cpu(&s1->u64s, u64s); memmove(s2->start, bset_bkey_idx(s2, u64s), - (s2->u64s - u64s) * sizeof(u64)); - s2->u64s -= u64s; + (le16_to_cpu(s2->u64s) - u64s) * sizeof(u64)); + s2->u64s = cpu_to_le16(le16_to_cpu(s2->u64s) - u64s); } } diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c index b135b4ea505c..fa287da2ffde 100644 --- a/drivers/md/bcache/btree_io.c +++ b/drivers/md/bcache/btree_io.c @@ -31,7 +31,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b, for (t = b->keys.set + from; t <= b->keys.set + b->keys.nsets; t++) - u64s += t->data->u64s; + u64s += le16_to_cpu(t->data->u64s); order = get_order(__set_bytes(b->data, u64s)); } @@ -52,7 +52,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b, btree_node_lock_write(b, iter); if (!from) { - unsigned u64s = out->keys.u64s; + unsigned u64s = le16_to_cpu(out->keys.u64s); BUG_ON(order != b->keys.page_order); @@ -62,7 +62,7 @@ static void btree_node_sort(struct cache_set *c, struct btree *b, * memcpy() */ *out = *b->data; - out->keys.u64s = u64s; + out->keys.u64s = cpu_to_le16(u64s); swap(out, b->data); b->keys.set->data = &b->data->keys; } else { @@ -110,7 +110,7 @@ static bool btree_node_compact(struct cache_set *c, struct btree *b, for (i = b->keys.nsets - 1; i >= 0; --i) { crit *= c->sort.crit_factor; - if (b->keys.set[i].data->u64s < crit) + if (le16_to_cpu(b->keys.set[i].data->u64s) < crit) goto sort; } @@ -187,7 +187,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b, struct bkey_format *f = &b->keys.format; struct bkey_packed *k; - if (i->version != BCACHE_BSET_VERSION) + if (le16_to_cpu(i->version) != BCACHE_BSET_VERSION) return "unsupported bset version"; if (b->written + blocks > btree_blocks(c)) @@ -207,7 +207,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b, "KEY_U64s 0: %zu bytes of metadata lost", (void *) bset_bkey_last(i) - (void *) k); - i->u64s = (u64 *) k - i->_data; + i->u64s = cpu_to_le16((u64 *) k - i->_data); break; } @@ -215,7 +215,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b, btree_node_error(b, c, ptr, "key extends past end of bset"); - i->u64s = (u64 *) k - i->_data; + i->u64s = cpu_to_le16((u64 *) k - i->_data); break; } @@ -231,7 +231,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b, btree_node_error(b, c, ptr, "invalid bkey %s", buf); - i->u64s -= k->u64s; + i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); memmove(k, bkey_next(k), (void *) 
bset_bkey_last(i) - (void *) k); continue; @@ -262,7 +262,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b, goto err; err = "bad magic"; - if (b->data->magic != bset_magic(&c->disk_sb)) + if (le64_to_cpu(b->data->magic) != bset_magic(&c->disk_sb)) goto err; err = "bad btree header"; @@ -297,11 +297,12 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b, /* XXX: retry checksum errors */ err = "bad checksum"; - if (b->data->csum != btree_csum_set(b, b->data)) + if (le64_to_cpu(b->data->csum) != + btree_csum_set(b, b->data)) goto err; blocks = __set_blocks(b->data, - b->data->keys.u64s, + le16_to_cpu(b->data->keys.u64s), block_bytes(c)); } else { bne = write_block(c, b); @@ -315,11 +316,12 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b, goto err; err = "bad checksum"; - if (bne->csum != btree_csum_set(b, bne)) + if (le64_to_cpu(bne->csum) != + btree_csum_set(b, bne)) goto err; blocks = __set_blocks(bne, - bne->keys.u64s, + le16_to_cpu(bne->keys.u64s), block_bytes(c)); } @@ -328,7 +330,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b, goto err; err = "insufficient memory"; - ret = bch_journal_seq_blacklisted(c, i->journal_seq, b); + ret = bch_journal_seq_blacklisted(c, le64_to_cpu(i->journal_seq), b); if (ret < 0) goto err; @@ -526,27 +528,27 @@ static void do_btree_node_write(struct closure *cl) change_bit(BTREE_NODE_write_idx, &b->flags); - i->version = BCACHE_BSET_VERSION; + i->version = cpu_to_le16(BCACHE_BSET_VERSION); SET_BSET_CSUM_TYPE(i, c->opts.metadata_checksum); if (!b->written) { - BUG_ON(b->data->magic != bset_magic(&c->disk_sb)); + BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(&c->disk_sb)); b->data->format = b->keys.format; data = b->data; - b->data->csum = btree_csum_set(b, b->data); + b->data->csum = cpu_to_le64(btree_csum_set(b, b->data)); blocks_to_write = __set_blocks(b->data, - b->data->keys.u64s, + le16_to_cpu(b->data->keys.u64s), block_bytes(c)); } else { struct btree_node_entry *bne = write_block(c, b); data = bne; - bne->csum = btree_csum_set(b, bne); + bne->csum = cpu_to_le64(btree_csum_set(b, bne)); blocks_to_write = __set_blocks(bne, - bne->keys.u64s, + le16_to_cpu(bne->keys.u64s), block_bytes(c)); } @@ -693,7 +695,7 @@ void bch_btree_node_write_lazy(struct btree *b, struct btree_iter *iter) struct btree_node_entry *bne = container_of(btree_bset_last(b), struct btree_node_entry, keys); - unsigned long bytes = __set_bytes(bne, bne->keys.u64s); + unsigned long bytes = __set_bytes(bne, le16_to_cpu(bne->keys.u64s)); if ((max(round_up(bytes, block_bytes(iter->c)), PAGE_SIZE) - bytes < 48 || @@ -775,7 +777,7 @@ void bch_btree_node_flush_journal_entries(struct cache_set *c, * need to loop: */ for (i = b->keys.nsets; i >= 0; --i) { - u64 seq = b->keys.set[i].data->journal_seq; + u64 seq = le64_to_cpu(b->keys.set[i].data->journal_seq); if (seq) { bch_journal_flush_seq_async(&c->journal, seq, cl); diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c index 490f962d8d1e..d49ddc24d475 100644 --- a/drivers/md/bcache/btree_update.c +++ b/drivers/md/bcache/btree_update.c @@ -291,7 +291,7 @@ static struct btree *bch_btree_node_alloc(struct cache_set *c, set_btree_node_dirty(b); bch_bset_init_first(&b->keys, &b->data->keys); - b->data->magic = bset_magic(&c->disk_sb); + b->data->magic = cpu_to_le64(bset_magic(&c->disk_sb)); SET_BSET_BTREE_LEVEL(&b->data->keys, level); bch_check_mark_super(c, &b->key, true); @@ -720,7 +720,7 @@ void bch_btree_insert_and_journal(struct 
btree_iter *iter, if (res->ref) { bch_journal_add_keys(&c->journal, res, b->btree_id, insert, b->level); - btree_bset_last(b)->journal_seq = c->journal.seq; + btree_bset_last(b)->journal_seq = cpu_to_le64(c->journal.seq); } } @@ -1256,7 +1256,7 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n nr_packed++; else nr_unpacked++; - if (k->_data - set1->_data >= (set1->u64s * 3) / 5) + if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5) break; k = bkey_next(k); } @@ -1268,16 +1268,16 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n n2->data->min_key = btree_type_successor(n1->btree_id, n1->key.k.p); - set2->u64s = (u64 *) bset_bkey_last(set1) - (u64 *) k; - set1->u64s -= set2->u64s; + set2->u64s = cpu_to_le16((u64 *) bset_bkey_last(set1) - (u64 *) k); + set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s)); - n2->keys.nr.live_u64s = set2->u64s; + n2->keys.nr.live_u64s = le16_to_cpu(set2->u64s); n2->keys.nr.packed_keys = n1->keys.nr.packed_keys - nr_packed; n2->keys.nr.unpacked_keys = n1->keys.nr.unpacked_keys - nr_unpacked; - n1->keys.nr.live_u64s = set1->u64s; + n1->keys.nr.live_u64s = le16_to_cpu(set1->u64s); n1->keys.nr.packed_keys = nr_packed; n1->keys.nr.unpacked_keys = nr_unpacked; @@ -1286,7 +1286,7 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n memcpy(set2->start, bset_bkey_last(set1), - set2->u64s * sizeof(u64)); + le16_to_cpu(set2->u64s) * sizeof(u64)); n1->keys.set->size = 0; n1->keys.set->extra = BSET_TREE_NONE_VAL; @@ -1412,7 +1412,7 @@ static int btree_split(struct btree *b, struct btree_iter *iter, k = i->start; while (k != bset_bkey_last(i)) if (bkey_deleted(k)) { - i->u64s -= k->u64s; + i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); memmove(k, bkey_next(k), (void *) bset_bkey_last(i) - (void *) k); @@ -1423,9 +1423,9 @@ static int btree_split(struct btree *b, struct btree_iter *iter, } if (__set_blocks(n1->data, - n1->data->keys.u64s + u64s_to_insert, + le16_to_cpu(n1->data->keys.u64s) + u64s_to_insert, block_bytes(n1->c)) > btree_blocks(c) * 3 / 4) { - trace_bcache_btree_node_split(b, btree_bset_first(n1)->u64s); + trace_bcache_btree_node_split(b, le16_to_cpu(btree_bset_first(n1)->u64s)); n2 = __btree_split_node(iter, n1, reserve); six_unlock_write(&n1->lock); @@ -1450,7 +1450,7 @@ static int btree_split(struct btree *b, struct btree_iter *iter, bch_btree_node_write(n3, &as->cl, NULL); } } else { - trace_bcache_btree_node_compact(b, btree_bset_first(n1)->u64s); + trace_bcache_btree_node_compact(b, le16_to_cpu(btree_bset_first(n1)->u64s)); six_unlock_write(&n1->lock); bch_keylist_add(&as->parent_keys, &n1->key); diff --git a/drivers/md/bcache/btree_update.h b/drivers/md/bcache/btree_update.h index 770eca73dccb..4480d92bf448 100644 --- a/drivers/md/bcache/btree_update.h +++ b/drivers/md/bcache/btree_update.h @@ -112,7 +112,7 @@ static inline size_t bch_btree_keys_u64s_remaining(struct cache_set *c, struct bset *i = btree_bset_last(b); BUG_ON((PAGE_SIZE << b->keys.page_order) < - (bset_byte_offset(b, i) + set_bytes(i))); + (bset_byte_offset(b, i) + __set_bytes(i, le16_to_cpu(i->u64s)))); if (b->written == btree_blocks(c)) return 0; @@ -123,7 +123,7 @@ static inline size_t bch_btree_keys_u64s_remaining(struct cache_set *c, : &b->data->keys)); return ((PAGE_SIZE << b->keys.page_order) - - (bset_byte_offset(b, i) + set_bytes(i))) / + (bset_byte_offset(b, i) + __set_bytes(i, le16_to_cpu(i->u64s)))) / sizeof(u64); #else /* diff --git 
a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index a5386acaca86..9d56ea5e7633 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -110,7 +110,7 @@ void __bch_btree_verify(struct cache_set *c, struct btree *b) if (!b->written) { i = &n_ondisk->keys; block += __set_blocks(n_ondisk, - n_ondisk->keys.u64s, + le16_to_cpu(n_ondisk->keys.u64s), block_bytes(c)); } else { struct btree_node_entry *bne = @@ -119,7 +119,7 @@ void __bch_btree_verify(struct cache_set *c, struct btree *b) i = &bne->keys; block += __set_blocks(bne, - bne->keys.u64s, + le16_to_cpu(bne->keys.u64s), block_bytes(c)); } @@ -132,7 +132,7 @@ void __bch_btree_verify(struct cache_set *c, struct btree *b) printk(KERN_ERR "*** block %u not written\n", block); - for (j = 0; j < inmemory->u64s; j++) + for (j = 0; j < le16_to_cpu(inmemory->u64s); j++) if (inmemory->_data[j] != sorted->_data[j]) break; @@ -188,6 +188,8 @@ void bch_verify_inode_refs(struct cache_set *c) struct bkey_s_c k; struct bkey_i_inode inode; u64 cur_inum = 0; + u64 i_size; + u16 i_mode; char buf[100]; for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, @@ -208,21 +210,23 @@ void bch_verify_inode_refs(struct cache_set *c) } cur_inum = k.k->p.inode; + i_mode = le16_to_cpu(inode.v.i_mode); + i_size = le64_to_cpu(inode.v.i_size); - if (!S_ISREG(inode.v.i_mode) && - !S_ISLNK(inode.v.i_mode)) + if (!S_ISREG(i_mode) && + !S_ISLNK(i_mode)) cache_set_inconsistent(c, "extent for non regular file, inode %llu mode %u", - k.k->p.inode, inode.v.i_mode); + k.k->p.inode, i_mode); - BUG_ON(inode.v.i_flags & BCH_INODE_I_SIZE_DIRTY); + BUG_ON(le32_to_cpu(inode.v.i_flags) & BCH_INODE_I_SIZE_DIRTY); - if (k.k->p.offset > round_up(inode.v.i_size, PAGE_SIZE) >> 9) { + if (k.k->p.offset > round_up(i_size, PAGE_SIZE) >> 9) { bch_bkey_val_to_text(c, BTREE_ID_EXTENTS, buf, sizeof(buf), k); cache_set_inconsistent(c, "extent past end of inode %llu: i_size %llu extent\n%s", - k.k->p.inode, inode.v.i_size, buf); + k.k->p.inode, i_size, buf); } } bch_btree_iter_unlock(&iter); @@ -242,11 +246,12 @@ void bch_verify_inode_refs(struct cache_set *c) } cur_inum = k.k->p.inode; + i_mode = le16_to_cpu(inode.v.i_mode); - if (!S_ISDIR(inode.v.i_mode)) + if (!S_ISDIR(i_mode)) cache_set_inconsistent(c, "dirent for non directory, inode %llu mode %u", - k.k->p.inode, inode.v.i_mode); + k.k->p.inode, i_mode); } bch_btree_iter_unlock(&iter); @@ -262,11 +267,12 @@ void bch_verify_inode_refs(struct cache_set *c) } cur_inum = k.k->p.inode; + i_mode = le16_to_cpu(inode.v.i_mode); - cache_set_inconsistent_on(!S_ISREG(inode.v.i_mode) && - !S_ISDIR(inode.v.i_mode), c, + cache_set_inconsistent_on(!S_ISREG(i_mode) && + !S_ISDIR(i_mode), c, "xattr for non file/directory, inode %llu mode %u", - k.k->p.inode, inode.v.i_mode); + k.k->p.inode, i_mode); } bch_btree_iter_unlock(&iter); } diff --git a/drivers/md/bcache/dirent.c b/drivers/md/bcache/dirent.c index 759bd4ab6452..44c2f34a6658 100644 --- a/drivers/md/bcache/dirent.c +++ b/drivers/md/bcache/dirent.c @@ -147,7 +147,7 @@ static struct bkey_i_dirent *dirent_create_key(u8 type, bkey_dirent_init(&dirent->k_i); dirent->k.u64s = u64s; - dirent->v.d_inum = dst; + dirent->v.d_inum = cpu_to_le64(dst); dirent->v.d_type = type; memcpy(dirent->v.d_name, name->name, name->len); @@ -417,7 +417,7 @@ u64 bch_dirent_lookup(struct cache_set *c, u64 dir_inum, k = __dirent_find(&iter, dir_inum, name); if (!IS_ERR(k.k)) - inum = bkey_s_c_to_dirent(k).v->d_inum; + inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum); bch_btree_iter_unlock(&iter); @@ -485,7 +485,8 
@@ int bch_readdir(struct file *file, struct dir_context *ctx) * locks */ if (!dir_emit(ctx, dirent.v->d_name, len, - dirent.v->d_inum, dirent.v->d_type)) + le64_to_cpu(dirent.v->d_inum), + dirent.v->d_type)) break; ctx->pos = k.k->p.offset + 1; diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index fa7e477de0b4..9a3df3d1aaa2 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -99,7 +99,7 @@ struct btree_nr_keys bch_key_sort_fix_overlapping(struct btree_keys *b, heap_sift(iter, 0, key_sort_cmp); } - bset->u64s = (u64 *) out - bset->_data; + bset->u64s = cpu_to_le16((u64 *) out - bset->_data); return nr; } @@ -775,7 +775,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct btree_keys *b, } } - bset->u64s = (u64 *) out - bset->_data; + bset->u64s = cpu_to_le16((u64 *) out - bset->_data); return nr; } diff --git a/drivers/md/bcache/fs-gc.c b/drivers/md/bcache/fs-gc.c index 837811398f45..91711dbfaa69 100644 --- a/drivers/md/bcache/fs-gc.c +++ b/drivers/md/bcache/fs-gc.c @@ -51,6 +51,7 @@ static int bch_gc_walk_dirents(struct cache_set *c, struct nlinks *links, struct btree_iter iter; struct bkey_s_c k; struct bkey_s_c_dirent d; + u64 d_inum; inc_link(links, range_start, range_end, BCACHE_ROOT_INO, 2, false); @@ -58,15 +59,16 @@ static int bch_gc_walk_dirents(struct cache_set *c, struct nlinks *links, switch (k.k->type) { case BCH_DIRENT: d = bkey_s_c_to_dirent(k); + d_inum = le64_to_cpu(d.v->d_inum); if (d.v->d_type == DT_DIR) { inc_link(links, range_start, range_end, - d.v->d_inum, 2, false); + d_inum, 2, false); inc_link(links, range_start, range_end, d.k->p.inode, 1, true); } else { inc_link(links, range_start, range_end, - d.v->d_inum, 1, false); + d_inum, 1, false); } break; @@ -82,17 +84,21 @@ static int bch_gc_do_inode(struct cache_set *c, struct btree_iter *iter, { struct bkey_i_inode update; int ret; + u16 i_mode = le16_to_cpu(inode.v->i_mode); + u32 i_flags = le32_to_cpu(inode.v->i_flags); + u32 i_nlink = le32_to_cpu(inode.v->i_nlink); + u64 i_size = le64_to_cpu(inode.v->i_size); - cache_set_inconsistent_on(inode.v->i_nlink < link.count, c, + cache_set_inconsistent_on(i_nlink < link.count, c, "i_link too small (%u < %u, type %i)", - inode.v->i_nlink, link.count + link.dir_count, - mode_to_type(inode.v->i_mode)); + i_nlink, link.count + link.dir_count, + mode_to_type(i_mode)); if (!link.count) { - cache_set_inconsistent_on(S_ISDIR(inode.v->i_mode) && + cache_set_inconsistent_on(S_ISDIR(i_mode) && bch_empty_dir(c, inode.k->p.inode), c, "non empty directory with link count 0,inode nlink %u, dir links found %u", - inode.v->i_nlink, link.dir_count); + i_nlink, link.dir_count); if (c->opts.verbose_recovery) pr_info("deleting inum %llu", inode.k->p.inode); @@ -101,7 +107,7 @@ static int bch_gc_do_inode(struct cache_set *c, struct btree_iter *iter, return bch_inode_rm(c, inode.k->p.inode); } - if (inode.v->i_flags & BCH_INODE_I_SIZE_DIRTY) { + if (i_flags & BCH_INODE_I_SIZE_DIRTY) { if (c->opts.verbose_recovery) pr_info("truncating inode %llu", inode.k->p.inode); @@ -111,23 +117,23 @@ static int bch_gc_do_inode(struct cache_set *c, struct btree_iter *iter, */ ret = bch_inode_truncate(c, inode.k->p.inode, - round_up(inode.v->i_size, PAGE_SIZE) >> 9, + round_up(i_size, PAGE_SIZE) >> 9, NULL); if (ret) return ret; } - if (inode.v->i_nlink != link.count + link.dir_count || - inode.v->i_flags & BCH_INODE_I_SIZE_DIRTY) { + if (i_nlink != link.count + link.dir_count || + i_flags & BCH_INODE_I_SIZE_DIRTY) { if (c->opts.verbose_recovery && - 
inode.v->i_nlink != link.count + link.dir_count) + i_nlink != link.count + link.dir_count) pr_info("setting inum %llu nlinks from %u to %u", - inode.k->p.inode, inode.v->i_nlink, + inode.k->p.inode, i_nlink, link.count + link.dir_count); bkey_reassemble(&update.k_i, inode.s_c); - update.v.i_nlink = link.count + link.dir_count; - update.v.i_flags &= ~BCH_INODE_I_SIZE_DIRTY; + update.v.i_nlink = cpu_to_le32(link.count + link.dir_count); + update.v.i_flags = cpu_to_le32(i_flags & ~BCH_INODE_I_SIZE_DIRTY); return bch_btree_insert_at(iter, &keylist_single(&update.k_i), diff --git a/drivers/md/bcache/fs.c b/drivers/md/bcache/fs.c index c9f9bc757845..b158ca0012b6 100644 --- a/drivers/md/bcache/fs.c +++ b/drivers/md/bcache/fs.c @@ -218,14 +218,14 @@ static int __must_check __bch_write_inode(struct cache_set *c, goto out; } - bi->i_mode = inode->i_mode; - bi->i_uid = i_uid_read(inode); - bi->i_gid = i_gid_read(inode); - bi->i_nlink = inode->i_nlink; - bi->i_dev = inode->i_rdev; - bi->i_atime = timespec_to_ns(&inode->i_atime); - bi->i_mtime = timespec_to_ns(&inode->i_mtime); - bi->i_ctime = timespec_to_ns(&inode->i_ctime); + bi->i_mode = cpu_to_le16(inode->i_mode); + bi->i_uid = cpu_to_le32(i_uid_read(inode)); + bi->i_gid = cpu_to_le32(i_gid_read(inode)); + bi->i_nlink = cpu_to_le32(inode->i_nlink); + bi->i_dev = cpu_to_le32(inode->i_rdev); + bi->i_atime = cpu_to_le64(timespec_to_ns(&inode->i_atime)); + bi->i_mtime = cpu_to_le64(timespec_to_ns(&inode->i_mtime)); + bi->i_ctime = cpu_to_le64(timespec_to_ns(&inode->i_ctime)); ret = bch_btree_insert_at(&iter, &keylist_single(&new_inode.k_i), @@ -236,8 +236,8 @@ static int __must_check __bch_write_inode(struct cache_set *c, if (!ret) { write_seqcount_begin(&ei->shadow_i_size_lock); - ei->i_size = bi->i_size; - ei->i_flags = bi->i_flags; + ei->i_size = le64_to_cpu(bi->i_size); + ei->i_flags = le32_to_cpu(bi->i_flags); write_seqcount_end(&ei->shadow_i_size_lock); bch_write_inode_checks(c, ei); @@ -258,15 +258,18 @@ static int inode_set_size(struct bch_inode_info *ei, struct bch_inode *bi, void *p) { loff_t *new_i_size = p; + unsigned i_flags = le32_to_cpu(bi->i_flags); lockdep_assert_held(&ei->update_lock); - bi->i_size = *new_i_size; + bi->i_size = cpu_to_le64(*new_i_size); if (atomic_long_read(&ei->i_size_dirty_count)) - bi->i_flags |= BCH_INODE_I_SIZE_DIRTY; + i_flags |= BCH_INODE_I_SIZE_DIRTY; else - bi->i_flags &= ~BCH_INODE_I_SIZE_DIRTY; + i_flags &= ~BCH_INODE_I_SIZE_DIRTY; + + bi->i_flags = cpu_to_le32(i_flags);; return 0; } @@ -281,7 +284,8 @@ static int __must_check bch_write_inode_size(struct cache_set *c, static int inode_set_dirty(struct bch_inode_info *ei, struct bch_inode *bi, void *p) { - bi->i_flags |= BCH_INODE_I_SIZE_DIRTY; + bi->i_flags = cpu_to_le32(le32_to_cpu(bi->i_flags)| + BCH_INODE_I_SIZE_DIRTY); return 0; } @@ -450,8 +454,8 @@ static struct inode *bch_vfs_inode_create(struct cache_set *c, struct posix_acl *default_acl = NULL, *acl = NULL; struct bch_inode_info *ei; struct bch_inode *bi; - struct timespec ts = CURRENT_TIME; struct bkey_i_inode bkey_inode; + struct timespec ts = CURRENT_TIME; s64 now = timespec_to_ns(&ts); int ret; @@ -470,15 +474,15 @@ static struct inode *bch_vfs_inode_create(struct cache_set *c, ei = to_bch_ei(inode); bi = &bkey_inode_init(&bkey_inode.k_i)->v; - bi->i_uid = i_uid_read(inode); - bi->i_gid = i_gid_read(inode); + bi->i_uid = cpu_to_le32(i_uid_read(inode)); + bi->i_gid = cpu_to_le32(i_gid_read(inode)); - bi->i_mode = inode->i_mode; - bi->i_dev = rdev; - bi->i_atime = now; - bi->i_mtime = now; - 
bi->i_ctime = now; - bi->i_nlink = S_ISDIR(mode) ? 2 : 1; + bi->i_mode = cpu_to_le16(inode->i_mode); + bi->i_dev = cpu_to_le32(rdev); + bi->i_atime = cpu_to_le64(now); + bi->i_mtime = cpu_to_le64(now); + bi->i_ctime = cpu_to_le64(now); + bi->i_nlink = cpu_to_le32(S_ISDIR(mode) ? 2 : 1); ret = bch_inode_create(c, &bkey_inode.k_i, BLOCKDEV_INODE_MAX, 0, @@ -1685,7 +1689,7 @@ static inline bool bch_flags_allowed(umode_t mode, u32 flags) static int bch_inode_set_flags(struct bch_inode_info *ei, struct bch_inode *bi, void *p) { - unsigned oldflags = bi->i_flags; + unsigned oldflags = le32_to_cpu(bi->i_flags); unsigned newflags = *((unsigned *) p); if (((newflags ^ oldflags) & (FS_APPEND_FL|FS_IMMUTABLE_FL)) && @@ -1694,9 +1698,9 @@ static int bch_inode_set_flags(struct bch_inode_info *ei, struct bch_inode *bi, newflags = newflags & BCH_FL_USER_FLAGS; newflags |= oldflags & ~BCH_FL_USER_FLAGS; - bi->i_flags = newflags; + bi->i_flags = cpu_to_le32(newflags); - ei->vfs_inode.i_ctime = CURRENT_TIME_SEC; + ei->vfs_inode.i_ctime = CURRENT_TIME; return 0; } @@ -2837,20 +2841,20 @@ static void bch_inode_init(struct bch_inode_info *ei, pr_debug("init inode %llu with mode %o", bkey_inode.k->p.inode, bi->i_mode); - ei->i_flags = bi->i_flags; - ei->i_size = bi->i_size; + ei->i_flags = le32_to_cpu(bi->i_flags); + ei->i_size = le64_to_cpu(bi->i_size); - inode->i_mode = bi->i_mode; - i_uid_write(inode, bi->i_uid); - i_gid_write(inode, bi->i_gid); + inode->i_mode = le16_to_cpu(bi->i_mode); + i_uid_write(inode, le32_to_cpu(bi->i_uid)); + i_gid_write(inode, le32_to_cpu(bi->i_gid)); inode->i_ino = bkey_inode.k->p.inode; - set_nlink(inode, bi->i_nlink); - inode->i_rdev = bi->i_dev; - inode->i_size = bi->i_size; - inode->i_atime = ns_to_timespec(bi->i_atime); - inode->i_mtime = ns_to_timespec(bi->i_mtime); - inode->i_ctime = ns_to_timespec(bi->i_ctime); + set_nlink(inode, le32_to_cpu(bi->i_nlink)); + inode->i_rdev = le32_to_cpu(bi->i_dev); + inode->i_size = le64_to_cpu(bi->i_size); + inode->i_atime = ns_to_timespec(le64_to_cpu(bi->i_atime)); + inode->i_mtime = ns_to_timespec(le64_to_cpu(bi->i_mtime)); + inode->i_ctime = ns_to_timespec(le64_to_cpu(bi->i_ctime)); bch_set_inode_flags(inode); inode->i_mapping->a_ops = &bch_address_space_operations; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index b8f9cb41eebe..da7b391fce59 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -37,14 +37,14 @@ static inline u64 journal_pin_seq(struct journal *j, #define for_each_jset_jkeys(jkeys, jset) \ for (jkeys = (jset)->start; \ - jkeys < (struct jset_entry *) bset_bkey_last(jset); \ + jkeys < (struct jset_entry *) bkey_idx(jset, le32_to_cpu((jset)->u64s));\ jkeys = jset_keys_next(jkeys)) #define for_each_jset_key(k, _n, jkeys, jset) \ for_each_jset_jkeys(jkeys, jset) \ if (JKEYS_TYPE(jkeys) == JKEYS_BTREE_KEYS) \ for (k = (jkeys)->start; \ - (k < bset_bkey_last(jkeys) && \ + (k < bkey_idx(jkeys, le16_to_cpu((jkeys)->u64s)) &&\ (_n = bkey_next(k), 1)); \ k = _n) @@ -57,7 +57,7 @@ static inline void bch_journal_add_entry_at(struct journal *j, const void *data, { struct jset_entry *jkeys = bkey_idx(journal_cur_write(j)->data, offset); - jkeys->u64s = u64s; + jkeys->u64s = cpu_to_le16(u64s); jkeys->btree_id = id; jkeys->level = level; jkeys->flags = 0; @@ -72,8 +72,9 @@ static inline void bch_journal_add_entry(struct journal *j, const void *data, { struct jset *jset = journal_cur_write(j)->data; - bch_journal_add_entry_at(j, data, u64s, type, id, level, jset->u64s); - jset->u64s += 
jset_u64s(u64s); + bch_journal_add_entry_at(j, data, u64s, type, id, level, + le32_to_cpu(jset->u64s)); + le32_add_cpu(&jset->u64s, jset_u64s(u64s)); } static struct jset_entry *bch_journal_find_entry(struct jset *j, unsigned type, @@ -102,7 +103,7 @@ struct bkey_i *bch_journal_find_btree_root(struct cache_set *c, struct jset *j, *level = jkeys->level; if (cache_set_inconsistent_on(!jkeys->u64s || - jkeys->u64s != k->k.u64s || + le16_to_cpu(jkeys->u64s) != k->k.u64s || bkey_invalid(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(k)), c, "invalid btree root in journal")) return NULL; @@ -285,7 +286,7 @@ static enum { { struct journal_replay *i, *pos; struct list_head *where; - size_t bytes = set_bytes(j); + size_t bytes = __set_bytes(j, le32_to_cpu(j->u64s)); int ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE; mutex_lock(&jlist->lock); @@ -293,9 +294,10 @@ static enum { /* This entry too old? */ if (!list_empty(jlist->head)) { i = list_last_entry(jlist->head, struct journal_replay, list); - if (j->seq < i->j.last_seq) { + if (le64_to_cpu(j->seq) < le64_to_cpu(i->j.last_seq)) { pr_debug("j->seq %llu i->j.seq %llu", - j->seq, i->j.seq); + le64_to_cpu(j->seq), + le64_to_cpu(i->j.seq)); goto out; } } @@ -304,20 +306,20 @@ static enum { /* Drop entries we don't need anymore */ list_for_each_entry_safe(i, pos, jlist->head, list) { - if (i->j.seq >= j->last_seq) + if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq)) break; list_del(&i->list); kfree(i); } list_for_each_entry_reverse(i, jlist->head, list) { - if (j->seq == i->j.seq) { + if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) { pr_debug("j->seq %llu i->j.seq %llu", j->seq, i->j.seq); goto out; } - if (j->seq > i->j.seq) { + if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) { where = &i->list; goto add; } @@ -334,7 +336,7 @@ add: memcpy(&i->j, j, bytes); list_add(&i->list, where); - pr_debug("seq %llu", j->seq); + pr_debug("seq %llu", le64_to_cpu(j->seq)); out: mutex_unlock(&jlist->lock); return ret; @@ -347,18 +349,18 @@ static enum { } journal_entry_validate(struct cache *ca, const struct jset *j, u64 sector, unsigned bucket_sectors_left, unsigned sectors_read) { - size_t bytes = set_bytes(j); + size_t bytes = __set_bytes(j, le32_to_cpu(j->u64s)); u64 got, expect; if (bch_meta_read_fault("journal")) return JOURNAL_ENTRY_BAD; - if (j->magic != jset_magic(&ca->set->disk_sb)) { + if (le64_to_cpu(j->magic) != jset_magic(&ca->set->disk_sb)) { pr_debug("bad magic while reading journal from %llu", sector); return JOURNAL_ENTRY_BAD; } - got = j->version; + got = le32_to_cpu(j->version); expect = BCACHE_JSET_VERSION; if (cache_inconsistent_on(got != expect, ca, @@ -377,14 +379,15 @@ static enum { /* XXX: retry on checksum error */ - got = j->csum; - expect = csum_set(j, JSET_CSUM_TYPE(j)); + got = le64_to_cpu(j->csum); + expect = __csum_set(j, le32_to_cpu(j->u64s), JSET_CSUM_TYPE(j)); if (cache_inconsistent_on(got != expect, ca, "journal checksum bad (got %llu expect %llu), sector %lluu", got, expect, sector)) return JOURNAL_ENTRY_BAD; - if (cache_inconsistent_on(j->last_seq > j->seq, ca, + if (cache_inconsistent_on(le64_to_cpu(j->last_seq) > + le64_to_cpu(j->seq), ca, "invalid journal entry: last_seq > seq")) return JOURNAL_ENTRY_BAD; @@ -462,10 +465,10 @@ reread: * journal entries. 
We don't need the rest of the * bucket: */ - if (j->seq < ja->bucket_seq[bucket]) + if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket]) goto out; - ja->bucket_seq[bucket] = j->seq; + ja->bucket_seq[bucket] = le64_to_cpu(j->seq); switch (journal_entry_add(jlist, j)) { case JOURNAL_ENTRY_ADD_ERROR: @@ -478,10 +481,11 @@ reread: break; } - if (j->seq > *seq) - *seq = j->seq; + if (le64_to_cpu(j->seq) > *seq) + *seq = le64_to_cpu(j->seq); - blocks = set_blocks(j, block_bytes(c)); + blocks = __set_blocks(j, le32_to_cpu(j->u64s), + block_bytes(c)); pr_debug("next"); bucket_offset += blocks * c->sb.block_size; @@ -701,15 +705,17 @@ const char *bch_journal_read(struct cache_set *c, struct list_head *list) j = &list_entry(list->prev, struct journal_replay, list)->j; - if (j->seq - j->last_seq + 1 > c->journal.pin.size) + if (le64_to_cpu(j->seq) - + le64_to_cpu(j->last_seq) + 1 > c->journal.pin.size) return "too many journal entries open for refcount fifo"; - c->journal.pin.back = j->seq - j->last_seq + 1; + c->journal.pin.back = le64_to_cpu(j->seq) - + le64_to_cpu(j->last_seq) + 1; - c->journal.seq = j->seq; - c->journal.last_seq_ondisk = j->last_seq; + c->journal.seq = le64_to_cpu(j->seq); + c->journal.last_seq_ondisk = le64_to_cpu(j->last_seq); - BUG_ON(last_seq(&c->journal) != j->last_seq); + BUG_ON(last_seq(&c->journal) != le64_to_cpu(j->last_seq)); i = list_first_entry(list, struct journal_replay, list); @@ -720,7 +726,7 @@ const char *bch_journal_read(struct cache_set *c, struct list_head *list) INIT_LIST_HEAD(&p->list); - if (i && i->j.seq == seq) { + if (i && le64_to_cpu(i->j.seq) == seq) { atomic_set(&p->count, 1); if (journal_seq_blacklist_read(c, i, p)) @@ -742,8 +748,8 @@ const char *bch_journal_read(struct cache_set *c, struct list_head *list) memcpy(c->journal.prio_buckets, prio_ptrs->_data, - prio_ptrs->u64s * sizeof(u64)); - c->journal.nr_prio_buckets = prio_ptrs->u64s; + le16_to_cpu(prio_ptrs->u64s) * sizeof(u64)); + c->journal.nr_prio_buckets = le16_to_cpu(prio_ptrs->u64s); return NULL; } @@ -802,7 +808,7 @@ __journal_entry_close(struct journal *j, u32 val) old.v, new.v)) != old.v); if (old.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) - journal_cur_write(j)->data->u64s = old.cur_entry_offset; + journal_cur_write(j)->data->u64s = cpu_to_le32(old.cur_entry_offset); if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) return JOURNAL_ENTRY_ERROR; @@ -846,7 +852,7 @@ static void journal_entry_open(struct journal *j) u64s -= JSET_KEYS_U64s + j->nr_prio_buckets; u64s = max_t(ssize_t, 0L, u64s); - if (u64s > w->data->u64s) { + if (u64s > le32_to_cpu(w->data->u64s)) { union journal_res_state old, new; u64 v = atomic64_read(&j->reservations.counter); @@ -867,7 +873,7 @@ static void journal_entry_open(struct journal *j) break; /* Handle any already added entries */ - new.cur_entry_offset = w->data->u64s; + new.cur_entry_offset = le32_to_cpu(w->data->u64s); } while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v); @@ -964,29 +970,33 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list) struct jset_entry *jkeys; struct journal_replay *i, *n; u64 cur_seq = last_seq(j); - u64 end_seq = list_last_entry(list, struct journal_replay, list)->j.seq; + u64 end_seq = le64_to_cpu(list_last_entry(list, struct journal_replay, + list)->j.seq); list_for_each_entry_safe(i, n, list, list) { mutex_lock(&j->blacklist_lock); - while (cur_seq < i->j.seq && + while (cur_seq < le64_to_cpu(i->j.seq) && journal_seq_blacklist_find(j, cur_seq)) cur_seq++; - 
cache_set_inconsistent_on(journal_seq_blacklist_find(j, i->j.seq), c, + cache_set_inconsistent_on(journal_seq_blacklist_find(j, + le64_to_cpu(i->j.seq)), c, "found blacklisted journal entry %llu", - i->j.seq); + le64_to_cpu(i->j.seq)); mutex_unlock(&j->blacklist_lock); - cache_set_inconsistent_on(i->j.seq != cur_seq, c, + cache_set_inconsistent_on(le64_to_cpu(i->j.seq) != cur_seq, c, "journal entries %llu-%llu missing! (replaying %llu-%llu)", - cur_seq, i->j.seq - 1, last_seq(j), end_seq); + cur_seq, le64_to_cpu(i->j.seq) - 1, + last_seq(j), end_seq); - cur_seq = i->j.seq + 1; + cur_seq = le64_to_cpu(i->j.seq) + 1; j->cur_pin_list = - &j->pin.data[((j->pin.back - 1 - (j->seq - i->j.seq)) & + &j->pin.data[((j->pin.back - 1 - + (j->seq - le64_to_cpu(i->j.seq))) & j->pin.mask)]; BUG_ON(atomic_read(&j->cur_pin_list->count) != 1); @@ -1347,7 +1357,7 @@ static void __bch_journal_next_entry(struct journal *j) } jset = journal_cur_write(j)->data; - jset->seq = ++j->seq; + jset->seq = cpu_to_le64(++j->seq); jset->u64s = 0; } @@ -1378,7 +1388,7 @@ static void journal_write_done(struct closure *cl) struct journal *j = container_of(cl, struct journal, io); struct journal_write *w = journal_prev_write(j); - j->last_seq_ondisk = w->data->last_seq; + j->last_seq_ondisk = le64_to_cpu(w->data->last_seq); __bch_time_stats_update(j->write_time, j->write_start_time); @@ -1450,16 +1460,19 @@ static void journal_write_locked(struct closure *cl) /* So last_seq is up to date */ journal_reclaim_fast(j); - w->data->read_clock = c->prio_clock[READ].hand; - w->data->write_clock = c->prio_clock[WRITE].hand; - w->data->magic = jset_magic(&c->disk_sb); - w->data->version = BCACHE_JSET_VERSION; - w->data->last_seq = last_seq(j); + w->data->read_clock = cpu_to_le16(c->prio_clock[READ].hand); + w->data->write_clock = cpu_to_le16(c->prio_clock[WRITE].hand); + w->data->magic = cpu_to_le64(jset_magic(&c->disk_sb)); + w->data->version = cpu_to_le32(BCACHE_JSET_VERSION); + w->data->last_seq = cpu_to_le64(last_seq(j)); SET_JSET_CSUM_TYPE(w->data, c->opts.metadata_checksum); - w->data->csum = csum_set(w->data, JSET_CSUM_TYPE(w->data)); + w->data->csum = cpu_to_le64(__csum_set(w->data, + le32_to_cpu(w->data->u64s), + JSET_CSUM_TYPE(w->data))); - sectors = set_blocks(w->data, block_bytes(c)) * c->sb.block_size; + sectors = __set_blocks(w->data, le32_to_cpu(w->data->u64s), + block_bytes(c)) * c->sb.block_size; BUG_ON(sectors > j->sectors_free); j->sectors_free -= sectors; @@ -1510,7 +1523,7 @@ static void journal_write_locked(struct closure *cl) ptr->offset += sectors; - ca->journal.bucket_seq[ca->journal.cur_idx] = w->data->seq; + ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq); } /* diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index 1b960b0bddf4..180019a571e0 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -112,7 +112,7 @@ static inline struct jset_entry *jset_keys_next(struct jset_entry *j) { - return (void *) __bset_bkey_last(j); + return (void *) __bkey_idx(j, le16_to_cpu(j->u64s)); } /* @@ -250,7 +250,7 @@ ssize_t bch_journal_print_debug(struct journal *, char *); int bch_cache_journal_alloc(struct cache *); -static inline u64 *__journal_buckets(struct cache *ca) +static inline __le64 *__journal_buckets(struct cache *ca) { return ca->disk_sb.sb->_data + bch_journal_buckets_offset(ca->disk_sb.sb); } diff --git a/drivers/md/bcache/journal_types.h b/drivers/md/bcache/journal_types.h index 32fb49273b13..50a491e0dca9 100644 --- 
a/drivers/md/bcache/journal_types.h +++ b/drivers/md/bcache/journal_types.h @@ -160,7 +160,7 @@ struct journal { struct work_struct reclaim_work; - u64 prio_buckets[MAX_CACHES_PER_SET]; + __le64 prio_buckets[MAX_CACHES_PER_SET]; unsigned nr_prio_buckets; diff --git a/drivers/md/bcache/opts.h b/drivers/md/bcache/opts.h index d2c81d5f618e..a17dc6ff1735 100644 --- a/drivers/md/bcache/opts.h +++ b/drivers/md/bcache/opts.h @@ -25,7 +25,7 @@ extern const char * const bch_csum_types[]; extern const char * const bch_compression_types[]; /* dummy option, for options that aren't stored in the superblock */ -BITMASK(NO_SB_OPT, struct cache_sb, flags, 0, 0); +LE64_BITMASK(NO_SB_OPT, struct cache_sb, flags, 0, 0); #define CACHE_SET_VISIBLE_OPTS() \ CACHE_SET_OPT(verbose_recovery, \ diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 06f44fa6a9cb..29687dc4214d 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -208,7 +208,7 @@ static const char *validate_cache_super(struct cache_set *c, struct cache *ca) u16 block_size; unsigned i; - switch (sb->version) { + switch (le64_to_cpu(sb->version)) { case BCACHE_SB_VERSION_CDEV_V0: case BCACHE_SB_VERSION_CDEV_WITH_UUID: case BCACHE_SB_VERSION_CDEV_V2: @@ -219,7 +219,7 @@ static const char *validate_cache_super(struct cache_set *c, struct cache *ca) } if (CACHE_SYNC(sb) && - sb->version != BCACHE_SB_VERSION_CDEV_V3) + le64_to_cpu(sb->version) != BCACHE_SB_VERSION_CDEV_V3) return "Unsupported superblock version"; block_size = le16_to_cpu(sb->block_size); @@ -285,7 +285,7 @@ static const char *validate_cache_super(struct cache_set *c, struct cache *ca) if (!is_power_of_2(ca->mi.bucket_size) || ca->mi.bucket_size < PAGE_SECTORS || - ca->mi.bucket_size < sb->block_size) + ca->mi.bucket_size < block_size) return "Bad bucket size"; ca->bucket_bits = ilog2(ca->mi.bucket_size); @@ -359,12 +359,14 @@ int bch_super_realloc(struct bcache_superblock *sb, unsigned u64s) struct cache_member *mi = sb->sb->members + sb->sb->nr_this_dev; char buf[BDEVNAME_SIZE]; size_t bytes = __set_bytes((struct cache_sb *) NULL, u64s); - size_t want = bytes + (SB_SECTOR << 9); + u64 want = bytes + (SB_SECTOR << 9); - if (want > mi->first_bucket * (mi->bucket_size << 9)) { - pr_err("%s: superblock too big: want %zu but have %u", - bdevname(sb->bdev, buf), want, - mi->first_bucket * mi->bucket_size << 9); + u64 first_bucket_offset = (u64) le16_to_cpu(mi->first_bucket) * + ((u64) le16_to_cpu(mi->bucket_size) << 9); + + if (want > first_bucket_offset) { + pr_err("%s: superblock too big: want %llu but have %llu", + bdevname(sb->bdev, buf), want, first_bucket_offset); return -ENOSPC; } @@ -428,11 +430,12 @@ retry: goto retry; err = "Bad checksum"; - if (sb->sb->csum != csum_set(sb->sb, - le64_to_cpu(sb->sb->version) < - BCACHE_SB_VERSION_CDEV_V3 - ? BCH_CSUM_CRC64 - : CACHE_SB_CSUM_TYPE(sb->sb))) + if (le64_to_cpu(sb->sb->csum) != + __csum_set(sb->sb, le16_to_cpu(sb->sb->u64s), + le64_to_cpu(sb->sb->version) < + BCACHE_SB_VERSION_CDEV_V3 + ? 
BCH_CSUM_CRC64 + : CACHE_SB_CSUM_TYPE(sb->sb))) goto err; return NULL; @@ -449,7 +452,7 @@ void __write_super(struct cache_set *c, struct bcache_superblock *disk_sb) bio->bi_bdev = disk_sb->bdev; bio->bi_iter.bi_sector = SB_SECTOR; bio->bi_iter.bi_size = - roundup(set_bytes(sb), + roundup(__set_bytes(sb, le16_to_cpu(sb->u64s)), bdev_logical_block_size(disk_sb->bdev)); bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); bch_bio_map(bio, sb); @@ -567,7 +570,7 @@ static int cache_sb_from_cache_set(struct cache_set *c, struct cache *ca) { struct cache_sb *src = &c->disk_sb, *dst = ca->disk_sb.sb; - dst->version = BCACHE_SB_VERSION_CDEV; + dst->version = cpu_to_le64(BCACHE_SB_VERSION_CDEV); dst->seq = src->seq; dst->user_uuid = src->user_uuid; dst->set_uuid = src->set_uuid; @@ -612,7 +615,7 @@ static void __bcache_write_super(struct cache_set *c) closure_init(cl, &c->cl); - c->disk_sb.seq = cpu_to_le64(le64_to_cpu(c->disk_sb.seq) + 1); + le64_add_cpu(&c->disk_sb.seq, 1); for_each_cache(ca, c, i) { struct cache_sb *sb = ca->disk_sb.sb; @@ -621,7 +624,9 @@ static void __bcache_write_super(struct cache_set *c) cache_sb_from_cache_set(c, ca); SET_CACHE_SB_CSUM_TYPE(sb, c->opts.metadata_checksum); - sb->csum = cpu_to_le64(csum_set(sb, CACHE_SB_CSUM_TYPE(sb))); + sb->csum = cpu_to_le64(__csum_set(sb, + le16_to_cpu(sb->u64s), + CACHE_SB_CSUM_TYPE(sb))); bio_reset(bio); bio->bi_bdev = ca->disk_sb.bdev; @@ -1247,8 +1252,8 @@ static const char *run_cache_set(struct cache_set *c) goto err; } - c->prio_clock[READ].hand = j->read_clock; - c->prio_clock[WRITE].hand = j->write_clock; + c->prio_clock[READ].hand = le16_to_cpu(j->read_clock); + c->prio_clock[WRITE].hand = le16_to_cpu(j->write_clock); for_each_cache(ca, c, i) { bch_recalc_min_prio(ca, READ); @@ -1350,8 +1355,8 @@ static const char *run_cache_set(struct cache_set *c) bkey_inode_init(&inode.k_i); inode.k.p.inode = BCACHE_ROOT_INO; - inode.v.i_mode = S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO; - inode.v.i_nlink = 2; + inode.v.i_mode = cpu_to_le16(S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO); + inode.v.i_nlink = cpu_to_le32(2); err = "error creating root directory"; if (bch_btree_insert(c, BTREE_ID_INODES, @@ -1380,7 +1385,7 @@ static const char *run_cache_set(struct cache_set *c) now = get_seconds(); for_each_cache_rcu(ca, c, i) - c->disk_mi[ca->sb.nr_this_dev].last_mount = now; + c->disk_mi[ca->sb.nr_this_dev].last_mount = cpu_to_le32(now); bcache_write_super(c); @@ -1414,10 +1419,10 @@ err: static const char *can_add_cache(struct cache_sb *sb, struct cache_set *c) { - if (sb->block_size != c->sb.block_size) + if (le16_to_cpu(sb->block_size) != c->sb.block_size) return "mismatched block size"; - if (sb->members[le16_to_cpu(sb->nr_this_dev)].bucket_size < + if (le16_to_cpu(sb->members[sb->nr_this_dev].bucket_size) < CACHE_BTREE_NODE_SIZE(&c->disk_sb)) return "new cache bucket_size is too small"; @@ -2043,8 +2048,8 @@ int bch_cache_set_add_cache(struct cache_set *c, const char *path) * Preserve the old cache member information (esp. tier) * before we start bashing the disk stuff. 
*/ - mi = sb.sb->members[le16_to_cpu(sb.sb->nr_this_dev)]; - mi.last_mount = get_seconds(); + mi = sb.sb->members[sb.sb->nr_this_dev]; + mi.last_mount = cpu_to_le32(get_seconds()); down_read(&c->gc_lock); @@ -2077,9 +2082,9 @@ have_slot: if (bch_super_realloc(&sb, u64s)) goto err_unlock; - sb.sb->nr_this_dev = cpu_to_le16(nr_this_dev); - sb.sb->nr_in_set = cpu_to_le16(nr_in_set); - sb.sb->u64s = u64s; + sb.sb->nr_this_dev = nr_this_dev; + sb.sb->nr_in_set = nr_in_set; + sb.sb->u64s = cpu_to_le16(u64s); new_mi = dynamic_fault("bcache:add:member_info_realloc") ? NULL diff --git a/drivers/md/bcache/super.h b/drivers/md/bcache/super.h index b881cec06c42..9d537e1eb91c 100644 --- a/drivers/md/bcache/super.h +++ b/drivers/md/bcache/super.h @@ -101,14 +101,16 @@ u64 bch_checksum(unsigned, const void *, size_t); * This is used for various on disk data structures - cache_sb, prio_set, bset, * jset: The checksum is _always_ the first 8 bytes of these structs */ -#define csum_set(i, type) \ +#define __csum_set(i, u64s, type) \ ({ \ const void *start = ((const void *) (i)) + sizeof(u64); \ - const void *end = __bset_bkey_last(i); \ + const void *end = __bkey_idx(i, u64s); \ \ bch_checksum(type, start, end - start); \ }) +#define csum_set(i, type) __csum_set(i, (i)->u64s, type) + void bch_check_mark_super_slowpath(struct cache_set *, const struct bkey_i *, bool); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 3bbcdbc292c4..b7a44c522ac1 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -390,7 +390,7 @@ SHOW(bch_blockdev_volume) struct bcache_device *d = container_of(kobj, struct bcache_device, kobj); - sysfs_hprint(size, d->inode.v.i_inode.i_size); + sysfs_hprint(size, le64_to_cpu(d->inode.v.i_inode.i_size)); if (attr == &sysfs_label) { memcpy(buf, d->inode.v.i_label, SB_LABEL_SIZE); @@ -414,7 +414,7 @@ STORE(__bch_blockdev_volume) mutex_lock(&d->inode_lock); - if (v < d->inode.v.i_inode.i_size) { + if (v < le64_to_cpu(d->inode.v.i_inode.i_size) ){ ret = bch_inode_truncate(d->c, d->inode.k.p.inode, v >> 9, NULL); if (ret) { @@ -422,7 +422,7 @@ STORE(__bch_blockdev_volume) return ret; } } - d->inode.v.i_inode.i_size = v; + d->inode.v.i_inode.i_size = cpu_to_le64(v); ret = bch_inode_update(d->c, &d->inode.k_i, &journal_seq); mutex_unlock(&d->inode_lock); @@ -434,7 +434,7 @@ STORE(__bch_blockdev_volume) if (ret) return ret; - set_capacity(d->disk, d->inode.v.i_inode.i_size >> 9); + set_capacity(d->disk, v >> 9); } if (attr == &sysfs_label) { diff --git a/drivers/md/bcache/xattr.c b/drivers/md/bcache/xattr.c index c6cc55586481..650808cf1e3b 100644 --- a/drivers/md/bcache/xattr.c +++ b/drivers/md/bcache/xattr.c @@ -118,7 +118,8 @@ static void bch_xattr_to_text(struct cache_set *c, char *buf, size -= n; if (size) { - n = min_t(unsigned, size, xattr.v->x_val_len); + n = min_t(unsigned, size, + le16_to_cpu(xattr.v->x_val_len)); memcpy(buf, xattr_val(xattr.v), n); buf[size - 1] = '\0'; buf += n; @@ -157,13 +158,15 @@ int bch_xattr_get(struct cache_set *c, u64 inum, const char *name, /* collision? 
*/ if (!xattr_cmp(xattr, type, &qname)) { - ret = xattr->x_val_len; + unsigned len = le16_to_cpu(xattr->x_val_len); + + ret = len; if (buffer) { - if (xattr->x_val_len > size) + if (len > size) ret = -ERANGE; else memcpy(buffer, xattr_val(xattr), - xattr->x_val_len); + len); } goto out; } @@ -252,7 +255,7 @@ int bch_xattr_set(struct inode *inode, const char *name, xattr->k.p = k.k->p; xattr->v.x_type = type; xattr->v.x_name_len = qname.len; - xattr->v.x_val_len = size; + xattr->v.x_val_len = cpu_to_le16(size); memcpy(xattr->v.x_name, qname.name, qname.len); memcpy(xattr_val(&xattr->v), value, size); diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index f6081ad0bd6f..0049cd9b7d51 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -459,7 +459,7 @@ TRACE_EVENT(bcache_btree_write, TP_fast_assign( __entry->bucket = PTR_BUCKET_NR_TRACE(b->c, &b->key, 0); __entry->block = b->written; - __entry->u64s = b->keys.set[b->keys.nsets].data->u64s; + __entry->u64s = le16_to_cpu(b->keys.set[b->keys.nsets].data->u64s); ), TP_printk("bucket %llu block %u u64s %u", diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index d06fdeb8900c..516c1facb033 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -14,18 +14,44 @@ extern "C" { #include <asm/byteorder.h> #include <linux/uuid.h> -#define BITMASK(name, type, field, offset, end) \ +#define LE32_BITMASK(name, type, field, offset, end) \ static const unsigned name##_OFFSET = offset; \ static const unsigned name##_BITS = (end - offset); \ static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \ \ static inline __u64 name(const type *k) \ -{ return (k->field >> offset) & ~(~0ULL << (end - offset)); } \ +{ \ + return (__le32_to_cpu(k->field) >> offset) & \ + ~(~0ULL << (end - offset)); \ +} \ \ static inline void SET_##name(type *k, __u64 v) \ { \ - k->field &= ~(~(~0ULL << (end - offset)) << offset); \ - k->field |= (v & ~(~0ULL << (end - offset))) << offset; \ + __u64 new = __le32_to_cpu(k->field); \ + \ + new &= ~(~(~0ULL << (end - offset)) << offset); \ + new |= (v & ~(~0ULL << (end - offset))) << offset; \ + k->field = __cpu_to_le32(new); \ +} + +#define LE64_BITMASK(name, type, field, offset, end) \ +static const unsigned name##_OFFSET = offset; \ +static const unsigned name##_BITS = (end - offset); \ +static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \ + \ +static inline __u64 name(const type *k) \ +{ \ + return (__le64_to_cpu(k->field) >> offset) & \ + ~(~0ULL << (end - offset)); \ +} \ + \ +static inline void SET_##name(type *k, __u64 v) \ +{ \ + __u64 new = __le64_to_cpu(k->field); \ + \ + new &= ~(~(~0ULL << (end - offset)) << offset); \ + new |= (v & ~(~0ULL << (end - offset))) << offset; \ + k->field = __cpu_to_le64(new); \ } struct bkey_format { @@ -33,7 +59,7 @@ struct bkey_format { __u8 nr_fields; /* One unused slot for now: */ __u8 bits_per_field[6]; - __u64 field_offset[6]; + __le64 field_offset[6]; }; /* Btree keys - all units are in sectors */ @@ -237,7 +263,7 @@ struct bkey_i_##name { \ struct bch_cookie { struct bch_val v; - __u64 cookie; + __le64 cookie; }; BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE); @@ -448,22 +474,22 @@ enum { struct bch_inode { struct bch_val v; - __u16 i_mode; - __u16 pad; - __u32 i_flags; + __le16 i_mode; + __le16 pad; + __le32 i_flags; /* Nanoseconds */ - __s64 i_atime; - __s64 i_ctime; - __s64 i_mtime; + __le64 i_atime; + __le64 i_ctime; + __le64 i_mtime; - __u64 i_size; + __le64 i_size; - __u32 i_uid; - 
@@ -448,22 +474,22 @@ enum {
 struct bch_inode {
 	struct bch_val	v;
 
-	__u16		i_mode;
-	__u16		pad;
-	__u32		i_flags;
+	__le16		i_mode;
+	__le16		pad;
+	__le32		i_flags;
 
 	/* Nanoseconds */
-	__s64		i_atime;
-	__s64		i_ctime;
-	__s64		i_mtime;
+	__le64		i_atime;
+	__le64		i_ctime;
+	__le64		i_mtime;
 
-	__u64		i_size;
+	__le64		i_size;
 
-	__u32		i_uid;
-	__u32		i_gid;
-	__u32		i_nlink;
+	__le32		i_uid;
+	__le32		i_gid;
+	__le32		i_nlink;
 
-	__u32		i_dev;
+	__le32		i_dev;
 } __attribute__((packed));
 BKEY_VAL_TYPE(inode, BCH_INODE_FS);
@@ -498,7 +524,7 @@ struct bch_dirent {
 	struct bch_val	v;
 
 	/* Target inode number: */
-	__u64		d_inum;
+	__le64		d_inum;
 
 	/*
 	 * Copy of mode bits 12-15 from the target inode - so userspace can get
@@ -527,7 +553,7 @@ struct bch_xattr {
 	struct bch_val	v;
 	__u8		x_type;
 	__u8		x_name_len;
-	__u16		x_val_len;
+	__le16		x_val_len;
 	__u8		x_name[];
 } __attribute__((packed));
 BKEY_VAL_TYPE(xattr, BCH_XATTR);
@@ -559,43 +585,43 @@ BKEY_VAL_TYPE(xattr, BCH_XATTR);
 
 struct cache_member {
 	uuid_le		uuid;
-	__u64		nbuckets;	/* device size */
-	__u16		first_bucket;	/* index of first bucket used */
-	__u16		bucket_size;	/* sectors */
-	__u32		last_mount;	/* time_t */
+	__le64		nbuckets;	/* device size */
+	__le16		first_bucket;	/* index of first bucket used */
+	__le16		bucket_size;	/* sectors */
+	__le32		last_mount;	/* time_t */
 
-	__u64		f1;
-	__u64		f2;
+	__le64		f1;
+	__le64		f2;
 };
 
-BITMASK(CACHE_STATE,			struct cache_member, f1, 0,  4)
+LE64_BITMASK(CACHE_STATE,		struct cache_member, f1, 0,  4)
 #define CACHE_ACTIVE			0U
 #define CACHE_RO			1U
 #define CACHE_FAILED			2U
 #define CACHE_SPARE			3U
 
-BITMASK(CACHE_TIER,			struct cache_member, f1, 4,  8)
+LE64_BITMASK(CACHE_TIER,		struct cache_member, f1, 4,  8)
 #define CACHE_TIERS			4U
 
-BITMASK(CACHE_REPLICATION_SET,		struct cache_member, f1, 8,  16)
+LE64_BITMASK(CACHE_REPLICATION_SET,	struct cache_member, f1, 8,  16)
 
-BITMASK(CACHE_HAS_METADATA,		struct cache_member, f1, 24, 25)
-BITMASK(CACHE_HAS_DATA,			struct cache_member, f1, 25, 26)
+LE64_BITMASK(CACHE_HAS_METADATA,	struct cache_member, f1, 24, 25)
+LE64_BITMASK(CACHE_HAS_DATA,		struct cache_member, f1, 25, 26)
 
-BITMASK(CACHE_REPLACEMENT,		struct cache_member, f1, 26, 30)
+LE64_BITMASK(CACHE_REPLACEMENT,		struct cache_member, f1, 26, 30)
 #define CACHE_REPLACEMENT_LRU		0U
 #define CACHE_REPLACEMENT_FIFO		1U
 #define CACHE_REPLACEMENT_RANDOM	2U
 
-BITMASK(CACHE_DISCARD,			struct cache_member, f1, 30, 31);
+LE64_BITMASK(CACHE_DISCARD,		struct cache_member, f1, 30, 31);
 
-BITMASK(CACHE_NR_READ_ERRORS,		struct cache_member, f2, 0,  20);
-BITMASK(CACHE_NR_WRITE_ERRORS,		struct cache_member, f2, 20, 40);
+LE64_BITMASK(CACHE_NR_READ_ERRORS,	struct cache_member, f2, 0,  20);
+LE64_BITMASK(CACHE_NR_WRITE_ERRORS,	struct cache_member, f2, 20, 40);
 
 struct cache_sb {
-	__u64		csum;
-	__u64		offset;		/* sector where this sb was written */
-	__u64		version;	/* of on disk format */
+	__le64		csum;
+	__le64		offset;		/* sector where this sb was written */
+	__le64		version;	/* of on disk format */
 
 	uuid_le		magic;		/* bcache superblock UUID */
 
@@ -608,22 +634,22 @@ struct cache_sb {
 	 */
 	union {
 		uuid_le		set_uuid;
-		__u64		set_magic;
+		__le64		set_magic;
 	};
 	__u8		label[SB_LABEL_SIZE];
 
-	__u64		flags;
+	__le64		flags;
 
 	/* Incremented each time superblock is written: */
-	__u64		seq;
+	__le64		seq;
 
 	/*
	 * User visible UUID for identifying the cache set the user is allowed
 	 * to change:
 	 */
 	uuid_le		user_uuid;
-	__u64		pad1[6];
+	__le64		pad1[6];
 
 	/* Number of cache_member entries: */
 	__u8		nr_in_set;
@@ -633,12 +659,12 @@ struct cache_sb {
 	 * slot in the cache_member array:
 	 */
 	__u8		nr_this_dev;
-	__u16		pad2[3];
+	__le16		pad2[3];
 
 	__le16		block_size;	/* sectors */
-	__u16		pad3[6];
+	__le16		pad3[6];
 
-	__u16		u64s;	/* size of variable length portion */
+	__le16		u64s;
 
 	union {
 		struct cache_member	members[0];
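With the on-disk structs now declared in __le* types, every CPU-side access pays an explicit conversion, exactly as the xattr hunk above does for x_val_len. A sketch of the same discipline applied to the inode fields (hypothetical helper, for illustration only):

#include <asm/byteorder.h>
#include <linux/bcache.h>

/* Hypothetical helper: stamp the timestamps of a new inode, converting once. */
static void bch_inode_set_times(struct bch_inode *bi, __s64 now_ns)
{
	__le64 now = __cpu_to_le64(now_ns);

	bi->i_atime = now;
	bi->i_ctime = now;
	bi->i_mtime = now;
}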
@@ -646,37 +672,37 @@ struct cache_sb {
 		/*
 		 * Journal buckets also in the variable length portion, after the
 		 * member info:
 		 */
-		__u64			_data[0];
+		__le64			_data[0];
 	};
 };
 
-BITMASK(CACHE_SYNC,			struct cache_sb, flags, 0, 1);
+LE64_BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
 
-BITMASK(CACHE_ERROR_ACTION,		struct cache_sb, flags, 1, 4);
+LE64_BITMASK(CACHE_ERROR_ACTION,	struct cache_sb, flags, 1, 4);
 #define BCH_ON_ERROR_CONTINUE		0U
 #define BCH_ON_ERROR_RO			1U
 #define BCH_ON_ERROR_PANIC		2U
 #define BCH_NR_ERROR_ACTIONS		3U
 
-BITMASK(CACHE_SET_META_REPLICAS_WANT,	struct cache_sb, flags, 4, 8);
-BITMASK(CACHE_SET_DATA_REPLICAS_WANT,	struct cache_sb, flags, 8, 12);
+LE64_BITMASK(CACHE_SET_META_REPLICAS_WANT,struct cache_sb, flags, 4, 8);
+LE64_BITMASK(CACHE_SET_DATA_REPLICAS_WANT,struct cache_sb, flags, 8, 12);
 #define BCH_REPLICAS_MAX		4U
 
-BITMASK(CACHE_SB_CSUM_TYPE,		struct cache_sb, flags, 12, 16);
+LE64_BITMASK(CACHE_SB_CSUM_TYPE,	struct cache_sb, flags, 12, 16);
 
-BITMASK(CACHE_META_PREFERRED_CSUM_TYPE,	struct cache_sb, flags, 16, 20);
+LE64_BITMASK(CACHE_META_PREFERRED_CSUM_TYPE,struct cache_sb, flags, 16, 20);
 #define BCH_CSUM_NONE			0U
 #define BCH_CSUM_CRC32C			1U
 #define BCH_CSUM_CRC64			2U
 #define BCH_CSUM_NR			3U
 
-BITMASK(CACHE_BTREE_NODE_SIZE,		struct cache_sb, flags, 20, 36);
+LE64_BITMASK(CACHE_BTREE_NODE_SIZE,	struct cache_sb, flags, 20, 36);
 
-BITMASK(CACHE_SET_META_REPLICAS_HAVE,	struct cache_sb, flags, 36, 40);
-BITMASK(CACHE_SET_DATA_REPLICAS_HAVE,	struct cache_sb, flags, 40, 44);
+LE64_BITMASK(CACHE_SET_META_REPLICAS_HAVE,struct cache_sb, flags, 36, 40);
+LE64_BITMASK(CACHE_SET_DATA_REPLICAS_HAVE,struct cache_sb, flags, 40, 44);
 
-BITMASK(CACHE_SET_DIRENT_CSUM_TYPE,	struct cache_sb, flags, 44, 48);
+LE64_BITMASK(CACHE_SET_DIRENT_CSUM_TYPE,struct cache_sb, flags, 44, 48);
 enum {
 	BCH_DIRENT_CSUM_CRC32C		= 0,
 	BCH_DIRENT_CSUM_CRC64		= 1,
@@ -684,9 +710,9 @@ enum {
 	BCH_DIRENT_CSUM_SHA1		= 3,
 };
 
-BITMASK(CACHE_DATA_PREFERRED_CSUM_TYPE,	struct cache_sb, flags, 48, 52);
+LE64_BITMASK(CACHE_DATA_PREFERRED_CSUM_TYPE, struct cache_sb, flags, 48, 52);
 
-BITMASK(CACHE_COMPRESSION_TYPE,		struct cache_sb, flags, 52, 56);
+LE64_BITMASK(CACHE_COMPRESSION_TYPE,	struct cache_sb, flags, 52, 56);
 enum {
 	BCH_COMPRESSION_NONE		= 0,
 	BCH_COMPRESSION_LZO1X		= 1,
@@ -752,9 +778,9 @@ enum {
 /* backing device specific stuff: */
 
 struct backingdev_sb {
-	__u64		csum;
-	__u64		offset;		/* sector where this sb was written */
-	__u64		version;	/* of on disk format */
+	__le64		csum;
+	__le64		offset;		/* sector where this sb was written */
+	__le64		version;	/* of on disk format */
 
 	uuid_le		magic;		/* bcache superblock UUID */
 
@@ -766,14 +792,14 @@ struct backingdev_sb {
 	 */
 	union {
 		uuid_le		set_uuid;
-		__u64		set_magic;
+		__le64		set_magic;
 	};
 	__u8		label[SB_LABEL_SIZE];
 
-	__u64		flags;
+	__le64		flags;
 
 	/* Incremented each time superblock is written: */
-	__u64		seq;
+	__le64		seq;
 
 	/*
 	 * User visible UUID for identifying the cache set the user is allowed
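Since the regenerated accessors return CPU-order values, callers compare them directly against the plain #defined constants; no further byte-swapping appears at call sites. A hypothetical validation helper, shown only to illustrate the calling convention:

#include <linux/bcache.h>

/* Hypothetical: sanity-check the flag fields of a cache superblock. */
static _Bool cache_sb_flags_valid(const struct cache_sb *sb)
{
	return CACHE_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS &&
	       CACHE_SET_META_REPLICAS_WANT(sb) <= BCH_REPLICAS_MAX &&
	       CACHE_SET_DATA_REPLICAS_WANT(sb) <= BCH_REPLICAS_MAX &&
	       CACHE_SB_CSUM_TYPE(sb) < BCH_CSUM_NR;
}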
@@ -782,26 +808,26 @@
 	 * XXX hooked up?
 	 */
 	uuid_le		user_uuid;
-	__u64		pad1[6];
+	__le64		pad1[6];
 
-	__u64		data_offset;
-	__u16		block_size;	/* sectors */
-	__u16		pad2[3];
+	__le64		data_offset;
+	__le16		block_size;	/* sectors */
+	__le16		pad2[3];
 
-	__u32		last_mount;	/* time_t */
-	__u16		pad3;
+	__le32		last_mount;	/* time_t */
+	__le16		pad3;
 
 	/* size of variable length portion - always 0 for backingdev superblock */
-	__u16		u64s;
+	__le16		u64s;
 	__u64		_data[0];
 };
 
-BITMASK(BDEV_CACHE_MODE,		struct backingdev_sb, flags, 0, 4);
+LE64_BITMASK(BDEV_CACHE_MODE,		struct backingdev_sb, flags, 0, 4);
 #define CACHE_MODE_WRITETHROUGH		0U
 #define CACHE_MODE_WRITEBACK		1U
 #define CACHE_MODE_WRITEAROUND		2U
 #define CACHE_MODE_NONE			3U
 
-BITMASK(BDEV_STATE,			struct backingdev_sb, flags, 61, 63);
+LE64_BITMASK(BDEV_STATE,		struct backingdev_sb, flags, 61, 63);
 #define BDEV_STATE_NONE			0U
 #define BDEV_STATE_CLEAN		1U
 #define BDEV_STATE_DIRTY		2U
@@ -814,7 +840,7 @@ static inline unsigned bch_journal_buckets_offset(struct cache_sb *sb)
 
 static inline unsigned bch_nr_journal_buckets(struct cache_sb *sb)
 {
-	return le16_to_cpu(sb->u64s) - bch_journal_buckets_offset(sb);
+	return __le16_to_cpu(sb->u64s) - bch_journal_buckets_offset(sb);
 }
 
 static inline _Bool __SB_IS_BDEV(__u64 version)
@@ -849,17 +875,17 @@ static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
 
 static inline __u64 jset_magic(struct cache_sb *sb)
 {
-	return sb->set_magic ^ JSET_MAGIC;
+	return __le64_to_cpu(sb->set_magic) ^ JSET_MAGIC;
 }
 
 static inline __u64 pset_magic(struct cache_sb *sb)
 {
-	return sb->set_magic ^ PSET_MAGIC;
+	return __le64_to_cpu(sb->set_magic) ^ PSET_MAGIC;
 }
 
 static inline __u64 bset_magic(struct cache_sb *sb)
 {
-	return sb->set_magic ^ BSET_MAGIC;
+	return __le64_to_cpu(sb->set_magic) ^ BSET_MAGIC;
 }
 
 /*
@@ -881,10 +907,10 @@ static inline __u64 bset_magic(struct cache_sb *sb)
 #define BCACHE_JSET_VERSION		2
 
 struct jset_entry {
-	__u16		u64s;
+	__le16		u64s;
 	__u8		btree_id;
 	__u8		level;
-	__u32		flags;		/* designates what this jset holds */
+	__le32		flags;		/* designates what this jset holds */
 
 	union {
 		struct bkey_i	start[0];
@@ -894,8 +920,7 @@ struct jset_entry {
 
 #define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))
 
-
-BITMASK(JKEYS_TYPE,	struct jset_entry, flags, 0, 8);
+LE32_BITMASK(JKEYS_TYPE, struct jset_entry, flags, 0, 8);
 enum {
 	JKEYS_BTREE_KEYS	= 0,
 	JKEYS_BTREE_ROOT	= 1,
@@ -915,18 +940,18 @@ enum {
 };
 
 struct jset {
-	__u64		csum;
-	__u64		magic;
-	__u32		version;
-	__u32		flags;
+	__le64		csum;
+	__le64		magic;
+	__le32		version;
+	__le32		flags;
 
 	/* Sequence number of oldest dirty journal entry */
-	__u64		seq;
-	__u64		last_seq;
+	__le64		seq;
+	__le64		last_seq;
 
-	__u16		read_clock;
-	__u16		write_clock;
-	__u32		u64s;		/* size of d[] in u64s */
+	__le16		read_clock;
+	__le16		write_clock;
+	__le32		u64s;		/* size of d[] in u64s */
 
 	union {
 		struct jset_entry start[0];
@@ -934,26 +959,26 @@ struct jset {
 	};
 };
 
-BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
+LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
 
 /* Bucket prios/gens */
 
 struct prio_set {
-	__u64		csum;
-	__u64		magic;
-	__u32		version;
-	__u32		flags;
+	__le64		csum;
+	__le64		magic;
+	__le32		version;
+	__le32		flags;
 
-	__u64		next_bucket;
+	__le64		next_bucket;
 
 	struct bucket_disk {
-		__u16	read_prio;
-		__u16	write_prio;
+		__le16	read_prio;
+		__le16	write_prio;
 		__u8	gen;
 	} __attribute__((packed)) data[];
 };
 
-BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);
+LE32_BITMASK(PSET_CSUM_TYPE, struct prio_set, flags, 0, 4);
 
 /* Btree: */
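struct jset_entry above is variable-length: u64s counts the __u64s of key payload that follow the fixed header, and it now lives on disk as __le16. A sketch of how iteration might step to the next entry under that convention (the helper name is hypothetical; JSET_KEYS_U64s is defined above):

#include <asm/byteorder.h>
#include <linux/bcache.h>

/* Hypothetical: advance past one journal entry header plus its payload. */
static inline struct jset_entry *jset_entry_next(struct jset_entry *e)
{
	return (struct jset_entry *)
		((__u64 *) e + JSET_KEYS_U64s + __le16_to_cpu(e->u64s));
}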
@@ -990,7 +1015,7 @@ enum btree_id {
  * sorted
  */
 struct bset {
-	__u64		seq;
+	__le64		seq;
 
 	/*
 	 * Highest journal entry this bset contains keys for.
@@ -999,11 +1024,11 @@ struct bset {
 	 * crash, since the journal records a total order of all index updates
 	 * and anything that didn't make it to the journal doesn't get used.
 	 */
-	__u64		journal_seq;
+	__le64		journal_seq;
 
-	__u32		flags;
-	__u16		version;
-	__u16		u64s;	/* count of d[] in u64s */
+	__le32		flags;
+	__le16		version;
+	__le16		u64s;	/* count of d[] in u64s */
 
 	union {
 		struct bkey_packed start[0];
@@ -1011,14 +1036,14 @@ struct bset {
 	};
 } __attribute__((packed));
 
-BITMASK(BSET_CSUM_TYPE,		struct bset, flags, 0, 4);
+LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);
 
 /* Only used in first bset */
-BITMASK(BSET_BTREE_LEVEL,	struct bset, flags, 4, 8);
+LE32_BITMASK(BSET_BTREE_LEVEL,	struct bset, flags, 4, 8);
 
 struct btree_node {
-	__u64		csum;
-	__u64		magic;
+	__le64		csum;
+	__le64		magic;
 
 	/* Closed interval: */
 	struct bpos	min_key;
@@ -1029,12 +1054,26 @@
 } __attribute__((packed));
 
 struct btree_node_entry {
-	__u64		csum;
+	__le64		csum;
 	struct bset	keys;
 } __attribute__((packed));
 
 /* OBSOLETE */
 
+#define BITMASK(name, type, field, offset, end)				\
+static const unsigned	name##_OFFSET = offset;				\
+static const unsigned	name##_BITS = (end - offset);			\
+static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
+									\
+static inline __u64 name(const type *k)				\
+{ return (k->field >> offset) & ~(~0ULL << (end - offset)); }		\
+									\
+static inline void SET_##name(type *k, __u64 v)			\
+{									\
+	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
+	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
+}
+
 struct bkey_v0 {
 	__u64		high;
 	__u64		low;
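Note that the BITMASK() re-added under /* OBSOLETE */ is deliberately the old native-endian flavor: it is kept only for the obsolete pre-conversion structures, which are never interpreted on a foreign-endian host. Fields that persist across hosts go through the LE variants instead, e.g. (illustrative helper, not part of the patch):

#include <linux/bcache.h>

/* Hypothetical: validate the checksum type recorded in an on-disk bset. */
static inline _Bool bset_csum_type_valid(const struct bset *i)
{
	return BSET_CSUM_TYPE(i) < BCH_CSUM_NR;
}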