author	Kent Overstreet <kent.overstreet@gmail.com>	2017-12-16 15:13:33 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2018-05-22 00:44:18 -0400
commit	fa3331c26d2e014e942859d70ba7852ec2fd5cd3 (patch)
tree	ffab4b3c21ba83381537f9f11ef125ffdac91316
parent	e6b76c5588b2400ee657688dc9bd62429a6ab61f (diff)
bcachefs: fix various sparse issues
some real endianness bugs, minor rcu issues - checksum endianness is still sketchy
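For orientation before the diff: most of the endianness fixes below follow one sparse-driven pattern, sketched here with an illustrative struct and function names that are not from this patch. On-disk integers carry bitwise types (__le64 and friends), and every CPU-side use converts explicitly, so raw comparisons or arithmetic on the annotated value get flagged:

	#include <linux/types.h>	/* __le64, u64 */
	#include <asm/byteorder.h>	/* le64_to_cpu(), cpu_to_le64() */

	/* hypothetical on-disk structure, for illustration only */
	struct example_ondisk {
		__le64		offset;		/* always little-endian on disk */
	};

	static u64 example_read_offset(const struct example_ondisk *d)
	{
		/* convert at the point of use; correct on any host endianness */
		return le64_to_cpu(d->offset);
	}

	static void example_write_offset(struct example_ondisk *d, u64 v)
	{
		d->offset = cpu_to_le64(v);
	}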
-rw-r--r--	fs/bcachefs/alloc.c	14
-rw-r--r--	fs/bcachefs/bcachefs.h	4
-rw-r--r--	fs/bcachefs/bcachefs_format.h	7
-rw-r--r--	fs/bcachefs/bkey.c	5
-rw-r--r--	fs/bcachefs/bkey.h	3
-rw-r--r--	fs/bcachefs/btree_gc.c	18
-rw-r--r--	fs/bcachefs/btree_io.c	13
-rw-r--r--	fs/bcachefs/btree_iter.c	4
-rw-r--r--	fs/bcachefs/btree_locking.h	2
-rw-r--r--	fs/bcachefs/btree_update_interior.c	3
-rw-r--r--	fs/bcachefs/buckets.c	6
-rw-r--r--	fs/bcachefs/buckets_types.h	6
-rw-r--r--	fs/bcachefs/chardev.c	5
-rw-r--r--	fs/bcachefs/checksum.h	8
-rw-r--r--	fs/bcachefs/extents.c	26
-rw-r--r--	fs/bcachefs/extents.h	6
-rw-r--r--	fs/bcachefs/fs.c	4
-rw-r--r--	fs/bcachefs/fsck.c	10
-rw-r--r--	fs/bcachefs/inode.c	2
-rw-r--r--	fs/bcachefs/io.c	12
-rw-r--r--	fs/bcachefs/journal.c	12
-rw-r--r--	fs/bcachefs/super-io.c	18
-rw-r--r--	fs/bcachefs/super.c	77
-rw-r--r--	fs/bcachefs/super.h	20
-rw-r--r--	fs/bcachefs/sysfs.c	2
-rw-r--r--	fs/bcachefs/tier.c	3
-rw-r--r--	fs/bcachefs/vstructs.h	8
27 files changed, 166 insertions, 132 deletions
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index 0e2b874243ed..4d203e8f57df 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -257,7 +257,7 @@ static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
return;
a = bkey_s_c_to_alloc(k);
- ca = c->devs[a.k->p.inode];
+ ca = bch_dev_bkey_exists(c, a.k->p.inode);
if (a.k->p.offset >= ca->mi.nbuckets)
return;
@@ -368,7 +368,7 @@ int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
return 0;
- ca = c->devs[pos.inode];
+ ca = bch_dev_bkey_exists(c, pos.inode);
if (pos.offset >= ca->mi.nbuckets)
return 0;
@@ -461,7 +461,7 @@ static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
/* Bucket heap / gen */
-void bch2_recalc_min_prio(struct bch_fs *c, struct bch_dev *ca, int rw)
+static void bch2_recalc_min_prio(struct bch_fs *c, struct bch_dev *ca, int rw)
{
struct prio_clock *clock = &c->prio_clock[rw];
struct bucket *g;
@@ -975,7 +975,7 @@ static int bch2_allocator_thread(void *arg)
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
- struct bch_dev *ca = c->devs[ob->ptr.dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
spin_lock(&ob->lock);
bch2_mark_alloc_bucket(c, ca, PTR_BUCKET(ca, &ob->ptr), false,
@@ -1303,7 +1303,7 @@ static void writepoint_drop_ptrs(struct bch_fs *c,
for (i = wp->nr_ptrs - 1; i >= 0; --i) {
struct open_bucket *ob = wp->ptrs[i];
- struct bch_dev *ca = c->devs[ob->ptr.dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
if (nr_ptrs_dislike && !test_bit(ob->ptr.dev, devs->d)) {
BUG_ON(ca->open_buckets_partial_nr >=
@@ -1331,7 +1331,7 @@ static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
unsigned i;
writepoint_for_each_ptr(wp, ob, i) {
- struct bch_dev *ca = c->devs[ob->ptr.dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
BUG_ON(ptr_stale(ca, &ob->ptr));
}
@@ -1537,7 +1537,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
for (i = 0; i < wp->nr_ptrs_can_use; i++) {
struct open_bucket *ob = wp->ptrs[i];
- struct bch_dev *ca = c->devs[ob->ptr.dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
struct bch_extent_ptr tmp = ob->ptr;
EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));
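Every c->devs[...] read converted in this file (and in the files below) becomes bch_dev_bkey_exists(), defined in the super.h hunk near the end of the patch: c->devs[] is an RCU-protected array, and a plain indexed read is exactly what sparse's __rcu checking complains about. A condensed call-site sketch, assuming the helper as defined below (example_mark_ptr is illustrative, not from this patch):

	#include <linux/printk.h>

	static void example_mark_ptr(struct bch_fs *c,
				     const struct bch_extent_ptr *ptr)
	{
		/*
		 * The key holding this pointer keeps the device alive, so
		 * the accessor uses rcu_dereference_check(..., 1) internally
		 * and callers don't need rcu_read_lock():
		 */
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		pr_debug("dev %u bucket size %u\n", ptr->dev, ca->mi.bucket_size);
	}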
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 07aecb9c828d..d472b3b81bfd 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -326,9 +326,9 @@ struct io_count {
struct bch_dev {
struct kobject kobj;
struct percpu_ref ref;
+ struct completion ref_completion;
struct percpu_ref io_ref;
- struct completion stop_complete;
- struct completion offline_complete;
+ struct completion io_ref_completion;
struct bch_fs *fs;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index e0af6ead47b2..6e0e0452bbb5 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -800,7 +800,7 @@ struct bch_sb_layout {
__u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
__u8 nr_superblocks;
__u8 pad[5];
- __u64 sb_offset[61];
+ __le64 sb_offset[61];
} __attribute__((packed, aligned(8)));
#define BCH_SB_LAYOUT_SECTOR 7
@@ -1095,6 +1095,11 @@ struct jset_entry {
};
};
+struct jset_entry_blacklist {
+ struct jset_entry entry;
+ __le64 seq;
+};
+
#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
enum {
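The new jset_entry_blacklist wrapper gives the blacklist payload a typed __le64 field; the journal.c hunk further down then reads it via container_of() instead of indexing the untyped _data[] array. The access pattern, condensed into a hypothetical helper:

	#include <linux/kernel.h>	/* container_of() */

	static u64 example_blacklist_seq(struct jset_entry *entry)
	{
		struct jset_entry_blacklist *bl =
			container_of(entry, struct jset_entry_blacklist, entry);

		return le64_to_cpu(bl->seq);
	}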
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index 73089a90f486..3c02afc06fb0 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -336,7 +336,8 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
* Extents - we have to guarantee that if an extent is packed, a trimmed
* version will also pack:
*/
- if (bkey_start_offset(in) < format->field_offset[BKEY_FIELD_OFFSET])
+ if (bkey_start_offset(in) <
+ le64_to_cpu(format->field_offset[BKEY_FIELD_OFFSET]))
return false;
pack_state_finish(&state, out);
@@ -800,7 +801,7 @@ static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
bool *eax_zeroed)
{
unsigned bits = format->bits_per_field[field];
- u64 offset = format->field_offset[field];
+ u64 offset = le64_to_cpu(format->field_offset[field]);
unsigned i, byte, bit_offset, align, shl, shr;
if (!bits && !offset) {
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index dc0b88f75ebe..44a724d1818a 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -377,7 +377,8 @@ static inline u64 bkey_field_max(const struct bkey_format *f,
enum bch_bkey_fields nr)
{
return f->bits_per_field[nr] < 64
- ? f->field_offset[nr] + ~(~0ULL << f->bits_per_field[nr])
+ ? (le64_to_cpu(f->field_offset[nr]) +
+ ~(~0ULL << f->bits_per_field[nr]))
: U64_MAX;
}
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 1198fe39c100..ff6273737916 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -96,7 +96,7 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = c->devs[ptr->dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr);
if (gen_after(ca->oldest_gens[b], ptr->gen))
@@ -166,7 +166,7 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
}
extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = c->devs[ptr->dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_BUCKET(ca, ptr);
if (mustfix_fsck_err_on(!g->mark.gen_valid, c,
@@ -315,14 +315,14 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
lockdep_assert_held(&c->sb_lock);
for (i = 0; i < layout->nr_superblocks; i++) {
- if (layout->sb_offset[i] == BCH_SB_SECTOR)
+ u64 offset = le64_to_cpu(layout->sb_offset[i]);
+
+ if (offset == BCH_SB_SECTOR)
mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
BUCKET_SB, flags);
- mark_metadata_sectors(c, ca,
- layout->sb_offset[i],
- layout->sb_offset[i] +
- (1 << layout->sb_max_size_bits),
+ mark_metadata_sectors(c, ca, offset,
+ offset + (1 << layout->sb_max_size_bits),
BUCKET_SB, flags);
}
@@ -414,7 +414,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
spin_lock(&ob->lock);
if (ob->valid) {
gc_pos_set(c, gc_pos_alloc(c, ob));
- ca = c->devs[ob->ptr.dev];
+ ca = bch_dev_bkey_exists(c, ob->ptr.dev);
bch2_mark_alloc_bucket(c, ca, PTR_BUCKET(ca, &ob->ptr), true,
gc_pos_alloc(c, ob),
BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
@@ -424,7 +424,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
}
}
-void bch2_gc_start(struct bch_fs *c)
+static void bch2_gc_start(struct bch_fs *c)
{
struct bch_dev *ca;
struct bucket *g;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 974b436db8d9..9dc58b5d786a 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -556,7 +556,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
struct bset_tree *t;
struct bset *start_bset = bset(b, &b->set[start_idx]);
bool used_mempool = false;
- u64 start_time;
+ u64 start_time, seq = 0;
unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
bool sorting_entire_node = start_idx == 0 &&
end_idx == b->nsets;
@@ -595,12 +595,9 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
bch2_time_stats_update(&c->btree_sort_time, start_time);
/* Make sure we preserve bset journal_seq: */
- for (t = b->set + start_idx + 1;
- t < b->set + end_idx;
- t++)
- start_bset->journal_seq =
- max(start_bset->journal_seq,
- bset(b, t)->journal_seq);
+ for (t = b->set + start_idx; t < b->set + end_idx; t++)
+ seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
+ start_bset->journal_seq = cpu_to_le64(seq);
if (sorting_entire_node) {
unsigned u64s = le16_to_cpu(out->keys.u64s);
@@ -1025,7 +1022,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
if (!BSET_SEPARATE_WHITEOUTS(i)) {
seen_non_whiteout = true;
- whiteout_u64s = 0;
+ *whiteout_u64s = 0;
}
for (k = i->start;
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index b0e64957d493..819b8efc5fd8 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -75,8 +75,8 @@ bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
struct btree_iter *linked;
struct btree *b = iter->nodes[level];
- enum btree_node_locked_type want = btree_lock_want(iter, level);
- enum btree_node_locked_type have = btree_node_locked_type(iter, level);
+ int want = btree_lock_want(iter, level);
+ int have = btree_node_locked_type(iter, level);
if (want == have)
return true;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index c2711892d7f3..a000306228fa 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -75,7 +75,7 @@ static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}
-static inline int btree_lock_want(struct btree_iter *iter, int level)
+static inline enum six_lock_type btree_lock_want(struct btree_iter *iter, int level)
{
return level < iter->locks_want
? SIX_LOCK_intent
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 1fe8fff8eb07..09f515f4dace 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -925,7 +925,8 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
* in with keys that aren't in the journal anymore:
*/
for_each_bset(b, t)
- as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
+ as->journal_seq = max(as->journal_seq,
+ le64_to_cpu(bset(b, t)->journal_seq));
mutex_lock(&c->btree_interior_update_lock);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index b73002def9fc..f0a63232093a 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -174,9 +174,11 @@ do { \
#define bch2_usage_read_raw(_stats) \
({ \
- typeof(*this_cpu_ptr(_stats)) _acc = { 0 }; \
+ typeof(*this_cpu_ptr(_stats)) _acc; \
int cpu; \
\
+ memset(&_acc, 0, sizeof(_acc)); \
+ \
for_each_possible_cpu(cpu) \
bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \
\
@@ -479,7 +481,7 @@ static void bch2_mark_pointer(struct bch_fs *c,
{
struct bucket_mark old, new;
unsigned saturated;
- struct bch_dev *ca = c->devs[ptr->dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
unsigned data_type = type == S_META
? BUCKET_BTREE : BUCKET_DATA;
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 0bd8d2d8f5bc..6f9b12265df3 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -68,16 +68,14 @@ struct bch_dev_usage {
struct bch_fs_usage {
/* all fields are in units of 512 byte sectors: */
-
/* _uncompressed_ sectors: */
+ u64 online_reserved;
+ u64 available_cache;
struct {
u64 data[S_ALLOC_NR];
u64 persistent_reserved;
} s[BCH_REPLICAS_MAX];
-
- u64 online_reserved;
- u64 available_cache;
};
/*
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index d9a3212c7e09..24af2ca1620e 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "bcachefs_ioctl.h"
+#include "chardev.h"
#include "super.h"
#include "super-io.h"
@@ -25,7 +26,7 @@ static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
return ERR_PTR(-EINVAL);
rcu_read_lock();
- ca = c->devs[dev];
+ ca = rcu_dereference(c->devs[dev]);
if (ca)
percpu_ref_get(&ca->ref);
rcu_read_unlock();
@@ -80,7 +81,7 @@ static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
- if (copy_from_user(user_devs, arg.devs,
+ if (copy_from_user(user_devs, user_arg->devs,
sizeof(u64) * arg.nr_devs))
goto err;
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 17b617c66a21..b0c8a50e7c13 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -144,6 +144,14 @@ static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
return nonce;
}
+static inline struct nonce null_nonce(void)
+{
+ struct nonce ret;
+
+ memset(&ret, 0, sizeof(ret));
+ return ret;
+}
+
static inline struct nonce extent_nonce(struct bversion version,
struct bch_extent_crc_unpacked crc)
{
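null_nonce() replaces the (struct nonce) { 0 } compound literals in super-io.c (see those hunks below); struct nonce wraps __le32 words, and zeroing via memset() avoids any literal-initializer conversion for sparse to object to (compare the { 0 } -> { NULL } change in super.c, another initializer complaint). A sketch of the same trick on a hypothetical struct:

	#include <linux/types.h>
	#include <linux/string.h>	/* memset() */

	struct example_nonce { __le32 d[4]; };	/* assumed layout, mirroring struct nonce */

	static struct example_nonce example_zero_nonce(void)
	{
		struct example_nonce ret;

		/* byte-wise zeroing: no plain-integer assignment into __le32 */
		memset(&ret, 0, sizeof(ret));
		return ret;
	}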
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index e9f30ba70f83..985f980c95d0 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -18,6 +18,7 @@
#include "extents.h"
#include "inode.h"
#include "journal.h"
+#include "super.h"
#include "super-io.h"
#include "util.h"
#include "xattr.h"
@@ -362,7 +363,7 @@ static bool should_drop_ptr(const struct bch_fs *c,
struct bkey_s_c_extent e,
const struct bch_extent_ptr *ptr)
{
- return ptr->cached && ptr_stale(c->devs[ptr->dev], ptr);
+ return ptr->cached && ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr);
}
static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
@@ -411,8 +412,10 @@ static void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
break;
case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.csum.hi = swab64(entry->crc64.csum_hi);
- entry->crc128.csum.lo = swab64(entry->crc64.csum_lo);
+ entry->crc128.csum.hi = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.hi);
+ entry->crc128.csum.lo = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.lo);
break;
case BCH_EXTENT_ENTRY_ptr:
break;
@@ -435,7 +438,7 @@ static const char *extent_ptr_invalid(const struct bch_fs *c,
if (ptr->dev >= c->sb.nr_devices)
return "pointer to invalid device";
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
if (!ca)
return "pointer to invalid device";
@@ -487,7 +490,7 @@ static size_t extent_print_ptrs(struct bch_fs *c, char *buf,
break;
case BCH_EXTENT_ENTRY_ptr:
ptr = entry_to_ptr(entry);
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
p("ptr: %u:%llu gen %u%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
@@ -528,7 +531,7 @@ static void extent_pick_read_device(struct bch_fs *c,
struct bch_extent_crc_unpacked crc;
extent_for_each_ptr_crc(e, ptr, crc) {
- struct bch_dev *ca = c->devs[ptr->dev];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
if (ptr->cached && ptr_stale(ca, ptr))
continue;
@@ -621,7 +624,7 @@ static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
bool bad;
extent_for_each_ptr(e, ptr) {
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
g = PTR_BUCKET(ca, ptr);
replicas++;
@@ -1730,7 +1733,7 @@ static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));
extent_for_each_ptr(e, ptr) {
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
g = PTR_BUCKET(ca, ptr);
replicas++;
ptrs_per_tier[ca->mi.tier]++;
@@ -1844,7 +1847,7 @@ static void bch2_extent_to_text(struct bch_fs *c, char *buf,
static unsigned PTR_TIER(struct bch_fs *c,
const struct bch_extent_ptr *ptr)
{
- return c->devs[ptr->dev]->mi.tier;
+ return bch_dev_bkey_exists(c, ptr->dev)->mi.tier;
}
static void bch2_extent_crc_init(union bch_extent_crc *crc,
@@ -1976,7 +1979,8 @@ void bch2_extent_mark_replicas_cached(struct bch_fs *c,
extent_for_each_ptr(e, ptr)
if (!ptr->cached &&
- c->devs[ptr->dev]->mi.state != BCH_MEMBER_STATE_FAILED)
+ bch_dev_bkey_exists(c, ptr->dev)->mi.state !=
+ BCH_MEMBER_STATE_FAILED)
nr_good++;
if (nr_good <= c->opts.data_replicas)
@@ -2103,7 +2107,7 @@ static enum merge_result bch2_extent_merge(struct bch_fs *c,
return BCH_MERGE_NOMERGE;
/* We don't allow extents to straddle buckets: */
- ca = c->devs[lp->dev];
+ ca = bch_dev_bkey_exists(c, lp->dev);
if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
return BCH_MERGE_NOMERGE;
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 936ef5b320a8..ff4ce2af16e0 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -243,14 +243,14 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
case BCH_EXTENT_CRC32:
return (struct bch_extent_crc_unpacked) {
common_fields(crc->crc32),
- .csum.lo = crc->crc32.csum,
+ .csum.lo = (__force __le64) crc->crc32.csum,
};
case BCH_EXTENT_CRC64:
return (struct bch_extent_crc_unpacked) {
common_fields(crc->crc64),
.nonce = crc->crc64.nonce,
- .csum.lo = crc->crc64.csum_lo,
- .csum.hi = crc->crc64.csum_hi,
+ .csum.lo = (__force __le64) crc->crc64.csum_lo,
+ .csum.hi = (__force __le64) crc->crc64.csum_hi,
};
case BCH_EXTENT_CRC128:
return (struct bch_extent_crc_unpacked) {
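The __force casts added here and in the bch2_ptr_swab()/crc-unpack hunks above mark deliberate bit-for-bit moves between checksum fields of different declared widths, silencing sparse without inserting a byte swap; note the commit message itself still calls checksum endianness "sketchy", so these casts document intent rather than settle the question. The general shape, with illustrative names:

	static __le64 example_widen_csum(u32 raw)
	{
		/*
		 * Reinterpret, don't convert: __force suppresses sparse's
		 * bitwise-type warning for this one cast.
		 */
		return (__force __le64) (u64) raw;
	}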
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 408642167e9f..cb0397f1343b 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -145,7 +145,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
return __bch2_write_inode(c, inode, NULL, NULL);
}
-int bch2_inc_nlink(struct bch_fs *c, struct bch_inode_info *inode)
+static int bch2_inc_nlink(struct bch_fs *c, struct bch_inode_info *inode)
{
int ret;
@@ -157,7 +157,7 @@ int bch2_inc_nlink(struct bch_fs *c, struct bch_inode_info *inode)
return ret;
}
-int bch2_dec_nlink(struct bch_fs *c, struct bch_inode_info *inode)
+static int bch2_dec_nlink(struct bch_fs *c, struct bch_inode_info *inode)
{
int ret = 0;
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index c329c873498d..25e330441c68 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -204,7 +204,7 @@ static int hash_check_key(const struct bch_hash_desc desc,
"hash table key at wrong offset: %llu, "
"hashed to %llu chain starts at %llu\n%s",
k.k->p.offset, hashed, h->chain.pos.offset,
- bch2_bkey_val_to_text(c, desc.btree_id,
+ bch2_bkey_val_to_text(c, bkey_type(0, desc.btree_id),
buf, sizeof(buf), k))) {
ret = hash_redo_key(desc, h, c, k_iter, k, hashed);
if (ret) {
@@ -224,7 +224,7 @@ static int hash_check_key(const struct bch_hash_desc desc,
if (fsck_err_on(k2.k->type == desc.key_type &&
!desc.cmp_bkey(k, k2), c,
"duplicate hash table keys:\n%s",
- bch2_bkey_val_to_text(c, desc.btree_id,
+ bch2_bkey_val_to_text(c, bkey_type(0, desc.btree_id),
buf, sizeof(buf), k))) {
ret = bch2_hash_delete_at(desc, &h->info, &h->iter, NULL);
if (ret)
@@ -397,9 +397,9 @@ static int check_dirents(struct bch_fs *c)
if (fsck_err_on(have_target &&
d.v->d_type !=
- mode_to_type(le16_to_cpu(target.bi_mode)), c,
+ mode_to_type(target.bi_mode), c,
"incorrect d_type: should be %u:\n%s",
- mode_to_type(le16_to_cpu(target.bi_mode)),
+ mode_to_type(target.bi_mode),
bch2_bkey_val_to_text(c, BTREE_ID_DIRENTS,
buf, sizeof(buf), k))) {
struct bkey_i_dirent *n;
@@ -411,7 +411,7 @@ static int check_dirents(struct bch_fs *c)
}
bkey_reassemble(&n->k_i, d.s_c);
- n->v.d_type = mode_to_type(le16_to_cpu(target.bi_mode));
+ n->v.d_type = mode_to_type(target.bi_mode);
ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
BTREE_INSERT_NOFAIL,
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 1f4ff18e6899..71a24cc66886 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -435,7 +435,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
struct bch_inode_unpacked inode_u;
if (!bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u))
- bi_generation = cpu_to_le32(inode_u.bi_generation) + 1;
+ bi_generation = inode_u.bi_generation + 1;
break;
}
case BCH_INODE_GENERATION: {
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index f3540c2e37fa..9da8706e2442 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -20,6 +20,7 @@
#include "journal.h"
#include "keylist.h"
#include "move.h"
+#include "super.h"
#include "super-io.h"
#include <linux/blkdev.h>
@@ -147,7 +148,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
!c->devs[ptr->dev]);
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
if (ptr + 1 < &extent_entry_last(e)->ptr) {
n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
@@ -1090,7 +1091,8 @@ static void bch2_rbio_retry(struct work_struct *work)
__bch2_read(c, rbio, iter, inode, &avoid, flags);
}
-static void bch2_rbio_error(struct bch_read_bio *rbio, int retry, int error)
+static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
+ blk_status_t error)
{
rbio->retry = retry;
@@ -1253,7 +1255,7 @@ csum_err:
*/
if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
rbio->flags |= BCH_READ_MUST_BOUNCE;
- bch2_rbio_error(rbio, READ_RETRY, -EIO);
+ bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
return;
}
@@ -1262,13 +1264,13 @@ csum_err:
rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
csum.hi, csum.lo, crc.csum_type);
- bch2_rbio_error(rbio, READ_RETRY_AVOID, -EIO);
+ bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
return;
decompression_err:
__bcache_io_error(c, "decompression error, inode %llu offset %llu",
rbio->pos.inode,
(u64) rbio->bvec_iter.bi_sector);
- bch2_rbio_error(rbio, READ_ERR, -EIO);
+ bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
return;
}
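bch2_rbio_error() switching from int to blk_status_t matches bio->bi_status, a sparse bitwise type since the block layer's blk_status_t conversion; passing -EIO mixed errno space with block-status space. A sketch of the corrected usage, plus the stock helper for crossing between the two spaces when an errno is genuinely needed:

	#include <linux/bio.h>
	#include <linux/blk_types.h>

	static void example_fail_bio(struct bio *bio)
	{
		bio->bi_status = BLK_STS_IOERR;	/* not -EIO */
		bio_endio(bio);
	}

	static int example_status_to_errno(blk_status_t status)
	{
		return blk_status_to_errno(status);	/* boundary conversion */
	}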
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index ae7b4bbec498..050499e16bd8 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -508,7 +508,7 @@ static int __journal_entry_validate(struct bch_fs *c, struct jset *j,
if (journal_entry_err_on(vstruct_next(entry) >
vstruct_last(j), c,
"journal entry extends past end of jset")) {
- j->u64s = cpu_to_le64((u64 *) entry - j->_data);
+ j->u64s = cpu_to_le32((u64 *) entry - j->_data);
break;
}
@@ -917,7 +917,9 @@ static int journal_seq_blacklist_read(struct journal *j,
for_each_jset_entry_type(entry, &i->j,
JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED) {
- seq = le64_to_cpu(entry->_data[0]);
+ struct jset_entry_blacklist *bl_entry =
+ container_of(entry, struct jset_entry_blacklist, entry);
+ seq = le64_to_cpu(bl_entry->seq);
bch_verbose(c, "blacklisting existing journal seq %llu", seq);
@@ -1091,7 +1093,7 @@ void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
{
struct journal_buf *w = journal_prev_buf(j);
- atomic_dec_bug(&journal_seq_pin(j, w->data->seq)->count);
+ atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);
if (!need_write_just_set &&
test_bit(JOURNAL_NEED_WRITE, &j->flags))
@@ -2003,7 +2005,7 @@ static int journal_write_alloc(struct journal *j, unsigned sectors)
* i.e. whichever device was limiting the current journal entry size.
*/
extent_for_each_ptr_backwards(e, ptr) {
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
if (ca->mi.state != BCH_MEMBER_STATE_RW ||
ca->journal.sectors_free <= sectors)
@@ -2287,7 +2289,7 @@ static void journal_write(struct closure *cl)
goto no_io;
extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr) {
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
if (!percpu_ref_tryget(&ca->io_ref)) {
/* XXX: fix this */
bch_err(c, "missing device for journal write\n");
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 0baeb4a687e3..ef69b8f3a2a5 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -157,7 +157,7 @@ struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
return NULL;
f = __bch2_sb_field_resize(sb->sb, f, u64s);
- f->type = type;
+ f->type = cpu_to_le32(type);
return f;
}
@@ -188,7 +188,7 @@ struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
}
f = __bch2_sb_field_resize(c->disk_sb, f, u64s);
- f->type = type;
+ f->type = cpu_to_le32(type);
return f;
}
@@ -516,7 +516,7 @@ static void __copy_super(struct bch_sb *dst, struct bch_sb *src)
if (src_f->type == BCH_SB_FIELD_journal)
continue;
- dst_f = bch2_sb_field_get(dst, src_f->type);
+ dst_f = bch2_sb_field_get(dst, le32_to_cpu(src_f->type));
dst_f = __bch2_sb_field_resize(dst, dst_f,
le32_to_cpu(src_f->u64s));
@@ -610,7 +610,7 @@ reread:
/* XXX: verify MACs */
csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
- (struct nonce) { 0 }, sb->sb);
+ null_nonce(), sb->sb);
if (bch2_crc_cmp(csum, sb->sb->csum))
return "bad checksum reading superblock";
@@ -697,9 +697,9 @@ const char *bch2_read_super(const char *path,
got_super:
pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
le64_to_cpu(ret->sb->version),
- le64_to_cpu(ret->sb->flags),
+ le64_to_cpu(ret->sb->flags[0]),
le64_to_cpu(ret->sb->seq),
- le16_to_cpu(ret->sb->u64s));
+ le32_to_cpu(ret->sb->u64s));
err = "Superblock block size smaller than device block size";
if (le16_to_cpu(ret->sb->block_size) << 9 <
@@ -736,7 +736,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
SET_BCH_SB_CSUM_TYPE(sb, c->opts.metadata_checksum);
sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
- (struct nonce) { 0 }, sb);
+ null_nonce(), sb);
bio_reset(bio);
bio_set_dev(bio, ca->disk_sb.bdev);
@@ -935,14 +935,12 @@ static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
struct bch_sb_field_replicas *sb_r;
struct bch_replicas_cpu *cpu_r, *old_r;
- lockdep_assert_held(&c->sb_lock);
-
sb_r = bch2_sb_get_replicas(c->disk_sb);
cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
if (!cpu_r)
return -ENOMEM;
- old_r = c->replicas;
+ old_r = rcu_dereference_check(c->replicas, lockdep_is_held(&c->sb_lock));
rcu_assign_pointer(c->replicas, cpu_r);
if (old_r)
kfree_rcu(old_r, rcu);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index ae39bfab3a6e..0b63b112fa83 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -385,7 +385,7 @@ static void bch2_fs_free(struct bch_fs *c)
mempool_exit(&c->btree_reserve_pool);
mempool_exit(&c->fill_iter);
percpu_ref_exit(&c->writes);
- kfree(c->replicas);
+ kfree(rcu_dereference_protected(c->replicas, 1));
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
@@ -406,7 +406,7 @@ static void bch2_fs_exit(struct bch_fs *c)
for (i = 0; i < c->sb.nr_devices; i++)
if (c->devs[i])
- bch2_dev_free(c->devs[i]);
+ bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));
closure_debug_destroy(&c->cl);
kobject_put(&c->kobj);
@@ -986,13 +986,6 @@ static void bch2_dev_free(struct bch_dev *ca)
kobject_put(&ca->kobj);
}
-static void bch2_dev_io_ref_release(struct percpu_ref *ref)
-{
- struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
-
- complete(&ca->offline_complete);
-}
-
static void __bch2_dev_offline(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
@@ -1001,9 +994,9 @@ static void __bch2_dev_offline(struct bch_dev *ca)
__bch2_dev_read_only(c, ca);
- reinit_completion(&ca->offline_complete);
+ reinit_completion(&ca->io_ref_completion);
percpu_ref_kill(&ca->io_ref);
- wait_for_completion(&ca->offline_complete);
+ wait_for_completion(&ca->io_ref_completion);
if (ca->kobj.state_in_sysfs) {
struct kobject *block =
@@ -1017,27 +1010,18 @@ static void __bch2_dev_offline(struct bch_dev *ca)
bch2_dev_journal_exit(ca);
}
-static void bch2_dev_ref_release(struct percpu_ref *ref)
+static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
- complete(&ca->stop_complete);
+ complete(&ca->ref_completion);
}
-static void bch2_dev_stop(struct bch_dev *ca)
+static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
- struct bch_fs *c = ca->fs;
-
- lockdep_assert_held(&c->state_lock);
-
- BUG_ON(rcu_access_pointer(c->devs[ca->dev_idx]) != ca);
- rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
-
- synchronize_rcu();
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
- reinit_completion(&ca->stop_complete);
- percpu_ref_kill(&ca->ref);
- wait_for_completion(&ca->stop_complete);
+ complete(&ca->io_ref_completion);
}
static int bch2_dev_sysfs_online(struct bch_dev *ca)
@@ -1086,8 +1070,8 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
return -ENOMEM;
kobject_init(&ca->kobj, &bch2_dev_ktype);
- init_completion(&ca->stop_complete);
- init_completion(&ca->offline_complete);
+ init_completion(&ca->ref_completion);
+ init_completion(&ca->io_ref_completion);
ca->dev_idx = dev_idx;
__set_bit(ca->dev_idx, ca->self.d);
@@ -1123,9 +1107,9 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
DIV_ROUND_UP(BTREE_NODE_RESERVE,
ca->mi.bucket_size / c->opts.btree_node_size);
- if (percpu_ref_init(&ca->ref, bch2_dev_ref_release,
+ if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
0, GFP_KERNEL) ||
- percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_release,
+ percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_BTREE], btree_node_reserve_buckets,
GFP_KERNEL) ||
@@ -1171,8 +1155,6 @@ static int __bch2_dev_online(struct bch_fs *c, struct bch_sb_handle *sb)
struct bch_dev *ca;
int ret;
- lockdep_assert_held(&c->sb_lock);
-
if (le64_to_cpu(sb->sb->seq) >
le64_to_cpu(c->disk_sb->seq))
bch2_sb_to_fs(c, sb->sb);
@@ -1180,7 +1162,7 @@ static int __bch2_dev_online(struct bch_fs *c, struct bch_sb_handle *sb)
BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
!c->devs[sb->sb->dev_idx]);
- ca = c->devs[sb->sb->dev_idx];
+ ca = bch_dev_locked(c, sb->sb->dev_idx);
if (ca->disk_sb.bdev) {
bch_err(c, "already have device online in slot %u",
sb->sb->dev_idx);
@@ -1284,6 +1266,7 @@ static bool bch2_fs_may_start(struct bch_fs *c)
{
struct replicas_status s;
struct bch_sb_field_members *mi;
+ struct bch_dev *ca;
unsigned i, flags = c->opts.degraded
? BCH_FORCE_IF_DEGRADED
: 0;
@@ -1292,14 +1275,19 @@ static bool bch2_fs_may_start(struct bch_fs *c)
mutex_lock(&c->sb_lock);
mi = bch2_sb_get_members(c->disk_sb);
- for (i = 0; i < c->disk_sb->nr_devices; i++)
- if (bch2_dev_exists(c->disk_sb, mi, i) &&
- !bch2_dev_is_online(c->devs[i]) &&
- (c->devs[i]->mi.state == BCH_MEMBER_STATE_RW ||
- c->devs[i]->mi.state == BCH_MEMBER_STATE_RO)) {
+ for (i = 0; i < c->disk_sb->nr_devices; i++) {
+ if (!bch2_dev_exists(c->disk_sb, mi, i))
+ continue;
+
+ ca = bch_dev_locked(c, i);
+
+ if (!bch2_dev_is_online(ca) &&
+ (ca->mi.state == BCH_MEMBER_STATE_RW ||
+ ca->mi.state == BCH_MEMBER_STATE_RO)) {
mutex_unlock(&c->sb_lock);
return false;
}
+ }
mutex_unlock(&c->sb_lock);
}
@@ -1425,7 +1413,14 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
bch2_journal_meta(&c->journal);
__bch2_dev_offline(ca);
- bch2_dev_stop(ca);
+
+ mutex_lock(&c->sb_lock);
+ rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
+ mutex_unlock(&c->sb_lock);
+
+ percpu_ref_kill(&ca->ref);
+ wait_for_completion(&ca->ref_completion);
+
bch2_dev_free(ca);
/*
@@ -1533,7 +1528,7 @@ have_slot:
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- ca = c->devs[dev_idx];
+ ca = bch_dev_locked(c, dev_idx);
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
err = "journal alloc failed";
if (bch2_dev_journal_alloc(ca))
@@ -1559,7 +1554,7 @@ err:
/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
- struct bch_sb_handle sb = { 0 };
+ struct bch_sb_handle sb = { NULL };
struct bch_dev *ca;
unsigned dev_idx;
const char *err;
@@ -1584,7 +1579,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
}
mutex_unlock(&c->sb_lock);
- ca = c->devs[dev_idx];
+ ca = bch_dev_locked(c, dev_idx);
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
err = __bch2_dev_read_write(c, ca);
if (err)
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index 28a0018442a2..b3c0ef50a4ff 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -131,6 +131,26 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
__for_each_online_member(ca, c, iter, \
(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
+/*
+ * If a key exists that references a device, the device won't be going away and
+ * we can omit rcu_read_lock():
+ */
+static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
+{
+ EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+
+ return rcu_dereference_check(c->devs[idx], 1);
+}
+
+static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
+{
+ EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+
+ return rcu_dereference_protected(c->devs[idx],
+ lockdep_is_held(&c->sb_lock) ||
+ lockdep_is_held(&c->state_lock));
+}
+
/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
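These two helpers round out the c->devs[] access rules used throughout the patch: bch_dev_bkey_exists() when a key pins the device, bch_dev_locked() under sb_lock or state_lock, rcu_dereference_protected(..., 1) on single-threaded teardown paths (bch2_fs_free()/bch2_fs_exit() above), and a plain RCU read section otherwise. The last case, mirroring the chardev.c hunk:

	#include <linux/rcupdate.h>
	#include <linux/percpu-refcount.h>

	static struct bch_dev *example_dev_get(struct bch_fs *c, unsigned idx)
	{
		struct bch_dev *ca;

		rcu_read_lock();
		ca = rcu_dereference(c->devs[idx]);
		if (ca)
			percpu_ref_get(&ca->ref);	/* pin before leaving RCU */
		rcu_read_unlock();

		return ca;	/* caller pairs with percpu_ref_put(&ca->ref) */
	}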
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 35f1e561c3f5..3197a2e46166 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -739,7 +739,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
c->open_buckets_wait.list.first ? "waiting" : "empty");
}
-const char * const bch2_rw[] = {
+static const char * const bch2_rw[] = {
"read",
"write",
NULL
diff --git a/fs/bcachefs/tier.c b/fs/bcachefs/tier.c
index 2e29f7414c50..f5007864c6b6 100644
--- a/fs/bcachefs/tier.c
+++ b/fs/bcachefs/tier.c
@@ -6,7 +6,6 @@
#include "clock.h"
#include "extents.h"
#include "io.h"
-#include "keylist.h"
#include "move.h"
#include "super-io.h"
#include "tier.h"
@@ -28,7 +27,7 @@ static bool tiering_pred(void *arg, struct bkey_s_c_extent e)
return false;
extent_for_each_ptr(e, ptr)
- if (c->devs[ptr->dev]->mi.tier >= tier->idx)
+ if (bch_dev_bkey_exists(c, ptr->dev)->mi.tier >= tier->idx)
replicas++;
return replicas < c->opts.data_replicas;
diff --git a/fs/bcachefs/vstructs.h b/fs/bcachefs/vstructs.h
index ce2cece0d0cb..795664428876 100644
--- a/fs/bcachefs/vstructs.h
+++ b/fs/bcachefs/vstructs.h
@@ -9,10 +9,10 @@
*/
#define __vstruct_u64s(_s) \
({ \
- ( type_is((_s)->u64s, u64) ? le64_to_cpu((_s)->u64s) \
- : type_is((_s)->u64s, u32) ? le32_to_cpu((_s)->u64s) \
- : type_is((_s)->u64s, u16) ? le16_to_cpu((_s)->u64s) \
- : ((_s)->u64s)); \
+ ( type_is((_s)->u64s, u64) ? le64_to_cpu((__force __le64) (_s)->u64s) \
+ : type_is((_s)->u64s, u32) ? le32_to_cpu((__force __le32) (_s)->u64s) \
+ : type_is((_s)->u64s, u16) ? le16_to_cpu((__force __le16) (_s)->u64s) \
+ : ((__force u8) ((_s)->u64s))); \
})
#define __vstruct_bytes(_type, _u64s) \
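Finally, the vstructs.h macro is type-generic: it dispatches on the declared width of a structure's u64s field, but every arm of the conditional chain is type-checked even when not selected, so each arm needs a __force cast to keep sparse from flagging the widths that don't match. Callers are unchanged; a sketch against a hypothetical structure:

	struct example_vstruct {
		__le16	u64s;		/* payload length in u64 units */
		__u64	_data[];
	};

	/*
	 * __vstruct_u64s(&v) picks the 16-bit arm for this struct and
	 * effectively evaluates le16_to_cpu((__force __le16) v.u64s).
	 */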