author | Kent Overstreet <kent.overstreet@linux.dev> | 2022-10-22 15:00:16 -0400
---|---|---
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-03-13 11:34:41 -0400
commit | 2453ad5c4e26aec28fd8ffe37f33cbc493e6a4fd (patch) |
tree | 73d6999ea117fad0d20f04dcda2b3639633d08f9 |
parent | 365d157ac076136856559c8963b1dc232903561a (diff) |
bcachefs: Convert to __packed and __aligned
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
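
For context: `__packed` and `__aligned(x)` are the kernel's shorthand macros for the corresponding GCC/clang attributes (in current trees they are provided by include/linux/compiler_attributes.h), so this change is a spelling cleanup and does not alter struct layout. The sketch below is a standalone user-space illustration, not kernel code: the struct and its fields are invented, and the macro definitions are local stand-ins mirroring the kernel's, just to show that both spellings yield the same size and alignment.

```c
/* Standalone sketch; the struct and its fields are invented for illustration. */
#include <stdint.h>

/* Local stand-ins mirroring the kernel's shorthand macros */
#define __packed	__attribute__((__packed__))
#define __aligned(x)	__attribute__((__aligned__(x)))

/* Old spelling, as removed by this patch */
struct demo_key_old {
	uint64_t	offset;
	uint32_t	snapshot;
} __attribute__((packed, aligned(4)));

/* New spelling, as added by this patch */
struct demo_key_new {
	uint64_t	offset;
	uint32_t	snapshot;
} __packed __aligned(4);

/* Both spellings expand to the same attributes, so the layout is identical */
_Static_assert(sizeof(struct demo_key_old) == sizeof(struct demo_key_new),
	       "same size either way");
_Static_assert(_Alignof(struct demo_key_old) == _Alignof(struct demo_key_new),
	       "same alignment either way");
```

Compiling this with `cc -std=c11 -c` is enough to check the assertions; if they hold, the conversion is purely cosmetic.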
-rw-r--r-- | fs/bcachefs/bcachefs_format.h | 92
-rw-r--r-- | fs/bcachefs/bcachefs_ioctl.h | 8
-rw-r--r-- | fs/bcachefs/btree_types.h | 2
-rw-r--r-- | fs/bcachefs/inode.h | 2
4 files changed, 52 insertions, 52 deletions
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 912caf38642d..4d626d67a5d2 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -147,7 +147,7 @@ struct bpos {
 #else
 #error edit for your odd byteorder.
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 #define KEY_INODE_MAX ((__u64)~0ULL)
 #define KEY_OFFSET_MAX ((__u64)~0ULL)
@@ -181,7 +181,7 @@ struct bversion {
 	__u32 hi;
 	__u64 lo;
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 struct bkey {
 	/* Size of combined key and value, in u64s */
@@ -214,7 +214,7 @@ struct bkey {
 	__u8 pad[1];
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bkey_packed {
 	__u64 _data[0];
@@ -248,7 +248,7 @@ struct bkey_packed {
 	 * to the same size as struct bkey should hopefully be safest.
	 */
 	__u8 pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
 #define BKEY_U64s_MAX U8_MAX
@@ -476,7 +476,7 @@ struct bch_set {
 struct bch_csum {
 	__le64 lo;
 	__le64 hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_EXTENT_ENTRY_TYPES() \
 	x(ptr, 0) \
@@ -513,7 +513,7 @@ struct bch_extent_crc32 {
 	_compressed_size:7,
 	type:2;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define CRC32_SIZE_MAX (1U << 7)
 #define CRC32_NONCE_MAX 0
@@ -539,7 +539,7 @@ struct bch_extent_crc64 {
 	type:3;
 #endif
 	__u64 csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define CRC64_SIZE_MAX (1U << 9)
 #define CRC64_NONCE_MAX ((1U << 10) - 1)
@@ -563,7 +563,7 @@ struct bch_extent_crc128 {
 	type:4;
 #endif
 	struct bch_csum csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define CRC128_SIZE_MAX (1U << 13)
 #define CRC128_NONCE_MAX ((1U << 13) - 1)
@@ -589,7 +589,7 @@ struct bch_extent_ptr {
 	cached:1,
 	type:1;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_extent_stripe_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -641,7 +641,7 @@ struct bch_btree_ptr {
 	__u64 _data[0];
 	struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_btree_ptr_v2 {
 	struct bch_val v;
@@ -653,7 +653,7 @@ struct bch_btree_ptr_v2 {
 	struct bpos min_key;
 	__u64 _data[0];
 	struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
@@ -662,7 +662,7 @@ struct bch_extent {
 	__u64 _data[0];
 	union bch_extent_entry start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_reservation {
 	struct bch_val v;
@@ -670,7 +670,7 @@ struct bch_reservation {
 	__le32 generation;
 	__u8 nr_replicas;
 	__u8 pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Maximum size (in u64s) a single pointer could be: */
 #define BKEY_EXTENT_PTR_U64s_MAX\
@@ -704,7 +704,7 @@ struct bch_inode {
 	__le32 bi_flags;
 	__le16 bi_mode;
 	__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_inode_v2 {
 	struct bch_val v;
@@ -714,14 +714,14 @@ struct bch_inode_v2 {
 	__le64 bi_flags;
 	__le16 bi_mode;
 	__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_inode_generation {
 	struct bch_val v;
 	__le32 bi_generation;
 	__le32 pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /*
  * bi_subvol and bi_parent_subvol are only set for subvolume roots:
@@ -842,7 +842,7 @@ struct bch_dirent {
 	__u8 d_type;
 	__u8 d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define DT_SUBVOL 16
 #define BCH_DT_MAX 17
@@ -865,7 +865,7 @@ struct bch_xattr {
 	__u8 x_name_len;
 	__le16 x_val_len;
 	__u8 x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Bucket/allocation information: */
@@ -874,7 +874,7 @@ struct bch_alloc {
 	__u8 fields;
 	__u8 gen;
 	__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_ALLOC_FIELDS_V1() \
 	x(read_time, 16) \
@@ -893,7 +893,7 @@ struct bch_alloc_v2 {
 	__u8 oldest_gen;
 	__u8 data_type;
 	__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_ALLOC_FIELDS_V2() \
 	x(read_time, 64) \
@@ -912,7 +912,7 @@ struct bch_alloc_v3 {
 	__u8 oldest_gen;
 	__u8 data_type;
 	__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_alloc_v4 {
 	struct bch_val v;
@@ -928,7 +928,7 @@ struct bch_alloc_v4 {
 	__u32 stripe;
 	__u32 nr_external_backpointers;
 	struct bpos backpointers[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
 LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
@@ -967,7 +967,7 @@ struct bch_quota_counter {
 struct bch_quota {
 	struct bch_val v;
 	struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Erasure coding */
@@ -983,7 +983,7 @@ struct bch_stripe {
 	__u8 pad;
 	struct bch_extent_ptr ptrs[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Reflink: */
@@ -1000,14 +1000,14 @@ struct bch_reflink_p {
	 */
 	__le32 front_pad;
 	__le32 back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_reflink_v {
 	struct bch_val v;
 	__le64 refcount;
 	union bch_extent_entry start[0];
 	__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_indirect_inline_data {
 	struct bch_val v;
@@ -1064,7 +1064,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
 struct bch_lru {
 	struct bch_val v;
 	__le64 idx;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define LRU_ID_STRIPES (1U << 16)
@@ -1263,19 +1263,19 @@ struct bch_replicas_entry_v0 {
 	__u8 data_type;
 	__u8 nr_devs;
 	__u8 devs[];
-} __attribute__((packed));
+} __packed;
 struct bch_sb_field_replicas_v0 {
 	struct bch_sb_field field;
 	struct bch_replicas_entry_v0 entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_replicas_entry {
 	__u8 data_type;
 	__u8 nr_devs;
 	__u8 nr_required;
 	__u8 devs[];
-} __attribute__((packed));
+} __packed;
 #define replicas_entry_bytes(_i) \
 	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
@@ -1283,7 +1283,7 @@ struct bch_replicas_entry {
 struct bch_sb_field_replicas {
 	struct bch_sb_field field;
 	struct bch_replicas_entry entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* BCH_SB_FIELD_quota: */
@@ -1300,7 +1300,7 @@ struct bch_sb_quota_type {
 struct bch_sb_field_quota {
 	struct bch_sb_field field;
 	struct bch_sb_quota_type q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* BCH_SB_FIELD_disk_groups: */
@@ -1309,7 +1309,7 @@ struct bch_sb_field_quota {
 struct bch_disk_group {
 	__u8 label[BCH_SB_LABEL_SIZE];
 	__le64 flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
@@ -1318,7 +1318,7 @@ LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
 struct bch_sb_field_disk_groups {
 	struct bch_sb_field field;
 	struct bch_disk_group entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* BCH_SB_FIELD_counters */
@@ -1500,7 +1500,7 @@ struct bch_sb_layout {
 	__u8 nr_superblocks;
 	__u8 pad[5];
 	__le64 sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_SB_LAYOUT_SECTOR 7
@@ -1551,7 +1551,7 @@ struct bch_sb {
 		struct bch_sb_field start[0];
 		__le64 _data[0];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /*
  * Flags:
@@ -1910,26 +1910,26 @@ enum {
 struct jset_entry_usage {
 	struct jset_entry entry;
 	__le64 v;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_data_usage {
 	struct jset_entry entry;
 	__le64 v;
 	struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_clock {
 	struct jset_entry entry;
 	__u8 rw;
 	__u8 pad[7];
 	__le64 time;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_dev_usage_type {
 	__le64 buckets;
 	__le64 sectors;
 	__le64 fragmented;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_dev_usage {
 	struct jset_entry entry;
@@ -1940,7 +1940,7 @@ struct jset_entry_dev_usage {
 	__le64 _buckets_unavailable; /* No longer used */
 	struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
 {
@@ -1951,7 +1951,7 @@ static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage
 struct jset_entry_log {
 	struct jset_entry entry;
 	u8 d[];
-} __attribute__((packed));
+} __packed;
 /*
  * On disk format for a journal entry:
@@ -1986,7 +1986,7 @@ struct jset {
 		struct jset_entry start[0];
 		__u64 _data[0];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
 LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
@@ -2048,7 +2048,7 @@ struct bset {
 		struct bkey_packed start[0];
 		__u64 _data[0];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
@@ -2081,7 +2081,7 @@ struct btree_node {
 	};
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
 LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
@@ -2102,6 +2102,6 @@ struct btree_node_entry {
 	};
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #endif /* _BCACHEFS_FORMAT_H */
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
index b2edabf58260..ad47a506a907 100644
--- a/fs/bcachefs/bcachefs_ioctl.h
+++ b/fs/bcachefs/bcachefs_ioctl.h
@@ -208,7 +208,7 @@ struct bch_ioctl_data {
 		__u64 pad[8];
 	};
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 enum bch_data_event {
 	BCH_DATA_EVENT_PROGRESS = 0,
@@ -224,7 +224,7 @@ struct bch_ioctl_data_progress {
 	__u64 sectors_done;
 	__u64 sectors_total;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_ioctl_data_event {
 	__u8 type;
@@ -233,12 +233,12 @@ struct bch_ioctl_data_event {
 		struct bch_ioctl_data_progress p;
 		__u64 pad2[15];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_replicas_usage {
 	__u64 sectors;
 	struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 static inline struct bch_replicas_usage *
 replicas_usage_next(struct bch_replicas_usage *u)
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 1aeb9f04a7ee..b0ff3f97d4bc 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -323,7 +323,7 @@ struct btree_key_cache {
 struct bkey_cached_key {
 	u32 btree_id;
 	struct bpos pos;
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 #define BKEY_CACHED_ACCESSED 0
 #define BKEY_CACHED_DIRTY 1
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index 717a0bc95d93..5c80bdf587f9 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -66,7 +66,7 @@ struct bkey_inode_buf {
 #define x(_name, _bits) + 8 + _bits / 8
 	u8 _pad[0 + BCH_INODE_FIELDS()];
 #undef x
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 void bch2_inode_pack(struct bch_fs *, struct bkey_inode_buf *,
 	const struct bch_inode_unpacked *);
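
As a side note on why both attributes appear together on these on-disk structures: `packed` guarantees no compiler-inserted padding but also drops the type's natural alignment to 1, and `aligned(N)` then restores a fixed alignment (and `sizeof` rounds up to a multiple of N), giving a layout that stays stable across compilers and architectures. A small user-space sketch (invented struct names, typical x86-64/LP64 build assumed) shows the effect:

```c
#include <stdio.h>
#include <stdint.h>

/* Invented demo types; not part of bcachefs */
struct demo_plain   { uint64_t a; uint8_t b; };
struct demo_packed  { uint64_t a; uint8_t b; } __attribute__((__packed__));
struct demo_pack_al { uint64_t a; uint8_t b; } __attribute__((__packed__, __aligned__(8)));

int main(void)
{
	/* Expected on a typical x86-64 build: 16/8, 9/1, 16/8 */
	printf("plain:          size %zu align %zu\n",
	       sizeof(struct demo_plain),   _Alignof(struct demo_plain));
	printf("packed:         size %zu align %zu\n",
	       sizeof(struct demo_packed),  _Alignof(struct demo_packed));
	printf("packed+aligned: size %zu align %zu\n",
	       sizeof(struct demo_pack_al), _Alignof(struct demo_pack_al));
	return 0;
}
```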