-rw-r--r--  .github/ISSUE_TEMPLATE.md                              63
-rw-r--r--  src/codegen/struct_layout.rs                            6
-rw-r--r--  src/ir/layout.rs                                        2
-rw-r--r--  tests/expectations/tests/layout_large_align_field.rs  419
-rw-r--r--  tests/headers/layout_large_align_field.h               97
5 files changed, 584 insertions(+), 3 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000..68dfb68e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,63 @@
+### Input C/C++ Header
+
+```C++
+// Insert your (minimal) C/C++ header here
+```
+
+### Bindgen Invocation
+
+<!--
+
+Place either the `bindgen::Builder` **OR** the command line flags used here.
+
+```Rust
+bindgen::Builder::default()
+ .header("input.h")
+ .generate()
+ .unwrap()
+```
+
+OR
+
+```
+$ bindgen input.h --whatever --flags
+```
+
+-->
+
+### Actual Results
+
+<!--
+
+```
+Insert panic message and backtrace here.
+```
+
+and/or
+
+```rust
+// Insert the (incorrect) generated bindings here
+```
+
+and/or
+
+```
+Insert compilation errors generated when compiling the bindings with rustc here
+```
+
+-->
+
+### Expected Results
+
+<!--
+Replace this with a description of what you expected instead of the actual
+results. The more precise, the better! For example, if a struct in the generated
+bindings is missing a field that exists in the C/C++ struct, note that here.
+-->
+
+### `RUST_LOG=bindgen` Output
+
+```
+Insert the debug logging produced when running bindgen with the
+RUST_LOG=bindgen environment variable set.
+```
diff --git a/src/codegen/struct_layout.rs b/src/codegen/struct_layout.rs
index 24938c16..724bef98 100644
--- a/src/codegen/struct_layout.rs
+++ b/src/codegen/struct_layout.rs
@@ -197,7 +197,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
};
// Otherwise the padding is useless.
- let need_padding = padding_bytes >= field_layout.align;
+ let need_padding = padding_bytes >= field_layout.align || field_layout.align > mem::size_of::<*mut ()>();
self.latest_offset += padding_bytes;
@@ -213,7 +213,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
field_layout);
if need_padding && padding_bytes != 0 {
- Some(Layout::new(padding_bytes, field_layout.align))
+ Some(Layout::new(padding_bytes, cmp::min(field_layout.align, mem::size_of::<*mut ()>())))
} else {
None
}
@@ -262,6 +262,8 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
Layout::new(padding_bytes, layout.align)
};
+ debug!("pad bytes to struct {}, {:?}", name, layout);
+
Some(self.padding_field(layout))
} else {
None
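The two hunks above change when bindgen emits explicit padding: a field whose alignment exceeds the native pointer size now always forces padding, and the padding field's own alignment is capped at the pointer size, because without `#[repr(align(N))]` the widest alignment the generated Rust can express is that of a pointer-sized integer. A minimal self-contained sketch of the combined logic (a local `Layout` stands in for bindgen's type):

```rust
use std::cmp;
use std::mem;

struct Layout { size: usize, align: usize }

// Sketch of the patched decision: padding is needed when it spans at
// least one whole alignment unit, OR when the field is over-aligned
// relative to what Rust can express without #[repr(align)].
fn padding_for(padding_bytes: usize, field: &Layout) -> Option<Layout> {
    let max_rust_align = mem::size_of::<*mut ()>(); // 8 on 64-bit targets
    let need_padding =
        padding_bytes >= field.align || field.align > max_rust_align;
    if need_padding && padding_bytes != 0 {
        // Cap the padding's own alignment so it stays representable as
        // an array of integers no wider than a pointer.
        Some(Layout {
            size: padding_bytes,
            align: cmp::min(field.align, max_rust_align),
        })
    } else {
        None
    }
}

fn main() {
    // e.g. a 64-byte-aligned field with 48 bytes of tail padding, as in
    // the ip_frag_pkt expectation below: the padding is emitted, but
    // with its alignment capped at 8.
    let pad = padding_for(48, &Layout { size: 192, align: 64 }).unwrap();
    assert_eq!((pad.size, pad.align), (48, 8));
}
```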
diff --git a/src/ir/layout.rs b/src/ir/layout.rs
index 38379261..f21a501c 100644
--- a/src/ir/layout.rs
+++ b/src/ir/layout.rs
@@ -38,7 +38,7 @@ impl Layout {
/// alignment possible.
pub fn for_size(size: usize) -> Self {
let mut next_align = 2;
- while size % next_align == 0 && next_align <= 2 * mem::size_of::<*mut ()>() {
+ while size % next_align == 0 && next_align <= mem::size_of::<*mut ()>() {
next_align *= 2;
}
Layout {
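`Layout::for_size` guesses an alignment for an opaque blob from its size alone: the largest power of two dividing the size, now capped at the pointer size rather than twice it, so the result is always realizable as an array of plain integers. A standalone sketch of the computation, with example values assuming a 64-bit target:

```rust
use std::mem;

// Sketch of the patched Layout::for_size: find the largest power of two
// that divides `size`, but never exceed the pointer size.
fn align_for_size(size: usize) -> usize {
    let mut next_align = 2;
    while size % next_align == 0 && next_align <= mem::size_of::<*mut ()>() {
        next_align *= 2;
    }
    next_align / 2
}

fn main() {
    // On a 64-bit target:
    assert_eq!(align_for_size(48), 8); // previously 16, which generated Rust could not express
    assert_eq!(align_for_size(6), 2);
    assert_eq!(align_for_size(7), 1);
}
```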
diff --git a/tests/expectations/tests/layout_large_align_field.rs b/tests/expectations/tests/layout_large_align_field.rs
new file mode 100644
index 00000000..820e4210
--- /dev/null
+++ b/tests/expectations/tests/layout_large_align_field.rs
@@ -0,0 +1,419 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>);
+impl <T> __IncompleteArrayField<T> {
+ #[inline]
+ pub fn new() -> Self {
+ __IncompleteArrayField(::std::marker::PhantomData)
+ }
+ #[inline]
+ pub unsafe fn as_ptr(&self) -> *const T { ::std::mem::transmute(self) }
+ #[inline]
+ pub unsafe fn as_mut_ptr(&mut self) -> *mut T {
+ ::std::mem::transmute(self)
+ }
+ #[inline]
+ pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::std::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+impl <T> ::std::fmt::Debug for __IncompleteArrayField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ fmt.write_str("__IncompleteArrayField")
+ }
+}
+impl <T> ::std::clone::Clone for __IncompleteArrayField<T> {
+ #[inline]
+ fn clone(&self) -> Self { Self::new() }
+}
+impl <T> ::std::marker::Copy for __IncompleteArrayField<T> { }
+pub const RTE_CACHE_LINE_SIZE: ::std::os::raw::c_uint = 64;
+pub const RTE_LIBRTE_IP_FRAG_MAX_FRAG: ::std::os::raw::c_uint = 4;
+pub const IP_LAST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_LAST_FRAG_IDX;
+pub const IP_FIRST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_FIRST_FRAG_IDX;
+pub const IP_MIN_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MIN_FRAG_NUM;
+pub const IP_MAX_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MAX_FRAG_NUM;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum _bindgen_ty_1 {
+ IP_LAST_FRAG_IDX = 0,
+ IP_FIRST_FRAG_IDX = 1,
+ IP_MIN_FRAG_NUM = 2,
+ IP_MAX_FRAG_NUM = 4,
+}
+/** @internal fragmented mbuf */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag {
+ /**< offset into the packet */
+ pub ofs: u16,
+ /**< length of fragment */
+ pub len: u16,
+ /**< fragment mbuf */
+ pub mb: *mut rte_mbuf,
+}
+#[test]
+fn bindgen_test_layout_ip_frag() {
+ assert_eq!(::std::mem::size_of::<ip_frag>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . ofs as * const _ as usize }
+ , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( ofs ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . len as * const _ as usize }
+ , 2usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( len ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . mb as * const _ as usize }
+ , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( mb ) ));
+}
+impl Clone for ip_frag {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_key {
+ /**< src address, first 8 bytes used for IPv4 */
+ pub src_dst: [u64; 4usize],
+    /**< fragment ID */
+ pub id: u32,
+ /**< src/dst key length */
+ pub key_len: u32,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_key() {
+ assert_eq!(::std::mem::size_of::<ip_frag_key>() , 40usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_key>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . src_dst as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( src_dst ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . id as * const _ as
+ usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( id ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . key_len as * const _ as
+ usize } , 36usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( key_len ) ));
+}
+impl Clone for ip_frag_key {
+ fn clone(&self) -> Self { *self }
+}
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt {
+ /**< LRU list */
+ pub lru: ip_frag_pkt__bindgen_ty_1,
+ /**< fragmentation key */
+ pub key: ip_frag_key,
+ /**< creation timestamp */
+ pub start: u64,
+ /**< expected reassembled size */
+ pub total_size: u32,
+ /**< size of fragments received */
+ pub frag_size: u32,
+ /**< index of next entry to fill */
+ pub last_idx: u32,
+ /**< fragments */
+ pub frags: [ip_frag; 4usize],
+ pub __bindgen_padding_0: [u64; 6usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt__bindgen_ty_1 {
+ pub tqe_next: *mut ip_frag_pkt,
+ pub tqe_prev: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt__bindgen_ty_1>() , 16usize ,
+ concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt__bindgen_ty_1 ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_pkt__bindgen_ty_1>() , 8usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( ip_frag_pkt__bindgen_ty_1 )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_next
+ as * const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_next )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_prev
+ as * const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_prev )
+ ));
+}
+impl Clone for ip_frag_pkt__bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt__bindgen_ty_1 {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt>() , 192usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . lru as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . key as * const _ as
+ usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . start as * const _ as
+ usize } , 56usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( start ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . total_size as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( total_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frag_size as * const _
+ as usize } , 68usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frag_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . last_idx as * const _
+ as usize } , 72usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( last_idx ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frags as * const _ as
+ usize } , 80usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frags ) ));
+}
+impl Clone for ip_frag_pkt {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_pkt_list {
+ pub tqh_first: *mut ip_frag_pkt,
+ pub tqh_last: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_pkt_list() {
+ assert_eq!(::std::mem::size_of::<ip_pkt_list>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_pkt_list ) ));
+ assert_eq! (::std::mem::align_of::<ip_pkt_list>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_pkt_list ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_pkt_list ) ) . tqh_first as * const _
+ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_pkt_list ) , "::" ,
+ stringify ! ( tqh_first ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_pkt_list ) ) . tqh_last as * const _
+ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_pkt_list ) , "::" ,
+ stringify ! ( tqh_last ) ));
+}
+impl Clone for ip_pkt_list {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_pkt_list {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** fragmentation table statistics */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_tbl_stat {
+ /**< total # of find/insert attempts. */
+ pub find_num: u64,
+ /**< # of add ops. */
+ pub add_num: u64,
+ /**< # of del ops. */
+ pub del_num: u64,
+ /**< # of reuse (del/add) ops. */
+ pub reuse_num: u64,
+ /**< total # of add failures. */
+ pub fail_total: u64,
+ /**< # of 'no space' add failures. */
+ pub fail_nospace: u64,
+ pub __bindgen_padding_0: [u64; 2usize],
+}
+#[test]
+fn bindgen_test_layout_ip_frag_tbl_stat() {
+ assert_eq!(::std::mem::size_of::<ip_frag_tbl_stat>() , 64usize , concat !
+ ( "Size of: " , stringify ! ( ip_frag_tbl_stat ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . find_num as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( find_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . add_num as * const
+ _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( add_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . del_num as * const
+ _ as usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( del_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . reuse_num as *
+ const _ as usize } , 24usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( reuse_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . fail_total as *
+ const _ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( fail_total ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . fail_nospace as *
+ const _ as usize } , 40usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( fail_nospace ) ));
+}
+impl Clone for ip_frag_tbl_stat {
+ fn clone(&self) -> Self { *self }
+}
+/** fragmentation table */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct rte_ip_frag_tbl {
+ /**< ttl for table entries. */
+ pub max_cycles: u64,
+ /**< hash value mask. */
+ pub entry_mask: u32,
+ /**< max entries allowed. */
+ pub max_entries: u32,
+ /**< entries in use. */
+ pub use_entries: u32,
+    /**< hash associativity. */
+ pub bucket_entries: u32,
+ /**< total size of the table. */
+ pub nb_entries: u32,
+ /**< num of associativity lines. */
+ pub nb_buckets: u32,
+ /**< last used entry. */
+ pub last: *mut ip_frag_pkt,
+ /**< LRU list for table entries. */
+ pub lru: ip_pkt_list,
+ pub __bindgen_padding_0: u64,
+ /**< statistics counters. */
+ pub stat: ip_frag_tbl_stat,
+ /**< hash table. */
+ pub pkt: __IncompleteArrayField<ip_frag_pkt>,
+}
+#[test]
+fn bindgen_test_layout_rte_ip_frag_tbl() {
+ assert_eq!(::std::mem::size_of::<rte_ip_frag_tbl>() , 128usize , concat !
+ ( "Size of: " , stringify ! ( rte_ip_frag_tbl ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . max_cycles as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( max_cycles ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . entry_mask as *
+ const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( entry_mask ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . max_entries as *
+ const _ as usize } , 12usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( max_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . use_entries as *
+ const _ as usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( use_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . bucket_entries as *
+ const _ as usize } , 20usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( bucket_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . nb_entries as *
+ const _ as usize } , 24usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( nb_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . nb_buckets as *
+ const _ as usize } , 28usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( nb_buckets ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . last as * const _
+ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( last ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . lru as * const _ as
+ usize } , 40usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . stat as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( stat ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . pkt as * const _ as
+ usize } , 128usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( pkt ) ));
+}
+impl Clone for rte_ip_frag_tbl {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for rte_ip_frag_tbl {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/**< fragment mbuf */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct rte_mbuf {
+ pub _address: u8,
+}
+impl Clone for rte_mbuf {
+ fn clone(&self) -> Self { *self }
+}
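The layout tests in the expectations above compute field offsets with a C-style `offsetof` idiom: taking the address of a field of a null `*const` and casting it to `usize`. A condensed sketch of the idiom on a hypothetical `Pair` struct (modern Rust would use `std::mem::offset_of!` instead):

```rust
#[repr(C)]
struct Pair {
    a: u32,
    b: u64,
}

fn main() {
    // Null-pointer offsetof, as emitted in the generated layout tests.
    // Technically UB by today's rules, but it is the idiom these
    // generated tests of this era relied on.
    let off_b = unsafe { &(*(0 as *const Pair)).b as *const _ as usize };
    assert_eq!(off_b, 8); // the u64 field is aligned to 8 bytes
}
```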
diff --git a/tests/headers/layout_large_align_field.h b/tests/headers/layout_large_align_field.h
new file mode 100644
index 00000000..f4f412c6
--- /dev/null
+++ b/tests/headers/layout_large_align_field.h
@@ -0,0 +1,97 @@
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define RTE_CACHE_LINE_SIZE 64
+
+/**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+
+enum {
+ IP_LAST_FRAG_IDX, /**< index of last fragment */
+ IP_FIRST_FRAG_IDX, /**< index of first fragment */
+ IP_MIN_FRAG_NUM, /**< minimum number of fragments */
+ IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
+ /**< maximum number of fragments per packet */
+};
+
+/** @internal fragmented mbuf */
+struct ip_frag {
+ uint16_t ofs; /**< offset into the packet */
+ uint16_t len; /**< length of fragment */
+ struct rte_mbuf *mb; /**< fragment mbuf */
+};
+
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+struct ip_frag_key {
+ uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
+    uint32_t id;           /**< fragment ID */
+ uint32_t key_len; /**< src/dst key length */
+};
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
+ struct ip_frag_key key; /**< fragmentation key */
+ uint64_t start; /**< creation timestamp */
+ uint32_t total_size; /**< expected reassembled size */
+ uint32_t frag_size; /**< size of fragments received */
+ uint32_t last_idx; /**< index of next entry to fill */
+ struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
+} __rte_cache_aligned;
+
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
+
+/** fragmentation table statistics */
+struct ip_frag_tbl_stat {
+ uint64_t find_num; /**< total # of find/insert attempts. */
+ uint64_t add_num; /**< # of add ops. */
+ uint64_t del_num; /**< # of del ops. */
+ uint64_t reuse_num; /**< # of reuse (del/add) ops. */
+ uint64_t fail_total; /**< total # of add failures. */
+ uint64_t fail_nospace; /**< # of 'no space' add failures. */
+} __rte_cache_aligned;
+
+/** fragmentation table */
+struct rte_ip_frag_tbl {
+ uint64_t max_cycles; /**< ttl for table entries. */
+ uint32_t entry_mask; /**< hash value mask. */
+ uint32_t max_entries; /**< max entries allowed. */
+ uint32_t use_entries; /**< entries in use. */
+    uint32_t bucket_entries;      /**< hash associativity. */
+ uint32_t nb_entries; /**< total size of the table. */
+ uint32_t nb_buckets; /**< num of associativity lines. */
+ struct ip_frag_pkt *last; /**< last used entry. */
+ struct ip_pkt_list lru; /**< LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /**< statistics counters. */
+ __extension__ struct ip_frag_pkt pkt[0]; /**< hash table. */
+};
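For reference, the `__bindgen_padding_0` fields in the expectations fall out of `__rte_cache_aligned` here: C rounds each such struct up to a multiple of 64 bytes, and since the generated Rust of this era could not declare 64-byte alignment, bindgen fills the tail with `u64` arrays of capped alignment instead. A small sketch of the size arithmetic behind the asserted layouts, assuming a 64-bit target:

```rust
const RTE_CACHE_LINE_SIZE: usize = 64;

// Round a struct's field footprint up to the next cache-line multiple,
// as __attribute__((__aligned__(64))) does in C.
fn cache_aligned_size(field_bytes: usize) -> usize {
    (field_bytes + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE * RTE_CACHE_LINE_SIZE
}

fn main() {
    // ip_frag_pkt: 144 bytes of fields -> 192, leaving 48 bytes = [u64; 6].
    assert_eq!(cache_aligned_size(144), 192);
    // ip_frag_tbl_stat: 48 bytes of fields -> 64, leaving 16 bytes = [u64; 2].
    assert_eq!(cache_aligned_size(48), 64);
}
```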