summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNick Fitzgerald <fitzgen@gmail.com>2017-07-26 11:58:34 -0700
committerNick Fitzgerald <fitzgen@gmail.com>2017-07-26 11:58:34 -0700
commitff3810bb0ab763a9f1160892d5c5f005034f99a0 (patch)
tree647527b42b85aa1bc88f767ac3a7a6a3f31b7d3e
parentbb62ad6fffb7a135e3b38d59cfdd72c7231035c1 (diff)
Be conservative about deriving Debug/Default with large alignment
When there is large enough alignment that we might generate padding which has more members than `RUST_DERIVE_IN_ARRAY_LIMIT`, we can break our ability to derive traits. This commit solves this issue conservatively: there are cases where we leave a derive on the table, because in order to know that we could add that derive, we would need to compute padding before we determine whether we can derive. Fixes #648
-rw-r--r--src/ir/analysis/derive_debug.rs9
-rw-r--r--src/ir/comp.rs5
-rw-r--r--tests/expectations/tests/issue-648-derive-debug-with-padding.rs65
-rw-r--r--tests/expectations/tests/layout_array.rs6
-rw-r--r--tests/expectations/tests/layout_array_too_long.rs2
-rw-r--r--tests/expectations/tests/layout_kni_mbuf.rs2
-rw-r--r--tests/expectations/tests/layout_large_align_field.rs9
-rw-r--r--tests/expectations/tests/layout_mbuf.rs2
-rw-r--r--tests/headers/issue-648-derive-debug-with-padding.h22
9 files changed, 113 insertions, 9 deletions
diff --git a/src/ir/analysis/derive_debug.rs b/src/ir/analysis/derive_debug.rs
index 0dc1a0c5..ef3b1e00 100644
--- a/src/ir/analysis/derive_debug.rs
+++ b/src/ir/analysis/derive_debug.rs
@@ -141,6 +141,15 @@ impl<'ctx, 'gen> MonotoneFramework for CannotDeriveDebug<'ctx, 'gen> {
};
}
+ if ty.layout(self.ctx).map_or(false, |l| l.align > RUST_DERIVE_IN_ARRAY_LIMIT) {
+ // We have to be conservative: the struct *could* have enough
+ // padding that we emit an array that is longer than
+ // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations
+ // into the IR and computed them before this analysis, then we could
+ // be precise rather than conservative here.
+ return self.insert(id);
+ }
+
match *ty.kind() {
// Handle the simple cases. These can derive debug without further
// information.
diff --git a/src/ir/comp.rs b/src/ir/comp.rs
index d6644466..c2e9bd8f 100644
--- a/src/ir/comp.rs
+++ b/src/ir/comp.rs
@@ -7,6 +7,7 @@ use super::dot::DotAttributes;
use super::item::{IsOpaque, Item};
use super::layout::Layout;
use super::traversal::{EdgeKind, Trace, Tracer};
+use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use super::template::TemplateParameters;
use clang;
use codegen::struct_layout::{align_to, bytes_from_bits_pow2};
@@ -1435,6 +1436,10 @@ impl<'a> CanDeriveDefault<'a> for CompInfo {
return true;
}
+ if layout.map_or(false, |l| l.align > RUST_DERIVE_IN_ARRAY_LIMIT) {
+ return false;
+ }
+
if self.kind == CompKind::Union {
if ctx.options().unstable_rust {
return false;
diff --git a/tests/expectations/tests/issue-648-derive-debug-with-padding.rs b/tests/expectations/tests/issue-648-derive-debug-with-padding.rs
new file mode 100644
index 00000000..bcd5141e
--- /dev/null
+++ b/tests/expectations/tests/issue-648-derive-debug-with-padding.rs
@@ -0,0 +1,65 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals)]
+
+
+/// We emit a `[u8; 63usize]` padding field for this struct, which cannot derive
+/// Debug because 63 is over the hard coded limit. (Yes, this struct doesn't end
+/// up with the right alignment, we're waiting on `#[repr(align="N")]` to land
+/// in rustc).
+#[repr(C)]
+#[derive(Copy)]
+pub struct NoDebug {
+ pub c: ::std::os::raw::c_char,
+ pub __bindgen_padding_0: [u8; 63usize],
+}
+#[test]
+fn bindgen_test_layout_NoDebug() {
+ assert_eq!(::std::mem::size_of::<NoDebug>() , 64usize , concat ! (
+ "Size of: " , stringify ! ( NoDebug ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const NoDebug ) ) . c as * const _ as usize } ,
+ 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( NoDebug ) , "::" ,
+ stringify ! ( c ) ));
+}
+impl Clone for NoDebug {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for NoDebug {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/// This should derive Debug because the padding size is less than the max derive
+/// Debug impl for arrays. However, we conservatively don't derive Debug because
+/// we determine Debug derive-ability before we compute padding, which happens at
+/// codegen. (Again, we expect to get the alignment wrong for similar reasons.)
+#[repr(C)]
+#[derive(Copy)]
+pub struct ShouldDeriveDebugButDoesNot {
+ pub c: [::std::os::raw::c_char; 32usize],
+ pub d: ::std::os::raw::c_char,
+ pub __bindgen_padding_0: [u8; 31usize],
+}
+#[test]
+fn bindgen_test_layout_ShouldDeriveDebugButDoesNot() {
+ assert_eq!(::std::mem::size_of::<ShouldDeriveDebugButDoesNot>() , 64usize
+ , concat ! (
+ "Size of: " , stringify ! ( ShouldDeriveDebugButDoesNot ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ShouldDeriveDebugButDoesNot ) ) . c as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ShouldDeriveDebugButDoesNot ) , "::" , stringify ! ( c ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ShouldDeriveDebugButDoesNot ) ) . d as *
+ const _ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ShouldDeriveDebugButDoesNot ) , "::" , stringify ! ( d ) ));
+}
+impl Clone for ShouldDeriveDebugButDoesNot {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ShouldDeriveDebugButDoesNot {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
diff --git a/tests/expectations/tests/layout_array.rs b/tests/expectations/tests/layout_array.rs
index 06bc213c..bccd5004 100644
--- a/tests/expectations/tests/layout_array.rs
+++ b/tests/expectations/tests/layout_array.rs
@@ -47,7 +47,7 @@ pub type rte_mempool_get_count =
-> ::std::os::raw::c_uint>;
/// Structure defining mempool operations structure
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct rte_mempool_ops {
/// < Name of mempool ops struct.
pub name: [::std::os::raw::c_char; 32usize],
@@ -134,7 +134,7 @@ impl Clone for rte_spinlock_t {
/// any function pointers stored directly in the mempool struct would not be.
/// This results in us simply having "ops_index" in the mempool struct.
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct rte_mempool_ops_table {
/// < Spinlock for add/delete.
pub sl: rte_spinlock_t,
@@ -173,7 +173,7 @@ impl Default for rte_mempool_ops_table {
}
/// Structure to hold malloc heap
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct malloc_heap {
pub lock: rte_spinlock_t,
pub free_head: [malloc_heap__bindgen_ty_1; 13usize],
diff --git a/tests/expectations/tests/layout_array_too_long.rs b/tests/expectations/tests/layout_array_too_long.rs
index b80d0656..9c825625 100644
--- a/tests/expectations/tests/layout_array_too_long.rs
+++ b/tests/expectations/tests/layout_array_too_long.rs
@@ -96,7 +96,7 @@ impl Clone for ip_frag_key {
/// @internal Fragmented packet to reassemble.
/// First two entries in the frags[] array are for the last and first fragments.
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct ip_frag_pkt {
/// < LRU list
pub lru: ip_frag_pkt__bindgen_ty_1,
diff --git a/tests/expectations/tests/layout_kni_mbuf.rs b/tests/expectations/tests/layout_kni_mbuf.rs
index f86fee57..72552bea 100644
--- a/tests/expectations/tests/layout_kni_mbuf.rs
+++ b/tests/expectations/tests/layout_kni_mbuf.rs
@@ -7,7 +7,7 @@
pub const RTE_CACHE_LINE_MIN_SIZE: ::std::os::raw::c_uint = 64;
pub const RTE_CACHE_LINE_SIZE: ::std::os::raw::c_uint = 64;
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct rte_kni_mbuf {
pub buf_addr: *mut ::std::os::raw::c_void,
pub buf_physaddr: u64,
diff --git a/tests/expectations/tests/layout_large_align_field.rs b/tests/expectations/tests/layout_large_align_field.rs
index 40de09d3..6649f10d 100644
--- a/tests/expectations/tests/layout_large_align_field.rs
+++ b/tests/expectations/tests/layout_large_align_field.rs
@@ -129,7 +129,7 @@ impl Clone for ip_frag_key {
/// @internal Fragmented packet to reassemble.
/// First two entries in the frags[] array are for the last and first fragments.
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct ip_frag_pkt {
/// < LRU list
pub lru: ip_frag_pkt__bindgen_ty_1,
@@ -258,7 +258,7 @@ impl Default for ip_pkt_list {
}
/// fragmentation table statistics
#[repr(C)]
-#[derive(Debug, Default, Copy)]
+#[derive(Copy)]
pub struct ip_frag_tbl_stat {
/// < total # of find/insert attempts.
pub find_num: u64,
@@ -312,9 +312,12 @@ fn bindgen_test_layout_ip_frag_tbl_stat() {
impl Clone for ip_frag_tbl_stat {
fn clone(&self) -> Self { *self }
}
+impl Default for ip_frag_tbl_stat {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
/// fragmentation table
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct rte_ip_frag_tbl {
/// < ttl for table entries.
pub max_cycles: u64,
diff --git a/tests/expectations/tests/layout_mbuf.rs b/tests/expectations/tests/layout_mbuf.rs
index 29779b3c..2b614c6d 100644
--- a/tests/expectations/tests/layout_mbuf.rs
+++ b/tests/expectations/tests/layout_mbuf.rs
@@ -58,7 +58,7 @@ impl Clone for rte_atomic16_t {
}
/// The generic rte_mbuf, containing a packet mbuf.
#[repr(C)]
-#[derive(Debug, Copy)]
+#[derive(Copy)]
pub struct rte_mbuf {
pub cacheline0: MARKER,
/// < Virtual address of segment buffer.
diff --git a/tests/headers/issue-648-derive-debug-with-padding.h b/tests/headers/issue-648-derive-debug-with-padding.h
new file mode 100644
index 00000000..2bd5927a
--- /dev/null
+++ b/tests/headers/issue-648-derive-debug-with-padding.h
@@ -0,0 +1,22 @@
+/**
+ * We emit a `[u8; 63usize]` padding field for this struct, which cannot derive
+ * Debug because 63 is over the hard coded limit. (Yes, this struct doesn't end
+ * up with the right alignment, we're waiting on `#[repr(align="N")]` to land
+ * in rustc).
+ */
+struct NoDebug {
+ char c;
+ // padding of 63 bytes
+} __attribute__((__aligned__(64)));
+
+/**
+ * This should derive Debug because the padding size is less than the max derive
+ * Debug impl for arrays. However, we conservatively don't derive Debug because
+ * we determine Debug derive-ability before we compute padding, which happens at
+ * codegen. (Again, we expect to get the alignment wrong for similar reasons.)
+ */
+struct ShouldDeriveDebugButDoesNot {
+ char c[32];
+ char d;
+ // padding of 31 bytes
+} __attribute__((__aligned__(64)));