author     Emilio Cobos Álvarez <emilio@crisal.io>    2020-02-02 20:56:25 +0100
committer  Emilio Cobos Álvarez <emilio@crisal.io>    2020-02-02 23:10:49 +0100
commit     e951825e850dbb527e5de255a6500766dad61994
tree       35e2caee28f449cdbb56819bf48fecb3526b5154
parent     2929af608fdc18c3970e9390bc7d2079e745d752
ir: codegen: Handle too large bitfield units.

We handle them by not generating constructors or bitfield accessors for them,
and by treating the containing type as unable to derive the traits that its
oversized storage array cannot support.

In the future, we could improve on this by splitting contiguous bitfield units,
if needed, so that we can implement them without running into Rust's array
derive limits.

Fixes #1718
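Background note: bindgen stores each bitfield unit in a [u8; N] array, and the
standard library at the time only provided the derivable trait impls for arrays
of at most 32 elements; bindgen tracks that bound as RUST_DERIVE_IN_ARRAY_LIMIT
(32 at the time of this change). The sketch below is only an illustration of
the guard this change applies, not bindgen's actual code; the Layout struct
here is a stand-in for ir::layout::Layout.

    /// Stand-in for bindgen's ir::layout::Layout (illustration only).
    struct Layout {
        /// Size in bytes of the unit's [u8; N] storage.
        size: usize,
    }

    /// Largest array length whose std trait impls existed on the toolchains
    /// bindgen supported when this change landed.
    const RUST_DERIVE_IN_ARRAY_LIMIT: usize = 32;

    /// Mirrors the guard added in codegen: constructors and bitfield accessors
    /// are only emitted while the storage array stays within the derive limit.
    fn can_generate_ctor_and_accessors(unit_layout: &Layout) -> bool {
        unit_layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT
    }

    fn main() {
        assert!(can_generate_ctor_and_accessors(&Layout { size: 8 }));
        // The 44-byte unit from the timex test below is over the limit, so it
        // gets plain storage with no constructor and fewer derived traits.
        assert!(!can_generate_ctor_and_accessors(&Layout { size: 44 }));
    }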
-rw-r--r--  src/codegen/mod.rs                 |  24
-rw-r--r--  src/ir/analysis/derive.rs          |  14
-rw-r--r--  src/ir/comp.rs                     |  64
-rw-r--r--  tests/expectations/tests/timex.rs  | 166
-rw-r--r--  tests/headers/timex.h              |  15
5 files changed, 261 insertions, 22 deletions
diff --git a/src/codegen/mod.rs b/src/codegen/mod.rs
index 5ac8dc11..ab62b135 100644
--- a/src/codegen/mod.rs
+++ b/src/codegen/mod.rs
@@ -1310,23 +1310,26 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
         F: Extend<proc_macro2::TokenStream>,
         M: Extend<proc_macro2::TokenStream>,
     {
+        use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
+
         result.saw_bitfield_unit();
 
+        let layout = self.layout();
+        let unit_field_ty = helpers::bitfield_unit(ctx, layout);
         let field_ty = {
-            let ty = helpers::bitfield_unit(ctx, self.layout());
             if parent.is_union() && !parent.can_be_rust_union(ctx) {
                 result.saw_bindgen_union();
                 if ctx.options().enable_cxx_namespaces {
                     quote! {
-                        root::__BindgenUnionField<#ty>
+                        root::__BindgenUnionField<#unit_field_ty>
                     }
                 } else {
                     quote! {
-                        __BindgenUnionField<#ty>
+                        __BindgenUnionField<#unit_field_ty>
                     }
                 }
             } else {
-                ty
+                unit_field_ty.clone()
             }
         };
 
@@ -1338,12 +1341,13 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
         };
         fields.extend(Some(field));
 
-        let unit_field_ty = helpers::bitfield_unit(ctx, self.layout());
-
         let ctor_name = self.ctor_name();
         let mut ctor_params = vec![];
         let mut ctor_impl = quote! {};
-        let mut generate_ctor = true;
+
+        // We cannot generate any constructor if the underlying storage can't
+        // implement AsRef<[u8]> / AsMut<[u8]> / etc.
+        let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT;
 
         for bf in self.bitfields() {
             // Codegen not allowed for anonymous bitfields
@@ -1351,6 +1355,10 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
                 continue;
             }
 
+            if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT {
+                continue;
+            }
+
             let mut bitfield_representable_as_int = true;
 
             bf.codegen(
@@ -1395,7 +1403,7 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
             }));
         }
 
-        struct_layout.saw_bitfield_unit(self.layout());
+        struct_layout.saw_bitfield_unit(layout);
     }
 }
 
diff --git a/src/ir/analysis/derive.rs b/src/ir/analysis/derive.rs
index 920590b4..f9cc404c 100644
--- a/src/ir/analysis/derive.rs
+++ b/src/ir/analysis/derive.rs
@@ -361,6 +361,20 @@ impl<'ctx> CannotDerive<'ctx> {
                     return CanDerive::No;
                 }
 
+                // Bitfield units are always represented as arrays of u8, but
+                // they're not traced as arrays, so we need to check here
+                // instead.
+                if !self.derive_trait.can_derive_large_array() &&
+                    info.has_too_large_bitfield_unit() &&
+                    !item.is_opaque(self.ctx, &())
+                {
+                    trace!(
+                        "    cannot derive {} for comp with too large bitfield unit",
+                        self.derive_trait
+                    );
+                    return CanDerive::No;
+                }
+
                 let pred = self.derive_trait.consider_edge_comp();
                 return self.constrain_join(item, pred);
             }
diff --git a/src/ir/comp.rs b/src/ir/comp.rs
index cc58e910..23120020 100644
--- a/src/ir/comp.rs
+++ b/src/ir/comp.rs
@@ -6,9 +6,9 @@ use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId};
 use super::dot::DotAttributes;
 use super::item::{IsOpaque, Item};
 use super::layout::Layout;
-// use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
 use super::template::TemplateParameters;
 use super::traversal::{EdgeKind, Trace, Tracer};
+use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
 use clang;
 use codegen::struct_layout::{align_to, bytes_from_bits_pow2};
 use ir::derive::CanDeriveCopy;
@@ -497,7 +497,7 @@ fn raw_fields_to_fields_and_bitfield_units<I>(
     ctx: &BindgenContext,
     raw_fields: I,
     packed: bool,
-) -> Result<Vec<Field>, ()>
+) -> Result<(Vec<Field>, bool), ()>
 where
     I: IntoIterator<Item = RawField>,
 {
@@ -543,7 +543,7 @@ where
         "The above loop should consume all items in `raw_fields`"
     );
 
-    Ok(fields)
+    Ok((fields, bitfield_unit_count != 0))
 }
 
 /// Given a set of contiguous raw bitfields, group and allocate them into
@@ -707,7 +707,10 @@ where
 #[derive(Debug)]
 enum CompFields {
     BeforeComputingBitfieldUnits(Vec<RawField>),
-    AfterComputingBitfieldUnits(Vec<Field>),
+    AfterComputingBitfieldUnits {
+        fields: Vec<Field>,
+        has_bitfield_units: bool,
+    },
     ErrorComputingBitfieldUnits,
 }
 
@@ -744,10 +747,13 @@ impl CompFields {
         let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed);
 
         match result {
-            Ok(fields_and_units) => {
+            Ok((fields, has_bitfield_units)) => {
                 mem::replace(
                     self,
-                    CompFields::AfterComputingBitfieldUnits(fields_and_units),
+                    CompFields::AfterComputingBitfieldUnits {
+                        fields,
+                        has_bitfield_units,
+                    },
                 );
             }
             Err(()) => {
@@ -758,11 +764,11 @@ impl CompFields {
 
     fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) {
         let fields = match *self {
-            CompFields::AfterComputingBitfieldUnits(ref mut fields) => fields,
-            CompFields::ErrorComputingBitfieldUnits => {
-                // Nothing to do here.
-                return;
-            }
+            CompFields::AfterComputingBitfieldUnits {
+                ref mut fields, ..
+            } => fields,
+            // Nothing to do here.
+            CompFields::ErrorComputingBitfieldUnits => return,
             CompFields::BeforeComputingBitfieldUnits(_) => {
                 panic!("Not yet computed bitfield units.");
             }
@@ -859,7 +865,7 @@ impl Trace for CompFields {
                     tracer.visit_kind(f.ty().into(), EdgeKind::Field);
                 }
             }
-            CompFields::AfterComputingBitfieldUnits(ref fields) => {
+            CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
                 for f in fields {
                     f.trace(context, tracer, &());
                 }
@@ -1061,7 +1067,7 @@ impl CompInfo {
     /// Construct a new compound type.
     pub fn new(kind: CompKind) -> Self {
         CompInfo {
-            kind: kind,
+            kind,
             fields: CompFields::default(),
             template_params: vec![],
             methods: vec![],
@@ -1124,13 +1130,43 @@ impl CompInfo {
     pub fn fields(&self) -> &[Field] {
         match self.fields {
             CompFields::ErrorComputingBitfieldUnits => &[],
-            CompFields::AfterComputingBitfieldUnits(ref fields) => fields,
+            CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
+                fields
+            }
             CompFields::BeforeComputingBitfieldUnits(_) => {
                 panic!("Should always have computed bitfield units first");
             }
         }
     }
 
+    fn has_bitfields(&self) -> bool {
+        match self.fields {
+            CompFields::ErrorComputingBitfieldUnits => false,
+            CompFields::AfterComputingBitfieldUnits {
+                has_bitfield_units,
+                ..
+            } => has_bitfield_units,
+            CompFields::BeforeComputingBitfieldUnits(_) => {
+                panic!("Should always have computed bitfield units first");
+            }
+        }
+    }
+
+    /// Returns whether we have a too large bitfield unit, in which case we may
+    /// not be able to derive some of the things we should be able to normally
+    /// derive.
+    pub fn has_too_large_bitfield_unit(&self) -> bool {
+        if !self.has_bitfields() {
+            return false;
+        }
+        self.fields().iter().any(|field| match *field {
+            Field::DataMember(..) => false,
+            Field::Bitfields(ref unit) => {
+                unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT
+            }
+        })
+    }
+
     /// Does this type have any template parameters that aren't types
     /// (e.g. int)?
     pub fn has_non_type_template_params(&self) -> bool {
diff --git a/tests/expectations/tests/timex.rs b/tests/expectations/tests/timex.rs
new file mode 100644
index 00000000..19543173
--- /dev/null
+++ b/tests/expectations/tests/timex.rs
@@ -0,0 +1,166 @@
+/* automatically generated by rust-bindgen */
+
+#![allow(
+    dead_code,
+    non_snake_case,
+    non_camel_case_types,
+    non_upper_case_globals
+)]
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage, Align> {
+    storage: Storage,
+    align: [Align; 0],
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align> {
+    #[inline]
+    pub const fn new(storage: Storage) -> Self {
+        Self { storage, align: [] }
+    }
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align>
+where
+    Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+    #[inline]
+    pub fn get_bit(&self, index: usize) -> bool {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = self.storage.as_ref()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        byte & mask == mask
+    }
+    #[inline]
+    pub fn set_bit(&mut self, index: usize, val: bool) {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = &mut self.storage.as_mut()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        if val {
+            *byte |= mask;
+        } else {
+            *byte &= !mask;
+        }
+    }
+    #[inline]
+    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!(
+            (bit_offset + (bit_width as usize)) / 8 <=
+                self.storage.as_ref().len()
+        );
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if self.get_bit(i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!(
+            (bit_offset + (bit_width as usize)) / 8 <=
+                self.storage.as_ref().len()
+        );
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            self.set_bit(index + bit_offset, val_bit_is_set);
+        }
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct timex {
+    pub tai: ::std::os::raw::c_int,
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 44usize], u8>,
+}
+#[test]
+fn bindgen_test_layout_timex() {
+    assert_eq!(
+        ::std::mem::size_of::<timex>(),
+        48usize,
+        concat!("Size of: ", stringify!(timex))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<timex>(),
+        4usize,
+        concat!("Alignment of ", stringify!(timex))
+    );
+    assert_eq!(
+        unsafe { &(*(::std::ptr::null::<timex>())).tai as *const _ as usize },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(timex),
+            "::",
+            stringify!(tai)
+        )
+    );
+}
+impl Default for timex {
+    fn default() -> Self {
+        unsafe { ::std::mem::zeroed() }
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct timex_named {
+    pub tai: ::std::os::raw::c_int,
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 44usize], u32>,
+}
+#[test]
+fn bindgen_test_layout_timex_named() {
+    assert_eq!(
+        ::std::mem::size_of::<timex_named>(),
+        48usize,
+        concat!("Size of: ", stringify!(timex_named))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<timex_named>(),
+        4usize,
+        concat!("Alignment of ", stringify!(timex_named))
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<timex_named>())).tai as *const _ as usize
+        },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(timex_named),
+            "::",
+            stringify!(tai)
+        )
+    );
+}
+impl Default for timex_named {
+    fn default() -> Self {
+        unsafe { ::std::mem::zeroed() }
+    }
+}
diff --git a/tests/headers/timex.h b/tests/headers/timex.h
new file mode 100644
index 00000000..1add26ca
--- /dev/null
+++ b/tests/headers/timex.h
@@ -0,0 +1,15 @@
+struct timex {
+  int tai;
+
+  int :32; int :32; int :32; int :32;
+  int :32; int :32; int :32; int :32;
+  int :32; int :32; int :32;
+};
+
+struct timex_named {
+  int tai;
+
+  int a:32; int b:32; int c:32; int d:32;
+  int e:32; int f:32; int g:32; int h:32;
+  int i:32; int j:32; int k:32;
+};
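For downstream users, the practical effect is visible in the timex expectation
above: the struct keeps its _bitfield_1 field but only derives Copy and Clone,
gets a hand-written Default impl, and has no generated bitfield constructor or
accessors. The snippet below is a self-contained sketch of what such bindings
look like to a consumer, not bindgen output; TimexLike is an invented name and
the __BindgenBitfieldUnit wrapper is replaced here by its raw storage array.

    // Sketch mirroring the shape of the generated timex type above.
    #[repr(C)]
    #[derive(Copy, Clone)]
    pub struct TimexLike {
        pub tai: ::std::os::raw::c_int,
        pub _bitfield_1: [u8; 44usize],
    }

    // Matches the manual impl bindgen emits when Default cannot be derived.
    impl Default for TimexLike {
        fn default() -> Self {
            unsafe { ::std::mem::zeroed() }
        }
    }

    fn main() {
        let t = TimexLike::default();
        // Plain data access still works; what is missing are the derived
        // Debug/PartialEq/Hash impls and the generated bitfield constructor
        // and accessors for _bitfield_1.
        assert_eq!(t.tai, 0);
    }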