summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/codegen/mod.rs826
-rw-r--r--src/codegen/struct_layout.rs47
-rw-r--r--src/ir/comp.rs702
-rw-r--r--src/ir/context.rs37
-rw-r--r--src/ir/dot.rs2
-rw-r--r--src/ir/item.rs5
-rw-r--r--src/ir/ty.rs18
-rw-r--r--src/lib.rs1
8 files changed, 1119 insertions, 519 deletions
diff --git a/src/codegen/mod.rs b/src/codegen/mod.rs
index 550ff90c..fdf61f40 100644
--- a/src/codegen/mod.rs
+++ b/src/codegen/mod.rs
@@ -1,14 +1,16 @@
mod error;
mod helpers;
-mod struct_layout;
+pub mod struct_layout;
use self::helpers::{BlobTyBuilder, attributes};
-use self::struct_layout::{StructLayoutTracker, bytes_from_bits_pow2};
-use self::struct_layout::{align_to, bytes_from_bits};
+use self::struct_layout::StructLayoutTracker;
+
use aster;
+use aster::struct_field::StructFieldBuilder;
use ir::annotations::FieldAccessorKind;
-use ir::comp::{Base, CompInfo, CompKind, Field, Method, MethodKind};
+use ir::comp::{Base, BitfieldUnit, Bitfield, CompInfo, CompKind, Field,
+ FieldData, FieldMethods, Method, MethodKind};
use ir::context::{BindgenContext, ItemId};
use ir::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
use ir::dot;
@@ -27,7 +29,6 @@ use ir::var::Var;
use std::borrow::Cow;
use std::cell::Cell;
-use std::cmp;
use std::collections::{HashSet, VecDeque};
use std::collections::hash_map::{Entry, HashMap};
use std::fmt::Write;
@@ -736,110 +737,341 @@ impl<'a> TryToRustTy for Vtable<'a> {
}
}
-struct Bitfield<'a> {
- index: &'a mut usize,
- fields: Vec<&'a Field>,
+impl CodeGenerator for TemplateInstantiation {
+ type Extra = Item;
+
+ fn codegen<'a>(&self,
+ ctx: &BindgenContext,
+ result: &mut CodegenResult<'a>,
+ _whitelisted_items: &ItemSet,
+ item: &Item) {
+ // Although uses of instantiations don't need code generation, and are
+ // just converted to rust types in fields, vars, etc, we take this
+ // opportunity to generate tests for their layout here.
+ if !ctx.options().layout_tests {
+ return
+ }
+
+ let layout = item.kind().expect_type().layout(ctx);
+
+ if let Some(layout) = layout {
+ let size = layout.size;
+ let align = layout.align;
+
+ let name = item.canonical_name(ctx);
+ let fn_name = format!("__bindgen_test_layout_{}_instantiation_{}",
+ name,
+ item.id().as_usize());
+ let fn_name = ctx.rust_ident_raw(&fn_name);
+
+ let prefix = ctx.trait_prefix();
+ let ident = item.to_rust_ty_or_opaque(ctx, &());
+ let size_of_expr = quote_expr!(ctx.ext_cx(),
+ ::$prefix::mem::size_of::<$ident>());
+ let align_of_expr = quote_expr!(ctx.ext_cx(),
+ ::$prefix::mem::align_of::<$ident>());
+
+ let item = quote_item!(
+ ctx.ext_cx(),
+ #[test]
+ fn $fn_name() {
+ assert_eq!($size_of_expr, $size,
+ concat!("Size of template specialization: ",
+ stringify!($ident)));
+ assert_eq!($align_of_expr, $align,
+ concat!("Alignment of template specialization: ",
+ stringify!($ident)));
+ })
+ .unwrap();
+
+ result.push(item);
+ }
+ }
+}
+
+/// Generates an infinite number of anonymous field names.
+struct AnonFieldNames(usize);
+
+impl Default for AnonFieldNames {
+ fn default() -> AnonFieldNames {
+ AnonFieldNames(0)
+ }
+}
+
+impl Iterator for AnonFieldNames {
+ type Item = String;
+
+ fn next(&mut self) -> Option<String> {
+ self.0 += 1;
+ Some(format!("__bindgen_anon_{}", self.0))
+ }
+}
+
+/// Trait for implementing the code generation of a struct or union field.
+trait FieldCodegen<'a> {
+ type Extra;
+
+ fn codegen<F, M>(&self,
+ ctx: &BindgenContext,
+ fields_should_be_private: bool,
+ accessor_kind: FieldAccessorKind,
+ parent: &CompInfo,
+ anon_field_names: &mut AnonFieldNames,
+ result: &mut CodegenResult,
+ struct_layout: &mut StructLayoutTracker,
+ fields: &mut F,
+ methods: &mut M,
+ extra: Self::Extra)
+ where F: Extend<ast::StructField>,
+ M: Extend<ast::ImplItem>;
}
-impl<'a> Bitfield<'a> {
- fn new(index: &'a mut usize, fields: Vec<&'a Field>) -> Self {
- Bitfield {
- index: index,
- fields: fields,
+impl<'a> FieldCodegen<'a> for Field {
+ type Extra = ();
+
+ fn codegen<F, M>(&self,
+ ctx: &BindgenContext,
+ fields_should_be_private: bool,
+ accessor_kind: FieldAccessorKind,
+ parent: &CompInfo,
+ anon_field_names: &mut AnonFieldNames,
+ result: &mut CodegenResult,
+ struct_layout: &mut StructLayoutTracker,
+ fields: &mut F,
+ methods: &mut M,
+ _: ())
+ where F: Extend<ast::StructField>,
+ M: Extend<ast::ImplItem>
+ {
+ match *self {
+ Field::DataMember(ref data) => {
+ data.codegen(ctx,
+ fields_should_be_private,
+ accessor_kind,
+ parent,
+ anon_field_names,
+ result,
+ struct_layout,
+ fields,
+ methods,
+ ());
+ }
+ Field::Bitfields(ref unit) => {
+ unit.codegen(ctx,
+ fields_should_be_private,
+ accessor_kind,
+ parent,
+ anon_field_names,
+ result,
+ struct_layout,
+ fields,
+ methods,
+ ());
+ }
}
}
+}
- fn codegen_fields(self,
- ctx: &BindgenContext,
- parent: &CompInfo,
- fields: &mut Vec<ast::StructField>,
- methods: &mut Vec<ast::ImplItem>)
- -> Layout {
- // NOTE: What follows is reverse-engineered from LLVM's
- // lib/AST/RecordLayoutBuilder.cpp
- //
- // FIXME(emilio): There are some differences between Microsoft and the
- // Itanium ABI, but we'll ignore those and stick to Itanium for now.
- //
- // Also, we need to handle packed bitfields and stuff.
- // TODO(emilio): Take into account C++'s wide bitfields, and
- // packing, sigh.
- let mut total_size_in_bits = 0;
- let mut max_align = 0;
- let mut unfilled_bits_in_last_unit = 0;
- let mut field_size_in_bits = 0;
- *self.index += 1;
- let mut last_field_name = format!("_bitfield_{}", self.index);
- let mut last_field_align = 0;
-
- // (name, mask, width, bitfield's type, bitfield's layout)
- let mut bitfields: Vec<(&str, usize, usize, ast::Ty, Layout)> = vec![];
-
- for field in self.fields {
- let width = field.bitfield().unwrap() as usize;
- let field_item = ctx.resolve_item(field.ty());
- let field_ty_layout = field_item.kind()
- .expect_type()
- .layout(ctx)
- .expect("Bitfield without layout? Gah!");
- let field_align = field_ty_layout.align;
-
- if field_size_in_bits != 0 &&
- (width == 0 || width > unfilled_bits_in_last_unit) {
- // We've finished a physical field, so flush it and its bitfields.
- field_size_in_bits = align_to(field_size_in_bits, field_align);
- fields.push(flush_bitfields(ctx,
- parent,
- field_size_in_bits,
- last_field_align,
- &last_field_name,
- bitfields.drain(..),
- methods));
-
- // TODO(emilio): dedup this.
- *self.index += 1;
- last_field_name = format!("_bitfield_{}", self.index);
-
- // Now reset the size and the rest of stuff.
- // unfilled_bits_in_last_unit = 0;
- field_size_in_bits = 0;
- last_field_align = 0;
- }
-
- if let Some(name) = field.name() {
- let field_item_ty = field_item.to_rust_ty_or_opaque(ctx, &());
- bitfields.push((name,
- field_size_in_bits,
- width,
- field_item_ty.unwrap(),
- field_ty_layout));
- }
-
- field_size_in_bits += width;
- total_size_in_bits += width;
-
- let data_size = align_to(field_size_in_bits, field_align * 8);
-
- max_align = cmp::max(max_align, field_align);
-
- // NB: The width here is completely, absolutely intentional.
- last_field_align = cmp::max(last_field_align, width);
-
- unfilled_bits_in_last_unit = data_size - field_size_in_bits;
- }
-
- if field_size_in_bits != 0 {
- // Flush the last physical field and its bitfields.
- fields.push(flush_bitfields(ctx,
- parent,
- field_size_in_bits,
- last_field_align,
- &last_field_name,
- bitfields.drain(..),
- methods));
- }
-
- Layout::new(bytes_from_bits(total_size_in_bits), max_align)
+impl<'a> FieldCodegen<'a> for FieldData {
+ type Extra = ();
+
+ fn codegen<F, M>(&self,
+ ctx: &BindgenContext,
+ fields_should_be_private: bool,
+ accessor_kind: FieldAccessorKind,
+ parent: &CompInfo,
+ anon_field_names: &mut AnonFieldNames,
+ result: &mut CodegenResult,
+ struct_layout: &mut StructLayoutTracker,
+ fields: &mut F,
+ methods: &mut M,
+ _: ())
+ where F: Extend<ast::StructField>,
+ M: Extend<ast::ImplItem>
+ {
+ // Bitfields are handled by `FieldCodegen` implementations for
+ // `BitfieldUnit` and `Bitfield`.
+ assert!(self.bitfield().is_none());
+
+ let field_ty = ctx.resolve_type(self.ty());
+ let ty = self.ty().to_rust_ty_or_opaque(ctx, &());
+
+ // NB: In unstable rust we use proper `union` types.
+ let ty = if parent.is_union() && !ctx.options().unstable_rust {
+ if ctx.options().enable_cxx_namespaces {
+ quote_ty!(ctx.ext_cx(), root::__BindgenUnionField<$ty>)
+ } else {
+ quote_ty!(ctx.ext_cx(), __BindgenUnionField<$ty>)
+ }
+ } else if let Some(item) =
+ field_ty.is_incomplete_array(ctx) {
+ result.saw_incomplete_array();
+
+ let inner = item.to_rust_ty_or_opaque(ctx, &());
+
+ if ctx.options().enable_cxx_namespaces {
+ quote_ty!(ctx.ext_cx(), root::__IncompleteArrayField<$inner>)
+ } else {
+ quote_ty!(ctx.ext_cx(), __IncompleteArrayField<$inner>)
+ }
+ } else {
+ ty
+ };
+
+ let mut attrs = vec![];
+ if ctx.options().generate_comments {
+ if let Some(comment) = self.comment() {
+ attrs.push(attributes::doc(comment));
+ }
+ }
+
+ let field_name = self.name()
+ .map(|name| ctx.rust_mangle(name).into_owned())
+ .unwrap_or_else(|| anon_field_names.next().unwrap());
+
+ if !parent.is_union() {
+ if let Some(padding_field) =
+ struct_layout.pad_field(&field_name, field_ty, self.offset()) {
+ fields.extend(Some(padding_field));
+ }
+ }
+
+ let is_private = self.annotations()
+ .private_fields()
+ .unwrap_or(fields_should_be_private);
+
+ let accessor_kind = self.annotations()
+ .accessor_kind()
+ .unwrap_or(accessor_kind);
+
+ let mut field = StructFieldBuilder::named(&field_name);
+
+ if !is_private {
+ field = field.pub_();
+ }
+
+ let field = field.with_attrs(attrs)
+ .build_ty(ty.clone());
+
+ fields.extend(Some(field));
+
+ // TODO: Factor the following code out, please!
+ if accessor_kind == FieldAccessorKind::None {
+ return;
+ }
+
+ let getter_name =
+ ctx.rust_ident_raw(&format!("get_{}", field_name));
+ let mutable_getter_name =
+ ctx.rust_ident_raw(&format!("get_{}_mut", field_name));
+ let field_name = ctx.rust_ident_raw(&field_name);
+
+ let accessor_methods_impl = match accessor_kind {
+ FieldAccessorKind::None => unreachable!(),
+ FieldAccessorKind::Regular => {
+ quote_item!(ctx.ext_cx(),
+ impl X {
+ #[inline]
+ pub fn $getter_name(&self) -> &$ty {
+ &self.$field_name
+ }
+
+ #[inline]
+ pub fn $mutable_getter_name(&mut self) -> &mut $ty {
+ &mut self.$field_name
+ }
+ }
+ )
+ }
+ FieldAccessorKind::Unsafe => {
+ quote_item!(ctx.ext_cx(),
+ impl X {
+ #[inline]
+ pub unsafe fn $getter_name(&self) -> &$ty {
+ &self.$field_name
+ }
+
+ #[inline]
+ pub unsafe fn $mutable_getter_name(&mut self)
+ -> &mut $ty {
+ &mut self.$field_name
+ }
+ }
+ )
+ }
+ FieldAccessorKind::Immutable => {
+ quote_item!(ctx.ext_cx(),
+ impl X {
+ #[inline]
+ pub fn $getter_name(&self) -> &$ty {
+ &self.$field_name
+ }
+ }
+ )
+ }
+ };
+
+ match accessor_methods_impl.unwrap().node {
+ ast::ItemKind::Impl(_, _, _, _, _, ref items) => {
+ methods.extend(items.clone())
+ }
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl<'a> FieldCodegen<'a> for BitfieldUnit {
+ type Extra = ();
+
+ fn codegen<F, M>(&self,
+ ctx: &BindgenContext,
+ fields_should_be_private: bool,
+ accessor_kind: FieldAccessorKind,
+ parent: &CompInfo,
+ anon_field_names: &mut AnonFieldNames,
+ result: &mut CodegenResult,
+ struct_layout: &mut StructLayoutTracker,
+ fields: &mut F,
+ methods: &mut M,
+ _: ())
+ where F: Extend<ast::StructField>,
+ M: Extend<ast::ImplItem>
+ {
+ let field_ty = BlobTyBuilder::new(self.layout()).build();
+ let unit_field_name = format!("_bitfield_{}", self.nth());
+
+ let field = StructFieldBuilder::named(&unit_field_name)
+ .pub_()
+ .build_ty(field_ty.clone());
+ fields.extend(Some(field));
+
+ let unit_field_int_ty = match self.layout().size {
+ 8 => quote_ty!(ctx.ext_cx(), u64),
+ 4 => quote_ty!(ctx.ext_cx(), u32),
+ 2 => quote_ty!(ctx.ext_cx(), u16),
+ 1 => quote_ty!(ctx.ext_cx(), u8),
+ _ => {
+                // Can't generate bitfield accessors for unit sizes larger than
+                // 64 bits at the moment.
+ struct_layout.saw_bitfield_unit(self.layout());
+ return;
+ }
+ };
+
+ for bf in self.bitfields() {
+ bf.codegen(ctx,
+ fields_should_be_private,
+ accessor_kind,
+ parent,
+ anon_field_names,
+ result,
+ struct_layout,
+ fields,
+ methods,
+ (&unit_field_name, unit_field_int_ty.clone()));
+ }
+
+ struct_layout.saw_bitfield_unit(self.layout());
}
}
@@ -889,58 +1121,51 @@ fn bitfield_setter_name(ctx: &BindgenContext,
ctx.ext_cx().ident_of(&setter)
}
-/// A physical field (which is a word or byte or ...) has many logical bitfields
-/// contained within it, but not all bitfields are in the same physical field of
-/// a struct. This function creates a single physical field and flushes all the
-/// accessors for the logical `bitfields` within that physical field to the
-/// outgoing `methods`.
-fn flush_bitfields<'a, I>(ctx: &BindgenContext,
- parent: &CompInfo,
- field_size_in_bits: usize,
- field_align: usize,
- field_name: &str,
- bitfields: I,
- methods: &mut Vec<ast::ImplItem>) -> ast::StructField
- where I: IntoIterator<Item = (&'a str, usize, usize, ast::Ty, Layout)>
-{
- use aster::struct_field::StructFieldBuilder;
-
- let field_layout = Layout::new(bytes_from_bits_pow2(field_size_in_bits),
- bytes_from_bits_pow2(field_align));
- let field_ty = BlobTyBuilder::new(field_layout).build();
-
- let field = StructFieldBuilder::named(field_name)
- .pub_()
- .build_ty(field_ty.clone());
-
- let field_int_ty = match field_layout.size {
- 8 => quote_ty!(ctx.ext_cx(), u64),
- 4 => quote_ty!(ctx.ext_cx(), u32),
- 2 => quote_ty!(ctx.ext_cx(), u16),
- 1 => quote_ty!(ctx.ext_cx(), u8),
- _ => return field
- };
+impl<'a> FieldCodegen<'a> for Bitfield {
+ type Extra = (&'a str, P<ast::Ty>);
- for (name, offset, width, bitfield_ty, bitfield_layout) in bitfields {
+ fn codegen<F, M>(&self,
+ ctx: &BindgenContext,
+ _fields_should_be_private: bool,
+ _accessor_kind: FieldAccessorKind,
+ parent: &CompInfo,
+ _anon_field_names: &mut AnonFieldNames,
+ _result: &mut CodegenResult,
+ _struct_layout: &mut StructLayoutTracker,
+ _fields: &mut F,
+ methods: &mut M,
+ (unit_field_name,
+ unit_field_int_ty): (&'a str, P<ast::Ty>))
+ where F: Extend<ast::StructField>,
+ M: Extend<ast::ImplItem>
+ {
let prefix = ctx.trait_prefix();
- let getter_name = bitfield_getter_name(ctx, parent, name);
- let setter_name = bitfield_setter_name(ctx, parent, name);
- let field_ident = ctx.ext_cx().ident_of(field_name);
+ let getter_name = bitfield_getter_name(ctx, parent, self.name());
+ let setter_name = bitfield_setter_name(ctx, parent, self.name());
+ let unit_field_ident = ctx.ext_cx().ident_of(unit_field_name);
+
+ let bitfield_ty_item = ctx.resolve_item(self.ty());
+ let bitfield_ty = bitfield_ty_item.expect_type();
- let bitfield_int_ty = BlobTyBuilder::new(bitfield_layout).build();
+ let bitfield_ty_layout = bitfield_ty.layout(ctx)
+ .expect("Bitfield without layout? Gah!");
+ let bitfield_int_ty = BlobTyBuilder::new(bitfield_ty_layout).build();
- let mask: usize = ((1usize << width) - 1usize) << offset;
+ let bitfield_ty = bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item);
+
+ let offset = self.offset_into_unit();
+ let mask: usize = ((1usize << self.width()) - 1usize) << offset;
let impl_item = quote_item!(
ctx.ext_cx(),
impl XxxIgnored {
#[inline]
pub fn $getter_name(&self) -> $bitfield_ty {
- let mask = $mask as $field_int_ty;
- let field_val: $field_int_ty = unsafe {
- ::$prefix::mem::transmute(self.$field_ident)
+ let mask = $mask as $unit_field_int_ty;
+ let unit_field_val: $unit_field_int_ty = unsafe {
+ ::$prefix::mem::transmute(self.$unit_field_ident)
};
- let val = (field_val & mask) >> $offset;
+ let val = (unit_field_val & mask) >> $offset;
unsafe {
::$prefix::mem::transmute(val as $bitfield_int_ty)
}
@@ -948,17 +1173,17 @@ fn flush_bitfields<'a, I>(ctx: &BindgenContext,
#[inline]
pub fn $setter_name(&mut self, val: $bitfield_ty) {
- let mask = $mask as $field_int_ty;
- let val = val as $bitfield_int_ty as $field_int_ty;
+ let mask = $mask as $unit_field_int_ty;
+ let val = val as $bitfield_int_ty as $unit_field_int_ty;
- let mut field_val: $field_int_ty = unsafe {
- ::$prefix::mem::transmute(self.$field_ident)
+ let mut unit_field_val: $unit_field_int_ty = unsafe {
+ ::$prefix::mem::transmute(self.$unit_field_ident)
};
- field_val &= !mask;
- field_val |= (val << $offset) & mask;
+ unit_field_val &= !mask;
+ unit_field_val |= (val << $offset) & mask;
- self.$field_ident = unsafe {
- ::$prefix::mem::transmute(field_val)
+ self.$unit_field_ident = unsafe {
+ ::$prefix::mem::transmute(unit_field_val)
};
}
}
@@ -971,58 +1196,6 @@ fn flush_bitfields<'a, I>(ctx: &BindgenContext,
_ => unreachable!(),
};
}
-
- field
-}
-
-impl CodeGenerator for TemplateInstantiation {
- type Extra = Item;
-
- fn codegen<'a>(&self,
- ctx: &BindgenContext,
- result: &mut CodegenResult<'a>,
- _whitelisted_items: &ItemSet,
- item: &Item) {
- // Although uses of instantiations don't need code generation, and are
- // just converted to rust types in fields, vars, etc, we take this
- // opportunity to generate tests for their layout here.
- if !ctx.options().layout_tests {
- return
- }
-
- let layout = item.kind().expect_type().layout(ctx);
-
- if let Some(layout) = layout {
- let size = layout.size;
- let align = layout.align;
-
- let name = item.canonical_name(ctx);
- let fn_name = format!("__bindgen_test_layout_{}_instantiation_{}",
- name,
- item.id().as_usize());
- let fn_name = ctx.rust_ident_raw(&fn_name);
-
- let prefix = ctx.trait_prefix();
- let ident = item.to_rust_ty_or_opaque(ctx, &());
- let size_of_expr = quote_expr!(ctx.ext_cx(),
- ::$prefix::mem::size_of::<$ident>());
- let align_of_expr = quote_expr!(ctx.ext_cx(),
- ::$prefix::mem::align_of::<$ident>());
-
- let item = quote_item!(
- ctx.ext_cx(),
- #[test]
- fn $fn_name() {
- assert_eq!($size_of_expr, $size,
- concat!("Size of template specialization: ", stringify!($ident)));
- assert_eq!($align_of_expr, $align,
- concat!("Alignment of template specialization: ", stringify!($ident)));
- })
- .unwrap();
-
- result.push(item);
- }
- }
}
impl CodeGenerator for CompInfo {
@@ -1033,8 +1206,6 @@ impl CodeGenerator for CompInfo {
result: &mut CodegenResult<'a>,
whitelisted_items: &ItemSet,
item: &Item) {
- use aster::struct_field::StructFieldBuilder;
-
debug!("<CompInfo as CodeGenerator>::codegen: item = {:?}", item);
// Don't output classes with template parameters that aren't types, and
@@ -1137,7 +1308,7 @@ impl CodeGenerator for CompInfo {
// Also, we need to generate the vtable in such a way it "inherits" from
// the parent too.
let mut fields = vec![];
- let mut struct_layout = StructLayoutTracker::new(ctx, self);
+ let mut struct_layout = StructLayoutTracker::new(ctx, self, &canonical_name);
if self.needs_explicit_vtable(ctx) {
let vtable =
Vtable::new(item.id(), self.methods(), self.base_members());
@@ -1185,7 +1356,7 @@ impl CodeGenerator for CompInfo {
let field = StructFieldBuilder::named(field_name)
.pub_()
.build_ty(inner);
- fields.push(field);
+ fields.extend(Some(field));
}
if is_union {
result.saw_union();
@@ -1193,11 +1364,6 @@ impl CodeGenerator for CompInfo {
let layout = item.kind().expect_type().layout(ctx);
- let mut current_bitfield_width = None;
- let mut current_bitfield_layout: Option<Layout> = None;
- let mut current_bitfield_fields = vec![];
- let mut bitfield_count = 0;
- let struct_fields = self.fields();
let fields_should_be_private = item.annotations()
.private_fields()
.unwrap_or(false);
@@ -1206,197 +1372,20 @@ impl CodeGenerator for CompInfo {
.unwrap_or(FieldAccessorKind::None);
let mut methods = vec![];
- let mut anonymous_field_count = 0;
- for field in struct_fields {
- debug_assert_eq!(current_bitfield_width.is_some(),
- current_bitfield_layout.is_some());
- debug_assert_eq!(current_bitfield_width.is_some(),
- !current_bitfield_fields.is_empty());
-
- let field_ty = ctx.resolve_type(field.ty());
-
- // Try to catch a bitfield contination early.
- if let (Some(ref mut bitfield_width), Some(width)) =
- (current_bitfield_width, field.bitfield()) {
- let layout = current_bitfield_layout.unwrap();
- debug!("Testing bitfield continuation {} {} {:?}",
- *bitfield_width,
- width,
- layout);
- if *bitfield_width + width <= (layout.size * 8) as u32 {
- *bitfield_width += width;
- current_bitfield_fields.push(field);
- continue;
- }
- }
-
- // Flush the current bitfield.
- if current_bitfield_width.is_some() {
- debug_assert!(!current_bitfield_fields.is_empty());
- let bitfield_fields =
- mem::replace(&mut current_bitfield_fields, vec![]);
- let bitfield_layout = Bitfield::new(&mut bitfield_count,
- bitfield_fields)
- .codegen_fields(ctx, self, &mut fields, &mut methods);
- struct_layout.saw_bitfield_batch(bitfield_layout);
-
- current_bitfield_width = None;
- current_bitfield_layout = None;
- }
- debug_assert!(current_bitfield_fields.is_empty());
-
- if let Some(width) = field.bitfield() {
- let layout = field_ty.layout(ctx)
- .expect("Bitfield type without layout?");
- current_bitfield_width = Some(width);
- current_bitfield_layout = Some(layout);
- current_bitfield_fields.push(field);
- continue;
- }
-
- let ty = field.ty().to_rust_ty_or_opaque(ctx, &());
-
- // NB: In unstable rust we use proper `union` types.
- let ty = if is_union && !ctx.options().unstable_rust {
- if ctx.options().enable_cxx_namespaces {
- quote_ty!(ctx.ext_cx(), root::__BindgenUnionField<$ty>)
- } else {
- quote_ty!(ctx.ext_cx(), __BindgenUnionField<$ty>)
- }
- } else if let Some(item) =
- field_ty.is_incomplete_array(ctx) {
- result.saw_incomplete_array();
-
- let inner = item.to_rust_ty_or_opaque(ctx, &());
-
- if ctx.options().enable_cxx_namespaces {
- quote_ty!(ctx.ext_cx(), root::__IncompleteArrayField<$inner>)
- } else {
- quote_ty!(ctx.ext_cx(), __IncompleteArrayField<$inner>)
- }
- } else {
- ty
- };
-
- let mut attrs = vec![];
- if ctx.options().generate_comments {
- if let Some(comment) = field.comment() {
- attrs.push(attributes::doc(comment));
- }
- }
- let field_name = match field.name() {
- Some(name) => ctx.rust_mangle(name).into_owned(),
- None => {
- anonymous_field_count += 1;
- format!("__bindgen_anon_{}", anonymous_field_count)
- }
- };
-
- if !is_union {
- if let Some(padding_field) =
- struct_layout.pad_field(&field_name, field_ty, field.offset()) {
- fields.push(padding_field);
- }
- }
-
- let is_private = field.annotations()
- .private_fields()
- .unwrap_or(fields_should_be_private);
-
- let accessor_kind = field.annotations()
- .accessor_kind()
- .unwrap_or(struct_accessor_kind);
-
- let mut field = StructFieldBuilder::named(&field_name);
-
- if !is_private {
- field = field.pub_();
- }
-
- let field = field.with_attrs(attrs)
- .build_ty(ty.clone());
-
- fields.push(field);
-
- // TODO: Factor the following code out, please!
- if accessor_kind == FieldAccessorKind::None {
- continue;
- }
-
- let getter_name =
- ctx.rust_ident_raw(&format!("get_{}", field_name));
- let mutable_getter_name =
- ctx.rust_ident_raw(&format!("get_{}_mut", field_name));
- let field_name = ctx.rust_ident_raw(&field_name);
-
- let accessor_methods_impl = match accessor_kind {
- FieldAccessorKind::None => unreachable!(),
- FieldAccessorKind::Regular => {
- quote_item!(ctx.ext_cx(),
- impl X {
- #[inline]
- pub fn $getter_name(&self) -> &$ty {
- &self.$field_name
- }
-
- #[inline]
- pub fn $mutable_getter_name(&mut self) -> &mut $ty {
- &mut self.$field_name
- }
- }
- )
- }
- FieldAccessorKind::Unsafe => {
- quote_item!(ctx.ext_cx(),
- impl X {
- #[inline]
- pub unsafe fn $getter_name(&self) -> &$ty {
- &self.$field_name
- }
-
- #[inline]
- pub unsafe fn $mutable_getter_name(&mut self)
- -> &mut $ty {
- &mut self.$field_name
- }
- }
- )
- }
- FieldAccessorKind::Immutable => {
- quote_item!(ctx.ext_cx(),
- impl X {
- #[inline]
- pub fn $getter_name(&self) -> &$ty {
- &self.$field_name
- }
- }
- )
- }
- };
-
- match accessor_methods_impl.unwrap().node {
- ast::ItemKind::Impl(_, _, _, _, _, ref items) => {
- methods.extend(items.clone())
- }
- _ => unreachable!(),
- }
+ let mut anon_field_names = AnonFieldNames::default();
+ for field in self.fields() {
+ field.codegen(ctx,
+ fields_should_be_private,
+ struct_accessor_kind,
+ self,
+ &mut anon_field_names,
+ result,
+ &mut struct_layout,
+ &mut fields,
+ &mut methods,
+ ());
}
- // Flush the last bitfield if any.
- //
- // FIXME: Reduce duplication with the loop above.
- // FIXME: May need to pass current_bitfield_layout too.
- if current_bitfield_width.is_some() {
- debug_assert!(!current_bitfield_fields.is_empty());
- let bitfield_fields = mem::replace(&mut current_bitfield_fields,
- vec![]);
- let bitfield_layout = Bitfield::new(&mut bitfield_count,
- bitfield_fields)
- .codegen_fields(ctx, self, &mut fields, &mut methods);
- struct_layout.saw_bitfield_batch(bitfield_layout);
- }
- debug_assert!(current_bitfield_fields.is_empty());
-
if is_union && !ctx.options().unstable_rust {
let layout = layout.expect("Unable to get layout information?");
let ty = BlobTyBuilder::new(layout).build();
@@ -1430,7 +1419,7 @@ impl CodeGenerator for CompInfo {
} else if !is_union && !self.is_unsized(ctx) {
if let Some(padding_field) =
layout.and_then(|layout| {
- struct_layout.pad_struct(&canonical_name, layout)
+ struct_layout.pad_struct(layout)
}) {
fields.push(padding_field);
}
@@ -1554,21 +1543,24 @@ impl CodeGenerator for CompInfo {
} else {
let asserts = self.fields()
.iter()
- .filter(|field| field.bitfield().is_none())
+ .filter_map(|field| match *field {
+ Field::DataMember(ref f) if f.name().is_some() => Some(f),
+ _ => None,
+ })
.flat_map(|field| {
- field.name().and_then(|name| {
- field.offset().and_then(|offset| {
- let field_offset = offset / 8;
- let field_name = ctx.rust_ident(name);
-
- quote_item!(ctx.ext_cx(),
- assert_eq!(unsafe { &(*(0 as *const $type_name)).$field_name as *const _ as usize },
- $field_offset,
- concat!("Alignment of field: ", stringify!($type_name), "::", stringify!($field_name)));
- )
- })
+ let name = field.name().unwrap();
+ field.offset().and_then(|offset| {
+ let field_offset = offset / 8;
+ let field_name = ctx.rust_ident(name);
+
+ quote_item!(ctx.ext_cx(),
+ assert_eq!(unsafe { &(*(0 as *const $type_name)).$field_name as *const _ as usize },
+ $field_offset,
+ concat!("Alignment of field: ", stringify!($type_name), "::", stringify!($field_name)));
+ )
})
- }).collect::<Vec<P<ast::Item>>>();
+ })
+ .collect::<Vec<P<ast::Item>>>();
Some(asserts)
};
diff --git a/src/codegen/struct_layout.rs b/src/codegen/struct_layout.rs
index 351f7642..2ba39bad 100644
--- a/src/codegen/struct_layout.rs
+++ b/src/codegen/struct_layout.rs
@@ -14,7 +14,9 @@ use std::mem;
use syntax::ast;
/// Trace the layout of struct.
+#[derive(Debug)]
pub struct StructLayoutTracker<'a, 'ctx: 'a> {
+ name: &'a str,
ctx: &'a BindgenContext<'ctx>,
comp: &'a CompInfo,
latest_offset: usize,
@@ -38,15 +40,6 @@ pub fn align_to(size: usize, align: usize) -> usize {
size + align - rem
}
-/// Returns the amount of bytes from a given amount of bytes, rounding up.
-pub fn bytes_from_bits(n: usize) -> usize {
- if n % 8 == 0 {
- return n / 8;
- }
-
- n / 8 + 1
-}
-
/// Returns the lower power of two byte count that can hold at most n bits.
pub fn bytes_from_bits_pow2(mut n: usize) -> usize {
if n == 0 {
@@ -87,23 +80,10 @@ fn test_bytes_from_bits_pow2() {
}
}
-#[test]
-fn test_bytes_from_bits() {
- assert_eq!(bytes_from_bits(0), 0);
- for i in 1..9 {
- assert_eq!(bytes_from_bits(i), 1);
- }
- for i in 9..17 {
- assert_eq!(bytes_from_bits(i), 2);
- }
- for i in 17..25 {
- assert_eq!(bytes_from_bits(i), 3);
- }
-}
-
impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
- pub fn new(ctx: &'a BindgenContext<'ctx>, comp: &'a CompInfo) -> Self {
+ pub fn new(ctx: &'a BindgenContext<'ctx>, comp: &'a CompInfo, name: &'a str) -> Self {
StructLayoutTracker {
+ name: name,
ctx: ctx,
comp: comp,
latest_offset: 0,
@@ -115,6 +95,8 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
pub fn saw_vtable(&mut self) {
+ debug!("saw vtable for {}", self.name);
+
let ptr_size = mem::size_of::<*mut ()>();
self.latest_offset += ptr_size;
self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
@@ -122,6 +104,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
pub fn saw_base(&mut self, base_ty: &Type) {
+ debug!("saw base for {}", self.name);
if let Some(layout) = base_ty.layout(self.ctx) {
self.align_to_latest_field(layout);
@@ -131,7 +114,9 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
}
- pub fn saw_bitfield_batch(&mut self, layout: Layout) {
+ pub fn saw_bitfield_unit(&mut self, layout: Layout) {
+ debug!("saw bitfield unit for {}: {:?}", self.name, layout);
+
self.align_to_latest_field(layout);
self.latest_offset += layout.size;
@@ -148,6 +133,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
pub fn saw_union(&mut self, layout: Layout) {
+ debug!("saw union for {}: {:?}", self.name, layout);
self.align_to_latest_field(layout);
self.latest_offset += self.padding_bytes(layout) + layout.size;
@@ -239,13 +225,12 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
padding_layout.map(|layout| self.padding_field(layout))
}
- pub fn pad_struct(&mut self,
- name: &str,
- layout: Layout)
- -> Option<ast::StructField> {
+ pub fn pad_struct(&mut self, layout: Layout) -> Option<ast::StructField> {
+ debug!("pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}", self, layout);
+
if layout.size < self.latest_offset {
error!("Calculated wrong layout for {}, too more {} bytes",
- name,
+ self.name,
self.latest_offset - layout.size);
return None;
}
@@ -273,7 +258,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
Layout::new(padding_bytes, layout.align)
};
- debug!("pad bytes to struct {}, {:?}", name, layout);
+ debug!("pad bytes to struct {}, {:?}", self.name, layout);
Some(self.padding_field(layout))
} else {
diff --git a/src/ir/comp.rs b/src/ir/comp.rs
index 91593b79..e0ec683f 100644
--- a/src/ir/comp.rs
+++ b/src/ir/comp.rs
@@ -3,13 +3,19 @@
use super::annotations::Annotations;
use super::context::{BindgenContext, ItemId};
use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use super::dot::DotAttributes;
use super::item::Item;
use super::layout::Layout;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::template::TemplateParameters;
use clang;
+use codegen::struct_layout::{align_to, bytes_from_bits_pow2};
use parse::{ClangItemParser, ParseError};
+use peeking_take_while::PeekableExt;
use std::cell::Cell;
+use std::cmp;
+use std::io;
+use std::mem;
/// The kind of compound type.
#[derive(Debug, Copy, Clone, PartialEq)]
@@ -98,78 +104,564 @@ impl Method {
}
}
+/// Methods common to the various field types.
+pub trait FieldMethods {
+ /// Get the name of this field.
+ fn name(&self) -> Option<&str>;
+
+ /// Get the type of this field.
+ fn ty(&self) -> ItemId;
+
+ /// Get the comment for this field.
+ fn comment(&self) -> Option<&str>;
+
+ /// If this is a bitfield, how many bits does it need?
+ fn bitfield(&self) -> Option<u32>;
+
+ /// Is this field marked as `mutable`?
+ fn is_mutable(&self) -> bool;
+
+ /// Get the annotations for this field.
+ fn annotations(&self) -> &Annotations;
+
+ /// The offset of the field (in bits)
+ fn offset(&self) -> Option<usize>;
+}
+
+/// A contiguous set of logical bitfields that live within the same physical
+/// allocation unit. See 9.2.4 [class.bit] in the C++ standard and [section
+/// 2.4.II.1 in the Itanium C++
+/// ABI](http://itanium-cxx-abi.github.io/cxx-abi/abi.html#class-types).
+#[derive(Debug)]
+pub struct BitfieldUnit {
+ nth: usize,
+ layout: Layout,
+ bitfields: Vec<Bitfield>,
+}
+
+impl BitfieldUnit {
+ /// Get the 1-based index of this bitfield unit within its containing
+ /// struct. Useful for generating a Rust struct's field name for this unit
+ /// of bitfields.
+ pub fn nth(&self) -> usize {
+ self.nth
+ }
+
+ /// Get the layout within which these bitfields reside.
+ pub fn layout(&self) -> Layout {
+ self.layout
+ }
+
+ /// Get the bitfields within this unit.
+ pub fn bitfields(&self) -> &[Bitfield] {
+ &self.bitfields
+ }
+}
+
/// A struct representing a C++ field.
+#[derive(Debug)]
+pub enum Field {
+ /// A normal data member.
+ DataMember(FieldData),
+
+ /// A physical allocation unit containing many logical bitfields.
+ Bitfields(BitfieldUnit),
+}
+
+impl Field {
+ fn has_destructor(&self, ctx: &BindgenContext) -> bool {
+ match *self {
+ Field::DataMember(ref data) => ctx.resolve_type(data.ty).has_destructor(ctx),
+ // Bitfields may not be of a type that has a destructor.
+ Field::Bitfields(BitfieldUnit { .. }) => false,
+ }
+ }
+
+ /// Get this field's layout.
+ pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
+ match *self {
+ Field::Bitfields(BitfieldUnit { layout, ..}) => Some(layout),
+ Field::DataMember(ref data) => {
+ ctx.resolve_type(data.ty).layout(ctx)
+ }
+ }
+ }
+}
+
+impl Trace for Field {
+ type Extra = ();
+
+ fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
+ where T: Tracer,
+ {
+ match *self {
+ Field::DataMember(ref data) => {
+ tracer.visit_kind(data.ty, EdgeKind::Field);
+ }
+ Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => {
+ for bf in bitfields {
+ tracer.visit_kind(bf.ty(), EdgeKind::Field);
+ }
+ }
+ }
+ }
+}
+
+impl DotAttributes for Field {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ match *self {
+ Field::DataMember(ref data) => {
+ data.dot_attributes(ctx, out)
+ }
+ Field::Bitfields(BitfieldUnit { layout, ref bitfields, .. }) => {
+ writeln!(out,
+ r#"<tr>
+ <td>bitfield unit</td>
+ <td>
+ <table border="0">
+ <tr>
+ <td>unit.size</td><td>{}</td>
+ </tr>
+ <tr>
+ <td>unit.align</td><td>{}</td>
+ </tr>
+ "#,
+ layout.size,
+ layout.align)?;
+ for bf in bitfields {
+ bf.dot_attributes(ctx, out)?;
+ }
+ writeln!(out, "</table></td></tr>")
+ }
+ }
+ }
+}
+
+impl DotAttributes for FieldData {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ writeln!(out,
+ "<tr><td>{}</td><td>{:?}</td></tr>",
+ self.name().unwrap_or("(anonymous)"),
+ self.ty())
+ }
+}
+
+impl DotAttributes for Bitfield {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ writeln!(out,
+ "<tr><td>{} : {}</td><td>{:?}</td></tr>",
+ self.name(),
+ self.width(),
+ self.ty())
+ }
+}
+
+/// A logical bitfield within some physical bitfield allocation unit.
+#[derive(Debug)]
+pub struct Bitfield {
+ /// Index of the bit within this bitfield's allocation unit where this
+ /// bitfield's bits begin.
+ offset_into_unit: usize,
+
+ /// The field data for this bitfield.
+ data: FieldData,
+}
+
+impl Bitfield {
+ /// Construct a new bitfield.
+ fn new(offset_into_unit: usize, raw: RawField) -> Bitfield {
+ assert!(raw.bitfield().is_some());
+ assert!(raw.name().is_some());
+
+ Bitfield {
+ offset_into_unit: offset_into_unit,
+ data: raw.0,
+ }
+ }
+
+ /// Get the index of the bit within this bitfield's allocation unit where
+ /// this bitfield begins.
+ pub fn offset_into_unit(&self) -> usize {
+ self.offset_into_unit
+ }
+
+ /// Get the bit width of this bitfield.
+ pub fn width(&self) -> u32 {
+ self.data.bitfield().unwrap()
+ }
+
+ /// Get the name of this bitfield.
+ pub fn name(&self) -> &str {
+ self.data.name().unwrap()
+ }
+}
+
+impl FieldMethods for Bitfield {
+ fn name(&self) -> Option<&str> {
+ self.data.name()
+ }
+
+ fn ty(&self) -> ItemId {
+ self.data.ty()
+ }
+
+ fn comment(&self) -> Option<&str> {
+ self.data.comment()
+ }
+
+ fn bitfield(&self) -> Option<u32> {
+ self.data.bitfield()
+ }
+
+ fn is_mutable(&self) -> bool {
+ self.data.is_mutable()
+ }
+
+ fn annotations(&self) -> &Annotations {
+ self.data.annotations()
+ }
+
+ fn offset(&self) -> Option<usize> {
+ self.data.offset()
+ }
+}
+
+
+/// A raw field might be either a plain data member or a bitfield within a
+/// bitfield allocation unit, but we haven't processed it and determined which
+/// yet (which would involve allocating it into a bitfield unit if it is a
+/// bitfield).
+#[derive(Debug)]
+struct RawField(FieldData);
+
+impl RawField {
+ /// Construct a new `RawField`.
+ fn new(name: Option<String>,
+ ty: ItemId,
+ comment: Option<String>,
+ annotations: Option<Annotations>,
+ bitfield: Option<u32>,
+ mutable: bool,
+ offset: Option<usize>)
+ -> RawField {
+ RawField(FieldData {
+ name: name,
+ ty: ty,
+ comment: comment,
+ annotations: annotations.unwrap_or_default(),
+ bitfield: bitfield,
+ mutable: mutable,
+ offset: offset,
+ })
+ }
+}
+
+impl FieldMethods for RawField {
+ fn name(&self) -> Option<&str> {
+ self.0.name()
+ }
+
+ fn ty(&self) -> ItemId {
+ self.0.ty()
+ }
+
+ fn comment(&self) -> Option<&str> {
+ self.0.comment()
+ }
+
+ fn bitfield(&self) -> Option<u32> {
+ self.0.bitfield()
+ }
+
+ fn is_mutable(&self) -> bool {
+ self.0.is_mutable()
+ }
+
+ fn annotations(&self) -> &Annotations {
+ self.0.annotations()
+ }
+
+ fn offset(&self) -> Option<usize> {
+ self.0.offset()
+ }
+}
+
+/// Convert the given ordered set of raw fields into a list of plain data
+/// members and/or bitfield units containing multiple bitfields.
+fn raw_fields_to_fields_and_bitfield_units<I>(ctx: &BindgenContext,
+ raw_fields: I)
+ -> Vec<Field>
+ where I: IntoIterator<Item=RawField>
+{
+ let mut raw_fields = raw_fields.into_iter().fuse().peekable();
+ let mut fields = vec![];
+ let mut bitfield_unit_count = 0;
+
+ loop {
+ // While we have plain old data members, just keep adding them to our
+ // resulting fields. We introduce a scope here so that we can use
+ // `raw_fields` again after the `by_ref` iterator adaptor is dropped.
+ {
+ let non_bitfields = raw_fields
+ .by_ref()
+ .peeking_take_while(|f| f.bitfield().is_none())
+ .map(|f| Field::DataMember(f.0));
+ fields.extend(non_bitfields);
+ }
+
+ // Now gather all the consecutive bitfields. Only consecutive bitfields
+ // may potentially share a bitfield allocation unit with each other in
+ // the Itanium C++ ABI.
+ let mut bitfields = raw_fields
+ .by_ref()
+ .peeking_take_while(|f| f.bitfield().is_some())
+ .peekable();
+
+ if bitfields.peek().is_none() {
+ break;
+ }
+
+ bitfields_to_allocation_units(ctx,
+ &mut bitfield_unit_count,
+ &mut fields,
+ bitfields);
+ }
+
+ assert!(raw_fields.next().is_none(),
+ "The above loop should consume all items in `raw_fields`");
+
+ fields
+}
+
+/// Given a set of contiguous raw bitfields, group and allocate them into
+/// (potentially multiple) bitfield units.
+fn bitfields_to_allocation_units<E, I>(ctx: &BindgenContext,
+ bitfield_unit_count: &mut usize,
+ mut fields: &mut E,
+ raw_bitfields: I)
+ where E: Extend<Field>,
+ I: IntoIterator<Item=RawField>
+{
+ assert!(ctx.collected_typerefs());
+
+ // NOTE: What follows is reverse-engineered from LLVM's
+ // lib/AST/RecordLayoutBuilder.cpp
+ //
+ // FIXME(emilio): There are some differences between Microsoft and the
+ // Itanium ABI, but we'll ignore those and stick to Itanium for now.
+ //
+ // Also, we need to handle packed bitfields and stuff.
+ //
+ // TODO(emilio): Take into account C++'s wide bitfields, and
+ // packing, sigh.
+
+ fn flush_allocation_unit<E>(mut fields: &mut E,
+ bitfield_unit_count: &mut usize,
+ unit_size_in_bits: usize,
+ unit_align_in_bits: usize,
+ bitfields: Vec<Bitfield>)
+ where E: Extend<Field>
+ {
+ *bitfield_unit_count += 1;
+ let layout = Layout::new(bytes_from_bits_pow2(unit_size_in_bits),
+ bytes_from_bits_pow2(unit_align_in_bits));
+ fields.extend(Some(Field::Bitfields(BitfieldUnit {
+ nth: *bitfield_unit_count,
+ layout: layout,
+ bitfields: bitfields,
+ })));
+ }
+
+ let mut max_align = 0;
+ let mut unfilled_bits_in_unit = 0;
+ let mut unit_size_in_bits = 0;
+ let mut unit_align = 0;
+ let mut bitfields_in_unit = vec![];
+
+ for bitfield in raw_bitfields {
+ let bitfield_width = bitfield.bitfield().unwrap() as usize;
+ let bitfield_align = ctx.resolve_type(bitfield.ty())
+ .layout(ctx)
+ .expect("Bitfield without layout? Gah!")
+ .align;
+
+ if unit_size_in_bits != 0 &&
+ (bitfield_width == 0 ||
+ bitfield_width > unfilled_bits_in_unit) {
+ // We've reached the end of this allocation unit, so flush it
+ // and its bitfields.
+ unit_size_in_bits = align_to(unit_size_in_bits,
+ bitfield_align);
+ flush_allocation_unit(fields,
+ bitfield_unit_count,
+ unit_size_in_bits,
+ unit_align,
+ mem::replace(&mut bitfields_in_unit, vec![]));
+
+ // Now we're working on a fresh bitfield allocation unit, so reset
+ // the current unit size and alignment.
+ unit_size_in_bits = 0;
+ unit_align = 0;
+ }
+
+ // Only keep named bitfields around. Unnamed bitfields (with > 0
+ // bitsize) are used for padding. Because the `Bitfield` struct stores
+ // the bit-offset into its allocation unit where its bits begin, we
+ // don't need any padding bits hereafter.
+ if bitfield.name().is_some() {
+ bitfields_in_unit.push(Bitfield::new(unit_size_in_bits, bitfield));
+ }
+
+ unit_size_in_bits += bitfield_width;
+
+ max_align = cmp::max(max_align, bitfield_align);
+
+ // NB: The `bitfield_width` here is completely, absolutely intentional.
+ // Alignment of the allocation unit is based on the maximum bitfield
+ // width, not (directly) on the bitfields' types' alignment.
+ unit_align = cmp::max(unit_align, bitfield_width);
+
+ // Compute what the physical unit's final size would be given what we
+ // have seen so far, and use that to compute how many bits are still
+ // available in the unit.
+ let data_size = align_to(unit_size_in_bits, bitfield_align * 8);
+ unfilled_bits_in_unit = data_size - unit_size_in_bits;
+ }
+
+ if unit_size_in_bits != 0 {
+ // Flush the last allocation unit and its bitfields.
+ flush_allocation_unit(fields,
+ bitfield_unit_count,
+ unit_size_in_bits,
+ unit_align,
+ bitfields_in_unit);
+ }
+}
+
+/// A compound structure's fields are initially raw, and have bitfields that
+/// have not been grouped into allocation units. During this time, the fields
+/// are mutable and we build them up during parsing.
+///
+/// Then, once resolving typerefs is completed, we compute all structs' fields'
+/// bitfield allocation units, and they remain frozen and immutable forever
+/// after.
+#[derive(Debug)]
+enum CompFields {
+ BeforeComputingBitfieldUnits(Vec<RawField>),
+ AfterComputingBitfieldUnits(Vec<Field>),
+}
+
+impl Default for CompFields {
+ fn default() -> CompFields {
+ CompFields::BeforeComputingBitfieldUnits(vec![])
+ }
+}
+
+impl CompFields {
+ fn append_raw_field(&mut self, raw: RawField) {
+ match *self {
+ CompFields::BeforeComputingBitfieldUnits(ref mut raws) => {
+ raws.push(raw);
+ }
+ CompFields::AfterComputingBitfieldUnits(_) => {
+ panic!("Must not append new fields after computing bitfield allocation units");
+ }
+ }
+ }
+
+ fn compute_bitfield_units(&mut self, ctx: &BindgenContext) {
+ let raws = match *self {
+ CompFields::BeforeComputingBitfieldUnits(ref mut raws) => {
+ mem::replace(raws, vec![])
+ }
+ CompFields::AfterComputingBitfieldUnits(_) => {
+ panic!("Already computed bitfield units");
+ }
+ };
+
+ let fields_and_units = raw_fields_to_fields_and_bitfield_units(ctx, raws);
+ mem::replace(self, CompFields::AfterComputingBitfieldUnits(fields_and_units));
+ }
+}
+
+impl Trace for CompFields {
+ type Extra = ();
+
+ fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, _: &())
+ where T: Tracer,
+ {
+ match *self {
+ CompFields::BeforeComputingBitfieldUnits(ref fields) => {
+ for f in fields {
+ tracer.visit_kind(f.ty(), EdgeKind::Field);
+ }
+ }
+ CompFields::AfterComputingBitfieldUnits(ref fields) => {
+ for f in fields {
+ f.trace(context, tracer, &());
+ }
+ }
+ }
+ }
+}
+
+/// Common data shared across different field types.
#[derive(Clone, Debug)]
-pub struct Field {
+pub struct FieldData {
/// The name of the field, empty if it's an unnamed bitfield width.
name: Option<String>,
+
/// The inner type.
ty: ItemId,
+
/// The doc comment on the field if any.
comment: Option<String>,
+
/// Annotations for this field, or the default.
annotations: Annotations,
+
/// If this field is a bitfield, and how many bits does it contain if it is.
bitfield: Option<u32>,
+
/// If the C++ field is marked as `mutable`
mutable: bool,
+
/// The offset of the field (in bits)
offset: Option<usize>,
}
-impl Field {
- /// Construct a new `Field`.
- pub fn new(name: Option<String>,
- ty: ItemId,
- comment: Option<String>,
- annotations: Option<Annotations>,
- bitfield: Option<u32>,
- mutable: bool,
- offset: Option<usize>)
- -> Field {
- Field {
- name: name,
- ty: ty,
- comment: comment,
- annotations: annotations.unwrap_or_default(),
- bitfield: bitfield,
- mutable: mutable,
- offset: offset,
- }
- }
-
- /// Get the name of this field.
- pub fn name(&self) -> Option<&str> {
+impl FieldMethods for FieldData {
+ fn name(&self) -> Option<&str> {
self.name.as_ref().map(|n| &**n)
}
- /// Get the type of this field.
- pub fn ty(&self) -> ItemId {
+ fn ty(&self) -> ItemId {
self.ty
}
- /// Get the comment for this field.
- pub fn comment(&self) -> Option<&str> {
+ fn comment(&self) -> Option<&str> {
self.comment.as_ref().map(|c| &**c)
}
- /// If this is a bitfield, how many bits does it need?
- pub fn bitfield(&self) -> Option<u32> {
+ fn bitfield(&self) -> Option<u32> {
self.bitfield
}
- /// Is this field marked as `mutable`?
- pub fn is_mutable(&self) -> bool {
+ fn is_mutable(&self) -> bool {
self.mutable
}
- /// Get the annotations for this field.
- pub fn annotations(&self) -> &Annotations {
+ fn annotations(&self) -> &Annotations {
&self.annotations
}
- /// The offset of the field (in bits)
- pub fn offset(&self) -> Option<usize> {
+ fn offset(&self) -> Option<usize> {
self.offset
}
}
@@ -178,7 +670,12 @@ impl CanDeriveDebug for Field {
type Extra = ();
fn can_derive_debug(&self, ctx: &BindgenContext, _: ()) -> bool {
- self.ty.can_derive_debug(ctx, ())
+ match *self {
+ Field::DataMember(ref data) => data.ty.can_derive_debug(ctx, ()),
+ Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => bitfields.iter().all(|b| {
+ b.ty().can_derive_debug(ctx, ())
+ }),
+ }
}
}
@@ -186,7 +683,12 @@ impl CanDeriveDefault for Field {
type Extra = ();
fn can_derive_default(&self, ctx: &BindgenContext, _: ()) -> bool {
- self.ty.can_derive_default(ctx, ())
+ match *self {
+ Field::DataMember(ref data) => data.ty.can_derive_default(ctx, ()),
+ Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => bitfields.iter().all(|b| {
+ b.ty().can_derive_default(ctx, ())
+ }),
+ }
}
}
@@ -194,11 +696,21 @@ impl<'a> CanDeriveCopy<'a> for Field {
type Extra = ();
fn can_derive_copy(&self, ctx: &BindgenContext, _: ()) -> bool {
- self.ty.can_derive_copy(ctx, ())
+ match *self {
+ Field::DataMember(ref data) => data.ty.can_derive_copy(ctx, ()),
+ Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => bitfields.iter().all(|b| {
+ b.ty().can_derive_copy(ctx, ())
+ }),
+ }
}
fn can_derive_copy_in_array(&self, ctx: &BindgenContext, _: ()) -> bool {
- self.ty.can_derive_copy_in_array(ctx, ())
+ match *self {
+ Field::DataMember(ref data) => data.ty.can_derive_copy_in_array(ctx, ()),
+ Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => bitfields.iter().all(|b| {
+ b.ty().can_derive_copy_in_array(ctx, ())
+ }),
+ }
}
}
@@ -247,12 +759,12 @@ pub struct CompInfo {
kind: CompKind,
/// The members of this struct or union.
- fields: Vec<Field>,
+ fields: CompFields,
- /// The abstract template parameters of this class. These are NOT concrete
- /// template arguments, and should always be a
- /// Type(TypeKind::Named(name)). For concrete template arguments, see the
- /// TypeKind::TemplateInstantiation.
+ /// The abstract template parameters of this class. Note that these are NOT
+ /// concrete template arguments, and should always be a
+ /// `Type(TypeKind::Named(name))`. For concrete template arguments, see
+ /// `TypeKind::TemplateInstantiation`.
template_params: Vec<ItemId>,
/// The method declarations inside this class, if in C++ mode.
@@ -332,7 +844,7 @@ impl CompInfo {
pub fn new(kind: CompKind) -> Self {
CompInfo {
kind: kind,
- fields: vec![],
+ fields: CompFields::default(),
template_params: vec![],
methods: vec![],
constructors: vec![],
@@ -355,7 +867,7 @@ impl CompInfo {
/// Is this compound type unsized?
pub fn is_unsized(&self, ctx: &BindgenContext) -> bool {
- !self.has_vtable(ctx) && self.fields.is_empty() &&
+ !self.has_vtable(ctx) && self.fields().is_empty() &&
self.base_members.iter().all(|base| {
ctx.resolve_type(base.ty).canonical_type(ctx).is_unsized(ctx)
})
@@ -378,9 +890,8 @@ impl CompInfo {
self.base_members.iter().any(|base| {
ctx.resolve_type(base.ty).has_destructor(ctx)
}) ||
- self.fields.iter().any(|field| {
- ctx.resolve_type(field.ty)
- .has_destructor(ctx)
+ self.fields().iter().any(|field| {
+ field.has_destructor(ctx)
})
}
};
@@ -400,6 +911,7 @@ impl CompInfo {
/// kind of unions, see test/headers/template_union.hpp
pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
use std::cmp;
+
// We can't do better than clang here, sorry.
if self.kind == CompKind::Struct {
return None;
@@ -407,9 +919,8 @@ impl CompInfo {
let mut max_size = 0;
let mut max_align = 0;
- for field in &self.fields {
- let field_layout = ctx.resolve_type(field.ty)
- .layout(ctx);
+ for field in self.fields() {
+ let field_layout = field.layout(ctx);
if let Some(layout) = field_layout {
max_size = cmp::max(max_size, layout.size);
@@ -422,7 +933,12 @@ impl CompInfo {
/// Get this type's set of fields.
pub fn fields(&self) -> &[Field] {
- &self.fields
+ match self.fields {
+ CompFields::AfterComputingBitfieldUnits(ref fields) => fields,
+ CompFields::BeforeComputingBitfieldUnits(_) => {
+ panic!("Should always have computed bitfield units first");
+ }
+ }
}
/// Does this type have any template parameters that aren't types
@@ -460,6 +976,11 @@ impl CompInfo {
self.kind
}
+ /// Is this a union?
+ pub fn is_union(&self) -> bool {
+ self.kind() == CompKind::Union
+ }
+
/// The set of types that this one inherits from.
pub fn base_members(&self) -> &[Base] {
&self.base_members
@@ -510,8 +1031,8 @@ impl CompInfo {
// nothing.
} else {
let field =
- Field::new(None, ty, None, None, None, false, offset);
- ci.fields.push(field);
+ RawField::new(None, ty, None, None, None, false, offset);
+ ci.fields.append_raw_field(field);
}
}
}
@@ -528,14 +1049,14 @@ impl CompInfo {
CXChildVisit_Continue
});
if !used {
- let field = Field::new(None,
+ let field = RawField::new(None,
ty,
None,
None,
None,
false,
offset);
- ci.fields.push(field);
+ ci.fields.append_raw_field(field);
}
}
@@ -558,14 +1079,14 @@ impl CompInfo {
let name = if name.is_empty() { None } else { Some(name) };
- let field = Field::new(name,
+ let field = RawField::new(name,
field_type,
comment,
annotations,
bit_width,
is_mutable,
offset);
- ci.fields.push(field);
+ ci.fields.append_raw_field(field);
// No we look for things like attributes and stuff.
cur.visit(|cur| {
@@ -744,8 +1265,8 @@ impl CompInfo {
if let Some((ty, _, offset)) = maybe_anonymous_struct_field {
let field =
- Field::new(None, ty, None, None, None, false, offset);
- ci.fields.push(field);
+ RawField::new(None, ty, None, None, None, false, offset);
+ ci.fields.append_raw_field(field);
}
Ok(ci)
@@ -817,6 +1338,53 @@ impl CompInfo {
pub fn is_forward_declaration(&self) -> bool {
self.is_forward_declaration
}
+
+ /// Compute this compound structure's bitfield allocation units.
+ pub fn compute_bitfield_units(&mut self, ctx: &BindgenContext) {
+ self.fields.compute_bitfield_units(ctx);
+ }
+}
+
+impl DotAttributes for CompInfo {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ writeln!(out, "<tr><td>CompKind</td><td>{:?}</td></tr>", self.kind)?;
+
+ if self.has_vtable {
+ writeln!(out, "<tr><td>has_vtable</td><td>true</td></tr>")?;
+ }
+
+ if self.has_destructor {
+ writeln!(out, "<tr><td>has_destructor</td><td>true</td></tr>")?;
+ }
+
+ if self.has_nonempty_base {
+ writeln!(out, "<tr><td>has_nonempty_base</td><td>true</td></tr>")?;
+ }
+
+ if self.has_non_type_template_params {
+ writeln!(out, "<tr><td>has_non_type_template_params</td><td>true</td></tr>")?;
+ }
+
+ if self.packed {
+ writeln!(out, "<tr><td>packed</td><td>true</td></tr>")?;
+ }
+
+ if self.is_forward_declaration {
+ writeln!(out, "<tr><td>is_forward_declaration</td><td>true</td></tr>")?;
+ }
+
+ if !self.fields().is_empty() {
+ writeln!(out, r#"<tr><td>fields</td><td><table border="0">"#)?;
+ for field in self.fields() {
+ field.dot_attributes(ctx, out)?;
+ }
+ writeln!(out, "</table></td></tr>")?;
+ }
+
+ Ok(())
+ }
}
impl TemplateParameters for CompInfo {
@@ -865,7 +1433,7 @@ impl CanDeriveDebug for CompInfo {
self.base_members
.iter()
.all(|base| base.ty.can_derive_debug(ctx, ())) &&
- self.fields
+ self.fields()
.iter()
.all(|f| f.can_derive_debug(ctx, ()))
};
@@ -907,7 +1475,7 @@ impl CanDeriveDefault for CompInfo {
self.base_members
.iter()
.all(|base| base.ty.can_derive_default(ctx, ())) &&
- self.fields
+ self.fields()
.iter()
.all(|f| f.can_derive_default(ctx, ()));
@@ -953,7 +1521,7 @@ impl<'a> CanDeriveCopy<'a> for CompInfo {
self.base_members
.iter()
.all(|base| base.ty.can_derive_copy(ctx, ())) &&
- self.fields.iter().all(|field| field.can_derive_copy(ctx, ()))
+ self.fields().iter().all(|field| field.can_derive_copy(ctx, ()))
}
fn can_derive_copy_in_array(&self,
@@ -990,9 +1558,7 @@ impl Trace for CompInfo {
tracer.visit_kind(base.ty, EdgeKind::BaseMember);
}
- for field in self.fields() {
- tracer.visit_kind(field.ty(), EdgeKind::Field);
- }
+ self.fields.trace(context, tracer, &());
for &var in self.inner_vars() {
tracer.visit_kind(var, EdgeKind::InnerVar);
diff --git a/src/ir/context.rs b/src/ir/context.rs
index 95a026da..c5807d2b 100644
--- a/src/ir/context.rs
+++ b/src/ir/context.rs
@@ -21,6 +21,7 @@ use std::collections::{HashMap, hash_map};
use std::collections::btree_map::{self, BTreeMap};
use std::fmt;
use std::iter::IntoIterator;
+use std::mem;
use syntax::ast::Ident;
use syntax::codemap::{DUMMY_SP, Span};
use syntax::ext::base::ExtCtxt;
@@ -160,6 +161,10 @@ pub struct BindgenContext<'ctx> {
/// uses. See `ir::named` for more details. Always `Some` during the codegen
/// phase.
used_template_parameters: Option<HashMap<ItemId, ItemSet>>,
+
+ /// The set of `TypeKind::Comp` items found during parsing that need their
+ /// bitfield allocation units computed. Drained in `compute_bitfield_units`.
+ need_bitfield_allocation: Vec<ItemId>,
}
/// A traversal of whitelisted items.
@@ -247,6 +252,7 @@ impl<'ctx> BindgenContext<'ctx> {
options: options,
generated_bindegen_complex: Cell::new(false),
used_template_parameters: None,
+ need_bitfield_allocation: Default::default(),
};
me.add_item(root_module, None, None);
@@ -312,6 +318,10 @@ impl<'ctx> BindgenContext<'ctx> {
}
}
+ if is_type && item.expect_type().is_comp() {
+ self.need_bitfield_allocation.push(id);
+ }
+
let old_item = self.items.insert(id, item);
assert!(old_item.is_none(),
"should not have already associated an item with the given id");
@@ -486,6 +496,32 @@ impl<'ctx> BindgenContext<'ctx> {
}
}
+ /// Compute the bitfield allocation units for all `TypeKind::Comp` items we
+ /// parsed.
+ fn compute_bitfield_units(&mut self) {
+ assert!(self.collected_typerefs());
+
+ let need_bitfield_allocation = mem::replace(&mut self.need_bitfield_allocation, vec![]);
+ for id in need_bitfield_allocation {
+ // To appease the borrow checker, we temporarily remove this item
+ // from the context, and then replace it once we are done computing
+ // its bitfield units. We will never try and resolve this
+ // `TypeKind::Comp` item's id (which would now cause a panic) during
+ // bitfield unit computation because it is a non-scalar by
+ // definition, and non-scalar types may not be used as bitfields.
+ let mut item = self.items.remove(&id).unwrap();
+
+ item.kind_mut()
+ .as_type_mut()
+ .unwrap()
+ .as_comp_mut()
+ .unwrap()
+ .compute_bitfield_units(&*self);
+
+ self.items.insert(id, item);
+ }
+ }
+
/// Iterate over all items and replace any item that has been named in a
/// `replaces="SomeType"` annotation with the replacement type.
fn process_replacements(&mut self) {
@@ -617,6 +653,7 @@ impl<'ctx> BindgenContext<'ctx> {
if !self.collected_typerefs() {
self.resolve_typerefs();
+ self.compute_bitfield_units();
self.process_replacements();
}
diff --git a/src/ir/dot.rs b/src/ir/dot.rs
index 7472dd8e..40202b2c 100644
--- a/src/ir/dot.rs
+++ b/src/ir/dot.rs
@@ -30,7 +30,7 @@ pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
for (id, item) in ctx.items() {
try!(writeln!(&mut dot_file,
- r#"{} [fontname="courier", label=< <table border="0">"#,
+ r#"{} [fontname="courier", label=< <table border="0" align="left">"#,
id.as_usize()));
try!(item.dot_attributes(ctx, &mut dot_file));
try!(writeln!(&mut dot_file, r#"</table> >];"#));
diff --git a/src/ir/item.rs b/src/ir/item.rs
index a60697b8..fdf2507d 100644
--- a/src/ir/item.rs
+++ b/src/ir/item.rs
@@ -829,6 +829,11 @@ impl DotAttributes for Item {
<tr><td>name</td><td>{}</td></tr>",
self.id,
self.name(ctx).get()));
+
+ if self.is_opaque(ctx) {
+ writeln!(out, "<tr><td>opaque</td><td>true</td></tr>")?;
+ }
+
self.kind.dot_attributes(ctx, out)
}
}
diff --git a/src/ir/ty.rs b/src/ir/ty.rs
index 3bb1965b..fd8e45c1 100644
--- a/src/ir/ty.rs
+++ b/src/ir/ty.rs
@@ -55,6 +55,15 @@ impl Type {
}
}
+ /// Get the underlying `CompInfo` for this type as a mutable reference, or
+ /// `None` if this is some other kind of type.
+ pub fn as_comp_mut(&mut self) -> Option<&mut CompInfo> {
+ match self.kind {
+ TypeKind::Comp(ref mut ci) => Some(ci),
+ _ => None,
+ }
+ }
+
/// Construct a new `Type`.
pub fn new(name: Option<String>,
layout: Option<Layout>,
@@ -413,7 +422,7 @@ impl DotAttributes for Type {
impl DotAttributes for TypeKind {
fn dot_attributes<W>(&self,
- _ctx: &BindgenContext,
+ ctx: &BindgenContext,
out: &mut W)
-> io::Result<()>
where W: io::Write,
@@ -443,7 +452,12 @@ impl DotAttributes for TypeKind {
TypeKind::ObjCSel => "ObjCSel",
TypeKind::ObjCInterface(..) => "ObjCInterface",
TypeKind::UnresolvedTypeRef(..) => unreachable!("there shouldn't be any more of these anymore"),
- })
+ })?;
+ if let TypeKind::Comp(ref comp) = *self {
+ comp.dot_attributes(ctx, out)?;
+ }
+
+ Ok(())
}
}
diff --git a/src/lib.rs b/src/lib.rs
index 61ae20df..cd5cf8e1 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -25,6 +25,7 @@ extern crate syntex_syntax as syntax;
extern crate aster;
extern crate quasi;
extern crate clang_sys;
+extern crate peeking_take_while;
extern crate regex;
#[macro_use]
extern crate lazy_static;