Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/mod.rs            170
-rw-r--r--  src/codegen/struct_layout.rs  306
2 files changed, 306 insertions(+), 170 deletions(-)
diff --git a/src/codegen/mod.rs b/src/codegen/mod.rs
index ad6736b0..77f654e6 100644
--- a/src/codegen/mod.rs
+++ b/src/codegen/mod.rs
@@ -2,7 +2,8 @@ mod helpers;
mod struct_layout;
use self::helpers::{BlobTyBuilder, attributes};
-use self::struct_layout::StructLayoutTracker;
+use self::struct_layout::{align_to, bytes_from_bits};
+use self::struct_layout::{bytes_from_bits_pow2, StructLayoutTracker};
use aster;
use ir::annotations::FieldAccessorKind;
@@ -363,8 +364,7 @@ impl CodeGenerator for Module {
}
if item.id() == ctx.root_module() {
- let saw_union = result.saw_union;
- if saw_union && !ctx.options().unstable_rust {
+ if result.saw_union && !ctx.options().unstable_rust {
utils::prepend_union_types(ctx, &mut *result);
}
if result.saw_incomplete_array {
@@ -717,12 +717,12 @@ impl<'a> ItemToRustTy for Vtable<'a> {
}
struct Bitfield<'a> {
- index: usize,
+ index: &'a mut usize,
fields: Vec<&'a Field>,
}
impl<'a> Bitfield<'a> {
- fn new(index: usize, fields: Vec<&'a Field>) -> Self {
+ fn new(index: &'a mut usize, fields: Vec<&'a Field>) -> Self {
Bitfield {
index: index,
fields: fields,
@@ -732,89 +732,96 @@ impl<'a> Bitfield<'a> {
fn codegen_fields(self,
ctx: &BindgenContext,
fields: &mut Vec<ast::StructField>,
- methods: &mut Vec<ast::ImplItem>)
+ _methods: &mut Vec<ast::ImplItem>)
-> Layout {
use aster::struct_field::StructFieldBuilder;
- let mut total_width = self.fields
- .iter()
- .fold(0u32, |acc, f| acc + f.bitfield().unwrap());
-
- if !total_width.is_power_of_two() || total_width < 8 {
- total_width = cmp::max(8, total_width.next_power_of_two());
- }
- debug_assert_eq!(total_width % 8, 0);
- let total_width_in_bytes = total_width as usize / 8;
-
- let bitfield_layout = Layout::new(total_width_in_bytes,
- total_width_in_bytes);
- let bitfield_type = BlobTyBuilder::new(bitfield_layout).build();
- let field_name = format!("_bitfield_{}", self.index);
- let field_ident = ctx.ext_cx().ident_of(&field_name);
- let field = StructFieldBuilder::named(&field_name)
- .pub_()
- .build_ty(bitfield_type.clone());
- fields.push(field);
+ // NOTE: What follows is reverse-engineered from LLVM's
+ // lib/AST/RecordLayoutBuilder.cpp
+ //
+ // FIXME(emilio): There are some differences between Microsoft and the
+ // Itanium ABI, but we'll ignore those and stick to Itanium for now.
+ //
+        // TODO(emilio): Take into account packing and C++'s wide bitfields,
+        // sigh.
+ let mut total_size_in_bits = 0;
+ let mut max_align = 0;
+ let mut unfilled_bits_in_last_unit = 0;
+ let mut field_size_in_bits = 0;
+ *self.index += 1;
+ let mut last_field_name = format!("_bitfield_{}", self.index);
+ let mut last_field_align = 0;
- let mut offset = 0;
for field in self.fields {
let width = field.bitfield().unwrap();
- let field_name = field.name()
- .map(ToOwned::to_owned)
- .unwrap_or_else(|| format!("at_offset_{}", offset));
-
let field_item = ctx.resolve_item(field.ty());
let field_ty_layout = field_item.kind()
.expect_type()
.layout(ctx)
.expect("Bitfield without layout? Gah!");
- let field_type = field_item.to_rust_ty(ctx);
- let int_type = BlobTyBuilder::new(field_ty_layout).build();
+ let field_align = field_ty_layout.align;
- let getter_name = ctx.rust_ident(&field_name);
- let setter_name = ctx.ext_cx()
- .ident_of(&format!("set_{}", &field_name));
- let mask = ((1usize << width) - 1) << offset;
- let prefix = ctx.trait_prefix();
- // The transmute is unfortunate, but it's needed for enums in
- // bitfields.
- let item = quote_item!(ctx.ext_cx(),
- impl X {
- #[inline]
- pub fn $getter_name(&self) -> $field_type {
- unsafe {
- ::$prefix::mem::transmute(
- (
- (self.$field_ident &
- ($mask as $bitfield_type))
- >> $offset
- ) as $int_type
- )
- }
- }
+ if field_size_in_bits != 0 &&
+ (width == 0 || width as usize > unfilled_bits_in_last_unit) {
+ field_size_in_bits = align_to(field_size_in_bits, field_align);
+ // Push the new field.
+ let ty =
+ BlobTyBuilder::new(Layout::new(bytes_from_bits_pow2(field_size_in_bits),
+ bytes_from_bits_pow2(last_field_align)))
+ .build();
- #[inline]
- pub fn $setter_name(&mut self, val: $field_type) {
- self.$field_ident &= !($mask as $bitfield_type);
- self.$field_ident |=
- (val as $int_type as $bitfield_type << $offset) &
- ($mask as $bitfield_type);
- }
- }
- )
- .unwrap();
+ let field = StructFieldBuilder::named(&last_field_name)
+ .pub_()
+ .build_ty(ty);
+ fields.push(field);
- let items = match item.unwrap().node {
- ast::ItemKind::Impl(_, _, _, _, _, items) => items,
- _ => unreachable!(),
- };
+ // TODO(emilio): dedup this.
+ *self.index += 1;
+ last_field_name = format!("_bitfield_{}", self.index);
+
+                // Now reset the size and the rest of the bookkeeping.
+ // unfilled_bits_in_last_unit = 0;
+ field_size_in_bits = 0;
+ last_field_align = 0;
+ }
+
+            // TODO(emilio): Create the accessors. The problem is that we
+            // still don't know what the final alignment of the bitfield unit
+            // will be, or whether we have to index into it, so we don't know
+            // which integer type we need.
+            //
+            // We could push them to a Vec or something, but given how buggy
+            // they were, maybe that's not a great idea?
+ field_size_in_bits += width as usize;
+ total_size_in_bits += width as usize;
+
+
+ let data_size = align_to(field_size_in_bits, field_align * 8);
+
+ max_align = cmp::max(max_align, field_align);
+
+ // NB: The width here is completely, absolutely intentional.
+ last_field_align = cmp::max(last_field_align, width as usize);
+
+ unfilled_bits_in_last_unit = data_size - field_size_in_bits;
+ }
+
+ if field_size_in_bits != 0 {
+ // Push the last field.
+ let ty =
+ BlobTyBuilder::new(Layout::new(bytes_from_bits_pow2(field_size_in_bits),
+ bytes_from_bits_pow2(last_field_align)))
+ .build();
- methods.extend(items.into_iter());
- offset += width;
+ let field = StructFieldBuilder::named(&last_field_name)
+ .pub_()
+ .build_ty(ty);
+ fields.push(field);
}
- bitfield_layout
+ Layout::new(bytes_from_bits(total_size_in_bits), max_align)
}
}
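
To make the unit-allocation loop above easier to follow, here is a minimal, self-contained sketch of the same grouping logic. It is illustration only: allocate_units and its (bit width, byte alignment) input pairs are made-up names, and the align_to(field_size_in_bits, field_align) rounding that the real code performs before flushing a unit is elided.

// Hypothetical, simplified model of the bitfield unit allocation above.

/// Rounds `size` up to a multiple of `align` (no-op when `align` is zero).
fn align_to(size: usize, align: usize) -> usize {
    if align == 0 || size % align == 0 {
        size
    } else {
        size + align - size % align
    }
}

/// Smallest power-of-two byte count able to hold `n` bits.
fn bytes_from_bits_pow2(n: usize) -> usize {
    if n == 0 { 0 } else if n <= 8 { 1 } else { n.next_power_of_two() / 8 }
}

/// `fields` holds one (bit width, type alignment in bytes) pair per declared
/// bitfield; the result is one (size, align) pair per generated storage unit.
fn allocate_units(fields: &[(usize, usize)]) -> Vec<(usize, usize)> {
    let mut units = vec![];
    let (mut unit_bits, mut unit_align_bits, mut unfilled_bits) = (0, 0, 0);
    for &(width, field_align) in fields {
        // A zero-width bitfield, or one that doesn't fit in the bits left in
        // the current unit, flushes the unit and starts a new one.
        if unit_bits != 0 && (width == 0 || width > unfilled_bits) {
            units.push((bytes_from_bits_pow2(unit_bits),
                        bytes_from_bits_pow2(unit_align_bits)));
            unit_bits = 0;
            unit_align_bits = 0;
        }
        unit_bits += width;
        unit_align_bits = ::std::cmp::max(unit_align_bits, width);
        // Bits still available before the unit grows to the next multiple of
        // the field type's alignment.
        unfilled_bits = align_to(unit_bits, field_align * 8) - unit_bits;
    }
    if unit_bits != 0 {
        units.push((bytes_from_bits_pow2(unit_bits),
                    bytes_from_bits_pow2(unit_align_bits)));
    }
    units
}

fn main() {
    // struct { int a : 3; int b : 4; int c : 28; }; with a 4-byte int:
    // `a` and `b` share a 1-byte blob, `c` spills into its own 4-byte unit.
    assert_eq!(allocate_units(&[(3, 4), (4, 4), (28, 4)]), vec![(1, 1), (4, 4)]);
}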
@@ -1062,12 +1069,10 @@ impl CodeGenerator for CompInfo {
debug_assert!(!current_bitfield_fields.is_empty());
let bitfield_fields =
mem::replace(&mut current_bitfield_fields, vec![]);
- bitfield_count += 1;
- let bitfield_layout = Bitfield::new(bitfield_count,
+ let bitfield_layout = Bitfield::new(&mut bitfield_count,
bitfield_fields)
.codegen_fields(ctx, &mut fields, &mut methods);
-
- struct_layout.saw_bitfield(bitfield_layout);
+ struct_layout.saw_bitfield_batch(bitfield_layout);
current_bitfield_width = None;
current_bitfield_layout = None;
@@ -1099,8 +1104,7 @@ impl CodeGenerator for CompInfo {
} else {
quote_ty!(ctx.ext_cx(), __BindgenUnionField<$ty>)
}
- } else if let Some(item) =
- field_ty.is_incomplete_array(ctx) {
+ } else if let Some(item) = field_ty.is_incomplete_array(ctx) {
result.saw_incomplete_array();
let inner = item.to_rust_ty(ctx);
@@ -1224,12 +1228,10 @@ impl CodeGenerator for CompInfo {
debug_assert!(!current_bitfield_fields.is_empty());
let bitfield_fields = mem::replace(&mut current_bitfield_fields,
vec![]);
- bitfield_count += 1;
- let bitfield_layout = Bitfield::new(bitfield_count,
+ let bitfield_layout = Bitfield::new(&mut bitfield_count,
bitfield_fields)
.codegen_fields(ctx, &mut fields, &mut methods);
-
- struct_layout.saw_bitfield(bitfield_layout);
+ struct_layout.saw_bitfield_batch(bitfield_layout);
}
debug_assert!(current_bitfield_fields.is_empty());
@@ -1268,7 +1270,7 @@ impl CodeGenerator for CompInfo {
}
} else if !is_union && !self.is_unsized(ctx) {
if let Some(padding_field) =
- layout.and_then(|layout| struct_layout.pad_struct(layout)) {
+ layout.and_then(|layout| struct_layout.pad_struct(&canonical_name, layout)) {
fields.push(padding_field);
}
@@ -2174,8 +2176,8 @@ impl ToRustTy for Type {
quote_ty!(ctx.ext_cx(), ::$prefix::option::Option<$ty>)
}
TypeKind::Array(item, len) => {
- let inner = item.to_rust_ty(ctx);
- aster::ty::TyBuilder::new().array(len).build(inner)
+ let ty = item.to_rust_ty(ctx);
+ aster::ty::TyBuilder::new().array(len).build(ty)
}
TypeKind::Enum(..) => {
let path = item.namespace_aware_canonical_path(ctx);
@@ -2190,7 +2192,7 @@ impl ToRustTy for Type {
.map(|arg| arg.to_rust_ty(ctx))
.collect::<Vec<_>>();
- path.segments.last_mut().unwrap().parameters = if
+ path.segments.last_mut().unwrap().parameters = if
template_args.is_empty() {
None
} else {
diff --git a/src/codegen/struct_layout.rs b/src/codegen/struct_layout.rs
index f8a88bc2..24938c16 100644
--- a/src/codegen/struct_layout.rs
+++ b/src/codegen/struct_layout.rs
@@ -7,7 +7,7 @@ use aster::struct_field::StructFieldBuilder;
use ir::comp::CompInfo;
use ir::context::BindgenContext;
use ir::layout::Layout;
-use ir::ty::Type;
+use ir::ty::{Type, TypeKind};
use std::cmp;
use std::mem;
@@ -21,6 +21,84 @@ pub struct StructLayoutTracker<'a, 'ctx: 'a> {
padding_count: usize,
latest_field_layout: Option<Layout>,
max_field_align: usize,
+ last_field_was_bitfield: bool,
+}
+
+/// Rounds a size up to a multiple of the given alignment; an alignment of
+/// zero returns the size unchanged.
+pub fn align_to(size: usize, align: usize) -> usize {
+ if align == 0 {
+ return size;
+ }
+
+ let rem = size % align;
+ if rem == 0 {
+ return size;
+ }
+
+ size + align - rem
+}
+
+/// Returns the number of bytes needed to hold a given number of bits,
+/// rounding up.
+pub fn bytes_from_bits(n: usize) -> usize {
+ if n % 8 == 0 {
+ return n / 8;
+ }
+
+ n / 8 + 1
+}
+
+/// Returns the smallest power-of-two byte count that can hold at least `n`
+/// bits.
+pub fn bytes_from_bits_pow2(mut n: usize) -> usize {
+ if n == 0 {
+ return 0;
+ }
+
+ if n <= 8 {
+ return 1;
+ }
+
+ if !n.is_power_of_two() {
+ n = n.next_power_of_two();
+ }
+
+ n / 8
+}
+
+#[test]
+fn test_align_to() {
+ assert_eq!(align_to(1, 1), 1);
+ assert_eq!(align_to(1, 2), 2);
+ assert_eq!(align_to(1, 4), 4);
+ assert_eq!(align_to(5, 1), 5);
+ assert_eq!(align_to(17, 4), 20);
+}
+
+#[test]
+fn test_bytes_from_bits_pow2() {
+ assert_eq!(bytes_from_bits_pow2(0), 0);
+ for i in 1..9 {
+ assert_eq!(bytes_from_bits_pow2(i), 1);
+ }
+ for i in 9..17 {
+ assert_eq!(bytes_from_bits_pow2(i), 2);
+ }
+ for i in 17..33 {
+ assert_eq!(bytes_from_bits_pow2(i), 4);
+ }
+}
+
+#[test]
+fn test_bytes_from_bits() {
+ assert_eq!(bytes_from_bits(0), 0);
+ for i in 1..9 {
+ assert_eq!(bytes_from_bits(i), 1);
+ }
+ for i in 9..17 {
+ assert_eq!(bytes_from_bits(i), 2);
+ }
+ for i in 17..25 {
+ assert_eq!(bytes_from_bits(i), 3);
+ }
}
impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
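
As a purely illustrative consistency check on the three helpers added above (not part of the patch), one could write a property-style test along these lines:

// Illustrative only: a property-style test tying the helpers together.
#[test]
fn helpers_are_consistent() {
    for bits in 0..128usize {
        // The power-of-two blob is never smaller than the exact byte count,
        // and the exact byte count always holds the requested bits.
        assert!(bytes_from_bits_pow2(bits) >= bytes_from_bits(bits));
        assert!(bytes_from_bits(bits) * 8 >= bits);
    }
    for size in 0..64usize {
        for &align in &[1usize, 2, 4, 8] {
            // align_to returns the smallest multiple of `align` >= `size`.
            let aligned = align_to(size, align);
            assert!(aligned >= size && aligned % align == 0 && aligned - size < align);
        }
    }
}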
@@ -32,6 +110,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
padding_count: 0,
latest_field_layout: None,
max_field_align: 0,
+ last_field_was_bitfield: false,
}
}
@@ -43,112 +122,149 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
pub fn saw_base(&mut self, base_ty: &Type) {
- self.align_to_latest_field();
-
if let Some(layout) = base_ty.layout(self.ctx) {
+ self.align_to_latest_field(layout);
+
self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
self.max_field_align = cmp::max(self.max_field_align, layout.align);
}
}
- pub fn saw_bitfield(&mut self, layout: Layout) {
- self.align_to_latest_field();
+ pub fn saw_bitfield_batch(&mut self, layout: Layout) {
+ self.align_to_latest_field(layout);
+
+ self.latest_offset += layout.size;
+
+ debug!("Offset: <bitfield>: {} -> {}",
+ self.latest_offset - layout.size,
+ self.latest_offset);
- self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
- self.max_field_align = cmp::max(self.max_field_align, layout.align);
+ self.last_field_was_bitfield = true;
+        // NB: We intentionally don't update max_field_align here, since our
+        // bitfield code doesn't necessarily honour the bitfield's alignment,
+        // so we may need to generate an explicit dummy alignment field instead.
}
pub fn saw_union(&mut self, layout: Layout) {
- self.align_to_latest_field();
+ self.align_to_latest_field(layout);
self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
self.max_field_align = cmp::max(self.max_field_align, layout.align);
}
+ /// Add a padding field if necessary for a given new field _before_ adding
+ /// that field.
pub fn pad_field(&mut self,
field_name: &str,
field_ty: &Type,
field_offset: Option<usize>)
-> Option<ast::StructField> {
- field_ty.layout(self.ctx).and_then(|field_layout| {
- self.align_to_latest_field();
+ let mut field_layout = match field_ty.layout(self.ctx) {
+ Some(l) => l,
+ None => return None,
+ };
+
+ if let TypeKind::Array(inner, len) = *field_ty.canonical_type(self.ctx).kind() {
+            // FIXME(emilio): As an _ultra_ hack, we correct the layout
+            // reported for arrays of structs whose alignment is bigger than
+            // what we can support.
+ //
+ // This means that the structs in the array are super-unsafe to
+ // access, since they won't be properly aligned, but *shrug*.
+ if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) {
+ if layout.align > mem::size_of::<*mut ()>() {
+ field_layout.size =
+ align_to(layout.size, layout.align) * len;
+ field_layout.align = mem::size_of::<*mut ()>();
+ }
+ }
+ }
- let padding_layout = if self.comp.packed() {
- None
- } else {
- let calculated_layout = field_ty.as_comp()
- .and_then(|comp| comp.calc_layout(self.ctx))
- .unwrap_or(field_layout);
-
- let align = cmp::min(calculated_layout.align, mem::size_of::<*mut ()>());
-
- let (padding_bytes, need_padding) = match field_offset {
- Some(offset) if offset / 8 > self.latest_offset => {
- (offset / 8 - self.latest_offset, true)
- }
- _ if field_layout.align != 0 => {
- (self.padding_bytes(field_layout), (self.latest_offset % field_layout.align) != 0)
- }
- _ => {
- (0, false)
- }
- };
-
- self.latest_offset += padding_bytes;
-
- debug!("align field {} to {}/{} with {} padding bytes {:?}, calculated {:?}",
- field_name,
- self.latest_offset,
- field_offset.unwrap_or(0) / 8,
- padding_bytes,
- field_layout,
- calculated_layout);
-
- if need_padding &&
- (padding_bytes > calculated_layout.align ||
- field_layout.align > mem::size_of::<*mut ()>()) {
- Some(Layout::new(padding_bytes, align))
- } else {
- None
+ let will_merge_with_bitfield = self.align_to_latest_field(field_layout);
+
+ let padding_layout = if self.comp.packed() {
+ None
+ } else {
+ let padding_bytes = match field_offset {
+ Some(offset) if offset / 8 > self.latest_offset => {
+ offset / 8 - self.latest_offset
}
+ _ if will_merge_with_bitfield || field_layout.align == 0 => 0,
+ _ => self.padding_bytes(field_layout),
};
- self.latest_offset += field_ty.calc_size(self.ctx).unwrap_or(field_layout.size);
+        // Padding smaller than the field's own alignment would be introduced
+        // by the compiler anyway, so emitting it explicitly would be useless.
+ let need_padding = padding_bytes >= field_layout.align;
- self.latest_field_layout = Some(field_layout);
- self.max_field_align = cmp::max(self.max_field_align, field_layout.align);
+ self.latest_offset += padding_bytes;
- padding_layout.map(|layout| self.padding_field(layout))
- })
- }
+ debug!("Offset: <padding>: {} -> {}",
+ self.latest_offset - padding_bytes,
+ self.latest_offset);
- pub fn pad_struct(&mut self, layout: Layout) -> Option<ast::StructField> {
- if layout.size < self.latest_offset {
- warn!("calculate struct layout incorrect, too more {} bytes",
- self.latest_offset - layout.size);
+ debug!("align field {} to {}/{} with {} padding bytes {:?}",
+ field_name,
+ self.latest_offset,
+ field_offset.unwrap_or(0) / 8,
+ padding_bytes,
+ field_layout);
- None
- } else {
- let padding_bytes = layout.size - self.latest_offset;
- let struct_align = cmp::min(layout.align,
- mem::size_of::<*mut ()>());
-
- if padding_bytes > struct_align ||
- (layout.align > mem::size_of::<*mut ()>() && padding_bytes > 0) {
- let padding_align = if self.comp.packed() {
- 1
- } else {
- cmp::min(1 << padding_bytes.trailing_zeros(),
- mem::size_of::<*mut ()>())
- };
-
- Some(self.padding_field(Layout::new(padding_bytes, padding_align)))
+ if need_padding && padding_bytes != 0 {
+ Some(Layout::new(padding_bytes, field_layout.align))
} else {
None
}
+ };
+
+ self.latest_offset += field_layout.size;
+ self.latest_field_layout = Some(field_layout);
+ self.max_field_align = cmp::max(self.max_field_align, field_layout.align);
+ self.last_field_was_bitfield = false;
+
+ debug!("Offset: {}: {} -> {}",
+ field_name,
+ self.latest_offset - field_layout.size,
+ self.latest_offset);
+
+ padding_layout.map(|layout| self.padding_field(layout))
+ }
+
+ pub fn pad_struct(&mut self, name: &str, layout: Layout) -> Option<ast::StructField> {
+ if layout.size < self.latest_offset {
+            error!("Calculated wrong layout for {}, {} bytes too many",
+                   name, self.latest_offset - layout.size);
+ return None
+ }
+
+ let padding_bytes = layout.size - self.latest_offset;
+
+ // We always pad to get to the correct size if the struct is one of
+ // those we can't align properly.
+ //
+ // Note that if the last field we saw was a bitfield, we may need to pad
+ // regardless, because bitfields don't respect alignment as strictly as
+ // other fields.
+ if padding_bytes > 0 &&
+ (padding_bytes >= layout.align ||
+ (self.last_field_was_bitfield &&
+ padding_bytes >= self.latest_field_layout.unwrap().align) ||
+ layout.align > mem::size_of::<*mut ()>()) {
+ let layout = if self.comp.packed() {
+ Layout::new(padding_bytes, 1)
+ } else if self.last_field_was_bitfield ||
+ layout.align > mem::size_of::<*mut ()>() {
+ // We've already given up on alignment here.
+ Layout::for_size(padding_bytes)
+ } else {
+ Layout::new(padding_bytes, layout.align)
+ };
+
+ Some(self.padding_field(layout))
+ } else {
+ None
}
}
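
For reference, the trailing-padding decision in pad_struct boils down to roughly the following free-standing sketch. tail_padding and its parameter list are hypothetical names, the packed() branch is left out, and the literal 1 merely stands in for whatever alignment Layout::for_size would choose.

// Hypothetical reduction of the decision above; `packed` structs and the real
// Layout/ast types are left out.
fn tail_padding(declared_size: usize,
                declared_align: usize,
                latest_offset: usize,
                last_field_was_bitfield: bool,
                last_bitfield_align: usize,
                pointer_size: usize)
                -> Option<(usize, usize)> {
    if declared_size < latest_offset {
        // We computed an offset past the declared size; nothing sane to emit.
        return None;
    }
    let padding_bytes = declared_size - latest_offset;
    let needed = padding_bytes > 0 &&
        (padding_bytes >= declared_align ||
         (last_field_was_bitfield && padding_bytes >= last_bitfield_align) ||
         declared_align > pointer_size);
    if !needed {
        return None;
    }
    let align = if last_field_was_bitfield || declared_align > pointer_size {
        1 // stand-in for whatever Layout::for_size would pick
    } else {
        declared_align
    };
    Some((padding_bytes, align))
}

fn main() {
    // 3 trailing bytes with align 4: rustc's own tail padding covers them.
    assert_eq!(tail_padding(8, 4, 5, false, 0, 8), None);
    // A whole missing 8-byte tail has to be spelled out explicitly.
    assert_eq!(tail_padding(16, 4, 8, false, 0, 8), Some((8, 4)));
}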
@@ -166,15 +282,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
fn padding_bytes(&self, layout: Layout) -> usize {
- if layout.align == 0 {
- warn!("try to padding bytes without layout");
-
- 0
- } else if self.latest_offset % layout.align == 0 {
- 0
- } else {
- layout.align - (self.latest_offset % layout.align)
- }
+ align_to(self.latest_offset, layout.align) - self.latest_offset
}
fn padding_field(&mut self, layout: Layout) -> ast::StructField {
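
The rewritten padding_bytes is now just "distance from latest_offset to the next multiple of the alignment"; a hypothetical spot check (not part of the patch), using the align_to helper added in this file:

// Illustrative spot checks of the simplified padding_bytes.
#[test]
fn padding_is_distance_to_next_aligned_offset() {
    assert_eq!(align_to(5, 4) - 5, 3);  // offset 5, align 4 -> pad 3 bytes
    assert_eq!(align_to(8, 4) - 8, 0);  // already aligned -> no padding
    assert_eq!(align_to(7, 0) - 7, 0);  // align 0 now falls out without a warn!()
}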
@@ -190,11 +298,37 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
StructFieldBuilder::named(padding_field_name).pub_().build_ty(ty)
}
- fn align_to_latest_field(&mut self) {
+ /// Returns whether the new field is known to merge with a bitfield.
+ ///
+ /// This is just to avoid doing the same check also in pad_field.
+ fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
if self.comp.packed() {
- // skip to align field when packed
- } else if let Some(layout) = self.latest_field_layout {
- self.latest_offset += self.padding_bytes(layout);
+            // Packed structs don't get per-field alignment, so skip it.
+ return false;
+ }
+
+ let layout = match self.latest_field_layout {
+ Some(l) => l,
+ None => return false,
+ };
+
+        // If the last field was a bitfield, we may or may not need to align,
+        // depending on the new field's alignment and the bitfield's size and
+        // alignment.
+ debug!("align_to_bitfield? {}: {:?} {:?}", self.last_field_was_bitfield,
+ layout, new_field_layout);
+
+ if self.last_field_was_bitfield &&
+ new_field_layout.align <= layout.size % layout.align &&
+ new_field_layout.size <= layout.size % layout.align {
+ // The new field will be coalesced into some of the remaining bits.
+ //
+ // FIXME(emilio): I think this may not catch everything?
+ debug!("Will merge with bitfield");
+ return true;
}
+
+ // Else, just align the obvious way.
+ self.latest_offset += self.padding_bytes(layout);
+ return false;
}
}
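
Finally, the merge check in align_to_latest_field reduces to the sketch below. merges_with_bitfield is a hypothetical name for illustration; the real method also advances latest_offset when no merge happens.

// Hypothetical reduction of the merge check; `unit` is the (size, align) of
// the last bitfield blob, `field` the (size, align) of the incoming field.
fn merges_with_bitfield(unit: (usize, usize), field: (usize, usize)) -> bool {
    let (unit_size, unit_align) = unit;
    let (field_size, field_align) = field;
    if unit_align == 0 {
        return false;
    }
    // Bytes the blob leaves unused inside its last alignment chunk.
    let leftover = unit_size % unit_align;
    field_align <= leftover && field_size <= leftover
}

fn main() {
    // struct { int a : 8; int b : 8; int c : 8; char d; };
    // The bitfields form a 3-byte blob inside a 4-byte-aligned unit, so the
    // char fits in the leftover byte and no extra padding is inserted.
    assert!(merges_with_bitfield((3, 4), (1, 1)));
    // A following int does not fit and forces the usual alignment.
    assert!(!merges_with_bitfield((3, 4), (4, 4)));
}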