-rw-r--r--  .travis.yml  1
-rw-r--r--  example-graphviz-ir.png  bin 842576 -> 1343520 bytes
-rw-r--r--  src/clang.rs  13
-rw-r--r--  src/codegen/mod.rs  14
-rw-r--r--  src/codegen/struct_layout.rs  6
-rw-r--r--  src/ir/comp.rs  31
-rw-r--r--  src/ir/context.rs  41
-rw-r--r--  src/ir/dot.rs  53
-rw-r--r--  src/ir/function.rs  30
-rw-r--r--  src/ir/item.rs  45
-rw-r--r--  src/ir/item_kind.rs  20
-rw-r--r--  src/ir/layout.rs  2
-rw-r--r--  src/ir/mod.rs  1
-rw-r--r--  src/ir/module.rs  12
-rw-r--r--  src/ir/named.rs  236
-rw-r--r--  src/ir/objc.rs  46
-rw-r--r--  src/ir/traversal.rs  124
-rw-r--r--  src/ir/ty.rs  190
-rw-r--r--  src/ir/var.rs  20
-rw-r--r--  src/lib.rs  22
-rw-r--r--  src/options.rs  3
-rw-r--r--  tests/expectations/tests/anon_struct_in_union.rs  92
-rw-r--r--  tests/expectations/tests/layout_array_too_long.rs  207
-rw-r--r--  tests/expectations/tests/layout_large_align_field.rs  419
-rw-r--r--  tests/expectations/tests/objc_category.rs  23
-rw-r--r--  tests/expectations/tests/objc_class.rs  21
-rw-r--r--  tests/expectations/tests/objc_interface.rs  4
-rw-r--r--  tests/expectations/tests/objc_protocol.rs  15
-rw-r--r--  tests/expectations/tests/objc_sel_and_id.rs  22
-rw-r--r--  tests/headers/anon_struct_in_union.h  7
-rw-r--r--  tests/headers/layout_array_too_long.h  60
-rw-r--r--  tests/headers/layout_large_align_field.h  97
-rw-r--r--  tests/headers/objc_category.h  10
-rw-r--r--  tests/headers/objc_class.h  10
-rw-r--r--  tests/headers/objc_protocol.h  8
-rw-r--r--  tests/headers/objc_sel_and_id.h  7
36 files changed, 1744 insertions(+), 168 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index ba5ec867..0d256172 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,7 +9,6 @@ addons:
os:
- linux
- - osx
rust:
- stable
diff --git a/example-graphviz-ir.png b/example-graphviz-ir.png
index c990f7e7..1e554a83 100644
--- a/example-graphviz-ir.png
+++ b/example-graphviz-ir.png
Binary files differ
diff --git a/src/clang.rs b/src/clang.rs
index 613e08e8..1a45eefa 100644
--- a/src/clang.rs
+++ b/src/clang.rs
@@ -875,7 +875,11 @@ impl Type {
pub fn named(&self) -> Type {
unsafe {
Type {
- x: clang_Type_getNamedType(self.x),
+ x: if clang_Type_getNamedType::is_loaded() {
+ clang_Type_getNamedType(self.x)
+ } else {
+ self.x
+ },
}
}
}
@@ -1498,6 +1502,13 @@ pub fn ast_dump(c: &Cursor, depth: isize) -> CXChildVisitResult {
&specialized);
}
}
+
+ if let Some(parent) = c.fallible_semantic_parent() {
+ println!("");
+ print_cursor(depth,
+ String::from(prefix) + "semantic-parent.",
+ &parent);
+ }
}
fn print_type<S: AsRef<str>>(depth: isize, prefix: S, ty: &Type) {
diff --git a/src/codegen/mod.rs b/src/codegen/mod.rs
index 77f654e6..46b0a3e7 100644
--- a/src/codegen/mod.rs
+++ b/src/codegen/mod.rs
@@ -10,6 +10,7 @@ use ir::annotations::FieldAccessorKind;
use ir::comp::{Base, CompInfo, CompKind, Field, Method, MethodKind};
use ir::context::{BindgenContext, ItemId};
use ir::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use ir::dot;
use ir::enum_ty::{Enum, EnumVariant, EnumVariantValue};
use ir::function::{Function, FunctionSig};
use ir::int::IntKind;
@@ -646,6 +647,9 @@ impl CodeGenerator for Type {
TypeKind::Enum(ref ei) => {
ei.codegen(ctx, result, whitelisted_items, item)
}
+ TypeKind::ObjCId | TypeKind::ObjCSel => {
+ result.saw_objc();
+ }
TypeKind::ObjCInterface(ref interface) => {
interface.codegen(ctx, result, whitelisted_items, item)
}
@@ -2275,6 +2279,8 @@ impl ToRustTy for Type {
let ident = ctx.rust_ident(&name);
quote_ty!(ctx.ext_cx(), $ident)
}
+ TypeKind::ObjCSel => quote_ty!(ctx.ext_cx(), objc::runtime::Sel),
+ TypeKind::ObjCId |
TypeKind::ObjCInterface(..) => quote_ty!(ctx.ext_cx(), id),
ref u @ TypeKind::UnresolvedTypeRef(..) => {
unreachable!("Should have been resolved after parsing {:?}!", u)
@@ -2460,10 +2466,12 @@ impl CodeGenerator for ObjCInterface {
}
+ let trait_name = self.rust_name();
+
let trait_block = aster::AstBuilder::new()
.item()
.pub_()
- .trait_(self.name())
+ .trait_(&trait_name)
.with_items(trait_items)
.build();
@@ -2472,7 +2480,7 @@ impl CodeGenerator for ObjCInterface {
.item()
.impl_()
.trait_()
- .id(self.name())
+ .id(&trait_name)
.build()
.with_items(impl_items)
.build_ty(ty_for_impl);
@@ -2502,7 +2510,7 @@ pub fn codegen(context: &mut BindgenContext) -> Vec<P<ast::Item>> {
}
if let Some(path) = context.options().emit_ir_graphviz.as_ref() {
- match context.emit_ir_graphviz(path.clone()) {
+ match dot::write_dot_file(context, path) {
Ok(()) => info!("Your dot file was generated successfully into: {}", path),
Err(e) => error!("{}", e),
}
diff --git a/src/codegen/struct_layout.rs b/src/codegen/struct_layout.rs
index 24938c16..724bef98 100644
--- a/src/codegen/struct_layout.rs
+++ b/src/codegen/struct_layout.rs
@@ -197,7 +197,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
};
// Otherwise the padding is useless.
- let need_padding = padding_bytes >= field_layout.align;
+ let need_padding = padding_bytes >= field_layout.align || field_layout.align > mem::size_of::<*mut ()>();
self.latest_offset += padding_bytes;
@@ -213,7 +213,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
field_layout);
if need_padding && padding_bytes != 0 {
- Some(Layout::new(padding_bytes, field_layout.align))
+ Some(Layout::new(padding_bytes, cmp::min(field_layout.align, mem::size_of::<*mut ()>())))
} else {
None
}
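The two hunks above force explicit padding whenever a field's alignment exceeds pointer size, and clamp the padding's own alignment to pointer size, since the padding is emitted as a plain integer array that cannot carry a larger alignment. A minimal standalone sketch of the clamping (hypothetical helper, not bindgen's API):

```rust
use std::cmp;
use std::mem;

// Padding is emitted as a plain integer array, so its layout's alignment is
// capped at pointer size even when the skipped field is more strictly aligned.
fn padding_layout(padding_bytes: usize, field_align: usize) -> (usize, usize) {
    (padding_bytes, cmp::min(field_align, mem::size_of::<*mut ()>()))
}

fn main() {
    // On x86_64, 24 bytes of padding before a 32-byte-aligned field are
    // represented with 8-byte alignment.
    assert_eq!(padding_layout(24, 32), (24, 8));
}
```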
@@ -262,6 +262,8 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
Layout::new(padding_bytes, layout.align)
};
+ debug!("pad bytes to struct {}, {:?}", name, layout);
+
Some(self.padding_field(layout))
} else {
None
diff --git a/src/ir/comp.rs b/src/ir/comp.rs
index ce6ec25d..b97879f7 100644
--- a/src/ir/comp.rs
+++ b/src/ir/comp.rs
@@ -625,8 +625,19 @@ impl CompInfo {
// StructDecl to note incomplete structs that hasn't been
// forward-declared before, see:
//
+ // Also, clang seems to scope struct definitions inside
+ // unions to the whole translation unit. Since those are
+ // anonymous, let's just assume that if the cursor we've
+ // found is a definition it's a valid inner type.
+ //
+ // Note that doing this might always be fine, but let's just
+ // keep the union check for now.
+ //
// https://github.com/servo/rust-bindgen/issues/482
- if cur.semantic_parent() != cursor {
+ let is_inner_struct = cur.semantic_parent() == cursor ||
+ (kind == CompKind::Union &&
+ cur.is_definition());
+ if !is_inner_struct {
return CXChildVisit_Continue;
}
@@ -871,7 +882,7 @@ impl CompInfo {
}
impl TemplateDeclaration for CompInfo {
- fn template_params(&self, _ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ fn self_template_params(&self, _ctx: &BindgenContext) -> Option<Vec<ItemId>> {
if self.template_args.is_empty() {
None
} else {
@@ -1040,10 +1051,10 @@ impl Trace for CompInfo {
if let Some(template) = self.specialized_template() {
// This is an instantiation of a template declaration with concrete
// template type arguments.
- tracer.visit(template);
+ tracer.visit_kind(template, EdgeKind::TemplateDeclaration);
let args = item.applicable_template_args(context);
for a in args {
- tracer.visit(a);
+ tracer.visit_kind(a, EdgeKind::TemplateArgument);
}
} else {
let params = item.applicable_template_args(context);
@@ -1055,27 +1066,27 @@ impl Trace for CompInfo {
}
for base in self.base_members() {
- tracer.visit(base.ty);
+ tracer.visit_kind(base.ty, EdgeKind::BaseMember);
}
for field in self.fields() {
- tracer.visit(field.ty());
+ tracer.visit_kind(field.ty(), EdgeKind::Field);
}
for &ty in self.inner_types() {
- tracer.visit(ty);
+ tracer.visit_kind(ty, EdgeKind::InnerType);
}
for &var in self.inner_vars() {
- tracer.visit(var);
+ tracer.visit_kind(var, EdgeKind::InnerVar);
}
for method in self.methods() {
- tracer.visit(method.signature);
+ tracer.visit_kind(method.signature, EdgeKind::Method);
}
for &ctor in self.constructors() {
- tracer.visit(ctor);
+ tracer.visit_kind(ctor, EdgeKind::Constructor);
}
}
}
diff --git a/src/ir/context.rs b/src/ir/context.rs
index 7383c09a..27a43f20 100644
--- a/src/ir/context.rs
+++ b/src/ir/context.rs
@@ -5,7 +5,7 @@ use super::int::IntKind;
use super::item::{Item, ItemCanonicalPath, ItemSet};
use super::item_kind::ItemKind;
use super::module::{Module, ModuleKind};
-use super::traversal::{self, Edge, ItemTraversal, Trace};
+use super::traversal::{self, Edge, ItemTraversal};
use super::ty::{FloatKind, TemplateDeclaration, Type, TypeKind};
use BindgenOptions;
use cexpr;
@@ -18,8 +18,6 @@ use std::cell::Cell;
use std::collections::{HashMap, hash_map};
use std::collections::btree_map::{self, BTreeMap};
use std::fmt;
-use std::fs::File;
-use std::io::{self, Write};
use std::iter::IntoIterator;
use syntax::ast::Ident;
use syntax::codemap::{DUMMY_SP, Span};
@@ -636,7 +634,7 @@ impl<'ctx> BindgenContext<'ctx> {
.and_then(|canon_decl| {
self.get_resolved_type(&canon_decl)
.and_then(|template_decl_id| {
- template_decl_id.num_template_params(self)
+ template_decl_id.num_self_template_params(self)
.map(|num_params| {
(*canon_decl.cursor(),
template_decl_id,
@@ -660,7 +658,7 @@ impl<'ctx> BindgenContext<'ctx> {
.cloned()
})
.and_then(|template_decl| {
- template_decl.num_template_params(self)
+ template_decl.num_self_template_params(self)
.map(|num_template_params| {
(*template_decl.decl(),
template_decl.id(),
@@ -708,7 +706,7 @@ impl<'ctx> BindgenContext<'ctx> {
use clang_sys;
let num_expected_args = match self.resolve_type(template)
- .num_template_params(self) {
+ .num_self_template_params(self) {
Some(n) => n,
None => {
warn!("Tried to instantiate a template for which we could not \
@@ -1111,33 +1109,6 @@ impl<'ctx> BindgenContext<'ctx> {
&self.options
}
- /// Output graphviz dot file.
- pub fn emit_ir_graphviz(&self, path: String) -> io::Result<()> {
- let file = try!(File::create(path));
- let mut dot_file = io::BufWriter::new(file);
- writeln!(&mut dot_file, "digraph {{")?;
-
- let mut err: Option<io::Result<_>> = None;
-
- for (id, item) in self.items() {
- writeln!(&mut dot_file, "{} {};", id.0, item.dot_attributes(self))?;
-
- item.trace(self, &mut |sub_id: ItemId, _edge_kind| {
- match writeln!(&mut dot_file, "{} -> {};", id.0, sub_id.as_usize()) {
- Ok(_) => {},
- Err(e) => err = Some(Err(e)),
- }
- }, &());
-
- if err.is_some() {
- return err.unwrap();
- }
- }
-
- writeln!(&mut dot_file, "}}")?;
- Ok(())
- }
-
/// Tokenizes a namespace cursor in order to get the name and kind of the
/// namespace,
fn tokenize_namespace(&self,
@@ -1360,13 +1331,13 @@ impl PartialType {
}
impl TemplateDeclaration for PartialType {
- fn template_params(&self, _ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ fn self_template_params(&self, _ctx: &BindgenContext) -> Option<Vec<ItemId>> {
// Maybe at some point we will eagerly parse named types, but for now we
// don't and this information is unavailable.
None
}
- fn num_template_params(&self, _ctx: &BindgenContext) -> Option<usize> {
+ fn num_self_template_params(&self, _ctx: &BindgenContext) -> Option<usize> {
// Wouldn't it be nice if libclang would reliably give us this
// information‽
match self.decl().kind() {
diff --git a/src/ir/dot.rs b/src/ir/dot.rs
new file mode 100644
index 00000000..b7a117bb
--- /dev/null
+++ b/src/ir/dot.rs
@@ -0,0 +1,53 @@
+//! Generating Graphviz `dot` files from our IR.
+
+use std::fs::File;
+use std::io::{self, Write};
+use std::path::Path;
+use super::context::{BindgenContext, ItemId};
+use super::traversal::Trace;
+
+/// A trait for anything that can write attributes as `<table>` rows to a dot
+/// file.
+pub trait DotAttributes {
+ /// Write this thing's attributes to the given output. Each attribute must
+ /// be its own `<tr>...</tr>`.
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write;
+}
+
+/// Write a graphviz dot file containing our IR.
+pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
+ where P: AsRef<Path>
+{
+ let file = try!(File::create(path));
+ let mut dot_file = io::BufWriter::new(file);
+ try!(writeln!(&mut dot_file, "digraph {{"));
+
+ let mut err: Option<io::Result<_>> = None;
+
+ for (id, item) in ctx.items() {
+ try!(writeln!(&mut dot_file,
+ r#"{} [fontname="courier", label=< <table border="0">"#,
+ id.as_usize()));
+ try!(item.dot_attributes(ctx, &mut dot_file));
+ try!(writeln!(&mut dot_file, r#"</table> >];"#));
+
+ item.trace(ctx, &mut |sub_id: ItemId, _edge_kind| {
+ if err.is_some() {
+ return;
+ }
+
+ match writeln!(&mut dot_file, "{} -> {};", id.as_usize(), sub_id.as_usize()) {
+ Ok(_) => {},
+ Err(e) => err = Some(Err(e)),
+ }
+ }, &());
+
+ if let Some(err) = err {
+ return err;
+ }
+ }
+
+ try!(writeln!(&mut dot_file, "}}"));
+ Ok(())
+}
diff --git a/src/ir/function.rs b/src/ir/function.rs
index 22b9c9b0..5864bbf8 100644
--- a/src/ir/function.rs
+++ b/src/ir/function.rs
@@ -1,12 +1,14 @@
//! Intermediate representation for C/C++ functions and methods.
use super::context::{BindgenContext, ItemId};
+use super::dot::DotAttributes;
use super::item::Item;
-use super::traversal::{Trace, Tracer};
+use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::TypeKind;
use clang;
use clang_sys::CXCallingConv;
use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
+use std::io;
use syntax::abi;
/// A function declaration, with a signature, arguments, and argument names.
@@ -59,6 +61,18 @@ impl Function {
}
}
+impl DotAttributes for Function {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ if let Some(ref mangled) = self.mangled_name {
+ try!(writeln!(out, "<tr><td>mangled name</td><td>{}</td></tr>", mangled));
+ }
+
+ Ok(())
+ }
+}
+
/// A function signature.
#[derive(Debug)]
pub struct FunctionSig {
@@ -91,7 +105,13 @@ fn get_abi(cc: CXCallingConv) -> Option<abi::Abi> {
}
/// Get the mangled name for the cursor's referent.
-pub fn cursor_mangling(cursor: &clang::Cursor) -> Option<String> {
+pub fn cursor_mangling(ctx: &BindgenContext,
+ cursor: &clang::Cursor)
+ -> Option<String> {
+ if !ctx.options().enable_mangling {
+ return None;
+ }
+
// We early return here because libclang may crash in some case
// if we pass in a variable inside a partial specialized template.
// See servo/rust-bindgen#67, and servo/rust-bindgen#462.
@@ -304,7 +324,7 @@ impl ClangSubItemParser for Function {
let name = cursor.spelling();
assert!(!name.is_empty(), "Empty function name?");
- let mut mangled_name = cursor_mangling(&cursor);
+ let mut mangled_name = cursor_mangling(context, &cursor);
if mangled_name.as_ref() == Some(&name) {
mangled_name = None;
}
@@ -322,10 +342,10 @@ impl Trace for FunctionSig {
fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
where T: Tracer,
{
- tracer.visit(self.return_type());
+ tracer.visit_kind(self.return_type(), EdgeKind::FunctionReturn);
for &(_, ty) in self.argument_types() {
- tracer.visit(ty);
+ tracer.visit_kind(ty, EdgeKind::FunctionParameter);
}
}
}
diff --git a/src/ir/item.rs b/src/ir/item.rs
index 8f16a96f..21b27f07 100644
--- a/src/ir/item.rs
+++ b/src/ir/item.rs
@@ -3,10 +3,11 @@
use super::annotations::Annotations;
use super::context::{BindgenContext, ItemId, PartialType};
use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use super::dot::{DotAttributes};
use super::function::Function;
use super::item_kind::ItemKind;
use super::module::Module;
-use super::traversal::{Trace, Tracer};
+use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::{TemplateDeclaration, Type, TypeKind};
use clang;
use clang_sys;
@@ -15,6 +16,7 @@ use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
use std::fmt::Write;
use std::iter;
+use std::io;
/// A trait to get the canonical name from an item.
///
@@ -203,7 +205,7 @@ impl Trace for Item {
tracer.visit(fun.signature());
}
ItemKind::Var(ref var) => {
- tracer.visit(var.ty());
+ tracer.visit_kind(var.ty(), EdgeKind::VarType);
}
ItemKind::Module(_) => {
// Module -> children edges are "weak", and we do not want to
@@ -372,20 +374,6 @@ impl Item {
self.id
}
- /// Get this `Item`'s dot attributes.
- pub fn dot_attributes(&self, ctx: &BindgenContext) -> String {
- format!("[fontname=\"courier\", label=< \
- <table border=\"0\"> \
- <tr><td>ItemId({})</td></tr> \
- <tr><td>name</td><td>{}</td></tr> \
- <tr><td>kind</td><td>{}</td></tr> \
- </table> \
- >]",
- self.id.as_usize(),
- self.name(ctx).get(),
- self.kind.kind_name())
- }
-
/// Get this `Item`'s parent's identifier.
///
/// For the root module, the parent's ID is its own ID.
@@ -928,23 +916,36 @@ impl Item {
/// A set of items.
pub type ItemSet = BTreeSet<ItemId>;
+impl DotAttributes for Item {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ try!(writeln!(out,
+ "<tr><td>{:?}</td></tr>
+ <tr><td>name</td><td>{}</td></tr>",
+ self.id,
+ self.name(ctx).get()));
+ self.kind.dot_attributes(ctx, out)
+ }
+}
+
impl TemplateDeclaration for ItemId {
- fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ fn self_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
ctx.resolve_item_fallible(*self)
- .and_then(|item| item.template_params(ctx))
+ .and_then(|item| item.self_template_params(ctx))
}
}
impl TemplateDeclaration for Item {
- fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
- self.kind.template_params(ctx)
+ fn self_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ self.kind.self_template_params(ctx)
}
}
impl TemplateDeclaration for ItemKind {
- fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ fn self_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
match *self {
- ItemKind::Type(ref ty) => ty.template_params(ctx),
+ ItemKind::Type(ref ty) => ty.self_template_params(ctx),
// If we start emitting bindings to explicitly instantiated
// functions, then we'll need to check ItemKind::Function for
// template params.
diff --git a/src/ir/item_kind.rs b/src/ir/item_kind.rs
index 3ff06731..6dfd6764 100644
--- a/src/ir/item_kind.rs
+++ b/src/ir/item_kind.rs
@@ -1,5 +1,8 @@
//! Different variants of an `Item` in our intermediate representation.
+use std::io;
+use super::context::BindgenContext;
+use super::dot::DotAttributes;
use super::function::Function;
use super::module::Module;
use super::ty::Type;
@@ -39,7 +42,7 @@ impl ItemKind {
ItemKind::Type(..) => "Type",
ItemKind::Function(..) => "Function",
ItemKind::Var(..) => "Var"
- }
+ }
}
/// Is this a module?
@@ -122,3 +125,18 @@ impl ItemKind {
self.as_var().expect("Not a var")
}
}
+
+impl DotAttributes for ItemKind {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ try!(writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", self.kind_name()));
+
+ match *self {
+ ItemKind::Module(ref module) => module.dot_attributes(ctx, out),
+ ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out),
+ ItemKind::Function(ref func) => func.dot_attributes(ctx, out),
+ ItemKind::Var(ref var) => var.dot_attributes(ctx, out),
+ }
+ }
+}
diff --git a/src/ir/layout.rs b/src/ir/layout.rs
index 38379261..f21a501c 100644
--- a/src/ir/layout.rs
+++ b/src/ir/layout.rs
@@ -38,7 +38,7 @@ impl Layout {
/// alignment possible.
pub fn for_size(size: usize) -> Self {
let mut next_align = 2;
- while size % next_align == 0 && next_align <= 2 * mem::size_of::<*mut ()>() {
+ while size % next_align == 0 && next_align <= mem::size_of::<*mut ()>() {
next_align *= 2;
}
Layout {
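This drops the alignment cap in `for_size` from twice the pointer size to the pointer size itself, so an opaque blob is never assumed to be more than pointer-aligned. A standalone re-derivation of the loop for illustration, assuming the constructor (not shown in the hunk) uses `next_align / 2` as the final alignment:

```rust
use std::mem;

// Largest power-of-two alignment, up to pointer size, that evenly divides `size`.
fn align_for_size(size: usize) -> usize {
    let mut next_align = 2;
    while size % next_align == 0 && next_align <= mem::size_of::<*mut ()>() {
        next_align *= 2;
    }
    next_align / 2
}

fn main() {
    // On a 64-bit target a 16-byte blob now gets 8-byte alignment (the old
    // `2 * size_of::<*mut ()>()` cap would have produced 16).
    assert_eq!(align_for_size(16), 8);
    assert_eq!(align_for_size(6), 2);
    assert_eq!(align_for_size(5), 1);
}
```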
diff --git a/src/ir/mod.rs b/src/ir/mod.rs
index e624e46b..ba549c51 100644
--- a/src/ir/mod.rs
+++ b/src/ir/mod.rs
@@ -7,6 +7,7 @@ pub mod annotations;
pub mod comp;
pub mod context;
pub mod derive;
+pub mod dot;
pub mod enum_ty;
pub mod function;
pub mod int;
diff --git a/src/ir/module.rs b/src/ir/module.rs
index 6b6c535b..6787e3f9 100644
--- a/src/ir/module.rs
+++ b/src/ir/module.rs
@@ -1,6 +1,8 @@
//! Intermediate representation for modules (AKA C++ namespaces).
+use std::io;
use super::context::{BindgenContext, ItemId};
+use super::dot::DotAttributes;
use clang;
use parse::{ClangSubItemParser, ParseError, ParseResult};
use parse_one;
@@ -56,6 +58,16 @@ impl Module {
}
}
+impl DotAttributes for Module {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ writeln!(out,
+ "<tr><td>ModuleKind</td><td>{:?}</td></tr>",
+ self.kind)
+ }
+}
+
impl ClangSubItemParser for Module {
fn parse(cursor: clang::Cursor,
ctx: &mut BindgenContext)
diff --git a/src/ir/named.rs b/src/ir/named.rs
index 7a6c597c..3c676662 100644
--- a/src/ir/named.rs
+++ b/src/ir/named.rs
@@ -76,11 +76,50 @@
//! fixed-point.
//!
//! We use the "monotone framework" for this fix-point analysis where our
-//! lattice is the powerset of the template parameters that appear in the input
-//! C++ header, our join function is set union, and we use the
-//! `ir::traversal::Trace` trait to implement the work-list optimization so we
-//! don't have to revisit every node in the graph when for every iteration
-//! towards the fix-point.
+//! lattice is the mapping from each IR item to the powerset of the template
+//! parameters that appear in the input C++ header, our join function is set
+//! union, and we use the `ir::traversal::Trace` trait to implement the
+//! work-list optimization so we don't have to revisit every node in the graph
+//! for every iteration towards the fix-point.
+//!
+//! A lattice is a set with a partial ordering between elements, where there is
+//! a single least upper bound and a single greatest lower bound for every
+//! subset. We are dealing with finite lattices, which means that they have a
+//! finite number of elements, and it follows that there exists a single top and
+//! a single bottom member of the lattice. For example, the power set of a
+//! finite set forms a finite lattice where partial ordering is defined by set
+//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite
+//! lattice constructed from the set {0,1,2}:
+//!
+//! ```text
+//!             .----- Top = {0,1,2} -----.
+//!            /               |           \
+//!           /                |            \
+//!          /                 |             \
+//!     {0,1} ----------.    {0,2}  .-------- {1,2}
+//!       |              \  /     \/            |
+//!       |               \/      /\            |
+//!       |               /\     /  \           |
+//!       |              /  \   /    \          |
+//!      {0} -----------'    {1}      `------- {2}
+//!        \                  |                /
+//!         \                 |               /
+//!          \                |              /
+//!           `-------- Bottom = {} --------'
+//! ```
+//!
+//! A monotone function `f` is a function where if `x <= y`, then it holds that
+//! `f(x) <= f(y)`. It should be clear that running a monotone function to a
+//! fix-point on a finite lattice will always terminate: `f` can only "move"
+//! along the lattice in a single direction, and therefore can only either find
+//! a fix-point in the middle of the lattice or continue to the top or bottom
+//! depending on whether it is ascending or descending the lattice, respectively.
+//!
+//! For our analysis, we are collecting the set of template parameters used by
+//! any given IR node. The set of template parameters appearing in the program
+//! is finite. Our lattice is their powerset. We start at the bottom element,
+//! the empty set. Our analysis only adds members to the set of used template
+//! parameters, never removes them, so it is monotone, and therefore iteration
+//! to a fix-point will terminate.
//!
//! For a deeper introduction to the general form of this kind of analysis, see
//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa].
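The fix-point machinery described in the comment above can be pictured with a toy version of the analysis: every node starts at the bottom of the lattice (the empty set) and its set only ever grows, so iteration terminates. This is an illustrative sketch with integer ids, not bindgen's `MonotoneFramework` API:

```rust
use std::collections::{HashMap, HashSet};

// `self_uses` holds each node's directly used parameters; `edges` holds the
// graph edges we follow. Every node must appear as a key of `self_uses`.
fn fix_point(self_uses: &HashMap<u32, HashSet<u32>>,
             edges: &HashMap<u32, Vec<u32>>)
             -> HashMap<u32, HashSet<u32>> {
    // Bottom of the lattice: every node maps to the empty set.
    let mut used: HashMap<u32, HashSet<u32>> =
        self_uses.keys().map(|&n| (n, HashSet::new())).collect();

    let empty = Vec::new();
    let mut changed = true;
    while changed {
        changed = false;
        for &node in self_uses.keys() {
            // Join: a node's own uses, unioned with every referent's uses.
            let mut new_set = self_uses[&node].clone();
            for succ in edges.get(&node).unwrap_or(&empty) {
                new_set.extend(&used[succ]);
            }
            // The sets only ever grow (the function is monotone), so a size
            // change is the same as a set change.
            if new_set.len() != used[&node].len() {
                used.insert(node, new_set);
                changed = true;
            }
        }
    }
    used
}

fn main() {
    // Node 2 uses parameter 7 directly, node 1 references node 2, and node 0
    // references node 1; at the fix-point all three have used-set {7}.
    let mut self_uses = HashMap::new();
    self_uses.insert(0, HashSet::new());
    self_uses.insert(1, HashSet::new());
    self_uses.insert(2, [7].iter().cloned().collect());

    let mut edges = HashMap::new();
    edges.insert(0, vec![1]);
    edges.insert(1, vec![2]);

    let used = fix_point(&self_uses, &edges);
    assert!(used[&0].contains(&7) && used[&1].contains(&7) && used[&2].contains(&7));
}
```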
@@ -173,42 +212,123 @@ pub fn analyze<Analysis>(extra: Analysis::Extra) -> Analysis::Output
analysis.into()
}
-/// An analysis that finds the set of template parameters that actually end up
-/// used in our generated bindings.
+/// An analysis that finds, for each IR item, the set of template parameters
+/// it uses.
+///
+/// We use the following monotone constraint function:
+///
+/// ```ignore
+/// template_param_usage(v) =
+/// self_template_param_usage(v) union
+/// template_param_usage(w_0) union
+/// template_param_usage(w_1) union
+/// ...
+/// template_param_usage(w_n)
+/// ```
+///
+/// Where `v` has direct edges in the IR graph to each of `w_0`, `w_1`,
+/// ..., `w_n` (for example, if `v` were a struct type and `w_0` and `w_1`
+/// were the types of two of `v`'s fields). We ignore certain edges, such
+/// as edges from a template declaration to its template parameters'
+/// definitions for this analysis. If we didn't, then we would mistakenly
+/// determine that every template parameter is always used.
+///
+/// Finally, `self_template_param_usage` is defined with the following cases:
+///
+/// * If `T` is a template parameter:
+///
+/// ```ignore
+/// self_template_param_usage(T) = { T }
+/// ```
+///
+/// * If `inst` is a template instantiation, `inst.args` are the template
+/// instantiation's template arguments, and `inst.decl` is the template
+/// declaration being instantiated:
+///
+/// ```ignore
+/// self_template_param_usage(inst) =
+/// { T: for T in inst.args, if T in template_param_usage(inst.decl) }
+/// ```
+///
+/// * And for all other IR items, the result is the empty set:
+///
+/// ```ignore
+/// self_template_param_usage(_) = { }
+/// ```
#[derive(Debug, Clone)]
pub struct UsedTemplateParameters<'a> {
ctx: &'a BindgenContext<'a>,
- used: ItemSet,
+ used: HashMap<ItemId, ItemSet>,
dependencies: HashMap<ItemId, Vec<ItemId>>,
}
+impl<'a> UsedTemplateParameters<'a> {
+ fn consider_edge(kind: EdgeKind) -> bool {
+ match kind {
+ // For each of these kinds of edges, if the referent uses a template
+ // parameter, then it should be considered that the origin of the
+ // edge also uses the template parameter.
+ EdgeKind::TemplateArgument |
+ EdgeKind::BaseMember |
+ EdgeKind::Field |
+ EdgeKind::InnerType |
+ EdgeKind::InnerVar |
+ EdgeKind::Constructor |
+ EdgeKind::VarType |
+ EdgeKind::TypeReference => true,
+
+ // We can't emit machine code for new instantiations of function
+ // templates and class templates' methods (and don't detect explicit
+ // instantiations) so we must ignore template parameters that are
+ // only used by functions.
+ EdgeKind::Method |
+ EdgeKind::FunctionReturn |
+ EdgeKind::FunctionParameter => false,
+
+ // If we considered these edges, we would end up mistakenly claiming
+ // that every template parameter is always used.
+ EdgeKind::TemplateDeclaration |
+ EdgeKind::TemplateParameterDefinition => false,
+
+ // Since we have to be careful about which edges we consider for
+ // this analysis to be correct, we ignore generic edges. We also
+ // avoid a `_` wild card to force authors of new edge kinds to
+ // determine whether they need to be considered by this analysis.
+ EdgeKind::Generic => false,
+ }
+ }
+}
+
impl<'a> MonotoneFramework for UsedTemplateParameters<'a> {
type Node = ItemId;
type Extra = &'a BindgenContext<'a>;
- type Output = ItemSet;
+ type Output = HashMap<ItemId, ItemSet>;
fn new(ctx: &'a BindgenContext<'a>) -> UsedTemplateParameters<'a> {
+ let mut used = HashMap::new();
let mut dependencies = HashMap::new();
for item in ctx.whitelisted_items() {
+ dependencies.entry(item).or_insert(vec![]);
+ used.insert(item, ItemSet::new());
+
{
// We reverse our natural IR graph edges to find dependencies
// between nodes.
- let mut add_reverse_edge = |sub_item, _| {
+ item.trace(ctx, &mut |sub_item, _| {
dependencies.entry(sub_item).or_insert(vec![]).push(item);
- };
- item.trace(ctx, &mut add_reverse_edge, &());
+ }, &());
}
// Additionally, whether a template instantiation's template
// arguments are used depends on whether the template declaration's
// generic template parameters are used.
- ctx.resolve_item_fallible(item)
- .and_then(|item| item.as_type())
+ ctx.resolve_item(item)
+ .as_type()
.map(|ty| match ty.kind() {
&TypeKind::TemplateInstantiation(decl, ref args) => {
let decl = ctx.resolve_type(decl);
- let params = decl.template_params(ctx)
+ let params = decl.self_template_params(ctx)
.expect("a template instantiation's referenced \
template declaration should have template \
parameters");
@@ -222,57 +342,65 @@ impl<'a> MonotoneFramework for UsedTemplateParameters<'a> {
UsedTemplateParameters {
ctx: ctx,
- used: ItemSet::new(),
+ used: used,
dependencies: dependencies,
}
}
- fn initial_worklist(&self) -> Vec<Self::Node> {
+ fn initial_worklist(&self) -> Vec<ItemId> {
self.ctx.whitelisted_items().collect()
}
- fn constrain(&mut self, item: ItemId) -> bool {
- let original_size = self.used.len();
+ fn constrain(&mut self, id: ItemId) -> bool {
+ let original_len = self.used[&id].len();
- item.trace(self.ctx, &mut |item, edge_kind| {
- if edge_kind == EdgeKind::TemplateParameterDefinition {
- // The definition of a template parameter is not considered a
- // use of said template parameter. Ignore this edge.
- return;
+ // First, add this item's self template parameter usage.
+ let item = self.ctx.resolve_item(id);
+ let ty_kind = item.as_type().map(|ty| ty.kind());
+ match ty_kind {
+ Some(&TypeKind::Named) => {
+ // This is a trivial use of the template type parameter.
+ self.used.get_mut(&id).unwrap().insert(id);
}
-
- let ty_kind = self.ctx.resolve_item(item)
- .as_type()
- .map(|ty| ty.kind());
-
- match ty_kind {
- Some(&TypeKind::Named) => {
- // This is a "trivial" use of the template type parameter.
- self.used.insert(item);
- },
- Some(&TypeKind::TemplateInstantiation(decl, ref args)) => {
- // A template instantiation's concrete template argument is
- // only used if the template declaration uses the
- // corresponding template parameter.
- let decl = self.ctx.resolve_type(decl);
- let params = decl.template_params(self.ctx)
- .expect("a template instantiation's referenced \
- template declaration should have template \
- parameters");
- for (arg, param) in args.iter().zip(params.iter()) {
- if self.used.contains(param) {
- if self.ctx.resolve_item(*arg).is_named() {
- self.used.insert(*arg);
- }
+ Some(&TypeKind::TemplateInstantiation(decl, ref args)) => {
+ // A template instantiation's concrete template argument is
+ // only used if the template declaration uses the
+ // corresponding template parameter.
+ let params = decl.self_template_params(self.ctx)
+ .expect("a template instantiation's referenced \
+ template declaration should have template \
+ parameters");
+ for (arg, param) in args.iter().zip(params.iter()) {
+ if self.used[&decl].contains(param) {
+ if self.ctx.resolve_item(*arg).is_named() {
+ self.used.get_mut(&id).unwrap().insert(*arg);
}
}
- },
- _ => return,
+ }
}
+ _ => {}
+ }
+
+ // Second, add the union of each of its referent item's template
+ // parameter usage.
+ item.trace(self.ctx, &mut |sub_id, edge_kind| {
+ if sub_id == id || !Self::consider_edge(edge_kind) {
+ return;
+ }
+
+ // This clone is unfortunate because we are potentially thrashing
+ // malloc. We could investigate replacing the ItemSet values with
+ // Rc<RefCell<ItemSet>> to make the borrow checker happy, but it
+ // isn't clear that the added indirection wouldn't outweigh the cost
+ // of malloc'ing a new ItemSet here. Ideally, `HashMap` would have a
+ // `split_entries` method analogous to `slice::split_at_mut`...
+ let to_add = self.used[&sub_id].clone();
+ self.used.get_mut(&id).unwrap().extend(to_add);
}, &());
- let new_size = self.used.len();
- new_size != original_size
+ let new_len = self.used[&id].len();
+ assert!(new_len >= original_len);
+ new_len != original_len
}
fn each_depending_on<F>(&self, item: ItemId, mut f: F)
@@ -286,8 +414,8 @@ impl<'a> MonotoneFramework for UsedTemplateParameters<'a> {
}
}
-impl<'a> From<UsedTemplateParameters<'a>> for ItemSet {
- fn from(used_templ_params: UsedTemplateParameters) -> ItemSet {
+impl<'a> From<UsedTemplateParameters<'a>> for HashMap<ItemId, ItemSet> {
+ fn from(used_templ_params: UsedTemplateParameters) -> Self {
used_templ_params.used
}
}
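The instantiation rule in `constrain` above (a template argument counts as used only when the declaration uses the corresponding declared parameter) can be restated in isolation as a toy function with string stand-ins for item ids:

```rust
// `args` are the instantiation's arguments, `params` the declaration's
// parameters (position-matched), and `used_params` the parameters the
// declaration actually uses.
fn used_args<'a>(args: &[&'a str],
                 params: &[&'a str],
                 used_params: &[&'a str])
                 -> Vec<&'a str> {
    args.iter()
        .zip(params.iter())
        .filter(|&(_, p)| used_params.contains(p))
        .map(|(&a, _)| a)
        .collect()
}

fn main() {
    // If Foo<T, U> only uses T (say U only shows up in a method signature),
    // then of the instantiation Foo<int, char> only `int` is considered used.
    assert_eq!(used_args(&["int", "char"], &["T", "U"], &["T"]), vec!["int"]);
}
```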
diff --git a/src/ir/objc.rs b/src/ir/objc.rs
index b3c3688b..963c8e20 100644
--- a/src/ir/objc.rs
+++ b/src/ir/objc.rs
@@ -4,17 +4,24 @@ use super::context::BindgenContext;
use super::function::FunctionSig;
use clang;
use clang_sys::CXChildVisit_Continue;
+use clang_sys::CXCursor_ObjCCategoryDecl;
+use clang_sys::CXCursor_ObjCClassRef;
use clang_sys::CXCursor_ObjCInstanceMethodDecl;
+use clang_sys::CXCursor_ObjCProtocolDecl;
/// Objective C interface as used in TypeKind
///
-/// Also protocols are parsed as this type
+/// Also protocols and categories are parsed as this type
#[derive(Debug)]
pub struct ObjCInterface {
/// The name
/// like, NSObject
name: String,
+ category: Option<String>,
+
+ is_protocol: bool,
+
/// List of the methods defined in this interfae
methods: Vec<ObjCInstanceMethod>,
}
@@ -37,6 +44,8 @@ impl ObjCInterface {
fn new(name: &str) -> ObjCInterface {
ObjCInterface {
name: name.to_owned(),
+ category: None,
+ is_protocol: false,
methods: Vec::new(),
}
}
@@ -47,6 +56,21 @@ impl ObjCInterface {
self.name.as_ref()
}
+ /// Formats the name for Rust.
+ /// It can be just NSObject, but with categories it might be NSObject_NSCoderMethods,
+ /// and protocols are named like protocol_NSObject.
+ pub fn rust_name(&self) -> String {
+ if let Some(ref cat) = self.category {
+ format!("{}_{}", self.name(), cat)
+ } else {
+ if self.is_protocol {
+ format!("protocol_{}", self.name())
+ } else {
+ self.name().to_owned()
+ }
+ }
+ }
+
/// List of the methods defined in this interfae
pub fn methods(&self) -> &Vec<ObjCInstanceMethod> {
&self.methods
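The naming rule `rust_name` introduces above, restated as a standalone function over the same three inputs (`name`, `category`, `is_protocol`); a hypothetical helper, not part of the crate:

```rust
fn rust_name(name: &str, category: Option<&str>, is_protocol: bool) -> String {
    match (category, is_protocol) {
        // Category extensions become Interface_Category.
        (Some(cat), _) => format!("{}_{}", name, cat),
        // Protocols get a protocol_ prefix.
        (None, true) => format!("protocol_{}", name),
        // Plain interfaces keep their name.
        (None, false) => name.to_owned(),
    }
}

fn main() {
    assert_eq!(rust_name("NSObject", None, false), "NSObject");
    assert_eq!(rust_name("NSObject", Some("NSCoderMethods"), false),
               "NSObject_NSCoderMethods");
    assert_eq!(rust_name("NSObject", None, true), "protocol_NSObject");
}
```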
@@ -59,12 +83,24 @@ impl ObjCInterface {
let name = cursor.spelling();
let mut interface = Self::new(&name);
- cursor.visit(|cursor| {
- match cursor.kind() {
+ if cursor.kind() == CXCursor_ObjCProtocolDecl {
+ interface.is_protocol = true;
+ }
+
+ cursor.visit(|c| {
+ match c.kind() {
+ CXCursor_ObjCClassRef => {
+ if cursor.kind() == CXCursor_ObjCCategoryDecl {
+ // We are actually a category extension, and we found the reference
+ // to the original interface, so name this interface appropriately
+ interface.name = c.spelling();
+ interface.category = Some(cursor.spelling());
+ }
+ }
CXCursor_ObjCInstanceMethodDecl => {
- let name = cursor.spelling();
+ let name = c.spelling();
let signature =
- FunctionSig::from_ty(&cursor.cur_type(), &cursor, ctx)
+ FunctionSig::from_ty(&c.cur_type(), &c, ctx)
.expect("Invalid function sig");
let method = ObjCInstanceMethod::new(&name, signature);
diff --git a/src/ir/traversal.rs b/src/ir/traversal.rs
index 8c5e32cf..30772aad 100644
--- a/src/ir/traversal.rs
+++ b/src/ir/traversal.rs
@@ -44,22 +44,137 @@ impl Into<ItemId> for Edge {
}
/// The kind of edge reference. This is useful when we wish to only consider
-/// certain kinds of edges for a particular traversal.
+/// certain kinds of edges for a particular traversal or analysis.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum EdgeKind {
/// A generic, catch-all edge.
Generic,
/// An edge from a template declaration, to the definition of a named type
- /// parameter. For example, the edge Foo -> T in the following snippet:
+ /// parameter. For example, the edge from `Foo<T>` to `T` in the following
+ /// snippet:
///
/// ```C++
/// template<typename T>
+ /// class Foo { };
+ /// ```
+ TemplateParameterDefinition,
+
+ /// An edge from a template instantiation to the template declaration that
+ /// is being instantiated. For example, the edge from `Foo<int>` to
+ /// `Foo<T>`:
+ ///
+ /// ```C++
+ /// template<typename T>
+ /// class Foo { };
+ ///
+ /// using Bar = Foo<int>;
+ /// ```
+ TemplateDeclaration,
+
+ /// An edge from a template instantiation to its template argument. For
+ /// example, `Foo<Bar>` to `Bar`:
+ ///
+ /// ```C++
+ /// template<typename T>
+ /// class Foo { };
+ ///
+ /// class Bar { };
+ ///
+ /// using FooBar = Foo<Bar>;
+ /// ```
+ TemplateArgument,
+
+ /// An edge from a compound type to one of its base member types. For
+ /// example, the edge from `Bar` to `Foo`:
+ ///
+ /// ```C++
+ /// class Foo { };
+ ///
+ /// class Bar : public Foo { };
+ /// ```
+ BaseMember,
+
+ /// An edge from a compound type to the type of one of its fields. For
+ /// example, the edge from `Foo` to `int`:
+ ///
+ /// ```C++
/// class Foo {
/// int x;
/// };
/// ```
- TemplateParameterDefinition,
+ Field,
+
+ /// An edge from a class or struct type to an inner type member. For
+ /// example, the edge from `Foo` to `Foo::Bar` here:
+ ///
+ /// ```C++
+ /// class Foo {
+ /// struct Bar { };
+ /// };
+ /// ```
+ InnerType,
+
+ /// An edge from a class or struct type to an inner static variable. For
+ /// example, the edge from `Foo` to `Foo::BAR` here:
+ ///
+ /// ```C++
+ /// class Foo {
+ /// static const char* BAR;
+ /// };
+ /// ```
+ InnerVar,
+
+ /// An edge from a class or struct type to one of its method functions. For
+ /// example, the edge from `Foo` to `Foo::bar`:
+ ///
+ /// ```C++
+ /// class Foo {
+ /// bool bar(int x, int y);
+ /// };
+ /// ```
+ Method,
+
+ /// An edge from a class or struct type to one of its constructor
+ /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`:
+ ///
+ /// ```C++
+ /// class Foo {
+ /// int my_x;
+ /// int my_y;
+ ///
+ /// public:
+ /// Foo(int x, int y);
+ /// };
+ /// ```
+ Constructor,
+
+ /// An edge from a function declaration to its return type. For example, the
+ /// edge from `foo` to `int`:
+ ///
+ /// ```C++
+ /// int foo(char* string);
+ /// ```
+ FunctionReturn,
+
+ /// An edge from a function declaration to one of its parameter types. For
+ /// example, the edge from `foo` to `char*`:
+ ///
+ /// ```C++
+ /// int foo(char* string);
+ /// ```
+ FunctionParameter,
+
+ /// An edge from a static variable to its type. For example, the edge from
+ /// `FOO` to `const char*`:
+ ///
+ /// ```C++
+ /// static const char* FOO;
+ /// ```
+ VarType,
+
+ /// An edge from a non-templated alias or typedef to the referenced type.
+ TypeReference,
}
/// A predicate to allow visiting only sub-sets of the whole IR graph by
@@ -211,7 +326,8 @@ impl<F> Tracer for F
}
/// Trace all of the outgoing edges to other items. Implementations should call
-/// `tracer.visit(edge)` for each of their outgoing edges.
+/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)`
+/// for each of their outgoing edges.
pub trait Trace {
/// If a particular type needs extra information beyond what it has in
/// `self` and `context` to find its referenced items, its implementation
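Because closures over `(ItemId, EdgeKind)` implement `Tracer` (see the `impl<F> Tracer for F` context above), analyses can filter edges by kind while tracing. A self-contained toy of that shape, with stand-in types rather than bindgen's:

```rust
// Stand-ins for ItemId and for the subset of edge kinds used here.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum EdgeKind { Field, Method }

// A pretend node that reports a field edge to item 1 and a method edge to
// item 2, mirroring how `Trace` impls now call `visit_kind`.
fn trace_edges<F: FnMut(u32, EdgeKind)>(mut visit: F) {
    visit(1, EdgeKind::Field);
    visit(2, EdgeKind::Method);
}

fn main() {
    let mut fields = Vec::new();
    trace_edges(|id, kind| {
        if kind == EdgeKind::Field {
            fields.push(id);
        }
    });
    assert_eq!(fields, vec![1]);
}
```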
diff --git a/src/ir/ty.rs b/src/ir/ty.rs
index c3ec4039..ce42a171 100644
--- a/src/ir/ty.rs
+++ b/src/ir/ty.rs
@@ -3,27 +3,63 @@
use super::comp::CompInfo;
use super::context::{BindgenContext, ItemId};
use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use super::dot::DotAttributes;
use super::enum_ty::Enum;
use super::function::FunctionSig;
use super::int::IntKind;
-use super::item::Item;
+use super::item::{Item, ItemAncestors};
use super::layout::Layout;
use super::objc::ObjCInterface;
-use super::traversal::{Trace, Tracer};
+use super::traversal::{EdgeKind, Trace, Tracer};
use clang::{self, Cursor};
use parse::{ClangItemParser, ParseError, ParseResult};
+use std::io;
use std::mem;
-/// Template declaration related methods.
+/// Template declaration (and such a declaration's template parameters) related
+/// methods.
+///
+/// Consider this example:
+///
+/// ```c++
+/// template <typename T, typename U>
+/// class Foo {
+/// template <typename V>
+/// using Bar = V*;
+///
+/// class Inner {
+/// T x;
+/// U y;
+/// Bar<int> z;
+/// };
+/// };
+///
+/// class Qux {
+/// int y;
+/// };
+/// ```
+///
+/// The following table depicts the results of each trait method when invoked on
+/// `Foo`, `Bar`, and `Qux`.
+///
+/// +-------+----------------------+--------------------------+---------------------+
+/// | Decl. | self_template_params | num_self_template_params | all_template_params |
+/// +-------+----------------------+--------------------------+---------------------+
+/// | Foo   | Some([T, U])         | Some(2)                  | Some([T, U])        |
+/// | Bar   | Some([V])            | Some(1)                  | Some([T, U, V])     |
+/// | Inner | None                 | None                     | Some([T, U])        |
+/// | Qux   | None                 | None                     | None                |
+/// +-------+----------------------+--------------------------+---------------------+
pub trait TemplateDeclaration {
/// Get the set of `ItemId`s that make up this template declaration's free
/// template parameters.
///
/// Note that these might *not* all be named types: C++ allows
- /// constant-value template parameters. Of course, Rust does not allow
- /// generic parameters to be anything but types, so we must treat them as
- /// opaque, and avoid instantiating them.
- fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>>;
+ /// constant-value template parameters as well as template-template
+ /// parameters. Of course, Rust does not allow generic parameters to be
+ /// anything but types, so we must treat them as opaque, and avoid
+ /// instantiating them.
+ fn self_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>>;
/// Get the number of free template parameters this template declaration
/// has.
@@ -32,8 +68,38 @@ pub trait TemplateDeclaration {
/// `template_params` returns `None`. This is useful when we only have
/// partial information about the template declaration, such as when we are
/// in the middle of parsing it.
- fn num_template_params(&self, ctx: &BindgenContext) -> Option<usize> {
- self.template_params(ctx).map(|params| params.len())
+ fn num_self_template_params(&self, ctx: &BindgenContext) -> Option<usize> {
+ self.self_template_params(ctx).map(|params| params.len())
+ }
+
+ /// Get the complete set of template parameters that can affect this
+ /// declaration.
+ ///
+ /// Note that this item doesn't need to be a template declaration itself for
+ /// `Some` to be returned here (in contrast to `self_template_params`). If
+ /// this item is a member of a template declaration, then the parent's
+ /// template parameters are included here.
+ ///
+ /// In the example above, `Inner` depends on both of the `T` and `U` type
+ /// parameters, even though it is not itself a template declaration and
+ /// therefore has no type parameters itself. Perhaps it helps to think about
+ /// how we would fully reference such a member type in C++:
+ /// `Foo<int,char>::Inner`. `Foo` *must* be instantiated with template
+ /// arguments before we can gain access to the `Inner` member type.
+ fn all_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>>
+ where Self: ItemAncestors
+ {
+ let each_self_params: Vec<Vec<_>> = self.ancestors(ctx)
+ .filter_map(|id| id.self_template_params(ctx))
+ .collect();
+ if each_self_params.is_empty() {
+ None
+ } else {
+ Some(each_self_params.into_iter()
+ .rev()
+ .flat_map(|params| params)
+ .collect())
+ }
}
}
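A toy restatement of `all_template_params` as defined above: walk the ancestor chain from innermost to outermost, keep each ancestor's own parameters, then reverse so the outermost declaration's parameters come first. Hypothetical free function over plain strings, mirroring the table in the trait docs:

```rust
// Each element is one ancestor's `self_template_params`, innermost first.
fn all_params(ancestors: Vec<Option<Vec<&'static str>>>) -> Option<Vec<&'static str>> {
    let each: Vec<Vec<_>> = ancestors.into_iter().filter_map(|p| p).collect();
    if each.is_empty() {
        None
    } else {
        Some(each.into_iter().rev().flat_map(|p| p).collect())
    }
}

fn main() {
    // `Inner`: no own params, parent `Foo<T, U>` -> Some([T, U]).
    assert_eq!(all_params(vec![None, Some(vec!["T", "U"])]),
               Some(vec!["T", "U"]));
    // `Bar<V>` nested in `Foo<T, U>` -> Some([T, U, V]).
    assert_eq!(all_params(vec![Some(vec!["V"]), Some(vec!["T", "U"])]),
               Some(vec!["T", "U", "V"]));
    // `Qux`: no templated ancestors at all -> None.
    assert_eq!(all_params(vec![None]), None);
}
```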
@@ -356,6 +422,8 @@ impl Type {
TypeKind::NullPtr |
TypeKind::BlockPointer |
TypeKind::Pointer(..) |
+ TypeKind::ObjCId |
+ TypeKind::ObjCSel |
TypeKind::ObjCInterface(..) => Some(self),
TypeKind::ResolvedTypeRef(inner) |
@@ -384,6 +452,63 @@ impl Type {
}
}
+impl DotAttributes for Type {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ if let Some(ref layout) = self.layout {
+ try!(writeln!(out,
+ "<tr><td>size</td><td>{}</td></tr>
+ <tr><td>align</td><td>{}</td></tr>",
+ layout.size,
+ layout.align));
+ if layout.packed {
+ try!(writeln!(out, "<tr><td>packed</td><td>true</td></tr>"));
+ }
+ }
+
+ if self.is_const {
+ try!(writeln!(out, "<tr><td>const</td><td>true</td></tr>"));
+ }
+
+ self.kind.dot_attributes(ctx, out)
+ }
+}
+
+impl DotAttributes for TypeKind {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ write!(out,
+ "<tr><td>TypeKind</td><td>{}</td></tr>",
+ match *self {
+ TypeKind::Void => "Void",
+ TypeKind::NullPtr => "NullPtr",
+ TypeKind::Comp(..) => "Comp",
+ TypeKind::Int(..) => "Int",
+ TypeKind::Float(..) => "Float",
+ TypeKind::Complex(..) => "Complex",
+ TypeKind::Alias(..) => "Alias",
+ TypeKind::TemplateAlias(..) => "TemplateAlias",
+ TypeKind::Array(..) => "Array",
+ TypeKind::Function(..) => "Function",
+ TypeKind::Enum(..) => "Enum",
+ TypeKind::Pointer(..) => "Pointer",
+ TypeKind::BlockPointer => "BlockPointer",
+ TypeKind::Reference(..) => "Reference",
+ TypeKind::TemplateInstantiation(..) => "TemplateInstantiation",
+ TypeKind::ResolvedTypeRef(..) => "ResolvedTypeRef",
+ TypeKind::Named => "Named",
+ TypeKind::ObjCId => "ObjCId",
+ TypeKind::ObjCSel => "ObjCSel",
+ TypeKind::ObjCInterface(..) => "ObjCInterface",
+ TypeKind::UnresolvedTypeRef(..) => {
+ unreachable!("there shouldn't be any more of these anymore")
+ }
+ })
+ }
+}
+
#[test]
fn is_invalid_named_type_valid() {
let ty = Type::new(Some("foo".into()), None, TypeKind::Named, false);
@@ -430,18 +555,18 @@ fn is_invalid_named_type_empty_name() {
impl TemplateDeclaration for Type {
- fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
- self.kind.template_params(ctx)
+ fn self_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ self.kind.self_template_params(ctx)
}
}
impl TemplateDeclaration for TypeKind {
- fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
+ fn self_template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
match *self {
TypeKind::ResolvedTypeRef(id) => {
- ctx.resolve_type(id).template_params(ctx)
+ ctx.resolve_type(id).self_template_params(ctx)
}
- TypeKind::Comp(ref comp) => comp.template_params(ctx),
+ TypeKind::Comp(ref comp) => comp.self_template_params(ctx),
TypeKind::TemplateAlias(_, ref args) => Some(args.clone()),
TypeKind::TemplateInstantiation(..) |
@@ -459,6 +584,8 @@ impl TemplateDeclaration for TypeKind {
TypeKind::UnresolvedTypeRef(..) |
TypeKind::Named |
TypeKind::Alias(_) |
+ TypeKind::ObjCId |
+ TypeKind::ObjCSel |
TypeKind::ObjCInterface(_) => None,
}
}
@@ -505,6 +632,8 @@ impl CanDeriveDefault for Type {
TypeKind::NullPtr |
TypeKind::Pointer(..) |
TypeKind::BlockPointer |
+ TypeKind::ObjCId |
+ TypeKind::ObjCSel |
TypeKind::ObjCInterface(..) |
TypeKind::Enum(..) => false,
TypeKind::Function(..) |
@@ -650,6 +779,12 @@ pub enum TypeKind {
/// Objective C interface. Always referenced through a pointer
ObjCInterface(ObjCInterface),
+
+ /// Objective C 'id' type, points to any object
+ ObjCId,
+
+ /// Objective C selector type
+ ObjCSel,
}
impl Type {
@@ -681,6 +816,8 @@ impl Type {
TypeKind::Reference(..) |
TypeKind::NullPtr |
TypeKind::BlockPointer |
+ TypeKind::ObjCId |
+ TypeKind::ObjCSel |
TypeKind::Pointer(..) => false,
TypeKind::ObjCInterface(..) => true, // dunno?
@@ -730,8 +867,10 @@ impl Type {
// Parse objc protocols as if they were interfaces
let mut ty_kind = ty.kind();
if let Some(loc) = location {
- if loc.kind() == CXCursor_ObjCProtocolDecl {
- ty_kind = CXType_ObjCInterface;
+ match loc.kind() {
+ CXCursor_ObjCProtocolDecl |
+ CXCursor_ObjCCategoryDecl => ty_kind = CXType_ObjCInterface,
+ _ => {}
}
}
@@ -1089,6 +1228,9 @@ impl Type {
parent_id,
ctx);
}
+ CXType_ObjCId => TypeKind::ObjCId,
+ CXType_ObjCSel => TypeKind::ObjCSel,
+ CXType_ObjCClass |
CXType_ObjCInterface => {
let interface = ObjCInterface::from_ty(&location.unwrap(), ctx)
.expect("Not a valid objc interface?");
@@ -1124,14 +1266,18 @@ impl Trace for Type {
TypeKind::Array(inner, _) |
TypeKind::Alias(inner) |
TypeKind::ResolvedTypeRef(inner) => {
- tracer.visit(inner);
+ tracer.visit_kind(inner, EdgeKind::TypeReference);
+ }
+ TypeKind::TemplateAlias(inner, ref template_params) => {
+ tracer.visit_kind(inner, EdgeKind::TypeReference);
+ for &item in template_params {
+ tracer.visit_kind(item, EdgeKind::TemplateParameterDefinition);
+ }
}
-
- TypeKind::TemplateAlias(inner, ref template_args) |
TypeKind::TemplateInstantiation(inner, ref template_args) => {
- tracer.visit(inner);
+ tracer.visit_kind(inner, EdgeKind::TemplateDeclaration);
for &item in template_args {
- tracer.visit(item);
+ tracer.visit_kind(item, EdgeKind::TemplateArgument);
}
}
TypeKind::Comp(ref ci) => ci.trace(context, tracer, item),
@@ -1157,6 +1303,8 @@ impl Trace for Type {
TypeKind::Int(_) |
TypeKind::Float(_) |
TypeKind::Complex(_) |
+ TypeKind::ObjCId |
+ TypeKind::ObjCSel |
TypeKind::BlockPointer => {}
}
}
diff --git a/src/ir/var.rs b/src/ir/var.rs
index 6cfcdae7..7b610da4 100644
--- a/src/ir/var.rs
+++ b/src/ir/var.rs
@@ -1,6 +1,7 @@
//! Intermediate representation of variables.
use super::context::{BindgenContext, ItemId};
+use super::dot::DotAttributes;
use super::function::cursor_mangling;
use super::int::IntKind;
use super::item::Item;
@@ -8,6 +9,7 @@ use super::ty::{FloatKind, TypeKind};
use cexpr;
use clang;
use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
+use std::io;
use std::num::Wrapping;
/// The type for a constant variable.
@@ -84,6 +86,22 @@ impl Var {
}
}
+impl DotAttributes for Var {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ if self.is_const {
+ try!(writeln!(out, "<tr><td>const</td><td>true</td></tr>"));
+ }
+
+ if let Some(ref mangled) = self.mangled_name {
+ try!(writeln!(out, "<tr><td>mangled name</td><td>{}</td></tr>", mangled));
+ }
+
+ Ok(())
+ }
+}
+
impl ClangSubItemParser for Var {
fn parse(cursor: clang::Cursor,
ctx: &mut BindgenContext)
@@ -238,7 +256,7 @@ impl ClangSubItemParser for Var {
.map(VarType::String)
};
- let mangling = cursor_mangling(&cursor);
+ let mangling = cursor_mangling(ctx, &cursor);
let var = Var::new(name, mangling, ty, value, is_const);
Ok(ParseResult::New(var, Some(cursor)))
diff --git a/src/lib.rs b/src/lib.rs
index 83408632..94a13c61 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -212,6 +212,18 @@ impl Builder {
self
}
+ /// Whether to use the clang-provided name mangling. This defaults to true
+ /// and is probably needed for C++ features.
+ ///
+ /// However, some old libclang versions seem to return incorrect results in
+ /// some cases for non-mangled functions, see [1], so we allow disabling it.
+ ///
+ /// [1]: https://github.com/servo/rust-bindgen/issues/528
+ pub fn trust_clang_mangling(mut self, doit: bool) -> Self {
+ self.options.enable_mangling = doit;
+ self
+ }
+
/// Generate a C/C++ file that includes the header and has dummy uses of
/// every type defined in the header.
pub fn dummy_uses<T: Into<String>>(mut self, dummy_uses: T) -> Builder {
@@ -572,6 +584,15 @@ pub struct BindgenOptions {
/// Intead of emitting 'use objc;' to files generated from objective c files,
/// generate '#[macro_use] extern crate objc;'
pub objc_extern_crate: bool,
+
+ /// Whether to use the clang-provided name mangling. This defaults to true
+ /// and is probably needed for C++ features.
+ ///
+ /// However, some old libclang versions seem to return incorrect results in
+ /// some cases for non-mangled functions, see [1], so we allow disabling it.
+ ///
+ /// [1]: https://github.com/servo/rust-bindgen/issues/528
+ pub enable_mangling: bool,
}
/// TODO(emilio): This is sort of a lie (see the error message that results from
@@ -626,6 +647,7 @@ impl Default for BindgenOptions {
generate_comments: true,
whitelist_recursively: true,
objc_extern_crate: false,
+ enable_mangling: true,
}
}
}
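A hypothetical build-script style use of the new option; `Builder::default()`, `header`, `generate`, and `write_to_file` are the usual bindgen entry points, and disabling mangling is only meant for the plain-C situation described in the comment:

```rust
extern crate bindgen;

fn main() {
    // Sketch only: opt out of libclang's mangled names for a C header that is
    // affected by the linked issue.
    let bindings = bindgen::Builder::default()
        .header("wrapper.h")
        .trust_clang_mangling(false)
        .generate()
        .expect("unable to generate bindings");

    bindings.write_to_file("bindings.rs")
        .expect("couldn't write bindings");
}
```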
diff --git a/src/options.rs b/src/options.rs
index e54ee012..a62aa73d 100644
--- a/src/options.rs
+++ b/src/options.rs
@@ -60,6 +60,9 @@ pub fn builder_from_flags<I>
Arg::with_name("objc-extern-crate")
.long("objc-extern-crate")
.help("Use extern crate instead of use for objc"),
+ Arg::with_name("distrust-clang-mangling")
+ .long("distrust-clang-mangling")
+ .help("Do not trust the libclang-provided mangling"),
Arg::with_name("builtins")
.long("builtins")
.help("Output bindings for builtin definitions, e.g. \
diff --git a/tests/expectations/tests/anon_struct_in_union.rs b/tests/expectations/tests/anon_struct_in_union.rs
new file mode 100644
index 00000000..97a342cf
--- /dev/null
+++ b/tests/expectations/tests/anon_struct_in_union.rs
@@ -0,0 +1,92 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+#[repr(C)]
+pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
+impl <T> __BindgenUnionField<T> {
+ #[inline]
+ pub fn new() -> Self { __BindgenUnionField(::std::marker::PhantomData) }
+ #[inline]
+ pub unsafe fn as_ref(&self) -> &T { ::std::mem::transmute(self) }
+ #[inline]
+ pub unsafe fn as_mut(&mut self) -> &mut T { ::std::mem::transmute(self) }
+}
+impl <T> ::std::default::Default for __BindgenUnionField<T> {
+ #[inline]
+ fn default() -> Self { Self::new() }
+}
+impl <T> ::std::clone::Clone for __BindgenUnionField<T> {
+ #[inline]
+ fn clone(&self) -> Self { Self::new() }
+}
+impl <T> ::std::marker::Copy for __BindgenUnionField<T> { }
+impl <T> ::std::fmt::Debug for __BindgenUnionField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ fmt.write_str("__BindgenUnionField")
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct s {
+ pub u: s__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct s__bindgen_ty_1 {
+ pub field: __BindgenUnionField<s__bindgen_ty_1_inner>,
+ pub bindgen_union_field: u32,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct s__bindgen_ty_1_inner {
+ pub b: ::std::os::raw::c_int,
+}
+#[test]
+fn bindgen_test_layout_s__bindgen_ty_1_inner() {
+ assert_eq!(::std::mem::size_of::<s__bindgen_ty_1_inner>() , 4usize ,
+ concat ! ( "Size of: " , stringify ! ( s__bindgen_ty_1_inner )
+ ));
+ assert_eq! (::std::mem::align_of::<s__bindgen_ty_1_inner>() , 4usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( s__bindgen_ty_1_inner ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const s__bindgen_ty_1_inner ) ) . b as * const
+ _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( s__bindgen_ty_1_inner )
+ , "::" , stringify ! ( b ) ));
+}
+impl Clone for s__bindgen_ty_1_inner {
+ fn clone(&self) -> Self { *self }
+}
+#[test]
+fn bindgen_test_layout_s__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<s__bindgen_ty_1>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( s__bindgen_ty_1 ) ));
+ assert_eq! (::std::mem::align_of::<s__bindgen_ty_1>() , 4usize , concat !
+ ( "Alignment of " , stringify ! ( s__bindgen_ty_1 ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const s__bindgen_ty_1 ) ) . field as * const _
+ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( s__bindgen_ty_1 ) ,
+ "::" , stringify ! ( field ) ));
+}
+impl Clone for s__bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+}
+#[test]
+fn bindgen_test_layout_s() {
+ assert_eq!(::std::mem::size_of::<s>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( s ) ));
+ assert_eq! (::std::mem::align_of::<s>() , 4usize , concat ! (
+ "Alignment of " , stringify ! ( s ) ));
+ assert_eq! (unsafe { & ( * ( 0 as * const s ) ) . u as * const _ as usize
+ } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( s ) , "::" , stringify
+ ! ( u ) ));
+}
+impl Clone for s {
+ fn clone(&self) -> Self { *self }
+}
diff --git a/tests/expectations/tests/layout_array_too_long.rs b/tests/expectations/tests/layout_array_too_long.rs
new file mode 100644
index 00000000..c395f08d
--- /dev/null
+++ b/tests/expectations/tests/layout_array_too_long.rs
@@ -0,0 +1,207 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+pub const RTE_CACHE_LINE_SIZE: ::std::os::raw::c_uint = 64;
+pub const RTE_LIBRTE_IP_FRAG_MAX_FRAG: ::std::os::raw::c_uint = 4;
+pub const IP_LAST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_LAST_FRAG_IDX;
+pub const IP_FIRST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_FIRST_FRAG_IDX;
+pub const IP_MIN_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MIN_FRAG_NUM;
+pub const IP_MAX_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MAX_FRAG_NUM;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum _bindgen_ty_1 {
+ IP_LAST_FRAG_IDX = 0,
+ IP_FIRST_FRAG_IDX = 1,
+ IP_MIN_FRAG_NUM = 2,
+ IP_MAX_FRAG_NUM = 4,
+}
+/** @internal fragmented mbuf */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag {
+ /**< offset into the packet */
+ pub ofs: u16,
+ /**< length of fragment */
+ pub len: u16,
+ /**< fragment mbuf */
+ pub mb: *mut rte_mbuf,
+}
+#[test]
+fn bindgen_test_layout_ip_frag() {
+ assert_eq!(::std::mem::size_of::<ip_frag>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . ofs as * const _ as usize }
+ , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( ofs ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . len as * const _ as usize }
+ , 2usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( len ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . mb as * const _ as usize }
+ , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( mb ) ));
+}
+impl Clone for ip_frag {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_key {
+ /**< src address, first 8 bytes used for IPv4 */
+ pub src_dst: [u64; 4usize],
+ /**< dst address */
+ pub id: u32,
+ /**< src/dst key length */
+ pub key_len: u32,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_key() {
+ assert_eq!(::std::mem::size_of::<ip_frag_key>() , 40usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_key>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . src_dst as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( src_dst ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . id as * const _ as
+ usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( id ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . key_len as * const _ as
+ usize } , 36usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( key_len ) ));
+}
+impl Clone for ip_frag_key {
+ fn clone(&self) -> Self { *self }
+}
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt {
+ /**< LRU list */
+ pub lru: ip_frag_pkt__bindgen_ty_1,
+ /**< fragmentation key */
+ pub key: ip_frag_key,
+ /**< creation timestamp */
+ pub start: u64,
+ /**< expected reassembled size */
+ pub total_size: u32,
+ /**< size of fragments received */
+ pub frag_size: u32,
+ /**< index of next entry to fill */
+ pub last_idx: u32,
+ /**< fragments */
+ pub frags: [ip_frag; 4usize],
+ pub __bindgen_padding_0: [u64; 6usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt__bindgen_ty_1 {
+ pub tqe_next: *mut ip_frag_pkt,
+ pub tqe_prev: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt__bindgen_ty_1>() , 16usize ,
+ concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt__bindgen_ty_1 ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_pkt__bindgen_ty_1>() , 8usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( ip_frag_pkt__bindgen_ty_1 )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_next
+ as * const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_next )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_prev
+ as * const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_prev )
+ ));
+}
+impl Clone for ip_frag_pkt__bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt__bindgen_ty_1 {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt>() , 192usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . lru as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . key as * const _ as
+ usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . start as * const _ as
+ usize } , 56usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( start ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . total_size as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( total_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frag_size as * const _
+ as usize } , 68usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frag_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . last_idx as * const _
+ as usize } , 72usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( last_idx ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frags as * const _ as
+ usize } , 80usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frags ) ));
+}
+impl Clone for ip_frag_pkt {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/**< fragment mbuf */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct rte_mbuf {
+ pub _address: u8,
+}
+impl Clone for rte_mbuf {
+ fn clone(&self) -> Self { *self }
+}
diff --git a/tests/expectations/tests/layout_large_align_field.rs b/tests/expectations/tests/layout_large_align_field.rs
new file mode 100644
index 00000000..820e4210
--- /dev/null
+++ b/tests/expectations/tests/layout_large_align_field.rs
@@ -0,0 +1,419 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>);
+impl <T> __IncompleteArrayField<T> {
+ #[inline]
+ pub fn new() -> Self {
+ __IncompleteArrayField(::std::marker::PhantomData)
+ }
+ #[inline]
+ pub unsafe fn as_ptr(&self) -> *const T { ::std::mem::transmute(self) }
+ #[inline]
+ pub unsafe fn as_mut_ptr(&mut self) -> *mut T {
+ ::std::mem::transmute(self)
+ }
+ #[inline]
+ pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::std::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+impl <T> ::std::fmt::Debug for __IncompleteArrayField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ fmt.write_str("__IncompleteArrayField")
+ }
+}
+impl <T> ::std::clone::Clone for __IncompleteArrayField<T> {
+ #[inline]
+ fn clone(&self) -> Self { Self::new() }
+}
+impl <T> ::std::marker::Copy for __IncompleteArrayField<T> { }
+pub const RTE_CACHE_LINE_SIZE: ::std::os::raw::c_uint = 64;
+pub const RTE_LIBRTE_IP_FRAG_MAX_FRAG: ::std::os::raw::c_uint = 4;
+pub const IP_LAST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_LAST_FRAG_IDX;
+pub const IP_FIRST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_FIRST_FRAG_IDX;
+pub const IP_MIN_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MIN_FRAG_NUM;
+pub const IP_MAX_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MAX_FRAG_NUM;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum _bindgen_ty_1 {
+ IP_LAST_FRAG_IDX = 0,
+ IP_FIRST_FRAG_IDX = 1,
+ IP_MIN_FRAG_NUM = 2,
+ IP_MAX_FRAG_NUM = 4,
+}
+/** @internal fragmented mbuf */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag {
+ /**< offset into the packet */
+ pub ofs: u16,
+ /**< length of fragment */
+ pub len: u16,
+ /**< fragment mbuf */
+ pub mb: *mut rte_mbuf,
+}
+#[test]
+fn bindgen_test_layout_ip_frag() {
+ assert_eq!(::std::mem::size_of::<ip_frag>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . ofs as * const _ as usize }
+ , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( ofs ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . len as * const _ as usize }
+ , 2usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( len ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . mb as * const _ as usize }
+ , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( mb ) ));
+}
+impl Clone for ip_frag {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_key {
+ /**< src address, first 8 bytes used for IPv4 */
+ pub src_dst: [u64; 4usize],
+ /**< dst address */
+ pub id: u32,
+ /**< src/dst key length */
+ pub key_len: u32,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_key() {
+ assert_eq!(::std::mem::size_of::<ip_frag_key>() , 40usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_key>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . src_dst as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( src_dst ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . id as * const _ as
+ usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( id ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . key_len as * const _ as
+ usize } , 36usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( key_len ) ));
+}
+impl Clone for ip_frag_key {
+ fn clone(&self) -> Self { *self }
+}
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt {
+ /**< LRU list */
+ pub lru: ip_frag_pkt__bindgen_ty_1,
+ /**< fragmentation key */
+ pub key: ip_frag_key,
+ /**< creation timestamp */
+ pub start: u64,
+ /**< expected reassembled size */
+ pub total_size: u32,
+ /**< size of fragments received */
+ pub frag_size: u32,
+ /**< index of next entry to fill */
+ pub last_idx: u32,
+ /**< fragments */
+ pub frags: [ip_frag; 4usize],
+ pub __bindgen_padding_0: [u64; 6usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt__bindgen_ty_1 {
+ pub tqe_next: *mut ip_frag_pkt,
+ pub tqe_prev: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt__bindgen_ty_1>() , 16usize ,
+ concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt__bindgen_ty_1 ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_pkt__bindgen_ty_1>() , 8usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( ip_frag_pkt__bindgen_ty_1 )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_next
+ as * const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_next )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_prev
+ as * const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_prev )
+ ));
+}
+impl Clone for ip_frag_pkt__bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt__bindgen_ty_1 {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt>() , 192usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . lru as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . key as * const _ as
+ usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . start as * const _ as
+ usize } , 56usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( start ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . total_size as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( total_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frag_size as * const _
+ as usize } , 68usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frag_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . last_idx as * const _
+ as usize } , 72usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( last_idx ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frags as * const _ as
+ usize } , 80usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frags ) ));
+}
+impl Clone for ip_frag_pkt {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_pkt_list {
+ pub tqh_first: *mut ip_frag_pkt,
+ pub tqh_last: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_pkt_list() {
+ assert_eq!(::std::mem::size_of::<ip_pkt_list>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_pkt_list ) ));
+ assert_eq! (::std::mem::align_of::<ip_pkt_list>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_pkt_list ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_pkt_list ) ) . tqh_first as * const _
+ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_pkt_list ) , "::" ,
+ stringify ! ( tqh_first ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_pkt_list ) ) . tqh_last as * const _
+ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_pkt_list ) , "::" ,
+ stringify ! ( tqh_last ) ));
+}
+impl Clone for ip_pkt_list {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_pkt_list {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** fragmentation table statistics */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_tbl_stat {
+ /**< total # of find/insert attempts. */
+ pub find_num: u64,
+ /**< # of add ops. */
+ pub add_num: u64,
+ /**< # of del ops. */
+ pub del_num: u64,
+ /**< # of reuse (del/add) ops. */
+ pub reuse_num: u64,
+ /**< total # of add failures. */
+ pub fail_total: u64,
+ /**< # of 'no space' add failures. */
+ pub fail_nospace: u64,
+ pub __bindgen_padding_0: [u64; 2usize],
+}
+#[test]
+fn bindgen_test_layout_ip_frag_tbl_stat() {
+ assert_eq!(::std::mem::size_of::<ip_frag_tbl_stat>() , 64usize , concat !
+ ( "Size of: " , stringify ! ( ip_frag_tbl_stat ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . find_num as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( find_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . add_num as * const
+ _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( add_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . del_num as * const
+ _ as usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( del_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . reuse_num as *
+ const _ as usize } , 24usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( reuse_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . fail_total as *
+ const _ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( fail_total ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . fail_nospace as *
+ const _ as usize } , 40usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( fail_nospace ) ));
+}
+impl Clone for ip_frag_tbl_stat {
+ fn clone(&self) -> Self { *self }
+}
+/** fragmentation table */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct rte_ip_frag_tbl {
+ /**< ttl for table entries. */
+ pub max_cycles: u64,
+ /**< hash value mask. */
+ pub entry_mask: u32,
+ /**< max entries allowed. */
+ pub max_entries: u32,
+ /**< entries in use. */
+ pub use_entries: u32,
+    /**< hash associativity. */
+ pub bucket_entries: u32,
+ /**< total size of the table. */
+ pub nb_entries: u32,
+ /**< num of associativity lines. */
+ pub nb_buckets: u32,
+ /**< last used entry. */
+ pub last: *mut ip_frag_pkt,
+ /**< LRU list for table entries. */
+ pub lru: ip_pkt_list,
+ pub __bindgen_padding_0: u64,
+ /**< statistics counters. */
+ pub stat: ip_frag_tbl_stat,
+ /**< hash table. */
+ pub pkt: __IncompleteArrayField<ip_frag_pkt>,
+}
+#[test]
+fn bindgen_test_layout_rte_ip_frag_tbl() {
+ assert_eq!(::std::mem::size_of::<rte_ip_frag_tbl>() , 128usize , concat !
+ ( "Size of: " , stringify ! ( rte_ip_frag_tbl ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . max_cycles as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( max_cycles ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . entry_mask as *
+ const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( entry_mask ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . max_entries as *
+ const _ as usize } , 12usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( max_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . use_entries as *
+ const _ as usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( use_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . bucket_entries as *
+ const _ as usize } , 20usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( bucket_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . nb_entries as *
+ const _ as usize } , 24usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( nb_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . nb_buckets as *
+ const _ as usize } , 28usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( nb_buckets ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . last as * const _
+ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( last ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . lru as * const _ as
+ usize } , 40usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . stat as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( stat ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . pkt as * const _ as
+ usize } , 128usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( pkt ) ));
+}
+impl Clone for rte_ip_frag_tbl {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for rte_ip_frag_tbl {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/**< fragment mbuf */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct rte_mbuf {
+ pub _address: u8,
+}
+impl Clone for rte_mbuf {
+ fn clone(&self) -> Self { *self }
+}
diff --git a/tests/expectations/tests/objc_category.rs b/tests/expectations/tests/objc_category.rs
new file mode 100644
index 00000000..d358e132
--- /dev/null
+++ b/tests/expectations/tests/objc_category.rs
@@ -0,0 +1,23 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+#![cfg(target_os="macos")]
+
+#[macro_use]
+extern crate objc;
+#[allow(non_camel_case_types)]
+pub type id = *mut objc::runtime::Object;
+pub trait Foo {
+ unsafe fn method(self);
+}
+impl Foo for id {
+ unsafe fn method(self) { msg_send!(self , method) }
+}
+pub trait Foo_BarCategory {
+ unsafe fn categoryMethod(self);
+}
+impl Foo_BarCategory for id {
+ unsafe fn categoryMethod(self) { msg_send!(self , categoryMethod) }
+}
diff --git a/tests/expectations/tests/objc_class.rs b/tests/expectations/tests/objc_class.rs
new file mode 100644
index 00000000..9aa30c1a
--- /dev/null
+++ b/tests/expectations/tests/objc_class.rs
@@ -0,0 +1,21 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+#![cfg(target_os="macos")]
+
+#[macro_use]
+extern crate objc;
+#[allow(non_camel_case_types)]
+pub type id = *mut objc::runtime::Object;
+pub trait Foo {
+ unsafe fn method(self);
+}
+impl Foo for id {
+ unsafe fn method(self) { msg_send!(self , method) }
+}
+extern "C" {
+ #[link_name = "fooVar"]
+ pub static mut fooVar: *mut id;
+}
diff --git a/tests/expectations/tests/objc_interface.rs b/tests/expectations/tests/objc_interface.rs
index 027cf57e..3ca67b89 100644
--- a/tests/expectations/tests/objc_interface.rs
+++ b/tests/expectations/tests/objc_interface.rs
@@ -11,5 +11,5 @@ extern crate objc;
pub type id = *mut objc::runtime::Object;
pub trait Foo { }
impl Foo for id { }
-pub trait bar { }
-impl bar for id { }
+pub trait protocol_bar { }
+impl protocol_bar for id { }
diff --git a/tests/expectations/tests/objc_protocol.rs b/tests/expectations/tests/objc_protocol.rs
new file mode 100644
index 00000000..a21d4baa
--- /dev/null
+++ b/tests/expectations/tests/objc_protocol.rs
@@ -0,0 +1,15 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+#![cfg(target_os="macos")]
+
+#[macro_use]
+extern crate objc;
+#[allow(non_camel_case_types)]
+pub type id = *mut objc::runtime::Object;
+pub trait protocol_Foo { }
+impl protocol_Foo for id { }
+pub trait Foo { }
+impl Foo for id { }
diff --git a/tests/expectations/tests/objc_sel_and_id.rs b/tests/expectations/tests/objc_sel_and_id.rs
new file mode 100644
index 00000000..d72b0bc7
--- /dev/null
+++ b/tests/expectations/tests/objc_sel_and_id.rs
@@ -0,0 +1,22 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+#![cfg(target_os="macos")]
+
+#[macro_use]
+extern crate objc;
+#[allow(non_camel_case_types)]
+pub type id = *mut objc::runtime::Object;
+extern "C" {
+ #[link_name = "object"]
+ pub static mut object: id;
+}
+extern "C" {
+ #[link_name = "selector"]
+ pub static mut selector: objc::runtime::Sel;
+}
+extern "C" {
+ pub fn f(object: id, selector: objc::runtime::Sel);
+}
diff --git a/tests/headers/anon_struct_in_union.h b/tests/headers/anon_struct_in_union.h
new file mode 100644
index 00000000..880a8b54
--- /dev/null
+++ b/tests/headers/anon_struct_in_union.h
@@ -0,0 +1,7 @@
+struct s {
+ union {
+ struct inner {
+ int b;
+ } field;
+ } u;
+};
diff --git a/tests/headers/layout_array_too_long.h b/tests/headers/layout_array_too_long.h
new file mode 100644
index 00000000..9be037ab
--- /dev/null
+++ b/tests/headers/layout_array_too_long.h
@@ -0,0 +1,60 @@
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define RTE_CACHE_LINE_SIZE 64
+
+/**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+
+enum {
+ IP_LAST_FRAG_IDX, /**< index of last fragment */
+ IP_FIRST_FRAG_IDX, /**< index of first fragment */
+ IP_MIN_FRAG_NUM, /**< minimum number of fragments */
+ IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
+ /**< maximum number of fragments per packet */
+};
+
+/** @internal fragmented mbuf */
+struct ip_frag {
+ uint16_t ofs; /**< offset into the packet */
+ uint16_t len; /**< length of fragment */
+ struct rte_mbuf *mb; /**< fragment mbuf */
+};
+
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+struct ip_frag_key {
+ uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
+ uint32_t id; /**< dst address */
+ uint32_t key_len; /**< src/dst key length */
+};
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
+ struct ip_frag_key key; /**< fragmentation key */
+ uint64_t start; /**< creation timestamp */
+ uint32_t total_size; /**< expected reassembled size */
+ uint32_t frag_size; /**< size of fragments received */
+ uint32_t last_idx; /**< index of next entry to fill */
+ struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
+} __rte_cache_aligned;
diff --git a/tests/headers/layout_large_align_field.h b/tests/headers/layout_large_align_field.h
new file mode 100644
index 00000000..f4f412c6
--- /dev/null
+++ b/tests/headers/layout_large_align_field.h
@@ -0,0 +1,97 @@
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define RTE_CACHE_LINE_SIZE 64
+
+/**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+
+enum {
+ IP_LAST_FRAG_IDX, /**< index of last fragment */
+ IP_FIRST_FRAG_IDX, /**< index of first fragment */
+ IP_MIN_FRAG_NUM, /**< minimum number of fragments */
+ IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
+ /**< maximum number of fragments per packet */
+};
+
+/** @internal fragmented mbuf */
+struct ip_frag {
+ uint16_t ofs; /**< offset into the packet */
+ uint16_t len; /**< length of fragment */
+ struct rte_mbuf *mb; /**< fragment mbuf */
+};
+
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+struct ip_frag_key {
+ uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
+ uint32_t id; /**< dst address */
+ uint32_t key_len; /**< src/dst key length */
+};
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
+ struct ip_frag_key key; /**< fragmentation key */
+ uint64_t start; /**< creation timestamp */
+ uint32_t total_size; /**< expected reassembled size */
+ uint32_t frag_size; /**< size of fragments received */
+ uint32_t last_idx; /**< index of next entry to fill */
+ struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
+} __rte_cache_aligned;
+
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
+
+/** fragmentation table statistics */
+struct ip_frag_tbl_stat {
+ uint64_t find_num; /**< total # of find/insert attempts. */
+ uint64_t add_num; /**< # of add ops. */
+ uint64_t del_num; /**< # of del ops. */
+ uint64_t reuse_num; /**< # of reuse (del/add) ops. */
+ uint64_t fail_total; /**< total # of add failures. */
+ uint64_t fail_nospace; /**< # of 'no space' add failures. */
+} __rte_cache_aligned;
+
+/** fragmentation table */
+struct rte_ip_frag_tbl {
+ uint64_t max_cycles; /**< ttl for table entries. */
+ uint32_t entry_mask; /**< hash value mask. */
+ uint32_t max_entries; /**< max entries allowed. */
+ uint32_t use_entries; /**< entries in use. */
+    uint32_t bucket_entries;     /**< hash associativity. */
+ uint32_t nb_entries; /**< total size of the table. */
+ uint32_t nb_buckets; /**< num of associativity lines. */
+ struct ip_frag_pkt *last; /**< last used entry. */
+ struct ip_pkt_list lru; /**< LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /**< statistics counters. */
+ __extension__ struct ip_frag_pkt pkt[0]; /**< hash table. */
+};
diff --git a/tests/headers/objc_category.h b/tests/headers/objc_category.h
new file mode 100644
index 00000000..c464b72e
--- /dev/null
+++ b/tests/headers/objc_category.h
@@ -0,0 +1,10 @@
+// bindgen-flags: --objc-extern-crate -- -x objective-c
+// bindgen-osx-only
+
+@interface Foo
+-(void)method;
+@end
+
+@interface Foo (BarCategory)
+-(void)categoryMethod;
+@end
diff --git a/tests/headers/objc_class.h b/tests/headers/objc_class.h
new file mode 100644
index 00000000..cea72e78
--- /dev/null
+++ b/tests/headers/objc_class.h
@@ -0,0 +1,10 @@
+// bindgen-flags: --objc-extern-crate -- -x objective-c
+// bindgen-osx-only
+
+@class Foo;
+
+Foo* fooVar;
+
+@interface Foo
+-(void)method;
+@end
diff --git a/tests/headers/objc_protocol.h b/tests/headers/objc_protocol.h
new file mode 100644
index 00000000..0c760fa5
--- /dev/null
+++ b/tests/headers/objc_protocol.h
@@ -0,0 +1,8 @@
+// bindgen-flags: --objc-extern-crate -- -x objective-c
+// bindgen-osx-only
+
+@protocol Foo
+@end
+
+@interface Foo <Foo>
+@end
diff --git a/tests/headers/objc_sel_and_id.h b/tests/headers/objc_sel_and_id.h
new file mode 100644
index 00000000..3c8c6561
--- /dev/null
+++ b/tests/headers/objc_sel_and_id.h
@@ -0,0 +1,7 @@
+// bindgen-flags: --objc-extern-crate -- -x objective-c
+// bindgen-osx-only
+
+id object;
+SEL selector;
+
+void f(id object, SEL selector);