-rw-r--r--   src/codegen/impl_debug.rs                                      17
-rw-r--r--   src/codegen/impl_partialeq.rs                                  11
-rw-r--r--   src/codegen/mod.rs                                             62
-rw-r--r--   src/ir/comp.rs                                                156
-rw-r--r--   src/ir/context.rs                                              96
-rw-r--r--   tests/expectations/tests/bitfield_large_overflow.rs            24
-rw-r--r--   tests/expectations/tests/derive-bitfield-method-same-name.rs  136
-rw-r--r--   tests/headers/bitfield_large_overflow.hpp                       2
-rw-r--r--   tests/headers/derive-bitfield-method-same-name.hpp             13
9 files changed, 391 insertions, 126 deletions
diff --git a/src/codegen/impl_debug.rs b/src/codegen/impl_debug.rs
index 7ef108da..e0204f4d 100644
--- a/src/codegen/impl_debug.rs
+++ b/src/codegen/impl_debug.rs
@@ -28,7 +28,6 @@ pub fn gen_debug_impl(
&Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()),
});
-
for (i, (fstring, toks)) in processed_fields.enumerate() {
if i > 0 {
format_string.push_str(", ");
@@ -91,15 +90,19 @@ impl<'a> ImplDebug<'a> for BitfieldUnit {
) -> Option<(String, Vec<quote::Tokens>)> {
let mut format_string = String::new();
let mut tokens = vec![];
- for (i, bu) in self.bitfields().iter().enumerate() {
+ for (i, bitfield) in self.bitfields().iter().enumerate() {
if i > 0 {
format_string.push_str(", ");
}
- format_string.push_str(&format!("{} : {{:?}}", bu.name()));
- let name_ident = ctx.rust_ident_raw(bu.name());
- tokens.push(quote! {
- self.#name_ident ()
- });
+
+ if let Some(bitfield_name) = bitfield.name() {
+ format_string.push_str(&format!("{} : {{:?}}", bitfield_name));
+ let getter_name = bitfield.getter_name();
+ let name_ident = ctx.rust_ident_raw(getter_name);
+ tokens.push(quote! {
+ self.#name_ident ()
+ });
+ }
}
Some((format_string, tokens))
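A minimal sketch of the effect on generated code (hand-written for illustration, not actual bindgen output; the struct `S` and getter `flag` are invented names): for a unit containing one named bitfield and one anonymous padding bitfield, only the named bitfield appears in the Debug output, and it is read through its generated getter rather than a raw field access.

#[repr(C)]
#[derive(Default)]
pub struct S {
    pub _bitfield_1: [u8; 2usize],
}

impl S {
    // Getter name as it would be assigned during deanonymization.
    pub fn flag(&self) -> bool {
        (self._bitfield_1[0] & 1) != 0
    }
}

impl ::std::fmt::Debug for S {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        // The anonymous padding bitfield contributes nothing here.
        write!(f, "S {{ flag : {:?} }}", self.flag())
    }
}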
diff --git a/src/codegen/impl_partialeq.rs b/src/codegen/impl_partialeq.rs
index 7ac96003..02783808 100644
--- a/src/codegen/impl_partialeq.rs
+++ b/src/codegen/impl_partialeq.rs
@@ -51,10 +51,13 @@ pub fn gen_partialeq_impl(
tokens.push(gen_field(ctx, ty_item, name));
}
Field::Bitfields(ref bu) => for bitfield in bu.bitfields() {
- let name_ident = ctx.rust_ident_raw(bitfield.name());
- tokens.push(quote! {
- self.#name_ident () == other.#name_ident ()
- });
+ if let Some(_) = bitfield.name() {
+ let getter_name = bitfield.getter_name();
+ let name_ident = ctx.rust_ident_raw(getter_name);
+ tokens.push(quote! {
+ self.#name_ident () == other.#name_ident ()
+ });
+ }
},
}
}
diff --git a/src/codegen/mod.rs b/src/codegen/mod.rs
index 9ccd79c1..ea9fec41 100644
--- a/src/codegen/mod.rs
+++ b/src/codegen/mod.rs
@@ -1185,6 +1185,10 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
let mut ctor_impl = quote! { 0 };
for bf in self.bitfields() {
+ // Anonymous bitfields have no name to generate accessors for, so skip them.
+ if bf.name().is_none() {
+ continue;
+ }
bf.codegen(
ctx,
fields_should_be_private,
@@ -1198,7 +1202,7 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
(&unit_field_name, unit_field_int_ty.clone()),
);
- let param_name = bitfield_getter_name(ctx, parent, bf.name());
+ let param_name = bitfield_getter_name(ctx, bf);
let bitfield_ty_item = ctx.resolve_item(bf.ty());
let bitfield_ty = bitfield_ty_item.expect_type();
let bitfield_ty =
@@ -1232,59 +1236,21 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit {
}
}
-fn parent_has_method(
- ctx: &BindgenContext,
- parent: &CompInfo,
- name: &str,
-) -> bool {
- parent.methods().iter().any(|method| {
- let method_name = match *ctx.resolve_item(method.signature()).kind() {
- ItemKind::Function(ref func) => func.name(),
- ref otherwise => {
- panic!(
- "a method's signature should always be a \
- item of kind ItemKind::Function, found: \
- {:?}",
- otherwise
- )
- }
- };
-
- method_name == name || ctx.rust_mangle(&method_name) == name
- })
-}
-
fn bitfield_getter_name(
ctx: &BindgenContext,
- parent: &CompInfo,
- bitfield_name: &str,
+ bitfield: &Bitfield,
) -> quote::Tokens {
- let name = ctx.rust_mangle(bitfield_name);
-
- if parent_has_method(ctx, parent, &name) {
- let mut name = name.to_string();
- name.push_str("_bindgen_bitfield");
- let name = ctx.rust_ident(name);
- return quote! { #name };
- }
-
- let name = ctx.rust_ident(name);
+ let name = bitfield.getter_name();
+ let name = ctx.rust_ident_raw(name);
quote! { #name }
}
fn bitfield_setter_name(
ctx: &BindgenContext,
- parent: &CompInfo,
- bitfield_name: &str,
+ bitfield: &Bitfield,
) -> quote::Tokens {
- let setter = format!("set_{}", bitfield_name);
- let mut setter = ctx.rust_mangle(&setter).to_string();
-
- if parent_has_method(ctx, parent, &setter) {
- setter.push_str("_bindgen_bitfield");
- }
-
- let setter = ctx.rust_ident(setter);
+ let setter = bitfield.setter_name();
+ let setter = ctx.rust_ident_raw(setter);
quote! { #setter }
}
@@ -1297,7 +1263,7 @@ impl<'a> FieldCodegen<'a> for Bitfield {
_fields_should_be_private: bool,
_codegen_depth: usize,
_accessor_kind: FieldAccessorKind,
- parent: &CompInfo,
+ _parent: &CompInfo,
_result: &mut CodegenResult,
_struct_layout: &mut StructLayoutTracker,
_fields: &mut F,
@@ -1308,8 +1274,8 @@ impl<'a> FieldCodegen<'a> for Bitfield {
M: Extend<quote::Tokens>,
{
let prefix = ctx.trait_prefix();
- let getter_name = bitfield_getter_name(ctx, parent, self.name());
- let setter_name = bitfield_setter_name(ctx, parent, self.name());
+ let getter_name = bitfield_getter_name(ctx, self);
+ let setter_name = bitfield_setter_name(ctx, self);
let unit_field_ident = quote::Ident::new(unit_field_name);
let bitfield_ty_item = ctx.resolve_item(self.ty());
diff --git a/src/ir/comp.rs b/src/ir/comp.rs
index 6a90bcbf..230ba5b4 100644
--- a/src/ir/comp.rs
+++ b/src/ir/comp.rs
@@ -17,6 +17,7 @@ use peeking_take_while::PeekableExt;
use std::cmp;
use std::io;
use std::mem;
+use std::collections::HashMap;
/// The kind of compound type.
#[derive(Debug, Copy, Clone, PartialEq)]
@@ -276,7 +277,7 @@ impl DotAttributes for Bitfield {
writeln!(
out,
"<tr><td>{} : {}</td><td>{:?}</td></tr>",
- self.name(),
+ self.name().unwrap_or("(anonymous)"),
self.width(),
self.ty()
)
@@ -292,17 +293,28 @@ pub struct Bitfield {
/// The field data for this bitfield.
data: FieldData,
+
+ /// Name of the generated Rust getter for this bitfield.
+ ///
+ /// Should be assigned before codegen.
+ getter_name: Option<String>,
+
+ /// Name of the generated Rust setter for this bitfield.
+ ///
+ /// Should be assigned before codegen.
+ setter_name: Option<String>,
}
impl Bitfield {
/// Construct a new bitfield.
fn new(offset_into_unit: usize, raw: RawField) -> Bitfield {
assert!(raw.bitfield().is_some());
- assert!(raw.name().is_some());
Bitfield {
offset_into_unit: offset_into_unit,
data: raw.0,
+ getter_name: None,
+ setter_name: None,
}
}
@@ -333,9 +345,28 @@ impl Bitfield {
self.data.bitfield().unwrap()
}
- /// Get the name of this bitfield.
- pub fn name(&self) -> &str {
- self.data.name().unwrap()
+ /// Name of the generated Rust getter for this bitfield.
+ ///
+ /// Panics if called before bitfield accessor names have been assigned
+ /// or if this bitfield has no name.
+ pub fn getter_name(&self) -> &str {
+ assert!(self.name().is_some(), "`Bitfield::getter_name` called on anonymous field");
+ self.getter_name.as_ref().expect(
+ "`Bitfield::getter_name` should only be called after \
+ assigning bitfield accessor names",
+ )
+ }
+
+ /// Name of the generated Rust setter for this bitfield.
+ ///
+ /// Panics if called before bitfield accessor names have been assigned
+ /// or if this bitfield has no name.
+ pub fn setter_name(&self) -> &str {
+ assert!(self.name().is_some(), "`Bitfield::setter_name` called on anonymous field");
+ self.setter_name.as_ref().expect(
+ "`Bitfield::setter_name` should only be called after \
+ assigning bitfield accessor names",
+ )
}
}
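For orientation, a minimal self-contained sketch of the intended two-phase protocol (the simplified type below is hypothetical, not the real `ir::comp::Bitfield`): accessor names are filled in once, before codegen, and only read afterwards.

struct Bitfield {
    name: Option<String>,
    getter_name: Option<String>,
    setter_name: Option<String>,
}

impl Bitfield {
    fn getter_name(&self) -> &str {
        assert!(self.name.is_some(), "anonymous bitfields get no accessors");
        self.getter_name
            .as_ref()
            .expect("accessor names must be assigned before codegen")
    }
}

fn main() {
    let mut bf = Bitfield {
        name: Some("type_".into()),
        getter_name: None,
        setter_name: None,
    };
    // Phase 1: deanonymization assigns the Rust-side accessor names.
    bf.getter_name = Some("type__bindgen_bitfield".into());
    bf.setter_name = Some("set_type__bindgen_bitfield".into());
    // Phase 2: codegen reads them; calling earlier would have panicked.
    assert_eq!(bf.getter_name(), "type__bindgen_bitfield");
}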
@@ -581,13 +612,12 @@ fn bitfields_to_allocation_units<E, I>(
}
}
- // Only keep named bitfields around. Unnamed bitfields (with > 0
- // bitsize) are used for padding. Because the `Bitfield` struct stores
- // the bit-offset into its allocation unit where its bits begin, we
- // don't need any padding bits hereafter.
- if bitfield.name().is_some() {
- bitfields_in_unit.push(Bitfield::new(offset, bitfield));
- }
+ // Always keep all bitfields around. While unnamed bitfields are only
+ // used for padding (and usually not needed hereafter), large unnamed
+ // bitfields that are wider than their type's size cause weird
+ // allocation-size behavior from clang. Therefore, all bitfields need
+ // to be kept around in order to check for this and make the struct
+ // opaque in that case.
+ bitfields_in_unit.push(Bitfield::new(offset, bitfield));
max_align = cmp::max(max_align, bitfield_align);
@@ -668,30 +698,79 @@ impl CompFields {
);
}
- fn deanonymize_fields(&mut self) {
+ fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) {
let fields = match *self {
- CompFields::AfterComputingBitfieldUnits(ref mut fields) => {
- fields
- }
+ CompFields::AfterComputingBitfieldUnits(ref mut fields) => fields,
CompFields::BeforeComputingBitfieldUnits(_) => {
panic!("Not yet computed bitfield units.");
}
};
+ fn has_method(methods: &[Method], ctx: &BindgenContext, name: &str) -> bool {
+ methods.iter().any(|method| {
+ let method_name = ctx.resolve_func(method.signature()).name();
+ method_name == name || ctx.rust_mangle(&method_name) == name
+ })
+ }
+
+ struct AccessorNamesPair {
+ getter: String,
+ setter: String,
+ }
+
+ let mut accessor_names: HashMap<String, AccessorNamesPair> = fields
+ .iter()
+ .flat_map(|field| match *field {
+ Field::Bitfields(ref bu) => &*bu.bitfields,
+ Field::DataMember(_) => &[],
+ })
+ .filter_map(|bitfield| bitfield.name())
+ .map(|bitfield_name| {
+ let bitfield_name = bitfield_name.to_string();
+ let getter = {
+ let mut getter = ctx.rust_mangle(&bitfield_name).to_string();
+ if has_method(methods, ctx, &getter) {
+ getter.push_str("_bindgen_bitfield");
+ }
+ getter
+ };
+ let setter = {
+ let setter = format!("set_{}", bitfield_name);
+ let mut setter = ctx.rust_mangle(&setter).to_string();
+ if has_method(methods, ctx, &setter) {
+ setter.push_str("_bindgen_bitfield");
+ }
+ setter
+ };
+ (bitfield_name, AccessorNamesPair { getter, setter })
+ })
+ .collect();
+
let mut anon_field_counter = 0;
for field in fields.iter_mut() {
- let field_data = match *field {
- Field::DataMember(ref mut fd) => fd,
- Field::Bitfields(_) => continue,
- };
+ match *field {
+ Field::DataMember(FieldData { ref mut name, .. }) => {
+ if let Some(_) = *name {
+ continue;
+ }
- if let Some(_) = field_data.name {
- continue;
- }
+ anon_field_counter += 1;
+ let generated_name = format!("__bindgen_anon_{}", anon_field_counter);
+ *name = Some(generated_name);
+ }
+ Field::Bitfields(ref mut bu) => for bitfield in &mut bu.bitfields {
+ if bitfield.name().is_none() {
+ continue;
+ }
- anon_field_counter += 1;
- let name = format!("__bindgen_anon_{}", anon_field_counter);
- field_data.name = Some(name);
+ if let Some(AccessorNamesPair { getter, setter }) =
+ accessor_names.remove(bitfield.name().unwrap())
+ {
+ bitfield.getter_name = Some(getter);
+ bitfield.setter_name = Some(setter);
+ }
+ },
+ }
}
}
}
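As a rough, self-contained sketch of the renaming rule above (the helper and names below are hypothetical, and clashes introduced by `rust_mangle` are ignored for brevity): a bitfield whose getter or setter would collide with a method on the parent gets a `_bindgen_bitfield` suffix.

fn accessor_names(bitfield: &str, methods: &[&str]) -> (String, String) {
    let clashes = |name: &str| methods.iter().any(|m| *m == name);

    let mut getter = bitfield.to_string();
    if clashes(&getter) {
        getter.push_str("_bindgen_bitfield");
    }

    let mut setter = format!("set_{}", bitfield);
    if clashes(&setter) {
        setter.push_str("_bindgen_bitfield");
    }

    (getter, setter)
}

fn main() {
    // The parent also declares methods `ready()` and `set_ready()`, so
    // both accessors are pushed out of the way.
    let (getter, setter) = accessor_names("ready", &["ready", "set_ready"]);
    assert_eq!(getter, "ready_bindgen_bitfield");
    assert_eq!(setter, "set_ready_bindgen_bitfield");
}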
@@ -1404,8 +1483,8 @@ impl CompInfo {
}
/// Assign for each anonymous field a generated name.
- pub fn deanonymize_fields(&mut self) {
- self.fields.deanonymize_fields();
+ pub fn deanonymize_fields(&mut self, ctx: &BindgenContext) {
+ self.fields.deanonymize_fields(ctx, &self.methods);
}
/// Returns whether the current union can be represented as a Rust `union`
@@ -1480,8 +1559,25 @@ impl DotAttributes for CompInfo {
impl IsOpaque for CompInfo {
type Extra = ();
- fn is_opaque(&self, _: &BindgenContext, _: &()) -> bool {
- self.has_non_type_template_params
+ fn is_opaque(&self, ctx: &BindgenContext, _: &()) -> bool {
+ // Early return to avoid extra computation
+ if self.has_non_type_template_params {
+ return true
+ }
+
+ self.fields().iter().any(|f| match *f {
+ Field::DataMember(_) => {
+ false
+ },
+ Field::Bitfields(ref unit) => {
+ unit.bitfields().iter().any(|bf| {
+ let bitfield_layout = ctx.resolve_type(bf.ty())
+ .layout(ctx)
+ .expect("Bitfield without layout? Gah!");
+ bf.width() / 8 > bitfield_layout.size as u32
+ })
+ }
+ })
}
}
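A small numeric sketch of the new opacity condition, using the `unsigned : 632` case from the test further down (treating `unsigned int` as 4 bytes is a platform assumption):

// Mirrors the condition above: a bitfield wider than its underlying
// type makes the whole struct opaque.
fn bitfield_overflows(width_in_bits: u32, type_size_in_bytes: u32) -> bool {
    width_in_bits / 8 > type_size_in_bytes
}

fn main() {
    // `unsigned : 632` spans 79 bytes, but `unsigned int` is only 4 bytes.
    assert!(bitfield_overflows(632, 4));
    // An ordinary `char type_ : 3` bitfield stays well within `char`.
    assert!(!bitfield_overflows(3, 1));
}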
diff --git a/src/ir/context.rs b/src/ir/context.rs
index 27a34162..8f361094 100644
--- a/src/ir/context.rs
+++ b/src/ir/context.rs
@@ -15,6 +15,7 @@ use super::module::{Module, ModuleKind};
use super::template::{TemplateInstantiation, TemplateParameters};
use super::traversal::{self, Edge, ItemTraversal};
use super::ty::{FloatKind, Type, TypeKind};
+use super::function::Function;
use super::super::time::Timer;
use BindgenOptions;
use callbacks::ParseCallbacks;
@@ -964,6 +965,30 @@ impl BindgenContext {
}
}
+ /// Temporarily loan the `Item` with the given `ItemId`. This provides a
+ /// means to mutably borrow an `Item` while also holding a reference to
+ /// the `BindgenContext`.
+ ///
+ /// The `Item` with the given `ItemId` is removed from the context, the
+ /// given closure is executed, and then the `Item` is placed back.
+ ///
+ /// # Panics
+ ///
+ /// Panics if an attempt is made to resolve the given `ItemId` inside the
+ /// given closure.
+ fn with_loaned_item<F, T>(&mut self, id: ItemId, f: F) -> T
+ where
+ F: (FnOnce(&BindgenContext, &mut Item) -> T)
+ {
+ let mut item = self.items.remove(&id).unwrap();
+
+ let result = f(self, &mut item);
+
+ let existing = self.items.insert(id, item);
+ assert!(existing.is_none());
+
+ result
+ }
+
/// Compute the bitfield allocation units for all `TypeKind::Comp` items we
/// parsed.
fn compute_bitfield_units(&mut self) {
@@ -972,34 +997,43 @@ impl BindgenContext {
let need_bitfield_allocation =
mem::replace(&mut self.need_bitfield_allocation, vec![]);
for id in need_bitfield_allocation {
- // To appease the borrow checker, we temporarily remove this item
- // from the context, and then replace it once we are done computing
- // its bitfield units. We will never try and resolve this
- // `TypeKind::Comp` item's id (which would now cause a panic) during
- // bitfield unit computation because it is a non-scalar by
- // definition, and non-scalar types may not be used as bitfields.
- let mut item = self.items.remove(&id).unwrap();
-
- item.kind_mut()
- .as_type_mut()
- .unwrap()
- .as_comp_mut()
- .unwrap()
- .compute_bitfield_units(&*self);
-
- self.items.insert(id, item);
+ self.with_loaned_item(id, |ctx, item| {
+ item.kind_mut()
+ .as_type_mut()
+ .unwrap()
+ .as_comp_mut()
+ .unwrap()
+ .compute_bitfield_units(ctx);
+ });
}
}
/// Assign a new generated name for each anonymous field.
fn deanonymize_fields(&mut self) {
let _t = self.timer("deanonymize_fields");
- let comp_types = self.items
- .values_mut()
- .filter_map(|item| item.kind_mut().as_type_mut())
- .filter_map(Type::as_comp_mut);
- for comp_info in comp_types {
- comp_info.deanonymize_fields();
+
+ let comp_item_ids: Vec<ItemId> = self.items
+ .iter()
+ .filter_map(|(id, item)| {
+ if let Some(ty) = item.kind().as_type() {
+ if let Some(_comp) = ty.as_comp() {
+ return Some(id);
+ }
+ }
+ None
+ })
+ .cloned()
+ .collect();
+
+ for id in comp_item_ids {
+ self.with_loaned_item(id, |ctx, item| {
+ item.kind_mut()
+ .as_type_mut()
+ .unwrap()
+ .as_comp_mut()
+ .unwrap()
+ .deanonymize_fields(ctx);
+ });
}
}
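A minimal usage sketch of the `with_loaned_item` loan pattern used by both callers above, under a much-simplified context type (everything except the shape of `with_loaned_item` is hypothetical):

use std::collections::HashMap;

struct Ctx {
    items: HashMap<u32, String>,
}

impl Ctx {
    fn with_loaned_item<F, T>(&mut self, id: u32, f: F) -> T
    where
        F: FnOnce(&Ctx, &mut String) -> T,
    {
        // Take the item out so `&self` and `&mut item` can coexist.
        let mut item = self.items.remove(&id).unwrap();
        let result = f(self, &mut item);
        // Put it back; nothing may have reused the id in the meantime.
        let existing = self.items.insert(id, item);
        assert!(existing.is_none());
        result
    }
}

fn main() {
    let mut ctx = Ctx { items: HashMap::new() };
    ctx.items.insert(1, "comp".to_string());
    ctx.with_loaned_item(1, |ctx, item| {
        // While loaned, the item is absent from the map, so resolving the
        // same id through `ctx` would fail -- hence the documented panic.
        assert!(ctx.items.get(&1).is_none());
        item.push_str(" [bitfield units computed]");
    });
    assert_eq!(ctx.items[&1], "comp [bitfield units computed]");
}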
@@ -1137,8 +1171,6 @@ impl BindgenContext {
{
self.in_codegen = true;
- self.assert_no_dangling_references();
-
if !self.collected_typerefs() {
self.resolve_typerefs();
self.compute_bitfield_units();
@@ -1147,8 +1179,6 @@ impl BindgenContext {
self.deanonymize_fields();
- // And assert once again, because resolving type refs and processing
- // replacements both mutate the IR graph.
self.assert_no_dangling_references();
// Compute the whitelisted set after processing replacements and
@@ -1395,12 +1425,20 @@ impl BindgenContext {
self.root_module
}
- /// Resolve the given `ItemId` as a type.
+ /// Resolve a type with the given id.
///
- /// Panics if there is no item for the given `ItemId` or if the resolved
+ /// Panics if there is no item for the given `TypeId` or if the resolved
/// item is not a `Type`.
pub fn resolve_type(&self, type_id: TypeId) -> &Type {
- self.items.get(&type_id.into()).unwrap().kind().expect_type()
+ self.resolve_item(type_id).kind().expect_type()
+ }
+
+ /// Resolve a function with the given id.
+ ///
+ /// Panics if there is no item for the given `FunctionId` or if the resolved
+ /// item is not a `Function`.
+ pub fn resolve_func(&self, func_id: FunctionId) -> &Function {
+ self.resolve_item(func_id).kind().expect_function()
}
/// Resolve the given `ItemId` as a type, or `None` if there is no item with
diff --git a/tests/expectations/tests/bitfield_large_overflow.rs b/tests/expectations/tests/bitfield_large_overflow.rs
index 523570e4..fb2029ea 100644
--- a/tests/expectations/tests/bitfield_large_overflow.rs
+++ b/tests/expectations/tests/bitfield_large_overflow.rs
@@ -5,17 +5,29 @@
#[repr(C)]
+#[derive(Debug, Default, Copy)]
pub struct _bindgen_ty_1 {
- pub _bitfield_1: [u8; 128usize],
- pub __bindgen_align: [u64; 0usize],
+ pub _bindgen_opaque_blob: [u64; 10usize],
}
-impl Default for _bindgen_ty_1 {
- fn default() -> Self {
- unsafe { ::std::mem::zeroed() }
+#[test]
+fn bindgen_test_layout__bindgen_ty_1() {
+ assert_eq!(
+ ::std::mem::size_of::<_bindgen_ty_1>(),
+ 80usize,
+ concat!("Size of: ", stringify!(_bindgen_ty_1))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<_bindgen_ty_1>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(_bindgen_ty_1))
+ );
+}
+impl Clone for _bindgen_ty_1 {
+ fn clone(&self) -> Self {
+ *self
}
}
extern "C" {
#[link_name = "a"]
pub static mut a: _bindgen_ty_1;
}
-
diff --git a/tests/expectations/tests/derive-bitfield-method-same-name.rs b/tests/expectations/tests/derive-bitfield-method-same-name.rs
new file mode 100644
index 00000000..1873d0d6
--- /dev/null
+++ b/tests/expectations/tests/derive-bitfield-method-same-name.rs
@@ -0,0 +1,136 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals)]
+
+
+
+/// Because this struct has an array larger than 32 items
+/// and --with-derive-partialeq --impl-partialeq --impl-debug is provided,
+/// this struct should manually implement `Debug` and `PartialEq`.
+#[repr(C)]
+#[derive(Copy)]
+pub struct Foo {
+ pub large: [::std::os::raw::c_int; 33usize],
+ pub _bitfield_1: [u8; 2usize],
+ pub __bindgen_padding_0: u16,
+}
+#[test]
+fn bindgen_test_layout_Foo() {
+ assert_eq!(
+ ::std::mem::size_of::<Foo>(),
+ 136usize,
+ concat!("Size of: ", stringify!(Foo))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<Foo>(),
+ 4usize,
+ concat!("Alignment of ", stringify!(Foo))
+ );
+ assert_eq!(
+ unsafe { &(*(0 as *const Foo)).large as *const _ as usize },
+ 0usize,
+ concat!(
+ "Alignment of field: ",
+ stringify!(Foo),
+ "::",
+ stringify!(large)
+ )
+ );
+}
+extern "C" {
+ #[link_name = "_ZN3Foo4typeEv"]
+ pub fn Foo_type(this: *mut Foo) -> ::std::os::raw::c_char;
+}
+extern "C" {
+ #[link_name = "_ZN3Foo9set_type_Ec"]
+ pub fn Foo_set_type_(this: *mut Foo, c: ::std::os::raw::c_char);
+}
+extern "C" {
+ #[link_name = "_ZN3Foo8set_typeEc"]
+ pub fn Foo_set_type(this: *mut Foo, c: ::std::os::raw::c_char);
+}
+impl Clone for Foo {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+impl Default for Foo {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+impl ::std::fmt::Debug for Foo {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ write!(
+ f,
+ "Foo {{ large: [{}], type_ : {:?}, }}",
+ self.large
+ .iter()
+ .enumerate()
+ .map(|(i, v)| format!("{}{:?}", if i > 0 { ", " } else { "" }, v))
+ .collect::<String>(),
+ self.type__bindgen_bitfield()
+ )
+ }
+}
+impl ::std::cmp::PartialEq for Foo {
+ fn eq(&self, other: &Foo) -> bool {
+ &self.large[..] == &other.large[..]
+ && self.type__bindgen_bitfield() == other.type__bindgen_bitfield()
+ }
+}
+impl Foo {
+ #[inline]
+ pub fn type__bindgen_bitfield(&self) -> ::std::os::raw::c_char {
+ let mut unit_field_val: u16 = unsafe { ::std::mem::uninitialized() };
+ unsafe {
+ ::std::ptr::copy_nonoverlapping(
+ &self._bitfield_1 as *const _ as *const u8,
+ &mut unit_field_val as *mut u16 as *mut u8,
+ ::std::mem::size_of::<u16>(),
+ )
+ };
+ let mask = 7u64 as u16;
+ let val = (unit_field_val & mask) >> 0usize;
+ unsafe { ::std::mem::transmute(val as u8) }
+ }
+ #[inline]
+ pub fn set_type__bindgen_bitfield(&mut self, val: ::std::os::raw::c_char) {
+ let mask = 7u64 as u16;
+ let val = val as u8 as u16;
+ let mut unit_field_val: u16 = unsafe { ::std::mem::uninitialized() };
+ unsafe {
+ ::std::ptr::copy_nonoverlapping(
+ &self._bitfield_1 as *const _ as *const u8,
+ &mut unit_field_val as *mut u16 as *mut u8,
+ ::std::mem::size_of::<u16>(),
+ )
+ };
+ unit_field_val &= !mask;
+ unit_field_val |= (val << 0usize) & mask;
+ unsafe {
+ ::std::ptr::copy_nonoverlapping(
+ &unit_field_val as *const _ as *const u8,
+ &mut self._bitfield_1 as *mut _ as *mut u8,
+ ::std::mem::size_of::<u16>(),
+ );
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(type__bindgen_bitfield: ::std::os::raw::c_char) -> u16 {
+ (0 | ((type__bindgen_bitfield as u8 as u16) << 0usize) & (7u64 as u16))
+ }
+ #[inline]
+ pub unsafe fn type_(&mut self) -> ::std::os::raw::c_char {
+ Foo_type(self)
+ }
+ #[inline]
+ pub unsafe fn set_type_(&mut self, c: ::std::os::raw::c_char) {
+ Foo_set_type_(self, c)
+ }
+ #[inline]
+ pub unsafe fn set_type(&mut self, c: ::std::os::raw::c_char) {
+ Foo_set_type(self, c)
+ }
+}
diff --git a/tests/headers/bitfield_large_overflow.hpp b/tests/headers/bitfield_large_overflow.hpp
index 227829b8..9e040ae3 100644
--- a/tests/headers/bitfield_large_overflow.hpp
+++ b/tests/headers/bitfield_large_overflow.hpp
@@ -1,5 +1,3 @@
-// bindgen-flags: --no-layout-tests
-
struct {
unsigned : 632;
} a;
diff --git a/tests/headers/derive-bitfield-method-same-name.hpp b/tests/headers/derive-bitfield-method-same-name.hpp
new file mode 100644
index 00000000..4b7b21e9
--- /dev/null
+++ b/tests/headers/derive-bitfield-method-same-name.hpp
@@ -0,0 +1,13 @@
+// bindgen-flags: --with-derive-partialeq --impl-partialeq --impl-debug
+
+/// Because this struct has an array larger than 32 items
+/// and --with-derive-partialeq --impl-partialeq --impl-debug is provided,
+/// this struct should manually implement `Debug` and `PartialEq`.
+struct Foo {
+ int large[33];
+ char type_ : 3;
+ unsigned : 8;
+ char type();
+ void set_type_(char c);
+ void set_type(char c);
+};