-rw-r--r--  .github/ISSUE_TEMPLATE.md | 63
-rw-r--r--  CONTRIBUTING.md | 32
-rw-r--r--  Cargo.lock | 82
-rw-r--r--  Cargo.toml | 4
-rw-r--r--  build.rs | 1
-rw-r--r--  example-graphviz-ir.png | bin 0 -> 1343520 bytes
-rw-r--r--  src/clang.rs | 4
-rw-r--r--  src/codegen/mod.rs | 178
-rw-r--r--  src/codegen/struct_layout.rs | 308
-rw-r--r--  src/ir/comp.rs | 72
-rw-r--r--  src/ir/dot.rs | 53
-rw-r--r--  src/ir/enum_ty.rs | 17
-rw-r--r--  src/ir/function.rs | 14
-rw-r--r--  src/ir/item.rs | 22
-rw-r--r--  src/ir/item_kind.rs | 28
-rw-r--r--  src/ir/layout.rs | 25
-rw-r--r--  src/ir/mod.rs | 2
-rw-r--r--  src/ir/module.rs | 12
-rw-r--r--  src/ir/named.rs | 471
-rw-r--r--  src/ir/traversal.rs | 8
-rw-r--r--  src/ir/ty.rs | 121
-rw-r--r--  src/ir/var.rs | 18
-rw-r--r--  src/lib.rs | 11
-rw-r--r--  src/options.rs | 9
-rw-r--r--  tests/expectations/tests/anon_enum.rs | 5
-rw-r--r--  tests/expectations/tests/bitfield_align.rs | 114
-rw-r--r--  tests/expectations/tests/bitfield_method_mangling.rs | 48
-rw-r--r--  tests/expectations/tests/issue-410.rs | 3
-rw-r--r--  tests/expectations/tests/jsval_layout_opaque.rs | 32
-rw-r--r--  tests/expectations/tests/layout_align.rs | 39
-rw-r--r--  tests/expectations/tests/layout_array.rs | 19
-rw-r--r--  tests/expectations/tests/layout_eth_conf.rs | 151
-rw-r--r--  tests/expectations/tests/layout_large_align_field.rs | 419
-rw-r--r--  tests/expectations/tests/layout_mbuf.rs | 199
-rw-r--r--  tests/expectations/tests/only_bitfields.rs | 27
-rw-r--r--  tests/expectations/tests/struct_typedef.rs | 61
-rw-r--r--  tests/expectations/tests/struct_typedef_ns.rs | 79
-rw-r--r--  tests/expectations/tests/struct_with_bitfields.rs | 100
-rw-r--r--  tests/expectations/tests/union_fields.rs | 35
-rw-r--r--  tests/expectations/tests/union_with_anon_struct_bitfield.rs | 29
-rw-r--r--  tests/expectations/tests/unknown_attr.rs | 25
-rw-r--r--  tests/expectations/tests/weird_bitfields.rs | 94
-rw-r--r--  tests/headers/bitfield_align.h | 41
-rw-r--r--  tests/headers/layout_large_align_field.h | 97
-rw-r--r--  tests/headers/struct_typedef.h | 15
-rw-r--r--  tests/headers/struct_typedef_ns.hpp | 21
-rw-r--r--  tests/headers/unknown_attr.h | 2
47 files changed, 2142 insertions, 1068 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000..68dfb68e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,63 @@
+### Input C/C++ Header
+
+```C++
+// Insert your (minimal) C/C++ header here
+```
+
+### Bindgen Invocation
+
+<!--
+
+Place either the `bindgen::Builder` **OR** the command line flags used here.
+
+```Rust
+bindgen::Builder::default()
+ .header("input.h")
+ .generate()
+ .unwrap()
+```
+
+OR
+
+```
+$ bindgen input.h --whatever --flags
+```
+
+-->
+
+### Actual Results
+
+<!--
+
+```
+Insert panic message and backtrace here.
+```
+
+and/or
+
+```rust
+// Insert the (incorrect) generated bindings here
+```
+
+and/or
+
+```
+Insert compilation errors generated when compiling the bindings with rustc here
+```
+
+-->
+
+### Expected Results
+
+<!--
+Replace this with a description of what you expected instead of the actual
+results. The more precise, the better! For example, if a struct in the generated
+bindings is missing a field that exists in the C/C++ struct, note that here.
+-->
+
+### `RUST_LOG=bindgen` Output
+
+```
+Insert debug logging when running bindgen with the RUST_LOG=bindgen environment
+variable set.
+```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cbaaf3c4..df32998b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,6 +16,7 @@ out to us in a GitHub issue, or stop by
- [Running All Tests](#running-all-tests)
- [Authoring New Tests](#authoring-new-tests)
- [Automatic code formatting](#automatic-code-formatting)
+- [Generating Graphviz Dot Files](#generating-graphviz-dot-files)
- [Debug Logging](#debug-logging)
- [Using `creduce` to Minimize Test Cases](#using-creduce-to-minimize-test-cases)
- [Isolating Your Test Case](#isolating-your-test-case)
@@ -135,6 +136,37 @@ $ cargo fmt
The code style is described in the `rustfmt.toml` file in top level of the repo.
+## Generating Graphviz Dot Files
+
+We can generate [Graphviz](http://graphviz.org/pdf/dotguide.pdf) dot files from
+our internal representation of a C/C++ input header, and then create a PNG or
+PDF from them with Graphviz's `dot` program. This is very useful when debugging
+bindgen!
+
+First, make sure you have Graphviz and `dot` installed:
+
+```
+$ brew install graphviz # OS X
+$ sudo dnf install graphviz # Fedora
+$ # Etc...
+```
+
+Then, use the `--emit-ir-graphviz` flag to generate a `dot` file from our IR:
+
+```
+$ cargo run -- example.hpp --emit-ir-graphviz output.dot
+```
+
+Finally, convert the `dot` file to an image:
+
+```
+$ dot -Tpng output.dot -o output.png
+```
+
+The final result will look something like this:
+
+[![An example graphviz rendering of our IR](./example-graphviz-ir.png)](./example-graphviz-ir.png)
+
## Debug Logging
To help debug what `bindgen` is doing, you can define the environment variable
diff --git a/Cargo.lock b/Cargo.lock
index 09583435..089e0f24 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1,12 +1,12 @@
[root]
name = "bindgen"
-version = "0.21.3"
+version = "0.22.0"
dependencies = [
"aster 0.38.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cexpr 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "clang-sys 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "clap 2.19.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clang-sys 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.20.4 (registry+https://github.com/rust-lang/crates.io-index)",
"diff 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -21,7 +21,7 @@ dependencies = [
[[package]]
name = "aho-corasick"
-version = "0.6.1"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -60,27 +60,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "clang-sys"
-version = "0.12.0"
+version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)",
- "libloading 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libloading 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "clap"
-version = "2.19.3"
+version = "2.20.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)",
- "strsim 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "term_size 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-segmentation 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "term_size 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"vec_map 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -92,7 +92,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "dtoa"
-version = "0.2.2"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -111,7 +111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "itoa"
-version = "0.1.1"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -135,12 +135,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libloading"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "target_build_utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "target_build_utils 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -234,7 +234,7 @@ name = "regex"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "aho-corasick 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -253,18 +253,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde"
-version = "0.8.23"
+version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde_json"
-version = "0.8.6"
+version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "dtoa 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "itoa 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -279,7 +279,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "strsim"
-version = "0.5.2"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -300,7 +300,7 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_pos 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -323,23 +323,23 @@ dependencies = [
"rustc-serialize 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_errors 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_pos 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "target_build_utils"
-version = "0.1.2"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
"phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "term"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -348,7 +348,7 @@ dependencies = [
[[package]]
name = "term_size"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -376,7 +376,7 @@ dependencies = [
[[package]]
name = "unicode-segmentation"
-version = "0.1.3"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -423,23 +423,23 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
-"checksum aho-corasick 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4f660b942762979b56c9f07b4b36bb559776fbad102f05d6771e1b629e8fd5bf"
+"checksum aho-corasick 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0638fd549427caa90c499814196d1b9e3725eb4d15d7339d6de073a680ed0ca2"
"checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6"
"checksum aster 0.38.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2c9b49e42a449c0b79d8acb91db37621de0978064dca7d3288ddcf030123e5b3"
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum cexpr 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "393a5f0088efbe41f9d1fcd062f24e83c278608420e62109feb2c8abee07de7d"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
-"checksum clang-sys 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "822ea22bbbef9f5934e9477860545fb0311a1759e43a276de42e2856c605aa2b"
-"checksum clap 2.19.3 (registry+https://github.com/rust-lang/crates.io-index)" = "95b78f3fe0fc94c13c731714363260e04b557a637166f33a4570d3189d642374"
+"checksum clang-sys 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f98f0715ff67f27ca6a2f8f0ffc2a56f8edbc7acd57489c29eadc3a15c4eafe"
+"checksum clap 2.20.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a60af5cb867dd4ee2378398acde80c73b466b58a963f598061ce7e394800998d"
"checksum diff 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e48977eec6d3b7707462c2dc2e1363ad91b5dd822cf942537ccdc2085dc87587"
-"checksum dtoa 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0dd841b58510c9618291ffa448da2e4e0f699d984d436122372f446dae62263d"
+"checksum dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90"
"checksum env_logger 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "99971fb1b635fe7a0ee3c4d065845bb93cca80a23b5613b5613391ece5de4144"
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
-"checksum itoa 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae3088ea4baeceb0284ee9eea42f591226e6beaecf65373e41b38d95a1b8e7a1"
+"checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6abe0ee2e758cd6bc8a2cd56726359007748fbf4128da998b65d0b70f881e19b"
"checksum libc 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)" = "684f330624d8c3784fb9558ca46c4ce488073a8d22450415c5eb4f4cfb0d11b5"
-"checksum libloading 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "84816a8c6ed8163dfe0dbdd2b09d35c6723270ea77a4c7afa4bedf038a36cb99"
+"checksum libloading 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fd1835a714c1f67ba073a493493c23686a480e2614e208c921834808b1f19d8f"
"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
"checksum nom 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b8c256fd9471521bcb84c3cdba98921497f1a331cbc15b8030fc63b82050ce"
@@ -454,21 +454,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01"
"checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457"
"checksum rustc-serialize 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "237546c689f20bb44980270c73c3b9edd0891c1be49cc1274406134a66d3957b"
-"checksum serde 0.8.23 (registry+https://github.com/rust-lang/crates.io-index)" = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8"
-"checksum serde_json 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "67f7d2e9edc3523a9c8ec8cd6ec481b3a27810aafee3e625d311febd3e656b4c"
+"checksum serde 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1e0ed773960f90a78567fcfbe935284adf50c5d7cf119aa2cf43bb0b4afa69bb"
+"checksum serde_json 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e095e4e94e7382b76f48e93bd845ffddda62df8dfd4c163b1bfa93d40e22e13a"
"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
"checksum siphasher 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2ffc669b726f2bc9a3bcff66e5e23b56ba6bf70e22a34c3d7b6d0b3450b65b84"
-"checksum strsim 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "67f84c44fbb2f91db7fef94554e6b2ac05909c9c0b0bc23bb98d3a1aebfe7f7c"
+"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
"checksum syntex 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb3f52553a966675982404dc34028291b347e0c9a9c0b0b34f2da6be8a0443f8"
"checksum syntex_errors 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dee2f6e49c075f71332bb775219d5982bee6732d26227fa1ae1b53cdb12f5cc5"
"checksum syntex_pos 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8df3921c7945dfb9ffc53aa35adb2cf4313b5ab5f079c3619b3d4eb82a0efc2b"
"checksum syntex_syntax 0.54.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dc960085bae44591e22d01f6c0e82a8aec832f8659aca556cdf8ecbdac2bb47b"
-"checksum target_build_utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "54c550e226618cd35334b75e92bfa5437c61474bdb75c38bf330ab5a8037b77c"
-"checksum term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3deff8a2b3b6607d6d7cc32ac25c0b33709453ca9cceac006caac51e963cf94a"
-"checksum term_size 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f7f5f3f71b0040cecc71af239414c23fd3c73570f5ff54cf50e03cef637f2a0"
+"checksum target_build_utils 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f42dc058080c19c6a58bdd1bf962904ee4f5ef1fe2a81b529f31dacc750c679f"
+"checksum term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d168af3930b369cfe245132550579d47dfd873d69470755a19c2c6568dbbd989"
+"checksum term_size 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "71662702fe5cd2cf95edd4ad655eea42f24a87a0e44059cbaa4e55260b7bc331"
"checksum thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4437c97558c70d129e40629a5b385b3fb1ffac301e63941335e4d354081ec14a"
"checksum thread_local 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7793b722f0f77ce716e7f1acf416359ca32ff24d04ffbac4269f44a4a83be05d"
-"checksum unicode-segmentation 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c3bc443ded17b11305ffffe6b37e2076f328a5a8cb6aa877b1b98f77699e98b5"
+"checksum unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18127285758f0e2c6cf325bb3f3d138a12fee27de4f23e146cd6a179f26c2cf3"
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc"
"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91"
diff --git a/Cargo.toml b/Cargo.toml
index a5472af3..a9521be5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@ name = "bindgen"
readme = "README.md"
repository = "https://github.com/servo/rust-bindgen"
documentation = "https://docs.rs/bindgen"
-version = "0.21.3"
+version = "0.22.0"
build = "build.rs"
exclude = ["tests/headers", "tests/expectations", "bindgen-integration", "ci"]
@@ -39,7 +39,7 @@ quasi_codegen = "0.29"
[dependencies]
cexpr = "0.2"
cfg-if = "0.1.0"
-clang-sys = { version = "0.12", features = ["runtime", "clang_3_9"] }
+clang-sys = { version = "0.14", features = ["runtime", "clang_3_9"] }
lazy_static = "0.2.1"
rustc-serialize = "0.3.19"
syntex_syntax = "0.54"
diff --git a/build.rs b/build.rs
index c3633cd4..4e99c779 100644
--- a/build.rs
+++ b/build.rs
@@ -11,6 +11,7 @@ mod codegen {
quasi_codegen::expand(&src, &dst).unwrap();
println!("cargo:rerun-if-changed=src/codegen/mod.rs");
println!("cargo:rerun-if-changed=src/codegen/helpers.rs");
+ println!("cargo:rerun-if-changed=src/codegen/struct_layout.rs");
}
}
diff --git a/example-graphviz-ir.png b/example-graphviz-ir.png
new file mode 100644
index 00000000..1e554a83
--- /dev/null
+++ b/example-graphviz-ir.png
Binary files differ
diff --git a/src/clang.rs b/src/clang.rs
index 4b3ae696..a3919579 100644
--- a/src/clang.rs
+++ b/src/clang.rs
@@ -662,10 +662,6 @@ pub enum LayoutError {
impl ::std::convert::From<i32> for LayoutError {
fn from(val: i32) -> Self {
use self::LayoutError::*;
- let val = match CXTypeLayoutError::from_raw(val) {
- Some(val) => val,
- None => return Unknown,
- };
match val {
CXTypeLayoutError_Invalid => Invalid,
diff --git a/src/codegen/mod.rs b/src/codegen/mod.rs
index 92e34874..aab59946 100644
--- a/src/codegen/mod.rs
+++ b/src/codegen/mod.rs
@@ -2,13 +2,15 @@ mod helpers;
mod struct_layout;
use self::helpers::{BlobTyBuilder, attributes};
-use self::struct_layout::StructLayoutTracker;
+use self::struct_layout::{align_to, bytes_from_bits};
+use self::struct_layout::{bytes_from_bits_pow2, StructLayoutTracker};
use aster;
use ir::annotations::FieldAccessorKind;
use ir::comp::{Base, CompInfo, CompKind, Field, Method, MethodKind};
use ir::context::{BindgenContext, ItemId};
use ir::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use ir::dot;
use ir::enum_ty::{Enum, EnumVariant, EnumVariantValue};
use ir::function::{Function, FunctionSig};
use ir::int::IntKind;
@@ -363,8 +365,7 @@ impl CodeGenerator for Module {
}
if item.id() == ctx.root_module() {
- let saw_union = result.saw_union;
- if saw_union && !ctx.options().unstable_rust {
+ if result.saw_union && !ctx.options().unstable_rust {
utils::prepend_union_types(ctx, &mut *result);
}
if result.saw_incomplete_array {
@@ -717,12 +718,12 @@ impl<'a> ItemToRustTy for Vtable<'a> {
}
struct Bitfield<'a> {
- index: usize,
+ index: &'a mut usize,
fields: Vec<&'a Field>,
}
impl<'a> Bitfield<'a> {
- fn new(index: usize, fields: Vec<&'a Field>) -> Self {
+ fn new(index: &'a mut usize, fields: Vec<&'a Field>) -> Self {
Bitfield {
index: index,
fields: fields,
@@ -732,89 +733,96 @@ impl<'a> Bitfield<'a> {
fn codegen_fields(self,
ctx: &BindgenContext,
fields: &mut Vec<ast::StructField>,
- methods: &mut Vec<ast::ImplItem>)
+ _methods: &mut Vec<ast::ImplItem>)
-> Layout {
use aster::struct_field::StructFieldBuilder;
- let mut total_width = self.fields
- .iter()
- .fold(0u32, |acc, f| acc + f.bitfield().unwrap());
-
- if !total_width.is_power_of_two() || total_width < 8 {
- total_width = cmp::max(8, total_width.next_power_of_two());
- }
- debug_assert_eq!(total_width % 8, 0);
- let total_width_in_bytes = total_width as usize / 8;
-
- let bitfield_layout = Layout::new(total_width_in_bytes,
- total_width_in_bytes);
- let bitfield_type = BlobTyBuilder::new(bitfield_layout).build();
- let field_name = format!("_bitfield_{}", self.index);
- let field_ident = ctx.ext_cx().ident_of(&field_name);
- let field = StructFieldBuilder::named(&field_name)
- .pub_()
- .build_ty(bitfield_type.clone());
- fields.push(field);
+ // NOTE: What follows is reverse-engineered from LLVM's
+ // lib/AST/RecordLayoutBuilder.cpp
+ //
+ // FIXME(emilio): There are some differences between Microsoft and the
+ // Itanium ABI, but we'll ignore those and stick to Itanium for now.
+ //
+ // Also, we need to handle packed bitfields and stuff.
+ // TODO(emilio): Take into account C++'s wide bitfields, and
+ // packing, sigh.
+ let mut total_size_in_bits = 0;
+ let mut max_align = 0;
+ let mut unfilled_bits_in_last_unit = 0;
+ let mut field_size_in_bits = 0;
+ *self.index += 1;
+ let mut last_field_name = format!("_bitfield_{}", self.index);
+ let mut last_field_align = 0;
- let mut offset = 0;
for field in self.fields {
let width = field.bitfield().unwrap();
- let field_name = field.name()
- .map(ToOwned::to_owned)
- .unwrap_or_else(|| format!("at_offset_{}", offset));
-
let field_item = ctx.resolve_item(field.ty());
let field_ty_layout = field_item.kind()
.expect_type()
.layout(ctx)
.expect("Bitfield without layout? Gah!");
- let field_type = field_item.to_rust_ty(ctx);
- let int_type = BlobTyBuilder::new(field_ty_layout).build();
+ let field_align = field_ty_layout.align;
- let getter_name = ctx.rust_ident(&field_name);
- let setter_name = ctx.ext_cx()
- .ident_of(&format!("set_{}", &field_name));
- let mask = ((1usize << width) - 1) << offset;
- let prefix = ctx.trait_prefix();
- // The transmute is unfortunate, but it's needed for enums in
- // bitfields.
- let item = quote_item!(ctx.ext_cx(),
- impl X {
- #[inline]
- pub fn $getter_name(&self) -> $field_type {
- unsafe {
- ::$prefix::mem::transmute(
- (
- (self.$field_ident &
- ($mask as $bitfield_type))
- >> $offset
- ) as $int_type
- )
- }
- }
+ if field_size_in_bits != 0 &&
+ (width == 0 || width as usize > unfilled_bits_in_last_unit) {
+ field_size_in_bits = align_to(field_size_in_bits, field_align);
+ // Push the new field.
+ let ty =
+ BlobTyBuilder::new(Layout::new(bytes_from_bits_pow2(field_size_in_bits),
+ bytes_from_bits_pow2(last_field_align)))
+ .build();
- #[inline]
- pub fn $setter_name(&mut self, val: $field_type) {
- self.$field_ident &= !($mask as $bitfield_type);
- self.$field_ident |=
- (val as $int_type as $bitfield_type << $offset) &
- ($mask as $bitfield_type);
- }
- }
- )
- .unwrap();
+ let field = StructFieldBuilder::named(&last_field_name)
+ .pub_()
+ .build_ty(ty);
+ fields.push(field);
- let items = match item.unwrap().node {
- ast::ItemKind::Impl(_, _, _, _, _, items) => items,
- _ => unreachable!(),
- };
+ // TODO(emilio): dedup this.
+ *self.index += 1;
+ last_field_name = format!("_bitfield_{}", self.index);
+
+                // Now reset the size and the rest of the state.
+ // unfilled_bits_in_last_unit = 0;
+ field_size_in_bits = 0;
+ last_field_align = 0;
+ }
- methods.extend(items.into_iter());
- offset += width;
+            // TODO(emilio): Create the accessors. The problem here is that we
+            // still don't know what the final alignment of the bitfield unit
+            // will be, or whether we need to index into it, so we don't know
+            // which integer type we need.
+            //
+            // We could push them to a Vec or something, but given how buggy
+            // they were, maybe it's not a great idea?
+ field_size_in_bits += width as usize;
+ total_size_in_bits += width as usize;
+
+
+ let data_size = align_to(field_size_in_bits, field_align * 8);
+
+ max_align = cmp::max(max_align, field_align);
+
+ // NB: The width here is completely, absolutely intentional.
+ last_field_align = cmp::max(last_field_align, width as usize);
+
+ unfilled_bits_in_last_unit = data_size - field_size_in_bits;
}
- bitfield_layout
+ if field_size_in_bits != 0 {
+ // Push the last field.
+ let ty =
+ BlobTyBuilder::new(Layout::new(bytes_from_bits_pow2(field_size_in_bits),
+ bytes_from_bits_pow2(last_field_align)))
+ .build();
+
+ let field = StructFieldBuilder::named(&last_field_name)
+ .pub_()
+ .build_ty(ty);
+ fields.push(field);
+ }
+
+ Layout::new(bytes_from_bits(total_size_in_bits), max_align)
}
}
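To make the storage-unit allocation above concrete, here is a rough, hand-worked sketch of what `codegen_fields` produces for a small run of bitfields. The input header and the exact generated names are hypothetical (and, per the TODO above, no accessors are generated for these units yet):

```rust
// Hypothetical C input:
//
//     struct A {
//         unsigned char x : 3;
//         unsigned char y : 4;
//         unsigned int  z : 9;
//     };
//
// `x` and `y` fit in the first storage unit (7 bits used, 1 bit unfilled).
// `z` needs 9 bits, more than the 1 bit left over, so the first unit is
// flushed as a one-byte blob and `z` starts a new unit whose size is
// rounded up to a power of two. The generated fields would look roughly
// like this:
#[repr(C)]
pub struct A {
    pub _bitfield_1: u8,  // x (3 bits) + y (4 bits), 1 bit unused
    pub _bitfield_2: u16, // z (9 bits), rounded up to 16 bits
}
```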
@@ -1062,12 +1070,10 @@ impl CodeGenerator for CompInfo {
debug_assert!(!current_bitfield_fields.is_empty());
let bitfield_fields =
mem::replace(&mut current_bitfield_fields, vec![]);
- bitfield_count += 1;
- let bitfield_layout = Bitfield::new(bitfield_count,
+ let bitfield_layout = Bitfield::new(&mut bitfield_count,
bitfield_fields)
.codegen_fields(ctx, &mut fields, &mut methods);
-
- struct_layout.saw_bitfield(bitfield_layout);
+ struct_layout.saw_bitfield_batch(bitfield_layout);
current_bitfield_width = None;
current_bitfield_layout = None;
@@ -1099,8 +1105,7 @@ impl CodeGenerator for CompInfo {
} else {
quote_ty!(ctx.ext_cx(), __BindgenUnionField<$ty>)
}
- } else if let Some(item) =
- field_ty.is_incomplete_array(ctx) {
+ } else if let Some(item) = field_ty.is_incomplete_array(ctx) {
result.saw_incomplete_array();
let inner = item.to_rust_ty(ctx);
@@ -1224,12 +1229,10 @@ impl CodeGenerator for CompInfo {
debug_assert!(!current_bitfield_fields.is_empty());
let bitfield_fields = mem::replace(&mut current_bitfield_fields,
vec![]);
- bitfield_count += 1;
- let bitfield_layout = Bitfield::new(bitfield_count,
+ let bitfield_layout = Bitfield::new(&mut bitfield_count,
bitfield_fields)
.codegen_fields(ctx, &mut fields, &mut methods);
-
- struct_layout.saw_bitfield(bitfield_layout);
+ struct_layout.saw_bitfield_batch(bitfield_layout);
}
debug_assert!(current_bitfield_fields.is_empty());
@@ -1268,7 +1271,7 @@ impl CodeGenerator for CompInfo {
}
} else if !is_union && !self.is_unsized(ctx) {
if let Some(padding_field) =
- layout.and_then(|layout| struct_layout.pad_struct(layout)) {
+ layout.and_then(|layout| struct_layout.pad_struct(&canonical_name, layout)) {
fields.push(padding_field);
}
@@ -2174,8 +2177,8 @@ impl ToRustTy for Type {
quote_ty!(ctx.ext_cx(), ::$prefix::option::Option<$ty>)
}
TypeKind::Array(item, len) => {
- let inner = item.to_rust_ty(ctx);
- aster::ty::TyBuilder::new().array(len).build(inner)
+ let ty = item.to_rust_ty(ctx);
+ aster::ty::TyBuilder::new().array(len).build(ty)
}
TypeKind::Enum(..) => {
let path = item.namespace_aware_canonical_path(ctx);
@@ -2190,7 +2193,7 @@ impl ToRustTy for Type {
.map(|arg| arg.to_rust_ty(ctx))
.collect::<Vec<_>>();
- path.segments.last_mut().unwrap().parameters = if
+ path.segments.last_mut().unwrap().parameters = if
template_args.is_empty() {
None
} else {
@@ -2499,6 +2502,13 @@ pub fn codegen(context: &mut BindgenContext) -> Vec<P<ast::Item>> {
}
}
+ if let Some(path) = context.options().emit_ir_graphviz.as_ref() {
+ match dot::write_dot_file(context, path) {
+ Ok(()) => info!("Your dot file was generated successfully into: {}", path),
+ Err(e) => error!("{}", e),
+ }
+ }
+
context.resolve_item(context.root_module())
.codegen(context, &mut result, &whitelisted_items, &());
diff --git a/src/codegen/struct_layout.rs b/src/codegen/struct_layout.rs
index f8a88bc2..724bef98 100644
--- a/src/codegen/struct_layout.rs
+++ b/src/codegen/struct_layout.rs
@@ -7,7 +7,7 @@ use aster::struct_field::StructFieldBuilder;
use ir::comp::CompInfo;
use ir::context::BindgenContext;
use ir::layout::Layout;
-use ir::ty::Type;
+use ir::ty::{Type, TypeKind};
use std::cmp;
use std::mem;
@@ -21,6 +21,84 @@ pub struct StructLayoutTracker<'a, 'ctx: 'a> {
padding_count: usize,
latest_field_layout: Option<Layout>,
max_field_align: usize,
+ last_field_was_bitfield: bool,
+}
+
+/// Returns a size aligned to a given value.
+pub fn align_to(size: usize, align: usize) -> usize {
+ if align == 0 {
+ return size;
+ }
+
+ let rem = size % align;
+ if rem == 0 {
+ return size;
+ }
+
+ size + align - rem
+}
+
+/// Returns the number of bytes needed to hold the given number of bits,
+/// rounding up.
+pub fn bytes_from_bits(n: usize) -> usize {
+ if n % 8 == 0 {
+ return n / 8;
+ }
+
+ n / 8 + 1
+}
+
+/// Returns the smallest power-of-two byte count that can hold `n` bits.
+pub fn bytes_from_bits_pow2(mut n: usize) -> usize {
+ if n == 0 {
+ return 0;
+ }
+
+ if n <= 8 {
+ return 1;
+ }
+
+ if !n.is_power_of_two() {
+ n = n.next_power_of_two();
+ }
+
+ n / 8
+}
+
+#[test]
+fn test_align_to() {
+ assert_eq!(align_to(1, 1), 1);
+ assert_eq!(align_to(1, 2), 2);
+ assert_eq!(align_to(1, 4), 4);
+ assert_eq!(align_to(5, 1), 5);
+ assert_eq!(align_to(17, 4), 20);
+}
+
+#[test]
+fn test_bytes_from_bits_pow2() {
+ assert_eq!(bytes_from_bits_pow2(0), 0);
+ for i in 1..9 {
+ assert_eq!(bytes_from_bits_pow2(i), 1);
+ }
+ for i in 9..17 {
+ assert_eq!(bytes_from_bits_pow2(i), 2);
+ }
+ for i in 17..33 {
+ assert_eq!(bytes_from_bits_pow2(i), 4);
+ }
+}
+
+#[test]
+fn test_bytes_from_bits() {
+ assert_eq!(bytes_from_bits(0), 0);
+ for i in 1..9 {
+ assert_eq!(bytes_from_bits(i), 1);
+ }
+ for i in 9..17 {
+ assert_eq!(bytes_from_bits(i), 2);
+ }
+ for i in 17..25 {
+ assert_eq!(bytes_from_bits(i), 3);
+ }
}
impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
@@ -32,6 +110,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
padding_count: 0,
latest_field_layout: None,
max_field_align: 0,
+ last_field_was_bitfield: false,
}
}
@@ -43,112 +122,151 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
pub fn saw_base(&mut self, base_ty: &Type) {
- self.align_to_latest_field();
-
if let Some(layout) = base_ty.layout(self.ctx) {
+ self.align_to_latest_field(layout);
+
self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
self.max_field_align = cmp::max(self.max_field_align, layout.align);
}
}
- pub fn saw_bitfield(&mut self, layout: Layout) {
- self.align_to_latest_field();
+ pub fn saw_bitfield_batch(&mut self, layout: Layout) {
+ self.align_to_latest_field(layout);
+
+ self.latest_offset += layout.size;
+
+ debug!("Offset: <bitfield>: {} -> {}",
+ self.latest_offset - layout.size,
+ self.latest_offset);
- self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
- self.max_field_align = cmp::max(self.max_field_align, layout.align);
+ self.last_field_was_bitfield = true;
+ // NB: We intentionally don't update the max_field_align here, since our
+ // bitfields code doesn't necessarily guarantee it, so we need to
+ // actually generate the dummy alignment.
}
pub fn saw_union(&mut self, layout: Layout) {
- self.align_to_latest_field();
+ self.align_to_latest_field(layout);
self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
self.max_field_align = cmp::max(self.max_field_align, layout.align);
}
+ /// Add a padding field if necessary for a given new field _before_ adding
+ /// that field.
pub fn pad_field(&mut self,
field_name: &str,
field_ty: &Type,
field_offset: Option<usize>)
-> Option<ast::StructField> {
- field_ty.layout(self.ctx).and_then(|field_layout| {
- self.align_to_latest_field();
+ let mut field_layout = match field_ty.layout(self.ctx) {
+ Some(l) => l,
+ None => return None,
+ };
+
+ if let TypeKind::Array(inner, len) = *field_ty.canonical_type(self.ctx).kind() {
+        // FIXME(emilio): As an _ultra_ hack, we correct the layout returned
+        // for arrays of structs that have a bigger alignment than what we
+        // can support.
+ //
+ // This means that the structs in the array are super-unsafe to
+ // access, since they won't be properly aligned, but *shrug*.
+ if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) {
+ if layout.align > mem::size_of::<*mut ()>() {
+ field_layout.size =
+ align_to(layout.size, layout.align) * len;
+ field_layout.align = mem::size_of::<*mut ()>();
+ }
+ }
+ }
- let padding_layout = if self.comp.packed() {
- None
- } else {
- let calculated_layout = field_ty.as_comp()
- .and_then(|comp| comp.calc_layout(self.ctx))
- .unwrap_or(field_layout);
-
- let align = cmp::min(calculated_layout.align, mem::size_of::<*mut ()>());
-
- let (padding_bytes, need_padding) = match field_offset {
- Some(offset) if offset / 8 > self.latest_offset => {
- (offset / 8 - self.latest_offset, true)
- }
- _ if field_layout.align != 0 => {
- (self.padding_bytes(field_layout), (self.latest_offset % field_layout.align) != 0)
- }
- _ => {
- (0, false)
- }
- };
-
- self.latest_offset += padding_bytes;
-
- debug!("align field {} to {}/{} with {} padding bytes {:?}, calculated {:?}",
- field_name,
- self.latest_offset,
- field_offset.unwrap_or(0) / 8,
- padding_bytes,
- field_layout,
- calculated_layout);
-
- if need_padding &&
- (padding_bytes > calculated_layout.align ||
- field_layout.align > mem::size_of::<*mut ()>()) {
- Some(Layout::new(padding_bytes, align))
- } else {
- None
+ let will_merge_with_bitfield = self.align_to_latest_field(field_layout);
+
+ let padding_layout = if self.comp.packed() {
+ None
+ } else {
+ let padding_bytes = match field_offset {
+ Some(offset) if offset / 8 > self.latest_offset => {
+ offset / 8 - self.latest_offset
}
+ _ if will_merge_with_bitfield || field_layout.align == 0 => 0,
+ _ => self.padding_bytes(field_layout),
};
- self.latest_offset += field_ty.calc_size(self.ctx).unwrap_or(field_layout.size);
+ // Otherwise the padding is useless.
+ let need_padding = padding_bytes >= field_layout.align || field_layout.align > mem::size_of::<*mut ()>();
- self.latest_field_layout = Some(field_layout);
- self.max_field_align = cmp::max(self.max_field_align, field_layout.align);
+ self.latest_offset += padding_bytes;
- padding_layout.map(|layout| self.padding_field(layout))
- })
- }
+ debug!("Offset: <padding>: {} -> {}",
+ self.latest_offset - padding_bytes,
+ self.latest_offset);
- pub fn pad_struct(&mut self, layout: Layout) -> Option<ast::StructField> {
- if layout.size < self.latest_offset {
- warn!("calculate struct layout incorrect, too more {} bytes",
- self.latest_offset - layout.size);
+ debug!("align field {} to {}/{} with {} padding bytes {:?}",
+ field_name,
+ self.latest_offset,
+ field_offset.unwrap_or(0) / 8,
+ padding_bytes,
+ field_layout);
- None
- } else {
- let padding_bytes = layout.size - self.latest_offset;
- let struct_align = cmp::min(layout.align,
- mem::size_of::<*mut ()>());
-
- if padding_bytes > struct_align ||
- (layout.align > mem::size_of::<*mut ()>() && padding_bytes > 0) {
- let padding_align = if self.comp.packed() {
- 1
- } else {
- cmp::min(1 << padding_bytes.trailing_zeros(),
- mem::size_of::<*mut ()>())
- };
-
- Some(self.padding_field(Layout::new(padding_bytes, padding_align)))
+ if need_padding && padding_bytes != 0 {
+ Some(Layout::new(padding_bytes, cmp::min(field_layout.align, mem::size_of::<*mut ()>())))
} else {
None
}
+ };
+
+ self.latest_offset += field_layout.size;
+ self.latest_field_layout = Some(field_layout);
+ self.max_field_align = cmp::max(self.max_field_align, field_layout.align);
+ self.last_field_was_bitfield = false;
+
+ debug!("Offset: {}: {} -> {}",
+ field_name,
+ self.latest_offset - field_layout.size,
+ self.latest_offset);
+
+ padding_layout.map(|layout| self.padding_field(layout))
+ }
+
+ pub fn pad_struct(&mut self, name: &str, layout: Layout) -> Option<ast::StructField> {
+ if layout.size < self.latest_offset {
+            error!("Calculated wrong layout for {}, {} bytes too many",
+ name, self.latest_offset - layout.size);
+ return None
+ }
+
+ let padding_bytes = layout.size - self.latest_offset;
+
+ // We always pad to get to the correct size if the struct is one of
+ // those we can't align properly.
+ //
+ // Note that if the last field we saw was a bitfield, we may need to pad
+ // regardless, because bitfields don't respect alignment as strictly as
+ // other fields.
+ if padding_bytes > 0 &&
+ (padding_bytes >= layout.align ||
+ (self.last_field_was_bitfield &&
+ padding_bytes >= self.latest_field_layout.unwrap().align) ||
+ layout.align > mem::size_of::<*mut ()>()) {
+ let layout = if self.comp.packed() {
+ Layout::new(padding_bytes, 1)
+ } else if self.last_field_was_bitfield ||
+ layout.align > mem::size_of::<*mut ()>() {
+ // We've already given up on alignment here.
+ Layout::for_size(padding_bytes)
+ } else {
+ Layout::new(padding_bytes, layout.align)
+ };
+
+ debug!("pad bytes to struct {}, {:?}", name, layout);
+
+ Some(self.padding_field(layout))
+ } else {
+ None
}
}
@@ -166,15 +284,7 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
}
fn padding_bytes(&self, layout: Layout) -> usize {
- if layout.align == 0 {
- warn!("try to padding bytes without layout");
-
- 0
- } else if self.latest_offset % layout.align == 0 {
- 0
- } else {
- layout.align - (self.latest_offset % layout.align)
- }
+ align_to(self.latest_offset, layout.align) - self.latest_offset
}
fn padding_field(&mut self, layout: Layout) -> ast::StructField {
@@ -190,11 +300,37 @@ impl<'a, 'ctx> StructLayoutTracker<'a, 'ctx> {
StructFieldBuilder::named(padding_field_name).pub_().build_ty(ty)
}
- fn align_to_latest_field(&mut self) {
+ /// Returns whether the new field is known to merge with a bitfield.
+ ///
+ /// This is just to avoid doing the same check also in pad_field.
+ fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
if self.comp.packed() {
- // skip to align field when packed
- } else if let Some(layout) = self.latest_field_layout {
- self.latest_offset += self.padding_bytes(layout);
+ // Skip to align fields when packed.
+ return false;
+ }
+
+ let layout = match self.latest_field_layout {
+ Some(l) => l,
+ None => return false,
+ };
+
+        // If the last field was a bitfield, we may or may not need to align,
+        // depending on the current field's alignment and on the bitfield's
+        // size and alignment.
+ debug!("align_to_bitfield? {}: {:?} {:?}", self.last_field_was_bitfield,
+ layout, new_field_layout);
+
+ if self.last_field_was_bitfield &&
+ new_field_layout.align <= layout.size % layout.align &&
+ new_field_layout.size <= layout.size % layout.align {
+ // The new field will be coalesced into some of the remaining bits.
+ //
+ // FIXME(emilio): I think this may not catch everything?
+ debug!("Will merge with bitfield");
+ return true;
}
+
+ // Else, just align the obvious way.
+ self.latest_offset += self.padding_bytes(layout);
+ return false;
}
}
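The array-of-over-aligned-structs workaround in `pad_field` is easiest to follow with numbers. This self-contained sketch uses hypothetical sizes and copies `align_to` from above; it shows how a field of type `T[4]`, where `T` has size 24 and alignment 16 on a 64-bit target, gets its layout rewritten so the emitted blob has the right total size even though its alignment is clamped to pointer width:

```rust
use std::mem;

fn align_to(size: usize, align: usize) -> usize {
    if align == 0 {
        return size;
    }
    let rem = size % align;
    if rem == 0 {
        return size;
    }
    size + align - rem
}

fn main() {
    let ptr_size = mem::size_of::<*mut ()>(); // 8 on a 64-bit target

    // Hypothetical array element: size 24, alignment 16 (say it contains a
    // 16-byte-aligned SIMD member), in an array of length 4.
    let (elem_size, elem_align, len) = (24usize, 16usize, 4usize);
    assert!(elem_align > ptr_size); // this is what triggers the workaround

    // Each element is rounded up to its real alignment before multiplying
    // by the array length, and the field's alignment is clamped.
    let corrected_size = align_to(elem_size, elem_align) * len;
    let corrected_align = ptr_size;

    assert_eq!(corrected_size, 128); // 32 * 4, not 24 * 4 = 96
    assert_eq!(corrected_align, 8);
}
```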
diff --git a/src/ir/comp.rs b/src/ir/comp.rs
index fff4655c..ce6ec25d 100644
--- a/src/ir/comp.rs
+++ b/src/ir/comp.rs
@@ -408,73 +408,25 @@ impl CompInfo {
/// members. This is not ideal, but clang fails to report the size for these
/// kind of unions, see test/headers/template_union.hpp
pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
+ use std::cmp;
// We can't do better than clang here, sorry.
if self.kind == CompKind::Struct {
- None
- } else {
- self.calc_layout(ctx)
+ return None
}
- }
-
- /// Compute the layout of this type.
- pub fn calc_layout(&self, ctx: &BindgenContext) -> Option<Layout> {
- use std::cmp;
- use std::mem;
-
- if self.kind == CompKind::Struct {
- let mut latest_offset_in_bits = 0;
- let mut max_align = 0;
-
- if self.needs_explicit_vtable(ctx) {
- latest_offset_in_bits += mem::size_of::<*mut ()>() * 8;
- max_align = mem::size_of::<*mut ()>();
- }
-
- for field in &self.fields {
- if let Some(bits) = field.bitfield() {
- latest_offset_in_bits += bits as usize;
- } else {
- let field_ty = ctx.resolve_type(field.ty);
- if let Some(field_layout) =
- field_ty.as_comp()
- .and_then(|comp| comp.calc_layout(ctx))
- .or_else(|| field_ty.layout(ctx)) {
+ let mut max_size = 0;
+ let mut max_align = 0;
+ for field in &self.fields {
+ let field_layout = ctx.resolve_type(field.ty)
+ .layout(ctx);
- let n = (latest_offset_in_bits / 8) %
- field_layout.align;
-
- if !self.packed && n != 0 {
- latest_offset_in_bits += (field_layout.align - n) *
- 8;
- }
-
- latest_offset_in_bits += field_layout.size * 8;
- max_align = cmp::max(max_align, field_layout.align);
- }
- }
- }
-
- if latest_offset_in_bits == 0 && max_align == 0 {
- None
- } else {
- Some(Layout::new((latest_offset_in_bits + 7) / 8, max_align))
- }
- } else {
- let mut max_size = 0;
- let mut max_align = 0;
- for field in &self.fields {
- let field_layout = ctx.resolve_type(field.ty)
- .layout(ctx);
-
- if let Some(layout) = field_layout {
- max_size = cmp::max(max_size, layout.size);
- max_align = cmp::max(max_align, layout.align);
- }
+ if let Some(layout) = field_layout {
+ max_size = cmp::max(max_size, layout.size);
+ max_align = cmp::max(max_align, layout.align);
}
-
- Some(Layout::new(max_size, max_align))
}
+
+ Some(Layout::new(max_size, max_align))
}
/// Get this type's set of fields.
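The fallback in `layout` above just takes the maximum size and maximum alignment over the fields. A tiny self-contained illustration (hypothetical field layouts, not bindgen's actual types):

```rust
use std::cmp;

// Each field is represented here as a (size, align) pair.
fn union_layout(fields: &[(usize, usize)]) -> (usize, usize) {
    fields.iter().fold((0, 0), |(max_size, max_align), &(size, align)| {
        (cmp::max(max_size, size), cmp::max(max_align, align))
    })
}

fn main() {
    // Roughly `union U { unsigned a; double b; char c[5]; };` on a typical
    // 64-bit target: sizes 4, 8, 5 and alignments 4, 8, 1.
    let fields = [(4, 4), (8, 8), (5, 1)];
    assert_eq!(union_layout(&fields), (8, 8));
}
```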
diff --git a/src/ir/dot.rs b/src/ir/dot.rs
new file mode 100644
index 00000000..b7a117bb
--- /dev/null
+++ b/src/ir/dot.rs
@@ -0,0 +1,53 @@
+//! Generating Graphviz `dot` files from our IR.
+
+use std::fs::File;
+use std::io::{self, Write};
+use std::path::Path;
+use super::context::{BindgenContext, ItemId};
+use super::traversal::Trace;
+
+/// A trait for anything that can write attributes as `<table>` rows to a dot
+/// file.
+pub trait DotAttributes {
+ /// Write this thing's attributes to the given output. Each attribute must
+ /// be its own `<tr>...</tr>`.
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write;
+}
+
+/// Write a graphviz dot file containing our IR.
+pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
+ where P: AsRef<Path>
+{
+ let file = try!(File::create(path));
+ let mut dot_file = io::BufWriter::new(file);
+ try!(writeln!(&mut dot_file, "digraph {{"));
+
+ let mut err: Option<io::Result<_>> = None;
+
+ for (id, item) in ctx.items() {
+ try!(writeln!(&mut dot_file,
+ r#"{} [fontname="courier", label=< <table border="0">"#,
+ id.as_usize()));
+ try!(item.dot_attributes(ctx, &mut dot_file));
+ try!(writeln!(&mut dot_file, r#"</table> >];"#));
+
+ item.trace(ctx, &mut |sub_id: ItemId, _edge_kind| {
+ if err.is_some() {
+ return;
+ }
+
+ match writeln!(&mut dot_file, "{} -> {};", id.as_usize(), sub_id.as_usize()) {
+ Ok(_) => {},
+ Err(e) => err = Some(Err(e)),
+ }
+ }, &());
+
+ if let Some(err) = err {
+ return err;
+ }
+ }
+
+ try!(writeln!(&mut dot_file, "}}"));
+ Ok(())
+}
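For reference, the file this writes is ordinary Graphviz source with HTML-like `<table>` labels, one node per item and one edge per traced sub-item. A self-contained sketch that produces the same shape of output for a made-up two-item graph (the item kinds and names here are invented):

```rust
use std::io::{self, Write};

fn write_toy_dot<W: Write>(out: &mut W) -> io::Result<()> {
    try!(writeln!(out, "digraph {{"));
    for &(id, kind, name) in &[(0usize, "Module", "root"), (1usize, "Type", "Foo")] {
        try!(writeln!(out,
                      r#"{} [fontname="courier", label=< <table border="0">"#,
                      id));
        try!(writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", kind));
        try!(writeln!(out, "<tr><td>name</td><td>{}</td></tr>", name));
        try!(writeln!(out, r#"</table> >];"#));
    }
    try!(writeln!(out, "0 -> 1;"));
    writeln!(out, "}}")
}

fn main() {
    let mut out = Vec::new();
    write_toy_dot(&mut out).unwrap();
    print!("{}", String::from_utf8(out).unwrap());
}
```

Running `dot -Tpng` over output like this renders the same kind of graph shown in the new `example-graphviz-ir.png`.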
diff --git a/src/ir/enum_ty.rs b/src/ir/enum_ty.rs
index 0a85577e..3470e033 100644
--- a/src/ir/enum_ty.rs
+++ b/src/ir/enum_ty.rs
@@ -5,8 +5,6 @@ use super::item::Item;
use super::ty::TypeKind;
use clang;
use ir::annotations::Annotations;
-use ir::int::IntKind;
-use ir::layout::Layout;
use parse::{ClangItemParser, ParseError};
/// An enum representing custom handling that can be given to a variant.
@@ -51,19 +49,6 @@ impl Enum {
&self.variants
}
- /// Compute the layout of this type.
- pub fn calc_layout(&self, ctx: &BindgenContext) -> Option<Layout> {
- self.repr
- .map(|repr| ctx.resolve_type(repr))
- .and_then(|repr| match *repr.canonical_type(ctx).kind() {
- TypeKind::Int(int_kind) => Some(int_kind),
- _ => None,
- })
- .unwrap_or(IntKind::Int)
- .known_size()
- .map(|size| Layout::new(size, size))
- }
-
/// Construct an enumeration from the given Clang type.
pub fn from_ty(ty: &clang::Type,
ctx: &mut BindgenContext)
@@ -114,7 +99,7 @@ impl Enum {
Annotations::new(&cursor)
.and_then(|anno| if anno.hide() {
Some(EnumVariantCustomBehavior::Hide)
- } else if
+ } else if
anno.constify_enum_variant() {
Some(EnumVariantCustomBehavior::Constify)
} else {
diff --git a/src/ir/function.rs b/src/ir/function.rs
index 22b9c9b0..daa30b89 100644
--- a/src/ir/function.rs
+++ b/src/ir/function.rs
@@ -1,12 +1,14 @@
//! Intermediate representation for C/C++ functions and methods.
use super::context::{BindgenContext, ItemId};
+use super::dot::DotAttributes;
use super::item::Item;
use super::traversal::{Trace, Tracer};
use super::ty::TypeKind;
use clang;
use clang_sys::CXCallingConv;
use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
+use std::io;
use syntax::abi;
/// A function declaration, with a signature, arguments, and argument names.
@@ -59,6 +61,18 @@ impl Function {
}
}
+impl DotAttributes for Function {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ if let Some(ref mangled) = self.mangled_name {
+ try!(writeln!(out, "<tr><td>mangled name</td><td>{}</td></tr>", mangled));
+ }
+
+ Ok(())
+ }
+}
+
/// A function signature.
#[derive(Debug)]
pub struct FunctionSig {
diff --git a/src/ir/item.rs b/src/ir/item.rs
index 4c65c433..bd401aba 100644
--- a/src/ir/item.rs
+++ b/src/ir/item.rs
@@ -3,6 +3,7 @@
use super::annotations::Annotations;
use super::context::{BindgenContext, ItemId, PartialType};
use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use super::dot::{DotAttributes};
use super::function::Function;
use super::item_kind::ItemKind;
use super::module::Module;
@@ -15,6 +16,7 @@ use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
use std::fmt::Write;
use std::iter;
+use std::io;
/// A trait to get the canonical name from an item.
///
@@ -472,6 +474,13 @@ impl Item {
self.kind().as_type()
}
+ /// Is this item a named template type parameter?
+ pub fn is_named(&self) -> bool {
+ self.as_type()
+ .map(|ty| ty.is_named())
+ .unwrap_or(false)
+ }
+
/// Get a reference to this item's underlying `Function`. Panic if this is
/// some other kind of item.
pub fn expect_function(&self) -> &Function {
@@ -907,6 +916,19 @@ impl Item {
/// A set of items.
pub type ItemSet = BTreeSet<ItemId>;
+impl DotAttributes for Item {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ try!(writeln!(out,
+ "<tr><td>{:?}</td></tr>
+ <tr><td>name</td><td>{}</td></tr>",
+ self.id,
+ self.name(ctx).get()));
+ self.kind.dot_attributes(ctx, out)
+ }
+}
+
impl TemplateDeclaration for ItemId {
fn template_params(&self, ctx: &BindgenContext) -> Option<Vec<ItemId>> {
ctx.resolve_item_fallible(*self)
diff --git a/src/ir/item_kind.rs b/src/ir/item_kind.rs
index d9e4690c..6dfd6764 100644
--- a/src/ir/item_kind.rs
+++ b/src/ir/item_kind.rs
@@ -1,5 +1,8 @@
//! Different variants of an `Item` in our intermediate representation.
+use std::io;
+use super::context::BindgenContext;
+use super::dot::DotAttributes;
use super::function::Function;
use super::module::Module;
use super::ty::Type;
@@ -32,6 +35,16 @@ impl ItemKind {
}
}
+    /// Get the name of this `ItemKind` variant as a static string.
+ pub fn kind_name(&self) -> &'static str {
+ match *self {
+ ItemKind::Module(..) => "Module",
+ ItemKind::Type(..) => "Type",
+ ItemKind::Function(..) => "Function",
+ ItemKind::Var(..) => "Var"
+ }
+ }
+
/// Is this a module?
pub fn is_module(&self) -> bool {
self.as_module().is_some()
@@ -112,3 +125,18 @@ impl ItemKind {
self.as_var().expect("Not a var")
}
}
+
+impl DotAttributes for ItemKind {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ try!(writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", self.kind_name()));
+
+ match *self {
+ ItemKind::Module(ref module) => module.dot_attributes(ctx, out),
+ ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out),
+ ItemKind::Function(ref func) => func.dot_attributes(ctx, out),
+ ItemKind::Var(ref var) => var.dot_attributes(ctx, out),
+ }
+ }
+}
diff --git a/src/ir/layout.rs b/src/ir/layout.rs
index 03d43b51..f21a501c 100644
--- a/src/ir/layout.rs
+++ b/src/ir/layout.rs
@@ -3,10 +3,10 @@
use super::context::BindgenContext;
use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
-use std::cmp;
+use std::{cmp, mem};
/// A type that represents the struct layout of a type.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Layout {
/// The size (in bytes) of this layout.
pub size: usize,
@@ -16,6 +16,13 @@ pub struct Layout {
pub packed: bool,
}
+#[test]
+fn test_layout_for_size() {
+ let ptr_size = mem::size_of::<*mut ()>();
+ assert_eq!(Layout::for_size(ptr_size), Layout::new(ptr_size, ptr_size));
+ assert_eq!(Layout::for_size(3 * ptr_size), Layout::new(3 * ptr_size, ptr_size));
+}
+
impl Layout {
/// Construct a new `Layout` with the given `size` and `align`. It is not
/// packed.
@@ -27,6 +34,20 @@ impl Layout {
}
}
+ /// Creates a non-packed layout for a given size, trying to use the maximum
+ /// alignment possible.
+ pub fn for_size(size: usize) -> Self {
+ let mut next_align = 2;
+ while size % next_align == 0 && next_align <= mem::size_of::<*mut ()>() {
+ next_align *= 2;
+ }
+ Layout {
+ size: size,
+ align: next_align / 2,
+ packed: false,
+ }
+ }
+
/// Is this a zero-sized layout?
pub fn is_zero(&self) -> bool {
self.size == 0 && self.align == 0
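Put differently, `for_size` picks the largest power-of-two divisor of `size`, capped at pointer width. A few extra worked cases in the same style as the test above (an illustrative sketch, assuming it sits next to `test_layout_for_size` and runs on a 64-bit target):

```rust
#[test]
fn test_layout_for_size_more() {
    // Largest power-of-two divisor of the size, capped at pointer width (8).
    assert_eq!(Layout::for_size(6), Layout::new(6, 2));
    assert_eq!(Layout::for_size(7), Layout::new(7, 1));
    assert_eq!(Layout::for_size(12), Layout::new(12, 4));
    assert_eq!(Layout::for_size(32), Layout::new(32, 8));
}
```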
diff --git a/src/ir/mod.rs b/src/ir/mod.rs
index 9fe0beb5..ba549c51 100644
--- a/src/ir/mod.rs
+++ b/src/ir/mod.rs
@@ -7,6 +7,7 @@ pub mod annotations;
pub mod comp;
pub mod context;
pub mod derive;
+pub mod dot;
pub mod enum_ty;
pub mod function;
pub mod int;
@@ -14,6 +15,7 @@ pub mod item;
pub mod item_kind;
pub mod layout;
pub mod module;
+pub mod named;
pub mod traversal;
pub mod ty;
pub mod var;
diff --git a/src/ir/module.rs b/src/ir/module.rs
index 6b6c535b..6787e3f9 100644
--- a/src/ir/module.rs
+++ b/src/ir/module.rs
@@ -1,6 +1,8 @@
//! Intermediate representation for modules (AKA C++ namespaces).
+use std::io;
use super::context::{BindgenContext, ItemId};
+use super::dot::DotAttributes;
use clang;
use parse::{ClangSubItemParser, ParseError, ParseResult};
use parse_one;
@@ -56,6 +58,16 @@ impl Module {
}
}
+impl DotAttributes for Module {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ writeln!(out,
+ "<tr><td>ModuleKind</td><td>{:?}</td></tr>",
+ self.kind)
+ }
+}
+
impl ClangSubItemParser for Module {
fn parse(cursor: clang::Cursor,
ctx: &mut BindgenContext)
diff --git a/src/ir/named.rs b/src/ir/named.rs
new file mode 100644
index 00000000..7a6c597c
--- /dev/null
+++ b/src/ir/named.rs
@@ -0,0 +1,471 @@
+//! Discover which template type parameters are actually used.
+//!
+//! ### Why do we care?
+//!
+//! C++ allows ignoring template parameters, while Rust does not. Usually we can
+//! blindly stick a `PhantomData<T>` inside a generic Rust struct to make up for
+//! this. That doesn't work for templated type aliases, however:
+//!
+//! ```C++
+//! template <typename T>
+//! using Fml = int;
+//! ```
+//!
+//! If we generate the naive Rust code for this alias, we get:
+//!
+//! ```ignore
+//! pub type Fml<T> = ::std::os::raw::int;
+//! ```
+//!
+//! And this is rejected by `rustc` due to the unused type parameter.
+//!
+//! (Aside: in these simple cases, `libclang` will often just give us the
+//! aliased type directly, and we will never even know we were dealing with
+//! aliases, let alone templated aliases. It's the more convoluted scenarios
+//! where we get to have some fun...)
+//!
+//! For such problematic template aliases, we could generate a tuple whose
+//! second member is a `PhantomData<T>`. Or, if we wanted to go the extra mile,
+//! we could even generate some smarter wrapper that implements `Deref`,
+//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased
+//! type. However, this is still lackluster:
+//!
+//! 1. Even with a billion conversion-trait implementations, using the generated
+//! bindings is rather un-ergonomic.
+//! 2. With either of these solutions, we need to keep track of which aliases
+//! we've transformed like this in order to generate correct uses of the
+//! wrapped type.
+//!
+//! Given that we have to properly track which template parameters ended up used
+//! for (2), we might as well leverage that information to make ergonomic
+//! bindings that don't contain any unused type parameters at all, and
+//! completely avoid the pain of (1).
+//!
+//! ### How do we determine which template parameters are used?
+//!
+//! Determining which template parameters are actually used is a trickier
+//! problem than it might seem at a glance. On the one hand, trivial uses are
+//! easy to detect:
+//!
+//! ```C++
+//! template <typename T>
+//! class Foo {
+//! T trivial_use_of_t;
+//! };
+//! ```
+//!
+//! It gets harder when determining if one template parameter is used depends on
+//! determining if another template parameter is used. In this example, whether
+//! `U` is used depends on whether `T` is used.
+//!
+//! ```C++
+//! template <typename T>
+//! class DoesntUseT {
+//! int x;
+//! };
+//!
+//! template <typename U>
+//! class Fml {
+//! DoesntUseT<U> lololol;
+//! };
+//! ```
+//!
+//! We can express the set of used template parameters as a constraint-solving
+//! problem (where the set of template parameters used by a given IR item is
+//! the union of its sub-items' used template parameters) and iterate to a
+//! fix-point.
+//!
+//! We use the "monotone framework" for this fix-point analysis: our lattice is
+//! the powerset of the template parameters that appear in the input C++
+//! header, our join function is set union, and we use the
+//! `ir::traversal::Trace` trait to implement the worklist optimization so we
+//! don't have to revisit every node in the graph on every iteration towards
+//! the fix-point.
+//!
+//! For a deeper introduction to the general form of this kind of analysis, see
+//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa].
+//!
+//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf
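+//!
+//! As a rough sketch (not part of the analysis itself), the worklist iteration
+//! for the `DoesntUseT`/`Fml` example above plays out something like this:
+//!
+//! ```ignore
+//! // used = {}
+//! // constrain(DoesntUseT<T>): `T` is never referenced by a field, so the
+//! //                           used set stays {}.
+//! // constrain(Fml<U>):        `U` only appears as the argument to
+//! //                           `DoesntUseT<U>`, and the declaration's `T` is
+//! //                           not in the used set, so `U` stays unused.
+//! // worklist drained: fix-point reached, and no template parameter is used.
+//! ```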
+
+use std::collections::HashMap;
+use std::fmt;
+use super::context::{BindgenContext, ItemId};
+use super::item::ItemSet;
+use super::traversal::{EdgeKind, Trace};
+use super::ty::{TemplateDeclaration, TypeKind};
+
+/// An analysis in the monotone framework.
+///
+/// Implementors of this trait must maintain the following two invariants:
+///
+/// 1. The concrete data must be a member of a finite-height lattice.
+/// 2. The concrete `constrain` method must be monotone: that is,
+/// if `x <= y`, then `constrain(x) <= constrain(y)`.
+///
+/// If these invariants do not hold, iteration to a fix-point might never
+/// complete.
+///
+/// For a simple example analysis, see the `ReachableFrom` type in the `tests`
+/// module below.
+pub trait MonotoneFramework: Sized + fmt::Debug {
+ /// The type of node in our dependency graph.
+ ///
+ /// This is just generic (and not `ItemId`) so that we can easily unit test
+ /// without constructing real `Item`s and their `ItemId`s.
+ type Node: Copy;
+
+ /// Any extra data that is needed during computation.
+ ///
+ /// Again, this is just generic (and not `&BindgenContext`) so that we can
+ /// easily unit test without constructing real `BindgenContext`s full of
+ /// real `Item`s and real `ItemId`s.
+ type Extra: Sized;
+
+ /// The final output of this analysis. Once we have reached a fix-point, we
+ /// convert `self` into this type, and return it as the final result of the
+ /// analysis.
+ type Output: From<Self>;
+
+ /// Construct a new instance of this analysis.
+ fn new(extra: Self::Extra) -> Self;
+
+ /// Get the initial set of nodes from which to start the analysis. Unless
+ /// you have domain-specific knowledge that justifies a smaller set, this
+ /// should be the complete set of nodes.
+ fn initial_worklist(&self) -> Vec<Self::Node>;
+
+ /// Update the analysis for the given node.
+ ///
+ /// If this results in changing our internal state (i.e. we discovered that
+ /// we have not reached a fix-point and iteration should continue), return
+ /// `true`. Otherwise, return `false`. Once `constrain` returns `false` for
+ /// every node, we have reached a fix-point and the analysis is complete.
+ fn constrain(&mut self, node: Self::Node) -> bool;
+
+ /// For each node `d` that depends on the given `node`'s current answer when
+ /// running `constrain(d)`, call `f(d)`. This informs us which new nodes to
+ /// queue up in the worklist when `constrain(node)` reports updated
+ /// information.
+ fn each_depending_on<F>(&self, node: Self::Node, f: F)
+ where F: FnMut(Self::Node);
+}
+
+/// Run an analysis in the monotone framework.
+// TODO: This allow(...) is just temporary until we replace
+// `Item::signature_contains_named_type` with
+// `analyze::<UsedTemplateParameters>`.
+#[allow(dead_code)]
+pub fn analyze<Analysis>(extra: Analysis::Extra) -> Analysis::Output
+ where Analysis: MonotoneFramework
+{
+ let mut analysis = Analysis::new(extra);
+ let mut worklist = analysis.initial_worklist();
+
+ while let Some(node) = worklist.pop() {
+ if analysis.constrain(node) {
+ analysis.each_depending_on(node, |needs_work| {
+ worklist.push(needs_work);
+ });
+ }
+ }
+
+ analysis.into()
+}
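+
+// An illustrative call site, mirroring the TODO above (editorial sketch with a
+// hypothetical `ctx: &BindgenContext`, not part of this change):
+//
+//     let used: ItemSet = analyze::<UsedTemplateParameters>(ctx);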
+
+/// An analysis that finds the set of template parameters that actually end up
+/// used in our generated bindings.
+#[derive(Debug, Clone)]
+pub struct UsedTemplateParameters<'a> {
+ ctx: &'a BindgenContext<'a>,
+ used: ItemSet,
+ dependencies: HashMap<ItemId, Vec<ItemId>>,
+}
+
+impl<'a> MonotoneFramework for UsedTemplateParameters<'a> {
+ type Node = ItemId;
+ type Extra = &'a BindgenContext<'a>;
+ type Output = ItemSet;
+
+ fn new(ctx: &'a BindgenContext<'a>) -> UsedTemplateParameters<'a> {
+ let mut dependencies = HashMap::new();
+
+ for item in ctx.whitelisted_items() {
+ {
+ // We reverse our natural IR graph edges to find dependencies
+ // between nodes.
+ let mut add_reverse_edge = |sub_item, _| {
+ dependencies.entry(sub_item).or_insert(vec![]).push(item);
+ };
+ item.trace(ctx, &mut add_reverse_edge, &());
+ }
+
+ // Additionally, whether a template instantiation's template
+ // arguments are used depends on whether the template declaration's
+ // generic template parameters are used.
+ ctx.resolve_item_fallible(item)
+ .and_then(|item| item.as_type())
+ .map(|ty| match ty.kind() {
+ &TypeKind::TemplateInstantiation(decl, ref args) => {
+ let decl = ctx.resolve_type(decl);
+ let params = decl.template_params(ctx)
+ .expect("a template instantiation's referenced \
+ template declaration should have template \
+ parameters");
+ for (arg, param) in args.iter().zip(params.iter()) {
+ dependencies.entry(*arg).or_insert(vec![]).push(*param);
+ }
+ }
+ _ => {}
+ });
+ }
+
+ UsedTemplateParameters {
+ ctx: ctx,
+ used: ItemSet::new(),
+ dependencies: dependencies,
+ }
+ }
+
+ fn initial_worklist(&self) -> Vec<Self::Node> {
+ self.ctx.whitelisted_items().collect()
+ }
+
+ fn constrain(&mut self, item: ItemId) -> bool {
+ let original_size = self.used.len();
+
+ item.trace(self.ctx, &mut |item, edge_kind| {
+ if edge_kind == EdgeKind::TemplateParameterDefinition {
+ // The definition of a template parameter is not considered a
+ // use of said template parameter. Ignore this edge.
+ return;
+ }
+
+ let ty_kind = self.ctx.resolve_item(item)
+ .as_type()
+ .map(|ty| ty.kind());
+
+ match ty_kind {
+ Some(&TypeKind::Named) => {
+ // This is a "trivial" use of the template type parameter.
+ self.used.insert(item);
+ },
+ Some(&TypeKind::TemplateInstantiation(decl, ref args)) => {
+ // A template instantiation's concrete template argument is
+ // only used if the template declaration uses the
+ // corresponding template parameter.
+ let decl = self.ctx.resolve_type(decl);
+ let params = decl.template_params(self.ctx)
+ .expect("a template instantiation's referenced \
+ template declaration should have template \
+ parameters");
+ for (arg, param) in args.iter().zip(params.iter()) {
+ if self.used.contains(param) {
+ if self.ctx.resolve_item(*arg).is_named() {
+ self.used.insert(*arg);
+ }
+ }
+ }
+ },
+ _ => return,
+ }
+ }, &());
+
+ let new_size = self.used.len();
+ new_size != original_size
+ }
+
+ fn each_depending_on<F>(&self, item: ItemId, mut f: F)
+ where F: FnMut(Self::Node)
+ {
+ if let Some(edges) = self.dependencies.get(&item) {
+ for item in edges {
+ f(*item);
+ }
+ }
+ }
+}
+
+impl<'a> From<UsedTemplateParameters<'a>> for ItemSet {
+ fn from(used_templ_params: UsedTemplateParameters) -> ItemSet {
+ used_templ_params.used
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::{HashMap, HashSet};
+ use super::*;
+
+ // Here we find the set of nodes that are reachable from any given
+ // node. This is a lattice mapping nodes to subsets of all nodes. Our join
+ // function is set union.
+ //
+ // This is our test graph:
+ //
+ // +---+ +---+
+ // | | | |
+ // | 1 | .----| 2 |
+ // | | | | |
+ // +---+ | +---+
+ // | | ^
+ // | | |
+ // | +---+ '------'
+ // '----->| |
+ // | 3 |
+ // .------| |------.
+ // | +---+ |
+ // | ^ |
+ // v | v
+ // +---+ | +---+ +---+
+ // | | | | | | |
+ // | 4 | | | 5 |--->| 6 |
+ // | | | | | | |
+ // +---+ | +---+ +---+
+ // | | | |
+ // | | | v
+ // | +---+ | +---+
+ // | | | | | |
+ // '----->| 7 |<-----' | 8 |
+ // | | | |
+ // +---+ +---+
+ //
+ // And here is the mapping from a node to the set of nodes that are
+ // reachable from it within the test graph:
+ //
+ // 1: {3,4,5,6,7,8}
+ // 2: {2}
+ // 3: {3,4,5,6,7,8}
+ // 4: {3,4,5,6,7,8}
+ // 5: {3,4,5,6,7,8}
+ // 6: {8}
+ // 7: {3,4,5,6,7,8}
+ // 8: {}
+
+ #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+ struct Node(usize);
+
+ #[derive(Clone, Debug, Default, PartialEq, Eq)]
+ struct Graph(HashMap<Node, Vec<Node>>);
+
+ impl Graph {
+ fn make_test_graph() -> Graph {
+ let mut g = Graph::default();
+ g.0.insert(Node(1), vec![Node(3)]);
+ g.0.insert(Node(2), vec![Node(2)]);
+ g.0.insert(Node(3), vec![Node(4), Node(5)]);
+ g.0.insert(Node(4), vec![Node(7)]);
+ g.0.insert(Node(5), vec![Node(6), Node(7)]);
+ g.0.insert(Node(6), vec![Node(8)]);
+ g.0.insert(Node(7), vec![Node(3)]);
+ g.0.insert(Node(8), vec![]);
+ g
+ }
+
+ fn reverse(&self) -> Graph {
+ let mut reversed = Graph::default();
+ for (node, edges) in self.0.iter() {
+ reversed.0.entry(*node).or_insert(vec![]);
+ for referent in edges.iter() {
+ reversed.0.entry(*referent).or_insert(vec![]).push(*node);
+ }
+ }
+ reversed
+ }
+ }
+
+ #[derive(Clone, Debug, PartialEq, Eq)]
+ struct ReachableFrom<'a> {
+ reachable: HashMap<Node, HashSet<Node>>,
+ graph: &'a Graph,
+ reversed: Graph,
+ }
+
+ impl<'a> MonotoneFramework for ReachableFrom<'a> {
+ type Node = Node;
+ type Extra = &'a Graph;
+ type Output = HashMap<Node, HashSet<Node>>;
+
+ fn new(graph: &'a Graph) -> ReachableFrom {
+ let reversed = graph.reverse();
+ ReachableFrom {
+ reachable: Default::default(),
+ graph: graph,
+ reversed: reversed,
+ }
+ }
+
+ fn initial_worklist(&self) -> Vec<Node> {
+ self.graph.0.keys().cloned().collect()
+ }
+
+ fn constrain(&mut self, node: Node) -> bool {
+ // The set of nodes reachable from a node `x` is
+ //
+ // reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ...
+ //
+ // where there exist edges from `x` to each of `s_0, s_1, ...`.
+ //
+ // Yes, what follows is a **terribly** inefficient set union
+ // implementation. Don't copy this code outside of this test!
+
+ let original_size = self.reachable.entry(node).or_insert(HashSet::new()).len();
+
+ for sub_node in self.graph.0[&node].iter() {
+ self.reachable.get_mut(&node).unwrap().insert(*sub_node);
+
+ let sub_reachable = self.reachable
+ .entry(*sub_node)
+ .or_insert(HashSet::new())
+ .clone();
+
+ for transitive in sub_reachable {
+ self.reachable.get_mut(&node).unwrap().insert(transitive);
+ }
+ }
+
+ let new_size = self.reachable[&node].len();
+ original_size != new_size
+ }
+
+ fn each_depending_on<F>(&self, node: Node, mut f: F)
+ where F: FnMut(Node)
+ {
+ for dep in self.reversed.0[&node].iter() {
+ f(*dep);
+ }
+ }
+ }
+
+ impl<'a> From<ReachableFrom<'a>> for HashMap<Node, HashSet<Node>> {
+ fn from(reachable: ReachableFrom<'a>) -> Self {
+ reachable.reachable
+ }
+ }
+
+ #[test]
+ fn monotone() {
+ let g = Graph::make_test_graph();
+ let reachable = analyze::<ReachableFrom>(&g);
+ println!("reachable = {:#?}", reachable);
+
+ fn nodes<A>(nodes: A) -> HashSet<Node>
+ where A: AsRef<[usize]>
+ {
+ nodes.as_ref().iter().cloned().map(Node).collect()
+ }
+
+ let mut expected = HashMap::new();
+ expected.insert(Node(1), nodes([3,4,5,6,7,8]));
+ expected.insert(Node(2), nodes([2]));
+ expected.insert(Node(3), nodes([3,4,5,6,7,8]));
+ expected.insert(Node(4), nodes([3,4,5,6,7,8]));
+ expected.insert(Node(5), nodes([3,4,5,6,7,8]));
+ expected.insert(Node(6), nodes([8]));
+ expected.insert(Node(7), nodes([3,4,5,6,7,8]));
+ expected.insert(Node(8), nodes([]));
+ println!("expected = {:#?}", expected);
+
+ assert_eq!(reachable, expected);
+ }
+}
diff --git a/src/ir/traversal.rs b/src/ir/traversal.rs
index eb4fc686..8c5e32cf 100644
--- a/src/ir/traversal.rs
+++ b/src/ir/traversal.rs
@@ -202,6 +202,14 @@ pub trait Tracer {
}
}
+impl<F> Tracer for F
+ where F: FnMut(ItemId, EdgeKind)
+{
+ fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
+ (*self)(item, kind)
+ }
+}
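+
+// Editorial note (illustrative only): this blanket impl lets callers pass a
+// plain closure as the tracer, as `ir::named` does, e.g.
+//
+//     item.trace(ctx, &mut |id, kind| println!("{:?} via {:?}", id, kind), &());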
+
/// Trace all of the outgoing edges to other items. Implementations should call
/// `tracer.visit(edge)` for each of their outgoing edges.
pub trait Trace {
diff --git a/src/ir/ty.rs b/src/ir/ty.rs
index ec168f02..44a88744 100644
--- a/src/ir/ty.rs
+++ b/src/ir/ty.rs
@@ -3,6 +3,7 @@
use super::comp::CompInfo;
use super::context::{BindgenContext, ItemId};
use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault};
+use super::dot::DotAttributes;
use super::enum_ty::Enum;
use super::function::FunctionSig;
use super::int::IntKind;
@@ -12,6 +13,7 @@ use super::objc::ObjCInterface;
use super::traversal::{Trace, Tracer};
use clang::{self, Cursor};
use parse::{ClangItemParser, ParseError, ParseResult};
+use std::io;
use std::mem;
/// Template declaration related methods.
@@ -311,19 +313,21 @@ impl Type {
match self.kind {
TypeKind::Named => {
let name = self.name().expect("Unnamed named type?");
- let mut chars = name.chars();
- let first = chars.next().unwrap();
- let mut remaining = chars;
-
- let valid = (first.is_alphabetic() || first == '_') &&
- remaining.all(|c| c.is_alphanumeric() || c == '_');
-
- !valid
+ !Self::is_valid_identifier(&name)
}
_ => false,
}
}
+ /// Checks whether the name looks like a valid identifier, i.e. the first
+ /// character is alphabetic or '_' and the rest are alphanumeric or '_'.
+ pub fn is_valid_identifier(name: &str) -> bool {
+ let mut chars = name.chars();
+ let first_valid = chars.next().map(|c| c.is_alphabetic() || c == '_').unwrap_or(false);
+
+ first_valid && chars.all(|c| c.is_alphanumeric() || c == '_')
+ }
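+
+ // A rough illustration of how `is_valid_identifier` behaves (editorial
+ // sketch, not part of this change):
+ //
+ //     assert!(Type::is_valid_identifier("foo_bar1"));
+ //     assert!(!Type::is_valid_identifier("1foo"));
+ //     assert!(!Type::is_valid_identifier("struct (anonymous at .h:1)"));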
+
/// See safe_canonical_type.
pub fn canonical_type<'tr>(&'tr self,
ctx: &'tr BindgenContext)
@@ -380,41 +384,63 @@ impl Type {
_ => false,
}
}
+}
- /// If this type has a known size, return it (in bytes).
- pub fn calc_size(&self, ctx: &BindgenContext) -> Option<usize> {
- match self.kind {
- TypeKind::Comp(ref ci) => {
- ci.calc_layout(ctx).map(|layout| layout.size)
- }
- TypeKind::Enum(ref enum_ty) => {
- enum_ty.calc_layout(ctx).map(|layout| layout.size)
- }
- TypeKind::Int(int_kind) => int_kind.known_size(),
- TypeKind::Float(float_kind) => Some(float_kind.known_size()),
- TypeKind::Complex(float_kind) => Some(float_kind.known_size() * 2),
- TypeKind::Reference(..) |
- TypeKind::NullPtr |
- TypeKind::Pointer(..) |
- TypeKind::BlockPointer |
- TypeKind::Function(..) |
- TypeKind::ObjCInterface(..) => Some(mem::size_of::<*mut ()>()),
- TypeKind::ResolvedTypeRef(inner) |
- TypeKind::Alias(inner) |
- TypeKind::TemplateAlias(inner, _) |
- TypeKind::TemplateInstantiation(inner, _) => {
- ctx.resolve_type(inner).calc_size(ctx)
- }
- TypeKind::Array(inner, len) => {
- ctx.resolve_type(inner)
- .layout(ctx)
- .map(|layout| layout.size * len)
+impl DotAttributes for Type {
+ fn dot_attributes<W>(&self, ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ if let Some(ref layout) = self.layout {
+ try!(writeln!(out,
+ "<tr><td>size</td><td>{}</td></tr>
+ <tr><td>align</td><td>{}</td></tr>",
+ layout.size,
+ layout.align));
+ if layout.packed {
+ try!(writeln!(out, "<tr><td>packed</td><td>true</td></tr>"));
}
- TypeKind::Void | TypeKind::Named => None,
- TypeKind::UnresolvedTypeRef(..) => unreachable!(),
}
+
+ if self.is_const {
+ try!(writeln!(out, "<tr><td>const</td><td>true</td></tr>"));
+ }
+
+ self.kind.dot_attributes(ctx, out)
}
}
+
+impl DotAttributes for TypeKind {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ write!(out,
+ "<tr><td>TypeKind</td><td>{}</td></tr>",
+ match *self {
+ TypeKind::Void => "Void",
+ TypeKind::NullPtr => "NullPtr",
+ TypeKind::Comp(..) => "Comp",
+ TypeKind::Int(..) => "Int",
+ TypeKind::Float(..) => "Float",
+ TypeKind::Complex(..) => "Complex",
+ TypeKind::Alias(..) => "Alias",
+ TypeKind::TemplateAlias(..) => "TemplateAlias",
+ TypeKind::Array(..) => "Array",
+ TypeKind::Function(..) => "Function",
+ TypeKind::Enum(..) => "Enum",
+ TypeKind::Pointer(..) => "Pointer",
+ TypeKind::BlockPointer => "BlockPointer",
+ TypeKind::Reference(..) => "Reference",
+ TypeKind::TemplateInstantiation(..) => "TemplateInstantiation",
+ TypeKind::ResolvedTypeRef(..) => "ResolvedTypeRef",
+ TypeKind::Named => "Named",
+ TypeKind::ObjCInterface(..) => "ObjCInterface",
+ TypeKind::UnresolvedTypeRef(..) => {
+ unreachable!("there shouldn't be any more of these anymore")
+ }
+ })
+ }
+}
+
#[test]
fn is_invalid_named_type_valid() {
let ty = Type::new(Some("foo".into()), None, TypeKind::Named, false);
@@ -454,7 +480,6 @@ fn is_invalid_named_type_unnamed() {
}
#[test]
-#[should_panic]
fn is_invalid_named_type_empty_name() {
let ty = Type::new(Some("".into()), None, TypeKind::Named, false);
assert!(ty.is_invalid_named_type())
@@ -1074,12 +1099,30 @@ impl Type {
}
CXType_Enum => {
let enum_ = Enum::from_ty(ty, ctx).expect("Not an enum?");
+
+ if name.is_empty() {
+ let pretty_name = ty.spelling();
+ if Self::is_valid_identifier(&pretty_name) {
+ name = pretty_name;
+ }
+ }
+
TypeKind::Enum(enum_)
}
CXType_Record => {
let complex =
CompInfo::from_ty(potential_id, ty, location, ctx)
.expect("Not a complex type?");
+
+ if name.is_empty() {
+ // The pretty-printed name may contain the typedef'd name,
+ // but it may also be something like "struct (anonymous at .h:1)".
+ let pretty_name = ty.spelling();
+ if Self::is_valid_identifier(&pretty_name) {
+ name = pretty_name;
+ }
+ }
+
TypeKind::Comp(complex)
}
// FIXME: We stub vectors as arrays since in 99% of the cases the
diff --git a/src/ir/var.rs b/src/ir/var.rs
index 6cfcdae7..c6d7a1c5 100644
--- a/src/ir/var.rs
+++ b/src/ir/var.rs
@@ -1,6 +1,7 @@
//! Intermediate representation of variables.
use super::context::{BindgenContext, ItemId};
+use super::dot::DotAttributes;
use super::function::cursor_mangling;
use super::int::IntKind;
use super::item::Item;
@@ -8,6 +9,7 @@ use super::ty::{FloatKind, TypeKind};
use cexpr;
use clang;
use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
+use std::io;
use std::num::Wrapping;
/// The type for a constant variable.
@@ -84,6 +86,22 @@ impl Var {
}
}
+impl DotAttributes for Var {
+ fn dot_attributes<W>(&self, _ctx: &BindgenContext, out: &mut W) -> io::Result<()>
+ where W: io::Write
+ {
+ if self.is_const {
+ try!(writeln!(out, "<tr><td>const</td><td>true</td></tr>"));
+ }
+
+ if let Some(ref mangled) = self.mangled_name {
+ try!(writeln!(out, "<tr><td>mangled name</td><td>{}</td></tr>", mangled));
+ }
+
+ Ok(())
+ }
+}
+
impl ClangSubItemParser for Var {
fn parse(cursor: clang::Cursor,
ctx: &mut BindgenContext)
diff --git a/src/lib.rs b/src/lib.rs
index 7bf98064..42363ebd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -175,6 +175,13 @@ impl Builder {
self
}
+ /// Set the output graphviz file.
+ pub fn emit_ir_graphviz<T: Into<String>>(mut self, path: T) -> Builder {
+ let path = path.into();
+ self.options.emit_ir_graphviz = Some(path);
+ self
+ }
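+
+ // Illustrative builder usage (editorial sketch; "input.h" and "ir.dot" are
+ // hypothetical file names, not part of this change):
+ //
+ //     bindgen::Builder::default()
+ //         .header("input.h")
+ //         .emit_ir_graphviz("ir.dot")
+ //         .generate()
+ //         .unwrap();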
+
/// Whether the generated bindings should contain documentation comments or
/// not.
///
@@ -491,6 +498,9 @@ pub struct BindgenOptions {
/// True if we should dump our internal IR for debugging purposes.
pub emit_ir: bool,
+ /// Output graphviz dot file.
+ pub emit_ir_graphviz: Option<String>,
+
/// True if we should emulate C++ namespaces with Rust modules in the
/// generated bindings.
pub enable_cxx_namespaces: bool,
@@ -595,6 +605,7 @@ impl Default for BindgenOptions {
links: vec![],
emit_ast: false,
emit_ir: false,
+ emit_ir_graphviz: None,
derive_debug: true,
derive_default: false,
enable_cxx_namespaces: false,
diff --git a/src/options.rs b/src/options.rs
index 49bad841..e54ee012 100644
--- a/src/options.rs
+++ b/src/options.rs
@@ -84,6 +84,11 @@ pub fn builder_from_flags<I>
Arg::with_name("emit-ir")
.long("emit-ir")
.help("Output our internal IR for debugging purposes."),
+ Arg::with_name("emit-ir-graphviz")
+ .long("emit-ir-graphviz")
+ .help("Dump graphviz dot file.")
+ .value_name("path")
+ .takes_value(true),
Arg::with_name("enable-cxx-namespaces")
.long("enable-cxx-namespaces")
.help("Enable support for C++ namespaces."),
@@ -270,6 +275,10 @@ pub fn builder_from_flags<I>
builder = builder.emit_ir();
}
+ if let Some(path) = matches.value_of("emit-ir-graphviz") {
+ builder = builder.emit_ir_graphviz(path);
+ }
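+
+ // Illustrative command-line invocation (editorial sketch; file names are
+ // hypothetical):
+ //
+ //     $ bindgen input.h --emit-ir-graphviz ir.dot
+ //     $ dot -Tpng ir.dot -o ir.png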
+
if matches.is_present("enable-cxx-namespaces") {
builder = builder.enable_cxx_namespaces();
}
diff --git a/tests/expectations/tests/anon_enum.rs b/tests/expectations/tests/anon_enum.rs
index 07ea4810..71abc77b 100644
--- a/tests/expectations/tests/anon_enum.rs
+++ b/tests/expectations/tests/anon_enum.rs
@@ -34,9 +34,6 @@ fn bindgen_test_layout_Test() {
impl Clone for Test {
fn clone(&self) -> Self { *self }
}
-pub const Foo: _bindgen_ty_1 = _bindgen_ty_1::Foo;
-pub const Bar: _bindgen_ty_1 = _bindgen_ty_1::Bar;
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-pub enum _bindgen_ty_1 { Foo = 0, Bar = 1, }
-pub use self::_bindgen_ty_1 as Baz;
+pub enum Baz { Foo = 0, Bar = 1, }
diff --git a/tests/expectations/tests/bitfield_align.rs b/tests/expectations/tests/bitfield_align.rs
new file mode 100644
index 00000000..ffc170b1
--- /dev/null
+++ b/tests/expectations/tests/bitfield_align.rs
@@ -0,0 +1,114 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct A {
+ pub x: ::std::os::raw::c_uchar,
+ pub _bitfield_1: [u8; 2usize],
+ pub y: ::std::os::raw::c_uchar,
+ pub __bindgen_align: [u32; 0usize],
+}
+#[test]
+fn bindgen_test_layout_A() {
+ assert_eq!(::std::mem::size_of::<A>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( A ) ));
+ assert_eq! (::std::mem::align_of::<A>() , 4usize , concat ! (
+ "Alignment of " , stringify ! ( A ) ));
+ assert_eq! (unsafe { & ( * ( 0 as * const A ) ) . x as * const _ as usize
+ } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( A ) , "::" , stringify
+ ! ( x ) ));
+ assert_eq! (unsafe { & ( * ( 0 as * const A ) ) . y as * const _ as usize
+ } , 3usize , concat ! (
+ "Alignment of field: " , stringify ! ( A ) , "::" , stringify
+ ! ( y ) ));
+}
+impl Clone for A {
+ fn clone(&self) -> Self { *self }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct B {
+ pub _bitfield_1: u32,
+ pub __bindgen_align: [u32; 0usize],
+}
+#[test]
+fn bindgen_test_layout_B() {
+ assert_eq!(::std::mem::size_of::<B>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( B ) ));
+ assert_eq! (::std::mem::align_of::<B>() , 4usize , concat ! (
+ "Alignment of " , stringify ! ( B ) ));
+}
+impl Clone for B {
+ fn clone(&self) -> Self { *self }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct C {
+ pub x: ::std::os::raw::c_uchar,
+ pub _bitfield_1: u8,
+ pub baz: ::std::os::raw::c_uint,
+}
+#[test]
+fn bindgen_test_layout_C() {
+ assert_eq!(::std::mem::size_of::<C>() , 8usize , concat ! (
+ "Size of: " , stringify ! ( C ) ));
+ assert_eq! (::std::mem::align_of::<C>() , 4usize , concat ! (
+ "Alignment of " , stringify ! ( C ) ));
+ assert_eq! (unsafe { & ( * ( 0 as * const C ) ) . x as * const _ as usize
+ } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( C ) , "::" , stringify
+ ! ( x ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const C ) ) . baz as * const _ as usize } ,
+ 4usize , concat ! (
+ "Alignment of field: " , stringify ! ( C ) , "::" , stringify
+ ! ( baz ) ));
+}
+impl Clone for C {
+ fn clone(&self) -> Self { *self }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct Date1 {
+ pub _bitfield_1: [u8; 2usize],
+ pub _bitfield_2: u8,
+ pub __bindgen_align: [u16; 0usize],
+}
+#[test]
+fn bindgen_test_layout_Date1() {
+ assert_eq!(::std::mem::size_of::<Date1>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( Date1 ) ));
+ assert_eq! (::std::mem::align_of::<Date1>() , 2usize , concat ! (
+ "Alignment of " , stringify ! ( Date1 ) ));
+}
+impl Clone for Date1 {
+ fn clone(&self) -> Self { *self }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct Date2 {
+ pub _bitfield_1: [u8; 2usize],
+ pub _bitfield_2: u8,
+ pub byte: ::std::os::raw::c_uchar,
+ pub __bindgen_align: [u16; 0usize],
+}
+#[test]
+fn bindgen_test_layout_Date2() {
+ assert_eq!(::std::mem::size_of::<Date2>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( Date2 ) ));
+ assert_eq! (::std::mem::align_of::<Date2>() , 2usize , concat ! (
+ "Alignment of " , stringify ! ( Date2 ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const Date2 ) ) . byte as * const _ as usize }
+ , 3usize , concat ! (
+ "Alignment of field: " , stringify ! ( Date2 ) , "::" ,
+ stringify ! ( byte ) ));
+}
+impl Clone for Date2 {
+ fn clone(&self) -> Self { *self }
+}
diff --git a/tests/expectations/tests/bitfield_method_mangling.rs b/tests/expectations/tests/bitfield_method_mangling.rs
index 0ab5fce5..f21b2089 100644
--- a/tests/expectations/tests/bitfield_method_mangling.rs
+++ b/tests/expectations/tests/bitfield_method_mangling.rs
@@ -6,46 +6,20 @@
#[repr(C)]
#[derive(Debug, Default, Copy)]
-pub struct _bindgen_ty_1 {
+pub struct mach_msg_type_descriptor_t {
pub _bitfield_1: u32,
+ pub __bindgen_align: [u32; 0usize],
}
#[test]
-fn bindgen_test_layout__bindgen_ty_1() {
- assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 4usize , concat ! (
- "Size of: " , stringify ! ( _bindgen_ty_1 ) ));
- assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 4usize , concat ! (
- "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+fn bindgen_test_layout_mach_msg_type_descriptor_t() {
+ assert_eq!(::std::mem::size_of::<mach_msg_type_descriptor_t>() , 4usize ,
+ concat ! (
+ "Size of: " , stringify ! ( mach_msg_type_descriptor_t ) ));
+ assert_eq! (::std::mem::align_of::<mach_msg_type_descriptor_t>() , 4usize
+ , concat ! (
+ "Alignment of " , stringify ! ( mach_msg_type_descriptor_t )
+ ));
}
-impl Clone for _bindgen_ty_1 {
+impl Clone for mach_msg_type_descriptor_t {
fn clone(&self) -> Self { *self }
}
-impl _bindgen_ty_1 {
- #[inline]
- pub fn pad3(&self) -> ::std::os::raw::c_uint {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (16777215usize as u32))
- >> 0u32) as u32)
- }
- }
- #[inline]
- pub fn set_pad3(&mut self, val: ::std::os::raw::c_uint) {
- self._bitfield_1 &= !(16777215usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 0u32) & (16777215usize as u32);
- }
- #[inline]
- pub fn type_(&self) -> ::std::os::raw::c_uint {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (4278190080usize as u32)) >> 24u32) as
- u32)
- }
- }
- #[inline]
- pub fn set_type(&mut self, val: ::std::os::raw::c_uint) {
- self._bitfield_1 &= !(4278190080usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 24u32) & (4278190080usize as u32);
- }
-}
-pub type mach_msg_type_descriptor_t = _bindgen_ty_1;
diff --git a/tests/expectations/tests/issue-410.rs b/tests/expectations/tests/issue-410.rs
index 2fe0f99b..3c6cd280 100644
--- a/tests/expectations/tests/issue-410.rs
+++ b/tests/expectations/tests/issue-410.rs
@@ -38,6 +38,5 @@ pub mod root {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
- pub enum _bindgen_ty_1 { }
- pub use self::super::root::_bindgen_ty_1 as JSWhyMagic;
+ pub enum JSWhyMagic { }
}
diff --git a/tests/expectations/tests/jsval_layout_opaque.rs b/tests/expectations/tests/jsval_layout_opaque.rs
index 848286e7..eb5306d2 100644
--- a/tests/expectations/tests/jsval_layout_opaque.rs
+++ b/tests/expectations/tests/jsval_layout_opaque.rs
@@ -111,6 +111,7 @@ pub struct jsval_layout {
#[derive(Debug, Copy)]
pub struct jsval_layout__bindgen_ty_1 {
pub _bitfield_1: u64,
+ pub __bindgen_align: [u64; 0usize],
}
#[test]
fn bindgen_test_layout_jsval_layout__bindgen_ty_1() {
@@ -128,37 +129,6 @@ impl Clone for jsval_layout__bindgen_ty_1 {
impl Default for jsval_layout__bindgen_ty_1 {
fn default() -> Self { unsafe { ::std::mem::zeroed() } }
}
-impl jsval_layout__bindgen_ty_1 {
- #[inline]
- pub fn payload47(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (140737488355327usize as u64)) >>
- 0u32) as u64)
- }
- }
- #[inline]
- pub fn set_payload47(&mut self, val: u64) {
- self._bitfield_1 &= !(140737488355327usize as u64);
- self._bitfield_1 |=
- ((val as u64 as u64) << 0u32) & (140737488355327usize as u64);
- }
- #[inline]
- pub fn tag(&self) -> JSValueTag {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (18446603336221196288usize as u64)) >>
- 47u32) as u32)
- }
- }
- #[inline]
- pub fn set_tag(&mut self, val: JSValueTag) {
- self._bitfield_1 &= !(18446603336221196288usize as u64);
- self._bitfield_1 |=
- ((val as u32 as u64) << 47u32) &
- (18446603336221196288usize as u64);
- }
-}
#[repr(C)]
#[derive(Debug, Default, Copy)]
pub struct jsval_layout__bindgen_ty_2 {
diff --git a/tests/expectations/tests/layout_align.rs b/tests/expectations/tests/layout_align.rs
index a21fa4d6..9085480c 100644
--- a/tests/expectations/tests/layout_align.rs
+++ b/tests/expectations/tests/layout_align.rs
@@ -71,6 +71,7 @@ pub struct rte_eth_link {
/**< ETH_SPEED_NUM_ */
pub link_speed: u32,
pub _bitfield_1: u8,
+ pub __bindgen_padding_0: [u8; 3usize],
pub __bindgen_align: [u64; 0usize],
}
#[test]
@@ -88,41 +89,3 @@ fn bindgen_test_layout_rte_eth_link() {
impl Clone for rte_eth_link {
fn clone(&self) -> Self { *self }
}
-impl rte_eth_link {
- #[inline]
- pub fn link_duplex(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
- 0u32) as u16)
- }
- }
- #[inline]
- pub fn set_link_duplex(&mut self, val: u16) {
- self._bitfield_1 &= !(1usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 0u32) & (1usize as u8);
- }
- #[inline]
- pub fn link_autoneg(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
- 1u32) as u16)
- }
- }
- #[inline]
- pub fn set_link_autoneg(&mut self, val: u16) {
- self._bitfield_1 &= !(2usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 1u32) & (2usize as u8);
- }
- #[inline]
- pub fn link_status(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
- 2u32) as u16)
- }
- }
- #[inline]
- pub fn set_link_status(&mut self, val: u16) {
- self._bitfield_1 &= !(4usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 2u32) & (4usize as u8);
- }
-}
diff --git a/tests/expectations/tests/layout_array.rs b/tests/expectations/tests/layout_array.rs
index c28d3ec8..2cc85785 100644
--- a/tests/expectations/tests/layout_array.rs
+++ b/tests/expectations/tests/layout_array.rs
@@ -117,26 +117,25 @@ impl Default for rte_mempool_ops {
*/
#[repr(C)]
#[derive(Debug, Default, Copy)]
-pub struct _bindgen_ty_1 {
+pub struct rte_spinlock_t {
/**< lock status 0 = unlocked, 1 = locked */
pub locked: ::std::os::raw::c_int,
}
#[test]
-fn bindgen_test_layout__bindgen_ty_1() {
- assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 4usize , concat ! (
- "Size of: " , stringify ! ( _bindgen_ty_1 ) ));
- assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 4usize , concat ! (
- "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+fn bindgen_test_layout_rte_spinlock_t() {
+ assert_eq!(::std::mem::size_of::<rte_spinlock_t>() , 4usize , concat ! (
+ "Size of: " , stringify ! ( rte_spinlock_t ) ));
+ assert_eq! (::std::mem::align_of::<rte_spinlock_t>() , 4usize , concat ! (
+ "Alignment of " , stringify ! ( rte_spinlock_t ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) . locked as * const _
+ & ( * ( 0 as * const rte_spinlock_t ) ) . locked as * const _
as usize } , 0usize , concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
+ "Alignment of field: " , stringify ! ( rte_spinlock_t ) , "::"
, stringify ! ( locked ) ));
}
-impl Clone for _bindgen_ty_1 {
+impl Clone for rte_spinlock_t {
fn clone(&self) -> Self { *self }
}
-pub type rte_spinlock_t = _bindgen_ty_1;
/**
* Structure storing the table of registered ops structs, each of which contain
* the function pointers for the mempool ops functions.
diff --git a/tests/expectations/tests/layout_eth_conf.rs b/tests/expectations/tests/layout_eth_conf.rs
index 16788d06..ae46f5c6 100644
--- a/tests/expectations/tests/layout_eth_conf.rs
+++ b/tests/expectations/tests/layout_eth_conf.rs
@@ -88,7 +88,7 @@ pub struct rte_eth_rxmode {
pub max_rx_pkt_len: u32,
/**< hdr buf size (header_split enabled).*/
pub split_hdr_size: u16,
- pub _bitfield_1: u16,
+ pub _bitfield_1: [u8; 2usize],
}
#[test]
fn bindgen_test_layout_rte_eth_rxmode() {
@@ -118,116 +118,6 @@ impl Clone for rte_eth_rxmode {
impl Default for rte_eth_rxmode {
fn default() -> Self { unsafe { ::std::mem::zeroed() } }
}
-impl rte_eth_rxmode {
- #[inline]
- pub fn header_split(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (1usize as u16)) >>
- 0u32) as u16)
- }
- }
- #[inline]
- pub fn set_header_split(&mut self, val: u16) {
- self._bitfield_1 &= !(1usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 0u32) & (1usize as u16);
- }
- #[inline]
- pub fn hw_ip_checksum(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (2usize as u16)) >>
- 1u32) as u16)
- }
- }
- #[inline]
- pub fn set_hw_ip_checksum(&mut self, val: u16) {
- self._bitfield_1 &= !(2usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 1u32) & (2usize as u16);
- }
- #[inline]
- pub fn hw_vlan_filter(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (4usize as u16)) >>
- 2u32) as u16)
- }
- }
- #[inline]
- pub fn set_hw_vlan_filter(&mut self, val: u16) {
- self._bitfield_1 &= !(4usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 2u32) & (4usize as u16);
- }
- #[inline]
- pub fn hw_vlan_strip(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (8usize as u16)) >>
- 3u32) as u16)
- }
- }
- #[inline]
- pub fn set_hw_vlan_strip(&mut self, val: u16) {
- self._bitfield_1 &= !(8usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 3u32) & (8usize as u16);
- }
- #[inline]
- pub fn hw_vlan_extend(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (16usize as u16)) >>
- 4u32) as u16)
- }
- }
- #[inline]
- pub fn set_hw_vlan_extend(&mut self, val: u16) {
- self._bitfield_1 &= !(16usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 4u32) & (16usize as u16);
- }
- #[inline]
- pub fn jumbo_frame(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (32usize as u16)) >>
- 5u32) as u16)
- }
- }
- #[inline]
- pub fn set_jumbo_frame(&mut self, val: u16) {
- self._bitfield_1 &= !(32usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 5u32) & (32usize as u16);
- }
- #[inline]
- pub fn hw_strip_crc(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (64usize as u16)) >>
- 6u32) as u16)
- }
- }
- #[inline]
- pub fn set_hw_strip_crc(&mut self, val: u16) {
- self._bitfield_1 &= !(64usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 6u32) & (64usize as u16);
- }
- #[inline]
- pub fn enable_scatter(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (128usize as u16)) >>
- 7u32) as u16)
- }
- }
- #[inline]
- pub fn set_enable_scatter(&mut self, val: u16) {
- self._bitfield_1 &= !(128usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 7u32) & (128usize as u16);
- }
- #[inline]
- pub fn enable_lro(&self) -> u16 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (256usize as u16)) >>
- 8u32) as u16)
- }
- }
- #[inline]
- pub fn set_enable_lro(&mut self, val: u16) {
- self._bitfield_1 &= !(256usize as u16);
- self._bitfield_1 |= ((val as u16 as u16) << 8u32) & (256usize as u16);
- }
-}
#[repr(u32)]
/**
* A set of values to identify what method is to be used to transmit
@@ -250,6 +140,7 @@ pub struct rte_eth_txmode {
pub mq_mode: rte_eth_tx_mq_mode,
pub pvid: u16,
pub _bitfield_1: u8,
+ pub __bindgen_padding_0: u8,
}
#[test]
fn bindgen_test_layout_rte_eth_txmode() {
@@ -274,44 +165,6 @@ impl Clone for rte_eth_txmode {
impl Default for rte_eth_txmode {
fn default() -> Self { unsafe { ::std::mem::zeroed() } }
}
-impl rte_eth_txmode {
- #[inline]
- pub fn hw_vlan_reject_tagged(&self) -> u8 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
- 0u32) as u8)
- }
- }
- #[inline]
- pub fn set_hw_vlan_reject_tagged(&mut self, val: u8) {
- self._bitfield_1 &= !(1usize as u8);
- self._bitfield_1 |= ((val as u8 as u8) << 0u32) & (1usize as u8);
- }
- #[inline]
- pub fn hw_vlan_reject_untagged(&self) -> u8 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
- 1u32) as u8)
- }
- }
- #[inline]
- pub fn set_hw_vlan_reject_untagged(&mut self, val: u8) {
- self._bitfield_1 &= !(2usize as u8);
- self._bitfield_1 |= ((val as u8 as u8) << 1u32) & (2usize as u8);
- }
- #[inline]
- pub fn hw_vlan_insert_pvid(&self) -> u8 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
- 2u32) as u8)
- }
- }
- #[inline]
- pub fn set_hw_vlan_insert_pvid(&mut self, val: u8) {
- self._bitfield_1 &= !(4usize as u8);
- self._bitfield_1 |= ((val as u8 as u8) << 2u32) & (4usize as u8);
- }
-}
/**
* A structure used to configure the Receive Side Scaling (RSS) feature
* of an Ethernet port.
diff --git a/tests/expectations/tests/layout_large_align_field.rs b/tests/expectations/tests/layout_large_align_field.rs
new file mode 100644
index 00000000..820e4210
--- /dev/null
+++ b/tests/expectations/tests/layout_large_align_field.rs
@@ -0,0 +1,419 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>);
+impl <T> __IncompleteArrayField<T> {
+ #[inline]
+ pub fn new() -> Self {
+ __IncompleteArrayField(::std::marker::PhantomData)
+ }
+ #[inline]
+ pub unsafe fn as_ptr(&self) -> *const T { ::std::mem::transmute(self) }
+ #[inline]
+ pub unsafe fn as_mut_ptr(&mut self) -> *mut T {
+ ::std::mem::transmute(self)
+ }
+ #[inline]
+ pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::std::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+impl <T> ::std::fmt::Debug for __IncompleteArrayField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ fmt.write_str("__IncompleteArrayField")
+ }
+}
+impl <T> ::std::clone::Clone for __IncompleteArrayField<T> {
+ #[inline]
+ fn clone(&self) -> Self { Self::new() }
+}
+impl <T> ::std::marker::Copy for __IncompleteArrayField<T> { }
+pub const RTE_CACHE_LINE_SIZE: ::std::os::raw::c_uint = 64;
+pub const RTE_LIBRTE_IP_FRAG_MAX_FRAG: ::std::os::raw::c_uint = 4;
+pub const IP_LAST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_LAST_FRAG_IDX;
+pub const IP_FIRST_FRAG_IDX: _bindgen_ty_1 = _bindgen_ty_1::IP_FIRST_FRAG_IDX;
+pub const IP_MIN_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MIN_FRAG_NUM;
+pub const IP_MAX_FRAG_NUM: _bindgen_ty_1 = _bindgen_ty_1::IP_MAX_FRAG_NUM;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum _bindgen_ty_1 {
+ IP_LAST_FRAG_IDX = 0,
+ IP_FIRST_FRAG_IDX = 1,
+ IP_MIN_FRAG_NUM = 2,
+ IP_MAX_FRAG_NUM = 4,
+}
+/** @internal fragmented mbuf */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag {
+ /**< offset into the packet */
+ pub ofs: u16,
+ /**< length of fragment */
+ pub len: u16,
+ /**< fragment mbuf */
+ pub mb: *mut rte_mbuf,
+}
+#[test]
+fn bindgen_test_layout_ip_frag() {
+ assert_eq!(::std::mem::size_of::<ip_frag>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . ofs as * const _ as usize }
+ , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( ofs ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . len as * const _ as usize }
+ , 2usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( len ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag ) ) . mb as * const _ as usize }
+ , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag ) , "::" ,
+ stringify ! ( mb ) ));
+}
+impl Clone for ip_frag {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_key {
+ /**< src address, first 8 bytes used for IPv4 */
+ pub src_dst: [u64; 4usize],
+ /**< dst address */
+ pub id: u32,
+ /**< src/dst key length */
+ pub key_len: u32,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_key() {
+ assert_eq!(::std::mem::size_of::<ip_frag_key>() , 40usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_key>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_frag_key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . src_dst as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( src_dst ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . id as * const _ as
+ usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( id ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_key ) ) . key_len as * const _ as
+ usize } , 36usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_key ) , "::" ,
+ stringify ! ( key_len ) ));
+}
+impl Clone for ip_frag_key {
+ fn clone(&self) -> Self { *self }
+}
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt {
+ /**< LRU list */
+ pub lru: ip_frag_pkt__bindgen_ty_1,
+ /**< fragmentation key */
+ pub key: ip_frag_key,
+ /**< creation timestamp */
+ pub start: u64,
+ /**< expected reassembled size */
+ pub total_size: u32,
+ /**< size of fragments received */
+ pub frag_size: u32,
+ /**< index of next entry to fill */
+ pub last_idx: u32,
+ /**< fragments */
+ pub frags: [ip_frag; 4usize],
+ pub __bindgen_padding_0: [u64; 6usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_frag_pkt__bindgen_ty_1 {
+ pub tqe_next: *mut ip_frag_pkt,
+ pub tqe_prev: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt__bindgen_ty_1>() , 16usize ,
+ concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt__bindgen_ty_1 ) ));
+ assert_eq! (::std::mem::align_of::<ip_frag_pkt__bindgen_ty_1>() , 8usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( ip_frag_pkt__bindgen_ty_1 )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_next
+ as * const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_next )
+ ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt__bindgen_ty_1 ) ) . tqe_prev
+ as * const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! (
+ ip_frag_pkt__bindgen_ty_1 ) , "::" , stringify ! ( tqe_prev )
+ ));
+}
+impl Clone for ip_frag_pkt__bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt__bindgen_ty_1 {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[test]
+fn bindgen_test_layout_ip_frag_pkt() {
+ assert_eq!(::std::mem::size_of::<ip_frag_pkt>() , 192usize , concat ! (
+ "Size of: " , stringify ! ( ip_frag_pkt ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . lru as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . key as * const _ as
+ usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( key ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . start as * const _ as
+ usize } , 56usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( start ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . total_size as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( total_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frag_size as * const _
+ as usize } , 68usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frag_size ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . last_idx as * const _
+ as usize } , 72usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( last_idx ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_pkt ) ) . frags as * const _ as
+ usize } , 80usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_pkt ) , "::" ,
+ stringify ! ( frags ) ));
+}
+impl Clone for ip_frag_pkt {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_frag_pkt {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct ip_pkt_list {
+ pub tqh_first: *mut ip_frag_pkt,
+ pub tqh_last: *mut *mut ip_frag_pkt,
+}
+#[test]
+fn bindgen_test_layout_ip_pkt_list() {
+ assert_eq!(::std::mem::size_of::<ip_pkt_list>() , 16usize , concat ! (
+ "Size of: " , stringify ! ( ip_pkt_list ) ));
+ assert_eq! (::std::mem::align_of::<ip_pkt_list>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( ip_pkt_list ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_pkt_list ) ) . tqh_first as * const _
+ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_pkt_list ) , "::" ,
+ stringify ! ( tqh_first ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_pkt_list ) ) . tqh_last as * const _
+ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_pkt_list ) , "::" ,
+ stringify ! ( tqh_last ) ));
+}
+impl Clone for ip_pkt_list {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for ip_pkt_list {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/** fragmentation table statistics */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct ip_frag_tbl_stat {
+ /**< total # of find/insert attempts. */
+ pub find_num: u64,
+ /**< # of add ops. */
+ pub add_num: u64,
+ /**< # of del ops. */
+ pub del_num: u64,
+ /**< # of reuse (del/add) ops. */
+ pub reuse_num: u64,
+ /**< total # of add failures. */
+ pub fail_total: u64,
+ /**< # of 'no space' add failures. */
+ pub fail_nospace: u64,
+ pub __bindgen_padding_0: [u64; 2usize],
+}
+#[test]
+fn bindgen_test_layout_ip_frag_tbl_stat() {
+ assert_eq!(::std::mem::size_of::<ip_frag_tbl_stat>() , 64usize , concat !
+ ( "Size of: " , stringify ! ( ip_frag_tbl_stat ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . find_num as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( find_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . add_num as * const
+ _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( add_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . del_num as * const
+ _ as usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( del_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . reuse_num as *
+ const _ as usize } , 24usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( reuse_num ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . fail_total as *
+ const _ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( fail_total ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const ip_frag_tbl_stat ) ) . fail_nospace as *
+ const _ as usize } , 40usize , concat ! (
+ "Alignment of field: " , stringify ! ( ip_frag_tbl_stat ) ,
+ "::" , stringify ! ( fail_nospace ) ));
+}
+impl Clone for ip_frag_tbl_stat {
+ fn clone(&self) -> Self { *self }
+}
+/** fragmentation table */
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct rte_ip_frag_tbl {
+ /**< ttl for table entries. */
+ pub max_cycles: u64,
+ /**< hash value mask. */
+ pub entry_mask: u32,
+ /**< max entries allowed. */
+ pub max_entries: u32,
+ /**< entries in use. */
+ pub use_entries: u32,
+ /**< hash associativity. */
+ pub bucket_entries: u32,
+ /**< total size of the table. */
+ pub nb_entries: u32,
+ /**< num of associativity lines. */
+ pub nb_buckets: u32,
+ /**< last used entry. */
+ pub last: *mut ip_frag_pkt,
+ /**< LRU list for table entries. */
+ pub lru: ip_pkt_list,
+ pub __bindgen_padding_0: u64,
+ /**< statistics counters. */
+ pub stat: ip_frag_tbl_stat,
+ /**< hash table. */
+ pub pkt: __IncompleteArrayField<ip_frag_pkt>,
+}
+#[test]
+fn bindgen_test_layout_rte_ip_frag_tbl() {
+ assert_eq!(::std::mem::size_of::<rte_ip_frag_tbl>() , 128usize , concat !
+ ( "Size of: " , stringify ! ( rte_ip_frag_tbl ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . max_cycles as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( max_cycles ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . entry_mask as *
+ const _ as usize } , 8usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( entry_mask ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . max_entries as *
+ const _ as usize } , 12usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( max_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . use_entries as *
+ const _ as usize } , 16usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( use_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . bucket_entries as *
+ const _ as usize } , 20usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( bucket_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . nb_entries as *
+ const _ as usize } , 24usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( nb_entries ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . nb_buckets as *
+ const _ as usize } , 28usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( nb_buckets ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . last as * const _
+ as usize } , 32usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( last ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . lru as * const _ as
+ usize } , 40usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( lru ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . stat as * const _
+ as usize } , 64usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( stat ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const rte_ip_frag_tbl ) ) . pkt as * const _ as
+ usize } , 128usize , concat ! (
+ "Alignment of field: " , stringify ! ( rte_ip_frag_tbl ) ,
+ "::" , stringify ! ( pkt ) ));
+}
+impl Clone for rte_ip_frag_tbl {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for rte_ip_frag_tbl {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+/**< fragment mbuf */
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct rte_mbuf {
+ pub _address: u8,
+}
+impl Clone for rte_mbuf {
+ fn clone(&self) -> Self { *self }
+}
diff --git a/tests/expectations/tests/layout_mbuf.rs b/tests/expectations/tests/layout_mbuf.rs
index 189b50a5..8ad13f11 100644
--- a/tests/expectations/tests/layout_mbuf.rs
+++ b/tests/expectations/tests/layout_mbuf.rs
@@ -39,26 +39,25 @@ pub type MARKER64 = [u64; 0usize];
*/
#[repr(C)]
#[derive(Debug, Default, Copy)]
-pub struct _bindgen_ty_1 {
+pub struct rte_atomic16_t {
/**< An internal counter value. */
pub cnt: i16,
}
#[test]
-fn bindgen_test_layout__bindgen_ty_1() {
- assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 2usize , concat ! (
- "Size of: " , stringify ! ( _bindgen_ty_1 ) ));
- assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 2usize , concat ! (
- "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+fn bindgen_test_layout_rte_atomic16_t() {
+ assert_eq!(::std::mem::size_of::<rte_atomic16_t>() , 2usize , concat ! (
+ "Size of: " , stringify ! ( rte_atomic16_t ) ));
+ assert_eq! (::std::mem::align_of::<rte_atomic16_t>() , 2usize , concat ! (
+ "Alignment of " , stringify ! ( rte_atomic16_t ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) . cnt as * const _ as
+ & ( * ( 0 as * const rte_atomic16_t ) ) . cnt as * const _ as
usize } , 0usize , concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
+ "Alignment of field: " , stringify ! ( rte_atomic16_t ) , "::"
, stringify ! ( cnt ) ));
}
-impl Clone for _bindgen_ty_1 {
+impl Clone for rte_atomic16_t {
fn clone(&self) -> Self { *self }
}
-pub type rte_atomic16_t = _bindgen_ty_1;
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -159,7 +158,8 @@ pub struct rte_mbuf__bindgen_ty_2 {
#[repr(C)]
#[derive(Debug, Default, Copy)]
pub struct rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
- pub _bitfield_1: u32,
+ pub _bitfield_1: [u8; 4usize],
+ pub __bindgen_align: [u32; 0usize],
}
#[test]
fn bindgen_test_layout_rte_mbuf__bindgen_ty_2__bindgen_ty_1() {
@@ -175,98 +175,6 @@ fn bindgen_test_layout_rte_mbuf__bindgen_ty_2__bindgen_ty_1() {
impl Clone for rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
-impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
- #[inline]
- pub fn l2_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (15usize as u32)) >>
- 0u32) as u32)
- }
- }
- #[inline]
- pub fn set_l2_type(&mut self, val: u32) {
- self._bitfield_1 &= !(15usize as u32);
- self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (15usize as u32);
- }
- #[inline]
- pub fn l3_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (240usize as u32)) >>
- 4u32) as u32)
- }
- }
- #[inline]
- pub fn set_l3_type(&mut self, val: u32) {
- self._bitfield_1 &= !(240usize as u32);
- self._bitfield_1 |= ((val as u32 as u32) << 4u32) & (240usize as u32);
- }
- #[inline]
- pub fn l4_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (3840usize as u32)) >>
- 8u32) as u32)
- }
- }
- #[inline]
- pub fn set_l4_type(&mut self, val: u32) {
- self._bitfield_1 &= !(3840usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 8u32) & (3840usize as u32);
- }
- #[inline]
- pub fn tun_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (61440usize as u32)) >>
- 12u32) as u32)
- }
- }
- #[inline]
- pub fn set_tun_type(&mut self, val: u32) {
- self._bitfield_1 &= !(61440usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 12u32) & (61440usize as u32);
- }
- #[inline]
- pub fn inner_l2_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (983040usize as u32))
- >> 16u32) as u32)
- }
- }
- #[inline]
- pub fn set_inner_l2_type(&mut self, val: u32) {
- self._bitfield_1 &= !(983040usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 16u32) & (983040usize as u32);
- }
- #[inline]
- pub fn inner_l3_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (15728640usize as u32))
- >> 20u32) as u32)
- }
- }
- #[inline]
- pub fn set_inner_l3_type(&mut self, val: u32) {
- self._bitfield_1 &= !(15728640usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 20u32) & (15728640usize as u32);
- }
- #[inline]
- pub fn inner_l4_type(&self) -> u32 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (251658240usize as u32)) >> 24u32) as
- u32)
- }
- }
- #[inline]
- pub fn set_inner_l4_type(&mut self, val: u32) {
- self._bitfield_1 &= !(251658240usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 24u32) & (251658240usize as u32);
- }
-}
#[test]
fn bindgen_test_layout_rte_mbuf__bindgen_ty_2() {
assert_eq!(::std::mem::size_of::<rte_mbuf__bindgen_ty_2>() , 4usize ,
@@ -500,7 +408,8 @@ pub struct rte_mbuf__bindgen_ty_5 {
#[repr(C)]
#[derive(Debug, Default, Copy)]
pub struct rte_mbuf__bindgen_ty_5__bindgen_ty_1 {
- pub _bitfield_1: u64,
+ pub _bitfield_1: [u16; 4usize],
+ pub __bindgen_align: [u64; 0usize],
}
#[test]
fn bindgen_test_layout_rte_mbuf__bindgen_ty_5__bindgen_ty_1() {
@@ -516,88 +425,6 @@ fn bindgen_test_layout_rte_mbuf__bindgen_ty_5__bindgen_ty_1() {
impl Clone for rte_mbuf__bindgen_ty_5__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
-impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 {
- #[inline]
- pub fn l2_len(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (127usize as u64)) >>
- 0u32) as u64)
- }
- }
- #[inline]
- pub fn set_l2_len(&mut self, val: u64) {
- self._bitfield_1 &= !(127usize as u64);
- self._bitfield_1 |= ((val as u64 as u64) << 0u32) & (127usize as u64);
- }
- #[inline]
- pub fn l3_len(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (65408usize as u64)) >>
- 7u32) as u64)
- }
- }
- #[inline]
- pub fn set_l3_len(&mut self, val: u64) {
- self._bitfield_1 &= !(65408usize as u64);
- self._bitfield_1 |=
- ((val as u64 as u64) << 7u32) & (65408usize as u64);
- }
- #[inline]
- pub fn l4_len(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (16711680usize as u64))
- >> 16u32) as u64)
- }
- }
- #[inline]
- pub fn set_l4_len(&mut self, val: u64) {
- self._bitfield_1 &= !(16711680usize as u64);
- self._bitfield_1 |=
- ((val as u64 as u64) << 16u32) & (16711680usize as u64);
- }
- #[inline]
- pub fn tso_segsz(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (1099494850560usize as u64)) >> 24u32)
- as u64)
- }
- }
- #[inline]
- pub fn set_tso_segsz(&mut self, val: u64) {
- self._bitfield_1 &= !(1099494850560usize as u64);
- self._bitfield_1 |=
- ((val as u64 as u64) << 24u32) & (1099494850560usize as u64);
- }
- #[inline]
- pub fn outer_l3_len(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (561850441793536usize as u64)) >>
- 40u32) as u64)
- }
- }
- #[inline]
- pub fn set_outer_l3_len(&mut self, val: u64) {
- self._bitfield_1 &= !(561850441793536usize as u64);
- self._bitfield_1 |=
- ((val as u64 as u64) << 40u32) & (561850441793536usize as u64);
- }
- #[inline]
- pub fn outer_l2_len(&self) -> u64 {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (71494644084506624usize as u64)) >>
- 49u32) as u64)
- }
- }
- #[inline]
- pub fn set_outer_l2_len(&mut self, val: u64) {
- self._bitfield_1 &= !(71494644084506624usize as u64);
- self._bitfield_1 |=
- ((val as u64 as u64) << 49u32) & (71494644084506624usize as u64);
- }
-}
#[test]
fn bindgen_test_layout_rte_mbuf__bindgen_ty_5() {
assert_eq!(::std::mem::size_of::<rte_mbuf__bindgen_ty_5>() , 8usize ,
diff --git a/tests/expectations/tests/only_bitfields.rs b/tests/expectations/tests/only_bitfields.rs
index 9252097d..643725d9 100644
--- a/tests/expectations/tests/only_bitfields.rs
+++ b/tests/expectations/tests/only_bitfields.rs
@@ -8,6 +8,7 @@
#[derive(Debug, Default, Copy)]
pub struct C {
pub _bitfield_1: u8,
+ pub __bindgen_align: [u8; 0usize],
}
#[test]
fn bindgen_test_layout_C() {
@@ -19,29 +20,3 @@ fn bindgen_test_layout_C() {
impl Clone for C {
fn clone(&self) -> Self { *self }
}
-impl C {
- #[inline]
- pub fn a(&self) -> bool {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
- 0u32) as u8)
- }
- }
- #[inline]
- pub fn set_a(&mut self, val: bool) {
- self._bitfield_1 &= !(1usize as u8);
- self._bitfield_1 |= ((val as u8 as u8) << 0u32) & (1usize as u8);
- }
- #[inline]
- pub fn b(&self) -> bool {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (254usize as u8)) >>
- 1u32) as u8)
- }
- }
- #[inline]
- pub fn set_b(&mut self, val: bool) {
- self._bitfield_1 &= !(254usize as u8);
- self._bitfield_1 |= ((val as u8 as u8) << 1u32) & (254usize as u8);
- }
-}
diff --git a/tests/expectations/tests/struct_typedef.rs b/tests/expectations/tests/struct_typedef.rs
new file mode 100644
index 00000000..63811bda
--- /dev/null
+++ b/tests/expectations/tests/struct_typedef.rs
@@ -0,0 +1,61 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+#[repr(C)]
+#[derive(Debug, Default, Copy)]
+pub struct typedef_named_struct {
+ pub has_name: bool,
+}
+#[test]
+fn bindgen_test_layout_typedef_named_struct() {
+ assert_eq!(::std::mem::size_of::<typedef_named_struct>() , 1usize , concat
+ ! ( "Size of: " , stringify ! ( typedef_named_struct ) ));
+ assert_eq! (::std::mem::align_of::<typedef_named_struct>() , 1usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( typedef_named_struct ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const typedef_named_struct ) ) . has_name as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( typedef_named_struct )
+ , "::" , stringify ! ( has_name ) ));
+}
+impl Clone for typedef_named_struct {
+ fn clone(&self) -> Self { *self }
+}
+#[repr(C)]
+#[derive(Debug, Copy)]
+pub struct _bindgen_ty_1 {
+ pub no_name: *mut ::std::os::raw::c_void,
+}
+#[test]
+fn bindgen_test_layout__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 8usize , concat ! (
+ "Size of: " , stringify ! ( _bindgen_ty_1 ) ));
+ assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const _bindgen_ty_1 ) ) . no_name as * const _
+ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
+ , stringify ! ( no_name ) ));
+}
+impl Clone for _bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+}
+impl Default for _bindgen_ty_1 {
+ fn default() -> Self { unsafe { ::std::mem::zeroed() } }
+}
+pub type struct_ptr_t = *mut _bindgen_ty_1;
+pub type struct_ptr_ptr_t = *mut *mut _bindgen_ty_1;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum typedef_named_enum { ENUM_HAS_NAME = 1, }
+pub const ENUM_IS_ANON: _bindgen_ty_2 = _bindgen_ty_2::ENUM_IS_ANON;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum _bindgen_ty_2 { ENUM_IS_ANON = 0, }
+pub type enum_ptr_t = *mut _bindgen_ty_2;
+pub type enum_ptr_ptr_t = *mut *mut _bindgen_ty_2;
diff --git a/tests/expectations/tests/struct_typedef_ns.rs b/tests/expectations/tests/struct_typedef_ns.rs
new file mode 100644
index 00000000..d7ada7fd
--- /dev/null
+++ b/tests/expectations/tests/struct_typedef_ns.rs
@@ -0,0 +1,79 @@
+/* automatically generated by rust-bindgen */
+
+
+#![allow(non_snake_case)]
+
+
+pub mod root {
+ #[allow(unused_imports)]
+ use self::super::root;
+ pub mod whatever {
+ #[allow(unused_imports)]
+ use self::super::super::root;
+ #[repr(C)]
+ #[derive(Debug, Default, Copy)]
+ pub struct _bindgen_ty_1 {
+ pub foo: ::std::os::raw::c_int,
+ }
+ #[test]
+ fn bindgen_test_layout__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 4usize ,
+ concat ! ( "Size of: " , stringify ! ( _bindgen_ty_1 )
+ ));
+ assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 4usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const _bindgen_ty_1 ) ) . foo as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( _bindgen_ty_1 )
+ , "::" , stringify ! ( foo ) ));
+ }
+ impl Clone for _bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+ }
+ pub type typedef_struct = root::whatever::_bindgen_ty_1;
+ pub const whatever_BAR: root::whatever::_bindgen_ty_2 =
+ _bindgen_ty_2::BAR;
+ #[repr(u32)]
+ #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+ pub enum _bindgen_ty_2 { BAR = 1, }
+ pub use self::super::super::root::whatever::_bindgen_ty_2 as
+ typedef_enum;
+ }
+ pub mod _bindgen_mod_id_12 {
+ #[allow(unused_imports)]
+ use self::super::super::root;
+ #[repr(C)]
+ #[derive(Debug, Default, Copy)]
+ pub struct _bindgen_ty_1 {
+ pub foo: ::std::os::raw::c_int,
+ }
+ #[test]
+ fn bindgen_test_layout__bindgen_ty_1() {
+ assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 4usize ,
+ concat ! ( "Size of: " , stringify ! ( _bindgen_ty_1 )
+ ));
+ assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 4usize ,
+ concat ! (
+ "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+ assert_eq! (unsafe {
+ & ( * ( 0 as * const _bindgen_ty_1 ) ) . foo as *
+ const _ as usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( _bindgen_ty_1 )
+ , "::" , stringify ! ( foo ) ));
+ }
+ impl Clone for _bindgen_ty_1 {
+ fn clone(&self) -> Self { *self }
+ }
+ pub type typedef_struct = root::_bindgen_mod_id_12::_bindgen_ty_1;
+ pub const _bindgen_mod_id_12_BAR:
+ root::_bindgen_mod_id_12::_bindgen_ty_2 =
+ _bindgen_ty_2::BAR;
+ #[repr(u32)]
+ #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+ pub enum _bindgen_ty_2 { BAR = 1, }
+ pub use self::super::super::root::_bindgen_mod_id_12::_bindgen_ty_2 as
+ typedef_enum;
+ }
+}
diff --git a/tests/expectations/tests/struct_with_bitfields.rs b/tests/expectations/tests/struct_with_bitfields.rs
index 861ad662..2f9ba42d 100644
--- a/tests/expectations/tests/struct_with_bitfields.rs
+++ b/tests/expectations/tests/struct_with_bitfields.rs
@@ -27,103 +27,3 @@ fn bindgen_test_layout_bitfield() {
impl Clone for bitfield {
fn clone(&self) -> Self { *self }
}
-impl bitfield {
- #[inline]
- pub fn a(&self) -> ::std::os::raw::c_ushort {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (1usize as u8)) >>
- 0u32) as u16)
- }
- }
- #[inline]
- pub fn set_a(&mut self, val: ::std::os::raw::c_ushort) {
- self._bitfield_1 &= !(1usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 0u32) & (1usize as u8);
- }
- #[inline]
- pub fn b(&self) -> ::std::os::raw::c_ushort {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (2usize as u8)) >>
- 1u32) as u16)
- }
- }
- #[inline]
- pub fn set_b(&mut self, val: ::std::os::raw::c_ushort) {
- self._bitfield_1 &= !(2usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 1u32) & (2usize as u8);
- }
- #[inline]
- pub fn c(&self) -> ::std::os::raw::c_ushort {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (4usize as u8)) >>
- 2u32) as u16)
- }
- }
- #[inline]
- pub fn set_c(&mut self, val: ::std::os::raw::c_ushort) {
- self._bitfield_1 &= !(4usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 2u32) & (4usize as u8);
- }
- #[inline]
- pub fn at_offset_3(&self) -> ::std::os::raw::c_ushort {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (8usize as u8)) >>
- 3u32) as u16)
- }
- }
- #[inline]
- pub fn set_at_offset_3(&mut self, val: ::std::os::raw::c_ushort) {
- self._bitfield_1 &= !(8usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 3u32) & (8usize as u8);
- }
- #[inline]
- pub fn at_offset_4(&self) -> ::std::os::raw::c_ushort {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (48usize as u8)) >>
- 4u32) as u16)
- }
- }
- #[inline]
- pub fn set_at_offset_4(&mut self, val: ::std::os::raw::c_ushort) {
- self._bitfield_1 &= !(48usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 4u32) & (48usize as u8);
- }
- #[inline]
- pub fn d(&self) -> ::std::os::raw::c_ushort {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (192usize as u8)) >>
- 6u32) as u16)
- }
- }
- #[inline]
- pub fn set_d(&mut self, val: ::std::os::raw::c_ushort) {
- self._bitfield_1 &= !(192usize as u8);
- self._bitfield_1 |= ((val as u16 as u8) << 6u32) & (192usize as u8);
- }
- #[inline]
- pub fn f(&self) -> ::std::os::raw::c_uint {
- unsafe {
- ::std::mem::transmute(((self._bitfield_2 & (3usize as u8)) >>
- 0u32) as u32)
- }
- }
- #[inline]
- pub fn set_f(&mut self, val: ::std::os::raw::c_uint) {
- self._bitfield_2 &= !(3usize as u8);
- self._bitfield_2 |= ((val as u32 as u8) << 0u32) & (3usize as u8);
- }
- #[inline]
- pub fn g(&self) -> ::std::os::raw::c_uint {
- unsafe {
- ::std::mem::transmute(((self._bitfield_3 &
- (4294967295usize as u32)) >> 0u32) as
- u32)
- }
- }
- #[inline]
- pub fn set_g(&mut self, val: ::std::os::raw::c_uint) {
- self._bitfield_3 &= !(4294967295usize as u32);
- self._bitfield_3 |=
- ((val as u32 as u32) << 0u32) & (4294967295usize as u32);
- }
-}
diff --git a/tests/expectations/tests/union_fields.rs b/tests/expectations/tests/union_fields.rs
index 823a0b8b..8c8ef7d5 100644
--- a/tests/expectations/tests/union_fields.rs
+++ b/tests/expectations/tests/union_fields.rs
@@ -30,35 +30,34 @@ impl <T> ::std::fmt::Debug for __BindgenUnionField<T> {
}
#[repr(C)]
#[derive(Debug, Default, Copy)]
-pub struct _bindgen_ty_1 {
+pub struct nsStyleUnion {
pub mInt: __BindgenUnionField<::std::os::raw::c_int>,
pub mFloat: __BindgenUnionField<f32>,
pub mPointer: __BindgenUnionField<*mut ::std::os::raw::c_void>,
pub bindgen_union_field: u64,
}
#[test]
-fn bindgen_test_layout__bindgen_ty_1() {
- assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 8usize , concat ! (
- "Size of: " , stringify ! ( _bindgen_ty_1 ) ));
- assert_eq! (::std::mem::align_of::<_bindgen_ty_1>() , 8usize , concat ! (
- "Alignment of " , stringify ! ( _bindgen_ty_1 ) ));
+fn bindgen_test_layout_nsStyleUnion() {
+ assert_eq!(::std::mem::size_of::<nsStyleUnion>() , 8usize , concat ! (
+ "Size of: " , stringify ! ( nsStyleUnion ) ));
+ assert_eq! (::std::mem::align_of::<nsStyleUnion>() , 8usize , concat ! (
+ "Alignment of " , stringify ! ( nsStyleUnion ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) . mInt as * const _ as
+ & ( * ( 0 as * const nsStyleUnion ) ) . mInt as * const _ as
usize } , 0usize , concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
- , stringify ! ( mInt ) ));
+ "Alignment of field: " , stringify ! ( nsStyleUnion ) , "::" ,
+ stringify ! ( mInt ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) . mFloat as * const _
- as usize } , 0usize , concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
- , stringify ! ( mFloat ) ));
+ & ( * ( 0 as * const nsStyleUnion ) ) . mFloat as * const _ as
+ usize } , 0usize , concat ! (
+ "Alignment of field: " , stringify ! ( nsStyleUnion ) , "::" ,
+ stringify ! ( mFloat ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) . mPointer as * const _
+ & ( * ( 0 as * const nsStyleUnion ) ) . mPointer as * const _
as usize } , 0usize , concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
- , stringify ! ( mPointer ) ));
+ "Alignment of field: " , stringify ! ( nsStyleUnion ) , "::" ,
+ stringify ! ( mPointer ) ));
}
-impl Clone for _bindgen_ty_1 {
+impl Clone for nsStyleUnion {
fn clone(&self) -> Self { *self }
}
-pub type nsStyleUnion = _bindgen_ty_1;
diff --git a/tests/expectations/tests/union_with_anon_struct_bitfield.rs b/tests/expectations/tests/union_with_anon_struct_bitfield.rs
index d14a38bb..f102117f 100644
--- a/tests/expectations/tests/union_with_anon_struct_bitfield.rs
+++ b/tests/expectations/tests/union_with_anon_struct_bitfield.rs
@@ -39,6 +39,7 @@ pub struct foo {
#[derive(Debug, Default, Copy)]
pub struct foo__bindgen_ty_1 {
pub _bitfield_1: u32,
+ pub __bindgen_align: [u32; 0usize],
}
#[test]
fn bindgen_test_layout_foo__bindgen_ty_1() {
@@ -50,34 +51,6 @@ fn bindgen_test_layout_foo__bindgen_ty_1() {
impl Clone for foo__bindgen_ty_1 {
fn clone(&self) -> Self { *self }
}
-impl foo__bindgen_ty_1 {
- #[inline]
- pub fn b(&self) -> ::std::os::raw::c_int {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (127usize as u32)) >>
- 0u32) as u32)
- }
- }
- #[inline]
- pub fn set_b(&mut self, val: ::std::os::raw::c_int) {
- self._bitfield_1 &= !(127usize as u32);
- self._bitfield_1 |= ((val as u32 as u32) << 0u32) & (127usize as u32);
- }
- #[inline]
- pub fn c(&self) -> ::std::os::raw::c_int {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (4294967168usize as u32)) >> 7u32) as
- u32)
- }
- }
- #[inline]
- pub fn set_c(&mut self, val: ::std::os::raw::c_int) {
- self._bitfield_1 &= !(4294967168usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 7u32) & (4294967168usize as u32);
- }
-}
#[test]
fn bindgen_test_layout_foo() {
assert_eq!(::std::mem::size_of::<foo>() , 4usize , concat ! (
diff --git a/tests/expectations/tests/unknown_attr.rs b/tests/expectations/tests/unknown_attr.rs
index efb86102..23d9ff75 100644
--- a/tests/expectations/tests/unknown_attr.rs
+++ b/tests/expectations/tests/unknown_attr.rs
@@ -6,30 +6,29 @@
#[repr(C)]
#[derive(Debug, Default, Copy)]
-pub struct _bindgen_ty_1 {
+pub struct max_align_t {
pub __clang_max_align_nonce1: ::std::os::raw::c_longlong,
pub __bindgen_padding_0: u64,
- pub __clang_max_align_nonce2: f64,
+ pub __clang_max_align_nonce2: ::std::os::raw::c_longlong,
pub __bindgen_padding_1: u64,
}
#[test]
-fn bindgen_test_layout__bindgen_ty_1() {
- assert_eq!(::std::mem::size_of::<_bindgen_ty_1>() , 32usize , concat ! (
- "Size of: " , stringify ! ( _bindgen_ty_1 ) ));
+fn bindgen_test_layout_max_align_t() {
+ assert_eq!(::std::mem::size_of::<max_align_t>() , 32usize , concat ! (
+ "Size of: " , stringify ! ( max_align_t ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) .
+ & ( * ( 0 as * const max_align_t ) ) .
__clang_max_align_nonce1 as * const _ as usize } , 0usize ,
concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
- , stringify ! ( __clang_max_align_nonce1 ) ));
+ "Alignment of field: " , stringify ! ( max_align_t ) , "::" ,
+ stringify ! ( __clang_max_align_nonce1 ) ));
assert_eq! (unsafe {
- & ( * ( 0 as * const _bindgen_ty_1 ) ) .
+ & ( * ( 0 as * const max_align_t ) ) .
__clang_max_align_nonce2 as * const _ as usize } , 16usize ,
concat ! (
- "Alignment of field: " , stringify ! ( _bindgen_ty_1 ) , "::"
- , stringify ! ( __clang_max_align_nonce2 ) ));
+ "Alignment of field: " , stringify ! ( max_align_t ) , "::" ,
+ stringify ! ( __clang_max_align_nonce2 ) ));
}
-impl Clone for _bindgen_ty_1 {
+impl Clone for max_align_t {
fn clone(&self) -> Self { *self }
}
-pub type max_align_t = _bindgen_ty_1;
diff --git a/tests/expectations/tests/weird_bitfields.rs b/tests/expectations/tests/weird_bitfields.rs
index a2841e34..466904e8 100644
--- a/tests/expectations/tests/weird_bitfields.rs
+++ b/tests/expectations/tests/weird_bitfields.rs
@@ -15,7 +15,7 @@ pub enum nsStyleSVGOpacitySource {
#[derive(Debug, Copy)]
pub struct Weird {
pub mStrokeDasharrayLength: ::std::os::raw::c_uint,
- pub _bitfield_1: u32,
+ pub _bitfield_1: [u16; 2usize],
pub mClipRule: ::std::os::raw::c_uchar,
pub mColorInterpolation: ::std::os::raw::c_uchar,
pub mColorInterpolationFilters: ::std::os::raw::c_uchar,
@@ -27,7 +27,8 @@ pub struct Weird {
pub mStrokeLinejoin: ::std::os::raw::c_uchar,
pub mTextAnchor: ::std::os::raw::c_uchar,
pub mTextRendering: ::std::os::raw::c_uchar,
- pub _bitfield_2: u16,
+ pub _bitfield_2: u8,
+ pub _bitfield_3: u8,
}
#[test]
fn bindgen_test_layout_Weird() {
@@ -102,92 +103,3 @@ impl Clone for Weird {
impl Default for Weird {
fn default() -> Self { unsafe { ::std::mem::zeroed() } }
}
-impl Weird {
- #[inline]
- pub fn bitTest(&self) -> ::std::os::raw::c_uint {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 & (65535usize as u32)) >>
- 0u32) as u32)
- }
- }
- #[inline]
- pub fn set_bitTest(&mut self, val: ::std::os::raw::c_uint) {
- self._bitfield_1 &= !(65535usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 0u32) & (65535usize as u32);
- }
- #[inline]
- pub fn bitTest2(&self) -> ::std::os::raw::c_uint {
- unsafe {
- ::std::mem::transmute(((self._bitfield_1 &
- (2147418112usize as u32)) >> 16u32) as
- u32)
- }
- }
- #[inline]
- pub fn set_bitTest2(&mut self, val: ::std::os::raw::c_uint) {
- self._bitfield_1 &= !(2147418112usize as u32);
- self._bitfield_1 |=
- ((val as u32 as u32) << 16u32) & (2147418112usize as u32);
- }
- #[inline]
- pub fn mFillOpacitySource(&self) -> nsStyleSVGOpacitySource {
- unsafe {
- ::std::mem::transmute(((self._bitfield_2 & (7usize as u16)) >>
- 0u32) as u32)
- }
- }
- #[inline]
- pub fn set_mFillOpacitySource(&mut self, val: nsStyleSVGOpacitySource) {
- self._bitfield_2 &= !(7usize as u16);
- self._bitfield_2 |= ((val as u32 as u16) << 0u32) & (7usize as u16);
- }
- #[inline]
- pub fn mStrokeOpacitySource(&self) -> nsStyleSVGOpacitySource {
- unsafe {
- ::std::mem::transmute(((self._bitfield_2 & (56usize as u16)) >>
- 3u32) as u32)
- }
- }
- #[inline]
- pub fn set_mStrokeOpacitySource(&mut self, val: nsStyleSVGOpacitySource) {
- self._bitfield_2 &= !(56usize as u16);
- self._bitfield_2 |= ((val as u32 as u16) << 3u32) & (56usize as u16);
- }
- #[inline]
- pub fn mStrokeDasharrayFromObject(&self) -> bool {
- unsafe {
- ::std::mem::transmute(((self._bitfield_2 & (64usize as u16)) >>
- 6u32) as u8)
- }
- }
- #[inline]
- pub fn set_mStrokeDasharrayFromObject(&mut self, val: bool) {
- self._bitfield_2 &= !(64usize as u16);
- self._bitfield_2 |= ((val as u8 as u16) << 6u32) & (64usize as u16);
- }
- #[inline]
- pub fn mStrokeDashoffsetFromObject(&self) -> bool {
- unsafe {
- ::std::mem::transmute(((self._bitfield_2 & (128usize as u16)) >>
- 7u32) as u8)
- }
- }
- #[inline]
- pub fn set_mStrokeDashoffsetFromObject(&mut self, val: bool) {
- self._bitfield_2 &= !(128usize as u16);
- self._bitfield_2 |= ((val as u8 as u16) << 7u32) & (128usize as u16);
- }
- #[inline]
- pub fn mStrokeWidthFromObject(&self) -> bool {
- unsafe {
- ::std::mem::transmute(((self._bitfield_2 & (256usize as u16)) >>
- 8u32) as u8)
- }
- }
- #[inline]
- pub fn set_mStrokeWidthFromObject(&mut self, val: bool) {
- self._bitfield_2 &= !(256usize as u16);
- self._bitfield_2 |= ((val as u8 as u16) << 8u32) & (256usize as u16);
- }
-}
diff --git a/tests/headers/bitfield_align.h b/tests/headers/bitfield_align.h
new file mode 100644
index 00000000..82b53099
--- /dev/null
+++ b/tests/headers/bitfield_align.h
@@ -0,0 +1,41 @@
+struct A {
+ unsigned char x;
+ unsigned b1 : 1;
+ unsigned b2 : 1;
+ unsigned b3 : 1;
+ unsigned b4 : 1;
+ unsigned b5 : 1;
+ unsigned b6 : 1;
+ unsigned b7 : 1;
+ unsigned b8 : 1;
+ unsigned b9 : 1;
+ unsigned b10 : 1;
+ unsigned char y;
+};
+
+struct B {
+ unsigned foo : 31;
+ unsigned char bar : 1;
+};
+
+struct C {
+ unsigned char x;
+ unsigned b1 : 1;
+ unsigned b2 : 1;
+ unsigned baz;
+};
+
+struct Date1 {
+ unsigned short nWeekDay : 3; // 0..7 (3 bits)
+ unsigned short nMonthDay : 6; // 0..31 (6 bits)
+ unsigned short nMonth : 5; // 0..12 (5 bits)
+ unsigned short nYear : 8; // 0..100 (8 bits)
+};
+
+struct Date2 {
+ unsigned short nWeekDay : 3; // 0..7 (3 bits)
+ unsigned short nMonthDay : 6; // 0..31 (6 bits)
+ unsigned short nMonth : 5; // 0..12 (5 bits)
+ unsigned short nYear : 8; // 0..100 (8 bits)
+ unsigned char byte;
+};
diff --git a/tests/headers/layout_large_align_field.h b/tests/headers/layout_large_align_field.h
new file mode 100644
index 00000000..f4f412c6
--- /dev/null
+++ b/tests/headers/layout_large_align_field.h
@@ -0,0 +1,97 @@
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define RTE_CACHE_LINE_SIZE 64
+
+/**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
+
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+
+enum {
+ IP_LAST_FRAG_IDX, /**< index of last fragment */
+ IP_FIRST_FRAG_IDX, /**< index of first fragment */
+ IP_MIN_FRAG_NUM, /**< minimum number of fragments */
+ IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
+ /**< maximum number of fragments per packet */
+};
+
+/** @internal fragmented mbuf */
+struct ip_frag {
+ uint16_t ofs; /**< offset into the packet */
+ uint16_t len; /**< length of fragment */
+ struct rte_mbuf *mb; /**< fragment mbuf */
+};
+
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
+struct ip_frag_key {
+ uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
+ uint32_t id; /**< dst address */
+ uint32_t key_len; /**< src/dst key length */
+};
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/**
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
+ struct ip_frag_key key; /**< fragmentation key */
+ uint64_t start; /**< creation timestamp */
+ uint32_t total_size; /**< expected reassembled size */
+ uint32_t frag_size; /**< size of fragments received */
+ uint32_t last_idx; /**< index of next entry to fill */
+ struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
+} __rte_cache_aligned;
+
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
+
+/** fragmentation table statistics */
+struct ip_frag_tbl_stat {
+ uint64_t find_num; /**< total # of find/insert attempts. */
+ uint64_t add_num; /**< # of add ops. */
+ uint64_t del_num; /**< # of del ops. */
+ uint64_t reuse_num; /**< # of reuse (del/add) ops. */
+ uint64_t fail_total; /**< total # of add failures. */
+ uint64_t fail_nospace; /**< # of 'no space' add failures. */
+} __rte_cache_aligned;
+
+/** fragmentation table */
+struct rte_ip_frag_tbl {
+ uint64_t max_cycles; /**< ttl for table entries. */
+ uint32_t entry_mask; /**< hash value mask. */
+ uint32_t max_entries; /**< max entries allowed. */
+ uint32_t use_entries; /**< entries in use. */
+	uint32_t bucket_entries;      /**< hash associativity. */
+ uint32_t nb_entries; /**< total size of the table. */
+ uint32_t nb_buckets; /**< num of associativity lines. */
+ struct ip_frag_pkt *last; /**< last used entry. */
+ struct ip_pkt_list lru; /**< LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /**< statistics counters. */
+ __extension__ struct ip_frag_pkt pkt[0]; /**< hash table. */
+};
diff --git a/tests/headers/struct_typedef.h b/tests/headers/struct_typedef.h
new file mode 100644
index 00000000..fdce9a72
--- /dev/null
+++ b/tests/headers/struct_typedef.h
@@ -0,0 +1,15 @@
+typedef struct {
+ _Bool has_name;
+} typedef_named_struct;
+
+typedef struct {
+ void *no_name;
+} *struct_ptr_t, **struct_ptr_ptr_t;
+
+typedef enum {
+ ENUM_HAS_NAME=1
+} typedef_named_enum;
+
+typedef enum {
+ ENUM_IS_ANON
+} *enum_ptr_t, **enum_ptr_ptr_t;
diff --git a/tests/headers/struct_typedef_ns.hpp b/tests/headers/struct_typedef_ns.hpp
new file mode 100644
index 00000000..bc89eb2b
--- /dev/null
+++ b/tests/headers/struct_typedef_ns.hpp
@@ -0,0 +1,21 @@
+// bindgen-flags: --enable-cxx-namespaces
+
+namespace whatever {
+ typedef struct {
+ int foo;
+ } typedef_struct;
+
+ typedef enum {
+ BAR=1
+ } typedef_enum;
+}
+
+namespace {
+ typedef struct {
+ int foo;
+ } typedef_struct;
+
+ typedef enum {
+ BAR=1
+ } typedef_enum;
+}
diff --git a/tests/headers/unknown_attr.h b/tests/headers/unknown_attr.h
index f87e9f0b..1e89fb14 100644
--- a/tests/headers/unknown_attr.h
+++ b/tests/headers/unknown_attr.h
@@ -1,6 +1,6 @@
typedef struct {
long long __clang_max_align_nonce1
__attribute__((__aligned__(__alignof__(long long))));
- long double __clang_max_align_nonce2
+ long long __clang_max_align_nonce2
__attribute__((__aligned__(__alignof__(long double))));
} max_align_t;