Bug 1878375 - Synchronize vendored Rust libraries with mozilla-central. r=darktrojan

mozilla-central: 5f32152690020a32702097b55f1b5c44804d0db4
comm-central: 3989f0dd325a3366139e2cbc6c4afc75b001b916

Differential Revision: https://phabricator.services.mozilla.com/D211313

--HG--
extra : amend_source : af4821583c91b6df3ca62e87741881df3b01aa8e
Thunderbird Updatebot 2024-05-23 04:36:32 +00:00
Parent 7a60376027
Commit 55fa0729dd
110 changed files with 6047 additions and 13113 deletions


@@ -61,19 +61,19 @@ git = "https://github.com/mozilla/application-services"
rev = "e0563d725f852f617878ecc13a03cdf50c85cd5a"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/audioipc?rev=409e11f8de6288e9ddfe269654523735302e59e6"]
[source."git+https://github.com/mozilla/audioipc?rev=3495905752a4263827f5d43737f9ca3ed0243ce0"]
git = "https://github.com/mozilla/audioipc"
rev = "409e11f8de6288e9ddfe269654523735302e59e6"
rev = "3495905752a4263827f5d43737f9ca3ed0243ce0"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/cubeb-coreaudio-rs?rev=4ca174cf83ebe32b3198478c2211d69678845bc7"]
[source."git+https://github.com/mozilla/cubeb-coreaudio-rs?rev=0989726a1b9b640a30dfdf3ea005a12c73ab8155"]
git = "https://github.com/mozilla/cubeb-coreaudio-rs"
rev = "4ca174cf83ebe32b3198478c2211d69678845bc7"
rev = "0989726a1b9b640a30dfdf3ea005a12c73ab8155"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/cubeb-pulse-rs?rev=8ff972c8e2ec1782ff262ac4071c0415e69b1367"]
[source."git+https://github.com/mozilla/cubeb-pulse-rs?rev=8678dcab1c287de79c4c184ccc2e065bc62b70e2"]
git = "https://github.com/mozilla/cubeb-pulse-rs"
rev = "8ff972c8e2ec1782ff262ac4071c0415e69b1367"
rev = "8678dcab1c287de79c4c184ccc2e065bc62b70e2"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/midir.git?rev=85156e360a37d851734118104619f86bd18e94c6"]

74 rust/Cargo.lock (generated)

@@ -136,7 +136,7 @@ version = "0.38.0+1.3.281"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bb44936d800fea8f016d7f2311c6a4f97aebd5dc86f09906139ec848cf3a46f"
dependencies = [
"libloading 0.8.3",
"libloading",
]
[[package]]
@@ -236,7 +236,7 @@ dependencies = [
[[package]]
name = "audioipc2"
version = "0.6.0"
source = "git+https://github.com/mozilla/audioipc?rev=409e11f8de6288e9ddfe269654523735302e59e6#409e11f8de6288e9ddfe269654523735302e59e6"
source = "git+https://github.com/mozilla/audioipc?rev=3495905752a4263827f5d43737f9ca3ed0243ce0#3495905752a4263827f5d43737f9ca3ed0243ce0"
dependencies = [
"arrayvec",
"ashmem",
@@ -264,7 +264,7 @@ dependencies = [
[[package]]
name = "audioipc2-client"
version = "0.6.0"
source = "git+https://github.com/mozilla/audioipc?rev=409e11f8de6288e9ddfe269654523735302e59e6#409e11f8de6288e9ddfe269654523735302e59e6"
source = "git+https://github.com/mozilla/audioipc?rev=3495905752a4263827f5d43737f9ca3ed0243ce0#3495905752a4263827f5d43737f9ca3ed0243ce0"
dependencies = [
"audio_thread_priority",
"audioipc2",
@@ -275,7 +275,7 @@ dependencies = [
[[package]]
name = "audioipc2-server"
version = "0.6.0"
source = "git+https://github.com/mozilla/audioipc?rev=409e11f8de6288e9ddfe269654523735302e59e6#409e11f8de6288e9ddfe269654523735302e59e6"
source = "git+https://github.com/mozilla/audioipc?rev=3495905752a4263827f5d43737f9ca3ed0243ce0#3495905752a4263827f5d43737f9ca3ed0243ce0"
dependencies = [
"audio_thread_priority",
"audioipc2",
@@ -525,26 +525,6 @@ dependencies = [
"num_cpus",
]
[[package]]
name = "bytemuck"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"
dependencies = [
"bytemuck_derive",
]
[[package]]
name = "bytemuck_derive"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "byteorder"
version = "1.5.0"
@@ -696,13 +676,13 @@ checksum = "bb7bdea464ae038f09197b82430b921c53619fc8d2bcaf7b151013b3ca008017"
[[package]]
name = "clang-sys"
version = "1.6.0"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a"
checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1"
dependencies = [
"glob",
"libc",
"libloading 0.7.999",
"libloading",
]
[[package]]
@@ -801,7 +781,7 @@ dependencies = [
[[package]]
name = "coreaudio-sys-utils"
version = "0.1.0"
source = "git+https://github.com/mozilla/cubeb-coreaudio-rs?rev=4ca174cf83ebe32b3198478c2211d69678845bc7#4ca174cf83ebe32b3198478c2211d69678845bc7"
source = "git+https://github.com/mozilla/cubeb-coreaudio-rs?rev=0989726a1b9b640a30dfdf3ea005a12c73ab8155#0989726a1b9b640a30dfdf3ea005a12c73ab8155"
dependencies = [
"core-foundation-sys",
"coreaudio-sys",
@@ -985,27 +965,27 @@ dependencies = [
[[package]]
name = "cubeb"
version = "0.12.0"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db57570f2617f0214c11721e8d2325816d9dc936c2c472661ac5d90a30fba98"
checksum = "3d105547cf8036cdb30e796ce0d06832af4766106a44574402fa2fd3c861a042"
dependencies = [
"cubeb-core",
]
[[package]]
name = "cubeb-backend"
version = "0.12.0"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b00b0f3b84e315571bd8c4e18794180633066267a413f2f05bca65001adc8410"
checksum = "67361fe9b49b4599e2a230ce322529b6ddd91df14897c872dcede716f8fbca81"
dependencies = [
"cubeb-core",
]
[[package]]
name = "cubeb-core"
version = "0.12.0"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2380c03a7df0ea3744f6a210d6340f423935e53cbf2fd68ada84b5e808e46ac7"
checksum = "ac08d314dd1ec6d41d9ccdeec70899c98ed3b89845367000dd6096099481bc73"
dependencies = [
"bitflags 1.999.999",
"cubeb-sys",
@@ -1014,7 +994,7 @@ dependencies = [
[[package]]
name = "cubeb-coreaudio"
version = "0.1.0"
source = "git+https://github.com/mozilla/cubeb-coreaudio-rs?rev=4ca174cf83ebe32b3198478c2211d69678845bc7#4ca174cf83ebe32b3198478c2211d69678845bc7"
source = "git+https://github.com/mozilla/cubeb-coreaudio-rs?rev=0989726a1b9b640a30dfdf3ea005a12c73ab8155#0989726a1b9b640a30dfdf3ea005a12c73ab8155"
dependencies = [
"atomic",
"audio-mixer",
@@ -1033,7 +1013,7 @@ dependencies = [
[[package]]
name = "cubeb-pulse"
version = "0.5.0"
source = "git+https://github.com/mozilla/cubeb-pulse-rs?rev=8ff972c8e2ec1782ff262ac4071c0415e69b1367#8ff972c8e2ec1782ff262ac4071c0415e69b1367"
source = "git+https://github.com/mozilla/cubeb-pulse-rs?rev=8678dcab1c287de79c4c184ccc2e065bc62b70e2#8678dcab1c287de79c4c184ccc2e065bc62b70e2"
dependencies = [
"cubeb-backend",
"pulse",
@@ -1044,9 +1024,9 @@ dependencies = [
[[package]]
name = "cubeb-sys"
version = "0.12.0"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c20c457d7b34dad6e0c1a9c759c96b4420b9e9917a572998b81835799a07e1d"
checksum = "26073cd50c7b6ba4272204839f56921557609a0d67e092882cbb903df94cab39"
dependencies = [
"cmake",
"pkg-config",
@@ -1058,7 +1038,7 @@ version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5#18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
dependencies = [
"bitflags 2.5.0",
"libloading 0.8.3",
"libloading",
"winapi",
]
@@ -1487,7 +1467,6 @@ version = "0.22.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b52c2ef4a78da0ba68fbe1fd920627411096d2ac478f7f4c9f3a54ba6705bade"
dependencies = [
"bytemuck",
"num-traits",
"serde",
]
@@ -2766,13 +2745,6 @@ dependencies = [
"pkg-config",
]
[[package]]
name = "libloading"
version = "0.7.999"
dependencies = [
"libloading 0.8.3",
]
[[package]]
name = "libloading"
version = "0.8.3"
@@ -3902,7 +3874,7 @@ dependencies = [
[[package]]
name = "pulse"
version = "0.3.0"
source = "git+https://github.com/mozilla/cubeb-pulse-rs?rev=8ff972c8e2ec1782ff262ac4071c0415e69b1367#8ff972c8e2ec1782ff262ac4071c0415e69b1367"
source = "git+https://github.com/mozilla/cubeb-pulse-rs?rev=8678dcab1c287de79c4c184ccc2e065bc62b70e2#8678dcab1c287de79c4c184ccc2e065bc62b70e2"
dependencies = [
"bitflags 2.5.0",
"pulse-ffi",
@@ -3911,7 +3883,7 @@ dependencies = [
[[package]]
name = "pulse-ffi"
version = "0.1.0"
source = "git+https://github.com/mozilla/cubeb-pulse-rs?rev=8ff972c8e2ec1782ff262ac4071c0415e69b1367#8ff972c8e2ec1782ff262ac4071c0415e69b1367"
source = "git+https://github.com/mozilla/cubeb-pulse-rs?rev=8678dcab1c287de79c4c184ccc2e065bc62b70e2#8678dcab1c287de79c4c184ccc2e065bc62b70e2"
dependencies = [
"libc",
]
@@ -5462,7 +5434,6 @@ dependencies = [
"bincode",
"bitflags 2.5.0",
"build-parallel",
"bytemuck",
"byteorder",
"derive_more 0.99.999",
"etagere",
@@ -5499,7 +5470,6 @@ version = "0.62.0"
dependencies = [
"app_units",
"bitflags 2.5.0",
"bytemuck",
"byteorder",
"crossbeam-channel",
"euclid",
@@ -5615,7 +5585,7 @@ dependencies = [
"js-sys",
"khronos-egl",
"libc",
"libloading 0.8.3",
"libloading",
"log",
"metal",
"naga",


@@ -45,7 +45,6 @@ autocfg = { path = "../../third_party/rust/autocfg" }
goblin = { path = "../../build/rust/goblin" }
memoffset = { path = "../../build/rust/memoffset" }
hashbrown = { path = "../../build/rust/hashbrown" }
libloading = { path = "../../build/rust/libloading" }
socket2 = { path = "../../build/rust/socket2" }
js-sys = { path = "../../build/rust/dummy-web/js-sys" }
wasm-bindgen = { path = "../../build/rust/dummy-web/wasm-bindgen" }


@@ -1 +1 @@
{"mc_workspace_toml": "6f25d02c89be33ad1cffaaeb26f77be4bded63874912deca26a2d589168697dac054f64f96579d6abba291a4bdca9e7897457161e98703a44bcaf84e759c8ac3", "mc_gkrust_toml": "7b85230288493ac65c5496e68dd5b14661b51bc7667784c7afd8608bccb814195ca20d8e6a29f207190024607037f6621fa527c370be69bb9e28fc1cb51a8700", "mc_cargo_lock": "18476891b69803e983b8d97c52a21fd8540ccc29434b1a10fb858c61c2ec909cb3fd7f84407c4785cc90e499e91dcfa26811d19c0530a937080fb3282c27cebb"}
{"mc_workspace_toml": "f1ce18006dbf908d8c7444f4bb796d0958b2492c79c7831b510a1093250eca426596d3f0ca27355a7e885e0c93179f8205a2ccc738ee0031c63b09b60e089c00", "mc_gkrust_toml": "63065de10ae7b60f6a36ae337c06879fc727ec58f881a5e9f08d0fd18a22dbe49c55a562f27bd7a3c44489e6be8d4a20a00cab12fa622e001a10d11a1267f7cc", "mc_cargo_lock": "fb1df8fb8b5bd6187dc786fc2af5fb9dd4bb44666b6ef51088fc91e50f2b244009cd176d75b1dc345cb3afe7464fc9aca1fa0ab244fdf10481582d7a1e27e705"}


@@ -20,8 +20,8 @@ ews_xpcom = { version = "0.1.0", path = "../ews_xpcom" }
aa-stroke = { git = "https://github.com/FirefoxGraphics/aa-stroke", rev = "d94278ed9c7020f50232689a26d1277eb0eb74d2" }
app_services_logger = { path = "../../../services/common/app_services_logger" }
audio_thread_priority = { version = "0.32" }
audioipc2-client = { git = "https://github.com/mozilla/audioipc", rev = "409e11f8de6288e9ddfe269654523735302e59e6", optional = true }
audioipc2-server = { git = "https://github.com/mozilla/audioipc", rev = "409e11f8de6288e9ddfe269654523735302e59e6", optional = true }
audioipc2-client = { git = "https://github.com/mozilla/audioipc", rev = "3495905752a4263827f5d43737f9ca3ed0243ce0", optional = true }
audioipc2-server = { git = "https://github.com/mozilla/audioipc", rev = "3495905752a4263827f5d43737f9ca3ed0243ce0", optional = true }
authrs_bridge = { path = "../../../dom/webauthn/authrs_bridge" }
binary_http = { path = "../../../netwerk/protocol/http/binary_http" }
bitsdownload = { path = "../../../toolkit/components/bitsdownload", optional = true }
@@ -31,9 +31,9 @@ cert_storage = { path = "../../../security/manager/ssl/cert_storage" }
chardetng_c = { version = "0.1.1" }
cose-c = { version = "0.1.5" }
crypto_hash = { path = "../../../security/manager/ssl/crypto_hash" }
cubeb-coreaudio = { git = "https://github.com/mozilla/cubeb-coreaudio-rs", rev = "4ca174cf83ebe32b3198478c2211d69678845bc7", optional = true }
cubeb-pulse = { git = "https://github.com/mozilla/cubeb-pulse-rs", rev = "8ff972c8e2ec1782ff262ac4071c0415e69b1367", optional = true, features = ['pulse-dlopen'] }
cubeb-sys = { version = "0.12.0", optional = true, features = ['gecko-in-tree'] }
cubeb-coreaudio = { git = "https://github.com/mozilla/cubeb-coreaudio-rs", rev = "0989726a1b9b640a30dfdf3ea005a12c73ab8155", optional = true }
cubeb-pulse = { git = "https://github.com/mozilla/cubeb-pulse-rs", rev = "8678dcab1c287de79c4c184ccc2e065bc62b70e2", optional = true, features = ['pulse-dlopen'] }
cubeb-sys = { version = "0.13", optional = true, features = ['gecko-in-tree'] }
dap_ffi = { path = "../../../toolkit/components/telemetry/dap/ffi" }
data-encoding-ffi = { path = "../../../dom/fs/parent/rust/data-encoding-ffi" }
data_storage = { path = "../../../security/manager/ssl/data_storage" }


@@ -1 +1 @@
{"files":{"Cargo.toml":"e94c46bbd290f02adccc7ae932285416d7e021bfde80abb2fb31a2c05426e732","cbindgen.toml":"fb6abe1671497f432a06e40b1db7ed7cd2cceecbd9a2382193ad7534e8855e34","src/context.rs":"a0559e92b554ef3156ab2bf2f1424555c8ef4a7977b9f43ac8500a9f399f8d99","src/lib.rs":"c87d9d57a16a9286cde730978db692df0fbc70cc69dd4f4677198d6843031fd8","src/send_recv.rs":"859abe75b521eb4297c84b30423814b5b87f3c7741ad16fe72189212e123e1ac","src/stream.rs":"90dc6a85552f3569ab1847de4247a46bcff2f5aef0c4d43fa2376589df015b25"},"package":null}
{"files":{"Cargo.toml":"b4fad65749eb0988ce4e6b6a2aae51e58ae22eca97cf61dfb011e951a0909f0e","cbindgen.toml":"fb6abe1671497f432a06e40b1db7ed7cd2cceecbd9a2382193ad7534e8855e34","src/context.rs":"a0559e92b554ef3156ab2bf2f1424555c8ef4a7977b9f43ac8500a9f399f8d99","src/lib.rs":"c87d9d57a16a9286cde730978db692df0fbc70cc69dd4f4677198d6843031fd8","src/send_recv.rs":"859abe75b521eb4297c84b30423814b5b87f3c7741ad16fe72189212e123e1ac","src/stream.rs":"90dc6a85552f3569ab1847de4247a46bcff2f5aef0c4d43fa2376589df015b25"},"package":null}


@@ -21,7 +21,7 @@ description = "Cubeb Backend for talking to remote cubeb server."
license = "ISC"
[dependencies]
cubeb-backend = "0.12"
cubeb-backend = "0.13"
log = "0.4"
[dependencies.audio_thread_priority]


@@ -1 +1 @@
{"files":{"Cargo.toml":"77997660e305851d9c0e656aac7159b999452a36f3436d8b2f402edd36fef853","cbindgen.toml":"fb6abe1671497f432a06e40b1db7ed7cd2cceecbd9a2382193ad7534e8855e34","src/lib.rs":"d70079c66de72c3469504f1f0c9cf5e510644cac17f2d8300b8d12218740e07b","src/server.rs":"187e2236aa9f2fb6cc4a533d40714a71504afa5ef9d849ac28b7f26032859c29"},"package":null}
{"files":{"Cargo.toml":"62eab883f31c0c088ff865fe2e4305d987b7b534f6cdfe1e5812072a2ec13f8b","cbindgen.toml":"fb6abe1671497f432a06e40b1db7ed7cd2cceecbd9a2382193ad7534e8855e34","src/lib.rs":"d70079c66de72c3469504f1f0c9cf5e510644cac17f2d8300b8d12218740e07b","src/server.rs":"187e2236aa9f2fb6cc4a533d40714a71504afa5ef9d849ac28b7f26032859c29"},"package":null}


@@ -21,7 +21,7 @@ description = "Remote cubeb server"
license = "ISC"
[dependencies]
cubeb-core = "0.12.0"
cubeb-core = "0.13"
log = "0.4"
once_cell = "1.2.0"
slab = "0.4"


@@ -1 +1 @@
{"files":{"Cargo.toml":"dff67ba050be15275d357b7f001df6caa6ae1f93b2acd61ac7d6ee269a1f5960","benches/serialization.rs":"d56855d868dab6aa22c8b03a61084535351b76c94b68d8b1d20764e352fe473f","build.rs":"65df9a97c6cdaa3faf72581f04ac289197b0b1797d69d22c1796e957ff1089e2","src/codec.rs":"86068272e220696d8d7e369072326349e7598e5a24223d98179c3251bb7b3ff1","src/errors.rs":"67a4a994d0724397657581cde153bdfc05ce86e7efc467f23fafc8f64df80fa4","src/ipccore.rs":"db73e916468c54d3497d75ffcab3bf23067771ed7b2e1a23c714429f56f59ec3","src/lib.rs":"a6fcac8b44318435db60313d3ef32ff3fada390bea8978c8414c40744998b98b","src/messages.rs":"d4f6d4f41b7fd3cc7deae726657e1100f315f4cd10c5fe6ce8a57c03c8e26ca9","src/rpccore.rs":"025b6614f1c42b96b0a8e74fd7881032d338c66e0d67ec0af70f910a9e30ebe1","src/shm.rs":"c00d16f4af510d12e704ae865f7348ad64ddef180e42b18e7dd95c4be35a9c80","src/sys/mod.rs":"e6fa1d260abf093e1f7b50185195e2d3aee0eb8c9774c6f253953b5896d838f3","src/sys/unix/cmsg.rs":"9529e8f8429db86f7c5df132953d3054e603852270f3c6938cdb5f630b2711f1","src/sys/unix/cmsghdr.c":"d7344b3dc15cdce410c68669b848bb81f7fe36362cd3699668cb613fa05180f8","src/sys/unix/mod.rs":"59835f0d5509940078b1820a54f49fc5514adeb3e45e7d21e3ab917431da2e74","src/sys/unix/msg.rs":"0e297d73bae9414184f85c2209cca0a3fde6d999a3f1d3f42faa3f56b6d57233","src/sys/windows/mod.rs":"7eaabb76e62c6962b636320e2bbf79a78fce61659c799a798f7dd6d56b0be8a1"},"package":null}
{"files":{"Cargo.toml":"5dc7153bf7291eea52bdc5561440c1c646272f8c913e8782bc2c84b7ed2f8b76","benches/serialization.rs":"d56855d868dab6aa22c8b03a61084535351b76c94b68d8b1d20764e352fe473f","build.rs":"65df9a97c6cdaa3faf72581f04ac289197b0b1797d69d22c1796e957ff1089e2","src/codec.rs":"86068272e220696d8d7e369072326349e7598e5a24223d98179c3251bb7b3ff1","src/errors.rs":"67a4a994d0724397657581cde153bdfc05ce86e7efc467f23fafc8f64df80fa4","src/ipccore.rs":"db73e916468c54d3497d75ffcab3bf23067771ed7b2e1a23c714429f56f59ec3","src/lib.rs":"a6fcac8b44318435db60313d3ef32ff3fada390bea8978c8414c40744998b98b","src/messages.rs":"d4f6d4f41b7fd3cc7deae726657e1100f315f4cd10c5fe6ce8a57c03c8e26ca9","src/rpccore.rs":"025b6614f1c42b96b0a8e74fd7881032d338c66e0d67ec0af70f910a9e30ebe1","src/shm.rs":"c00d16f4af510d12e704ae865f7348ad64ddef180e42b18e7dd95c4be35a9c80","src/sys/mod.rs":"e6fa1d260abf093e1f7b50185195e2d3aee0eb8c9774c6f253953b5896d838f3","src/sys/unix/cmsg.rs":"9529e8f8429db86f7c5df132953d3054e603852270f3c6938cdb5f630b2711f1","src/sys/unix/cmsghdr.c":"d7344b3dc15cdce410c68669b848bb81f7fe36362cd3699668cb613fa05180f8","src/sys/unix/mod.rs":"59835f0d5509940078b1820a54f49fc5514adeb3e45e7d21e3ab917431da2e74","src/sys/unix/msg.rs":"25244de3eba920fa42e032f8fa4ea4913a9fdeb5124ade61e707f6cc6dd946b0","src/sys/windows/mod.rs":"7eaabb76e62c6962b636320e2bbf79a78fce61659c799a798f7dd6d56b0be8a1"},"package":null}

2 third_party/rust/audioipc2/Cargo.toml (vendored)

@@ -29,7 +29,7 @@ bincode = "1.3"
byteorder = "1"
bytes = "1"
crossbeam-queue = "0.3"
cubeb = "0.12"
cubeb = "0.13"
log = "0.4"
scopeguard = "1.1.0"
serde = "1"


@@ -34,7 +34,7 @@ pub(crate) fn recv_msg_with_flags(
flags: libc::c_int,
) -> io::Result<(usize, usize, libc::c_int)> {
let slice = unix::as_os_slice_mut(bufs);
let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
let len = cmp::min(<libc::c_int>::MAX as usize, slice.len());
let (control, controllen) = if cmsg.len() == 0 {
(ptr::null_mut(), 0)
} else {
@@ -63,7 +63,7 @@ pub(crate) fn send_msg_with_flags(
flags: libc::c_int,
) -> io::Result<usize> {
let slice = unix::as_os_slice(bufs);
let len = cmp::min(<libc::c_int>::max_value() as usize, slice.len());
let len = cmp::min(<libc::c_int>::MAX as usize, slice.len());
let (control, controllen) = if cmsg.is_empty() {
(ptr::null_mut(), 0)
} else {
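The only change in this vendored msg.rs hunk is mechanical: the legacy `max_value()` method is replaced with the `MAX` associated constant (available since Rust 1.43). A minimal standalone sketch of the same clamping pattern, with `i32` standing in for `libc::c_int` (an assumption, so the snippet runs without the libc crate):

```rust
use std::cmp;

// Clamp a buffer count to what fits in a C int, mirroring the pattern in
// recv_msg_with_flags/send_msg_with_flags above. `i32` stands in for
// libc::c_int here purely for illustration.
fn clamp_len(len: usize) -> i32 {
    cmp::min(i32::MAX as usize, len) as i32
}

fn main() {
    assert_eq!(clamp_len(16), 16);
    assert_eq!(clamp_len(usize::MAX), i32::MAX);
}
```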


@@ -1 +0,0 @@
{"files":{"Cargo.toml":"695a6f6acca3567c6bfd2c9130c5f0ae9703a10fbb917a5545d8f365627adef5","LICENSE-APACHE":"870e20c217d15bcfcbe53d7c5867cd8fac44a4ca0b41fc1eb843557e16063eba","LICENSE-MIT":"0b2d108c9c686a74ac312990ee8377902756a2a081a7af3b0f9d68abf0a8f1a1","LICENSE-ZLIB":"682b4c81b85e83ce6cc6e1ace38fdd97aeb4de0e972bd2b44aa0916c54af8c96","README.md":"167493de1f1ad16d13c778494ae344cd71306622c89d19002eaf7f4185c1f728","changelog.md":"ee1cec3147cb82f540841653edba28d90d726af0413e42979b95f00a22af2c05","rustfmt.toml":"f4c215534437936f924c937dbb1677f614761589300d6b389f3b518b3eb551b8","src/allocation.rs":"996f500fd89e19c8f44bc7b7c6d097efaf8ea0b659c4e6f6a506b49fd47fba1b","src/anybitpattern.rs":"0053be9c471e76d32acf237cb94dce49074a4711d4b0a199cf257e5de8b93f77","src/checked.rs":"311c268a8afd7006ad7bd6331f5e661dead186c3b7ab490859cee157b18d7ba3","src/contiguous.rs":"867e162651b435aa0298caad1d81f46877c22c74a2766d9e79be0ab3c615ce46","src/internal.rs":"ec4ed032d82bdb8e4039a648e7282dec14606d4175c7eea3f66a60e543c1c8ff","src/lib.rs":"e3982cc16eb38a10bd8c0179e48ee61be3c54d268255d34adf5ca91df9f0599c","src/must.rs":"20a4077f8fbdb0d2660bc754a874d05d007167a687d8ea9baf8411c4a751b73d","src/no_uninit.rs":"4ab2f5ed29bff0b33630661154eb548f3e55581bfcf576a90397b7f8d5323201","src/offset_of.rs":"2afd190ef0462b30ade786fe813a91e7bf41cc2fa99a1d79002cbafab5964f37","src/pod.rs":"0dd26433c0ad9c9a4882f175d5f056d54b5fcec905eb0df907c9ba4d8c828597","src/pod_in_option.rs":"73bbe1d69f32d909695ce26d131aa2d81eaa31e2b4532256ebfe1a6ba68675c1","src/transparent.rs":"0704a14de6af47c39c79b45ee3e63b28ba6500534cf7629f578f15b1ebf46f6d","src/zeroable.rs":"3897421dcfd66808a23c3c15114efc9be901c002355d365e889997cba8f80703","src/zeroable_in_option.rs":"f74799ac3eee50116ec63a0ae4d3e351e0ab7ac807d01b4b59027bf6a68d6de6","tests/array_tests.rs":"98ca7a0dcd93e65f70d4db19643e707cafae5a249561ab151998cedb89b2e036","tests/cast_slice_tests.rs":"83310a834e75214f711466b119729ed2b3f53b5c9714bc7a2fe3fd9e7a48f993","tests/checked_tests.rs":"27965acf20e46482b09ee56aaa2536868821be651a3b95052f40e554ecde9917","tests/derive.rs":"93b5ab70ecdd726811af9dee1702e23e964b8ceac59f727889f6a2678ad90d65","tests/doc_tests.rs":"f20708319fde62d8957909d51ee976fce394ad0891ebc4bbcf336ab026a34092","tests/offset_of_tests.rs":"fb5f91e17f984050969f8b06f1de58b5c1e80802c5deb992d3188f5ec274690f","tests/std_tests.rs":"967d4fb4cae24a374633c9b68f1ff65f86ba4c8a0e980adfe69dcaf60a9049c2","tests/transparent.rs":"ecef6e0987e28121b480942e58ce4534f13fe35667bde7f5c6e04e590b02f6a3","tests/wrapper_forgets.rs":"c6330546f6aa696245625056e7323b3916e3fb1a9fbecefe9c9e62d3726812d9"},"package":"5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"}

75 third_party/rust/bytemuck/Cargo.toml (vendored)

@@ -1,75 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "bytemuck"
version = "1.15.0"
authors = ["Lokathor <zefria@gmail.com>"]
exclude = ["/pedantic.bat"]
description = "A crate for mucking around with piles of bytes."
readme = "README.md"
keywords = [
"transmute",
"bytes",
"casting",
]
categories = [
"encoding",
"no-std",
]
license = "Zlib OR Apache-2.0 OR MIT"
repository = "https://github.com/Lokathor/bytemuck"
[package.metadata.docs.rs]
features = [
"nightly_docs",
"derive",
"extern_crate_alloc",
"extern_crate_std",
"zeroable_maybe_uninit",
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
"must_cast",
]
[package.metadata.playground]
features = [
"derive",
"extern_crate_alloc",
"extern_crate_std",
"zeroable_maybe_uninit",
"zeroable_atomics",
"min_const_generics",
"wasm_simd",
"must_cast",
]
[dependencies.bytemuck_derive]
version = "1.4"
optional = true
[features]
aarch64_simd = []
align_offset = []
derive = ["bytemuck_derive"]
extern_crate_alloc = []
extern_crate_std = ["extern_crate_alloc"]
min_const_generics = []
must_cast = []
nightly_docs = []
nightly_portable_simd = []
nightly_stdsimd = []
unsound_ptr_pod_impl = []
wasm_simd = []
zeroable_atomics = []
zeroable_maybe_uninit = []

61 third_party/rust/bytemuck/LICENSE-APACHE (vendored)

@@ -1,61 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

9 third_party/rust/bytemuck/LICENSE-MIT (vendored)

@@ -1,9 +0,0 @@
MIT License
Copyright (c) 2019 Daniel "Lokathor" Gee.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

11 third_party/rust/bytemuck/LICENSE-ZLIB (vendored)

@@ -1,11 +0,0 @@
Copyright (c) 2019 Daniel "Lokathor" Gee.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

60 third_party/rust/bytemuck/README.md (vendored)

@@ -1,60 +0,0 @@
* **[Latest Docs.rs Here](https://docs.rs/bytemuck/)**
[![License:Zlib](https://img.shields.io/badge/License-Zlib-brightgreen.svg)](https://opensource.org/licenses/Zlib)
![Minimum Rust Version](https://img.shields.io/badge/Min%20Rust-1.34-green.svg)
[![crates.io](https://img.shields.io/crates/v/bytemuck.svg)](https://crates.io/crates/bytemuck)
# bytemuck
A crate for mucking around with piles of bytes.
This crate lets you safely perform "bit cast" operations between data types.
That's where you take a value and just reinterpret the bits as being some other
type of value, without changing the bits.
* This is **not** like the [`as` keyword][keyword-as]
* This is **not** like the [`From` trait][from-trait]
* It is **most like** [`f32::to_bits`][f32-to_bits], just generalized to let you
convert between all sorts of data types.
[keyword-as]: https://doc.rust-lang.org/nightly/std/keyword.as.html
[from-trait]: https://doc.rust-lang.org/nightly/core/convert/trait.From.html
[f32-to_bits]: https://doc.rust-lang.org/nightly/std/primitive.f32.html#method.to_bits
### Here's the part you're more likely to care about: *you can do this with slices too!*
When a slice is involved it's not a *direct* bitcast. Instead, the `cast_slice`
and `cast_slice_mut` functions will pull apart a slice's data and give you a new
slice that's the same span of memory just viewed as the new type. If the size of
the slice's element changes then the length of the slice you get back will be
changed accordingly.
This lets you cast a slice of color values into a slice of `u8` and send it to
the GPU, or things like that. I'm sure there's other examples, but honestly this
crate is as popular as it is mostly because of Rust's 3D graphics community
wanting to cast slices of different types into byte slices for sending to the
GPU. Hi friends! Push those vertices, or whatever it is that you all do.
## See Also
While `bytemuck` is full of unsafe code, I've also started a "sibling crate"
called [bitfrob](https://docs.rs/bitfrob/latest/bitfrob/), which is where
operations that are 100% safe will be added.
## Stability
* The crate is 1.0 and I consider it to be "basically done". New features
are usually being accepted when other people want to put in the work, but
myself I wanna move on to using `bytemuck` in bigger projects.
* The default build of the `bytemuck` crate will continue to work with `rustc-1.34`
for at least the rest of the `1.y.z` versions.
* Any other cargo features of the crate **are not** held to the same standard, and
may work only on the latest Stable or even only on latest Nightly.
**Future Plans:** Once the [Safe Transmute Project][pg-st] completes and
stabilizes ("eventually") this crate will be updated to use that as the
underlying mechanism for transmutation bounds, and a 2.0 version of `bytemuck`
will be released. The hope is for the 1.0 to 2.0 transition to be as seamless as
possible, but the future is always uncertain.
[pg-st]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
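The README above is removed here along with the rest of the vendored bytemuck crate. Since it describes the crate's slice-casting API, here is a quick illustrative sketch (not part of this patch) of that usage, assuming `bytemuck` from crates.io with its `derive` feature and a made-up `Vertex` type:

```rust
use bytemuck::{bytes_of, cast_slice, Pod, Zeroable};

// Hypothetical plain-old-data vertex type; deriving Pod/Zeroable assumes
// bytemuck is pulled in with its `derive` feature enabled.
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
struct Vertex {
    position: [f32; 3],
    uv: [f32; 2],
}

fn main() {
    let vertices = [
        Vertex { position: [0.0, 0.0, 0.0], uv: [0.0, 0.0] },
        Vertex { position: [1.0, 0.0, 0.0], uv: [1.0, 0.0] },
    ];

    // View the vertex slice as raw bytes without copying -- the GPU-upload
    // use case the README mentions.
    let raw: &[u8] = cast_slice(&vertices);
    assert_eq!(raw.len(), vertices.len() * std::mem::size_of::<Vertex>());

    // Single values can be viewed as bytes too.
    let scale = 1.0f32;
    assert_eq!(bytes_of(&scale).len(), 4);
}
```

`cast_slice` panics if sizes or alignment don't line up; the `try_*` variants (like `try_cast_box` in `src/allocation.rs` further down) instead return a `Result` carrying the original value.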

298 third_party/rust/bytemuck/changelog.md (vendored)

@@ -1,298 +0,0 @@
# `bytemuck` changelog
## 1.15.0
This primarily relaxes the bounds on a `From` impl.
Previously:
> `impl<T: NoUninit> From<Box<T>> for BoxBytes`
Now:
> `impl<T: ?Sized + sealed::BoxBytesOf> From<Box<T>> for BoxBytes`
All related functions and methods are similarly updated.
We believe this to be backwards compatible with all previous uses,
and now `BoxBytes` can be converted to/from more types than before.
## 1.14.3
* The new std simd nightly features are apparently arch-specific.
This adjusts the feature activation to be x86/ x86_64 only.
## 1.14.2
* Changes the name of the Nightly feature activated by the crate's
`nightly_stdsimd` feature. This is needed as of (approximately) Nightly
2024-02-06 and later, because the Nightly feature was changed.
## 1.14.1
* docs clarifications.
## 1.14
* `write_zeroes` and `fill_zeroes` functions: Writes (to one) or fills (a slice)
zero bytes to all bytes covered by the provided reference. If your type has
padding, this will even zero out the padding bytes.
* `align_offset` feature: causes pointer alignment checks to use the
`align_offset` pointer method rather than as-casting the pointer to `usize`.
This *may* improve codegen, if the compiler would have otherwise thought that
the pointer address escaped. No formal benchmarks have been done either way.
* `must_cast` feature: Adds `must_*` family of functions. These functions will
fail to compile if the cast requested can't be statically known to succeed.
The error messages can be kinda bad when this happens, but eliminating the
possibility of a runtime error might be worth it to you.
## 1.13.1
* Remove the requirement for the *source* data type to be `AnyBitPattern` on
`pod_collect_to_vec`, allowing you to pod collect vecs of `char` into vecs of
`u32`, or whatever.
## 1.13
* Now depends on `bytemuck_derive-1.4.0`
* Various small enhancements that would have been patch version updates, but
which have been rolled into this minor version update.
## 1.12.4
* This has additional impls for existing traits and cleans up some internal code,
but there's no new functions so I guess it counts as just a patch release.
## 1.12.3
* This bugfix makes the crate do stuff with `Arc` or not based on the
`target_has_atomic` config. Previously, some targets that have allocation but
not atomics were getting errors. This raises the MSRV of the
`extern_crate_alloc` feature to 1.60, but opt-in features are *not* considered
to be hard locked to 1.34 like the basic build of the crate is.
## 1.12.2
* Fixes `try_pod_read_unaligned` bug that made it always fail unless the target
type was exactly pointer sized in which case UB *could* happen. The
`CheckedBitPattern::is_valid_bit_pattern` was being asked to check that a
*reference* to the `pod` value was a valid bit pattern, rather than the actual
bit pattern itself, and so the check could in some cases be illegally
bypassed.
## 1.12.1
* Patch bumped the required `bytemuck_derive` version because of a regression in
how it handled `align(N)` attributes.
## 1.12
* This minor version bump is caused by a version bump in our `bytemuck_derive`
dependency, which is in turn caused by a mixup in the minimum version of `syn`
that `bytemuck_derive` uses. See [Issue
122](https://github.com/Lokathor/bytemuck/issues/122). There's not any
specific "new" API as you might normally expect from a minor version bump.
* [pali](https://github.com/pali6) fixed a problem with SPIR-V builds being
broken. The error handling functions were trying to be generic over `Display`,
which the error types normally support, except on SPIR-V targets (which run on
the GPU and don't have text formatting).
## 1.11
* [WaffleLapkin](https://github.com/WaffleLapkin) added `wrap_box` and `peel_box`
to the `TransparentWrapperAlloc` trait. Default impls of these functions are
provided, and (as usual with the transparent trait stuff) you should not override
the default versions.
## 1.10
* [TheEdward162](https://github.com/TheEdward162) added the `ZeroableInOption`
and `PodInOption` traits. These are for types that are `Zeroable` or `Pod`
*when in an option*, but not on their own. We provide impls for the various
"NonZeroINTEGER" types in `core`, and if you need to newtype a NonZero value
then you can impl these traits when you use `repr(transparent)`.
## 1.9.1
* Bumped the minimum `bytemuck_derive` dependency version from `1.0` to `1.1`.
The fact that `bytemuck` and `bytemuck_derive` are separate crates at all is
an unfortunate technical limit of current Rust, woe and calamity.
## 1.9.0
* [fu5ha](https://github.com/fu5ha) added the `NoUninit`, `AnyBitPattern`, and
`CheckedBitPattern` traits. This allows for a more fine-grained level of
detail in what casting operations are allowed for a type. Types that already
implement `Zeroable` and `Pod` will have a blanket impl for these new traits.
This is a "preview" of the direction that the crate will probably go in the
eventual 2.0 version. We're still waiting on [Project Safe
Transmute](https://github.com/rust-lang/project-safe-transmute) for an actual
2.0 version of the crate, but until then please enjoy this preview.
* Also Fusha added better support for `union` types in the derive macros. I
still don't know how any of the proc-macro stuff works at all, so please
direct questions to her.
## 1.8.0
* `try_pod_read_unaligned` and `pod_read_unaligned` let you go from `&[u8]` to
`T:Pod` without worrying about alignment.
## 1.7.3
* Experimental support for the `portable_simd` language extension under the
`nightly_portable_simd` cargo feature. As the name implies, this is an
experimental crate feature and it's **not** part of the semver contract. All
it does is add the appropriate `Zeroable` and `Pod` impls.
## 1.7.2
* Why does this repo keep being hit with publishing problems? What did I do to
deserve this curse, Ferris? This doesn't ever happen with tinyvec or fermium,
only bytemuck.
## 1.7.1
* **Soundness Fix:** The wrap/peel methods for owned value conversion, added to
`TransparentWrapper` in 1.6, can cause a double-drop if used with types that
impl `Drop`. The fix was simply to add a `ManuallyDrop` layer around the value
before doing the `transmute_copy` that is used to wrap/peel. While this fix
could technically be backported to the 1.6 series, since 1.7 is semver
compatible anyway the 1.6 series has simply been yanked.
## 1.7
* In response to [Unsafe Code Guidelines Issue
#286](https://github.com/rust-lang/unsafe-code-guidelines/issues/286), this
version of Bytemuck has a ***Soundness-Required Breaking Change***. This is
"allowed" under Rust's backwards-compatibility guidelines, but it's still
annoying of course so we're trying to keep the damage minimal.
* **The Reason:** It turns out that pointer values should not have been `Pod`. More
specifically, `ptr as usize` is *not* the same operation as calling
`transmute::<_, usize>(ptr)`.
* LLVM has yet to fully sort out their story, but until they do, transmuting
pointers can cause miscompilations. They may fix things up in the future,
but we're not gonna just wait and have broken code in the mean time.
* **The Fix:** The breaking change is that the `Pod` impls for `*const T`,
`*mut T`, and `Option<NonNull<T>>` are now gated behind the
`unsound_ptr_pod_impl` feature, which is off by default.
* You are *strongly discouraged* from using this feature, but if a dependency
of yours doesn't work when you upgrade to 1.7 because it relied on pointer
casting, then you might wish to temporarily enable the feature just to get
that dependency to build. Enabled features are global across all users of a
given semver compatible version, so if you enable the feature in your own
crate, your dependency will also end up getting the feature too, and then
it'll be able to compile.
* Please move away from using this feature as soon as you can. Consider it to
*already* be deprecated.
* [PR 65](https://github.com/Lokathor/bytemuck/pull/65)
## 1.6.3
* Small goof with an errant `;`, so [PR 69](https://github.com/Lokathor/bytemuck/pull/69)
*actually* got things working on SPIR-V.
## 1.6.2
cargo upload goof! ignore this one.
## 1.6.1
* [DJMcNab](https://github.com/DJMcNab) did a fix so that the crate can build for SPIR-V
[PR 67](https://github.com/Lokathor/bytemuck/pull/67)
## 1.6
* The `TransparentWrapper` trait now has more methods. More ways to wrap, and
now you can "peel" too! Note that we don't call it "unwrap" because that name
is too strongly associated with the Option/Result methods.
Thanks to [LU15W1R7H](https://github.com/LU15W1R7H) for doing
[PR 58](https://github.com/Lokathor/bytemuck/pull/58)
* Min Const Generics! Now there's Pod and Zeroable for arrays of any size when
you turn on the `min_const_generics` crate feature.
[zakarumych](https://github.com/zakarumych) got the work started in
[PR 59](https://github.com/Lokathor/bytemuck/pull/59),
and [chorman0773](https://github.com/chorman0773) finished off the task in
[PR 63](https://github.com/Lokathor/bytemuck/pull/63)
## 1.5.1
* Fix `bytes_of` failing on zero sized types.
[PR 53](https://github.com/Lokathor/bytemuck/pull/53)
## 1.5
* Added `pod_collect_to_vec`, which will gather a slice into a vec,
allowing you to change the pod type while also safely ignoring alignment.
[PR 50](https://github.com/Lokathor/bytemuck/pull/50)
## 1.4.2
* [Kimundi](https://github.com/Kimundi) fixed an issue that could make `try_zeroed_box`
stack overflow for large values at low optimization levels.
[PR 43](https://github.com/Lokathor/bytemuck/pull/43)
## 1.4.1
* [thomcc](https://github.com/thomcc) fixed up the CI and patched over a soundness hole in `offset_of!`.
[PR 38](https://github.com/Lokathor/bytemuck/pull/38)
## 1.4
* [icewind1991](https://github.com/icewind1991) has contributed the proc-macros
for deriving impls of `Pod`, `TransparentWrapper`, `Zeroable`!! Everyone has
been waiting for this one folks! It's a big deal. Just enable the `derive`
cargo feature and then you'll be able to derive the traits on your types. It
generates all the appropriate tests for you.
* The `zeroable_maybe_uninit` feature now adds a `Zeroable` impl to the
`MaybeUninit` type. This is only behind a feature flag because `MaybeUninit`
didn't exist back in `1.34.0` (the minimum rust version of `bytemuck`).
## 1.3.1
* The entire crate is now available under the `Apache-2.0 OR MIT` license as
well as the previous `Zlib` license
[#24](https://github.com/Lokathor/bytemuck/pull/24).
* [HeroicKatora](https://github.com/HeroicKatora) added the
`try_zeroed_slice_box` function
[#10](https://github.com/Lokathor/bytemuck/pull/17). `zeroed_slice_box` is
also available.
* The `offset_of!` macro now supports a 2-arg version. For types that impl
Default, it'll just make an instance using `default` and then call over to the
3-arg version.
* The `PodCastError` type now supports `Hash` and `Display`. Also if you enable
the `extern_crate_std` feature then it will support `std::error::Error`.
* We now provide a `TransparentWrapper<T>` impl for `core::num::Wrapping<T>`.
* The error type of `try_from_bytes` and `try_from_bytes_mut` when the input
isn't aligned has been corrected from being `AlignmentMismatch` (intended for
allocation casting only) to `TargetAlignmentGreaterAndInputNotAligned`.
## 1.3.0
* Had a bug because the CI was messed up! It wasn't soundness related, because
it prevented the crate from building entirely if the `extern_crate_alloc`
feature was used. Still, this is yanked, sorry.
## 1.2.0
* [thomcc](https://github.com/thomcc) added many things:
* A fully sound `offset_of!` macro
[#10](https://github.com/Lokathor/bytemuck/pull/10)
* A `Contiguous` trait for when you've got enums with declared values
all in a row [#12](https://github.com/Lokathor/bytemuck/pull/12)
* A `TransparentWrapper` marker trait for when you want to more clearly
enable adding and removing a wrapper struct to its inner value
[#15](https://github.com/Lokathor/bytemuck/pull/15)
* Now MIRI is run on CI in every single push!
[#16](https://github.com/Lokathor/bytemuck/pull/16)
## 1.1.0
* [SimonSapin](https://github.com/SimonSapin) added `from_bytes`,
`from_bytes_mut`, `try_from_bytes`, and `try_from_bytes_mut` ([PR
Link](https://github.com/Lokathor/bytemuck/pull/8))
## 1.0.1
* Changed to the [zlib](https://opensource.org/licenses/Zlib) license.
* Added much more proper documentation.
* Reduced the minimum Rust version to 1.34

16 third_party/rust/bytemuck/rustfmt.toml (vendored)

@@ -1,16 +0,0 @@
# Based on
# https://github.com/rust-lang/rustfmt/blob/rustfmt-1.4.19/Configurations.md
# Stable
edition = "2018"
fn_args_layout = "Compressed"
max_width = 80
tab_spaces = 2
use_field_init_shorthand = true
use_try_shorthand = true
use_small_heuristics = "Max"
# Unstable
format_code_in_doc_comments = true
imports_granularity = "Crate"
wrap_comments = true

882 third_party/rust/bytemuck/src/allocation.rs (vendored)

@@ -1,882 +0,0 @@
#![cfg(feature = "extern_crate_alloc")]
//! Stuff to boost things in the `alloc` crate.
//!
//! * You must enable the `extern_crate_alloc` feature of `bytemuck` or you will
//! not be able to use this module! This is generally done by adding the
//! feature to the dependency in Cargo.toml like so:
//!
//! `bytemuck = { version = "VERSION_YOU_ARE_USING", features =
//! ["extern_crate_alloc"]}`
use super::*;
#[cfg(target_has_atomic = "ptr")]
use alloc::sync::Arc;
use alloc::{
alloc::{alloc_zeroed, Layout},
boxed::Box,
rc::Rc,
vec,
vec::Vec,
};
use core::ops::{Deref, DerefMut};
/// As [`try_cast_box`](try_cast_box), but unwraps for you.
#[inline]
pub fn cast_box<A: NoUninit, B: AnyBitPattern>(input: Box<A>) -> Box<B> {
try_cast_box(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of a [`Box`](alloc::boxed::Box).
///
/// On failure you get back an error along with the starting `Box`.
///
/// ## Failure
///
/// * The start and end content type of the `Box` must have the exact same
/// alignment.
/// * The start and end size of the `Box` must have the exact same size.
#[inline]
pub fn try_cast_box<A: NoUninit, B: AnyBitPattern>(
input: Box<A>,
) -> Result<Box<B>, (PodCastError, Box<A>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
Err((PodCastError::SizeMismatch, input))
} else {
// Note(Lokathor): This is much simpler than with the Vec casting!
let ptr: *mut B = Box::into_raw(input) as *mut B;
Ok(unsafe { Box::from_raw(ptr) })
}
}
/// Allocates a `Box<T>` with all of the contents being zeroed out.
///
/// This uses the global allocator to create a zeroed allocation and _then_
/// turns it into a Box. In other words, it's 100% assured that the zeroed data
/// won't be put temporarily on the stack. You can make a box of any size
/// without fear of a stack overflow.
///
/// ## Failure
///
/// This fails if the allocation fails.
#[inline]
pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
if size_of::<T>() == 0 {
// This will not allocate but simply create a dangling pointer.
let dangling = core::ptr::NonNull::dangling().as_ptr();
return Ok(unsafe { Box::from_raw(dangling) });
}
let layout = Layout::new::<T>();
let ptr = unsafe { alloc_zeroed(layout) };
if ptr.is_null() {
// we don't know what the error is because `alloc_zeroed` is a dumb API
Err(())
} else {
Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
}
}
/// As [`try_zeroed_box`], but unwraps for you.
#[inline]
pub fn zeroed_box<T: Zeroable>() -> Box<T> {
try_zeroed_box().unwrap()
}
/// Allocates a `Vec<T>` of length and capacity exactly equal to `length` and
/// all elements zeroed.
///
/// ## Failure
///
/// This fails if the allocation fails, or if a layout cannot be calculated for
/// the allocation.
pub fn try_zeroed_vec<T: Zeroable>(length: usize) -> Result<Vec<T>, ()> {
if length == 0 {
Ok(Vec::new())
} else {
let boxed_slice = try_zeroed_slice_box(length)?;
Ok(boxed_slice.into_vec())
}
}
/// As [`try_zeroed_vec`] but unwraps for you
pub fn zeroed_vec<T: Zeroable>(length: usize) -> Vec<T> {
try_zeroed_vec(length).unwrap()
}
/// Allocates a `Box<[T]>` with all contents being zeroed out.
///
/// This uses the global allocator to create a zeroed allocation and _then_
/// turns it into a Box. In other words, it's 100% assured that the zeroed data
/// won't be put temporarily on the stack. You can make a box of any size
/// without fear of a stack overflow.
///
/// ## Failure
///
/// This fails if the allocation fails, or if a layout cannot be calculated for
/// the allocation.
#[inline]
pub fn try_zeroed_slice_box<T: Zeroable>(
length: usize,
) -> Result<Box<[T]>, ()> {
if size_of::<T>() == 0 || length == 0 {
// This will not allocate but simply create a dangling slice pointer.
let dangling = core::ptr::NonNull::dangling().as_ptr();
let dangling_slice = core::ptr::slice_from_raw_parts_mut(dangling, length);
return Ok(unsafe { Box::from_raw(dangling_slice) });
}
let layout = core::alloc::Layout::array::<T>(length).map_err(|_| ())?;
let ptr = unsafe { alloc_zeroed(layout) };
if ptr.is_null() {
// we don't know what the error is because `alloc_zeroed` is a dumb API
Err(())
} else {
let slice =
unsafe { core::slice::from_raw_parts_mut(ptr as *mut T, length) };
Ok(unsafe { Box::<[T]>::from_raw(slice) })
}
}
/// As [`try_zeroed_slice_box`](try_zeroed_slice_box), but unwraps for you.
pub fn zeroed_slice_box<T: Zeroable>(length: usize) -> Box<[T]> {
try_zeroed_slice_box(length).unwrap()
}
/// As [`try_cast_slice_box`](try_cast_slice_box), but unwraps for you.
#[inline]
pub fn cast_slice_box<A: NoUninit, B: AnyBitPattern>(
input: Box<[A]>,
) -> Box<[B]> {
try_cast_slice_box(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of a `Box<[T]>`.
///
/// On failure you get back an error along with the starting `Box<[T]>`.
///
/// ## Failure
///
/// * The start and end content type of the `Box<[T]>` must have the exact same
/// alignment.
/// * The start and end content size in bytes of the `Box<[T]>` must be the
/// exact same.
#[inline]
pub fn try_cast_slice_box<A: NoUninit, B: AnyBitPattern>(
input: Box<[A]>,
) -> Result<Box<[B]>, (PodCastError, Box<[A]>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
if size_of::<A>() * input.len() % size_of::<B>() != 0 {
// If the size in bytes of the underlying buffer does not match an exact
// multiple of the size of B, we cannot cast between them.
Err((PodCastError::SizeMismatch, input))
} else {
// Because the size is an exact multiple, we can now change the length
// of the slice and recreate the Box
// NOTE: This is a valid operation because according to the docs of
// std::alloc::GlobalAlloc::dealloc(), the Layout that was used to alloc
// the block must be the same Layout that is used to dealloc the block.
// Luckily, Layout only stores two things, the alignment, and the size in
// bytes. So as long as both of those stay the same, the Layout will
// remain a valid input to dealloc.
let length = size_of::<A>() * input.len() / size_of::<B>();
let box_ptr: *mut A = Box::into_raw(input) as *mut A;
let ptr: *mut [B] =
unsafe { core::slice::from_raw_parts_mut(box_ptr as *mut B, length) };
Ok(unsafe { Box::<[B]>::from_raw(ptr) })
}
} else {
let box_ptr: *mut [A] = Box::into_raw(input);
let ptr: *mut [B] = box_ptr as *mut [B];
Ok(unsafe { Box::<[B]>::from_raw(ptr) })
}
}
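// A hedged example of the boxed-slice cast: `u32` and `i32` share size and
// alignment, so the allocation is reused, while a `u32 -> u8` attempt is
// rejected up front with an alignment mismatch and hands the original box back.
#[cfg(test)]
#[test]
fn cast_slice_box_sketch() {
  let words: Box<[u32]> = vec![1u32, 2, 3].into_boxed_slice();
  let signed: Box<[i32]> = cast_slice_box(words);
  assert_eq!(&*signed, &[1i32, 2, 3][..]);

  let words: Box<[u32]> = vec![7u32; 4].into_boxed_slice();
  let (err, original) = try_cast_slice_box::<u32, u8>(words).unwrap_err();
  assert!(matches!(err, PodCastError::AlignmentMismatch));
  assert_eq!(original.len(), 4);
}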
/// As [`try_cast_vec`](try_cast_vec), but unwraps for you.
#[inline]
pub fn cast_vec<A: NoUninit, B: AnyBitPattern>(input: Vec<A>) -> Vec<B> {
try_cast_vec(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of a [`Vec`](alloc::vec::Vec).
///
/// On failure you get back an error along with the starting `Vec`.
///
/// ## Failure
///
/// * The start and end content type of the `Vec` must have the exact same
/// alignment.
/// * The start and end content size in bytes of the `Vec` must be the exact
/// same.
/// * The start and end capacity in bytes of the `Vec` must be the exact same.
#[inline]
pub fn try_cast_vec<A: NoUninit, B: AnyBitPattern>(
input: Vec<A>,
) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
if size_of::<A>() * input.len() % size_of::<B>() != 0
|| size_of::<A>() * input.capacity() % size_of::<B>() != 0
{
// If the size in bytes of the underlying buffer does not match an exact
// multiple of the size of B, we cannot cast between them.
// Note that we have to pay special attention to make sure that both
// length and capacity are valid under B, as we do not want to
// change which bytes are considered part of the initialized slice
// of the Vec
Err((PodCastError::SizeMismatch, input))
} else {
// Because the size is an exact multiple, we can now change the length and
// capacity and recreate the Vec
// NOTE: This is a valid operation because according to the docs of
// std::alloc::GlobalAlloc::dealloc(), the Layout that was used to alloc
// the block must be the same Layout that is used to dealloc the block.
// Luckily, Layout only stores two things, the alignment, and the size in
// bytes. So as long as both of those stay the same, the Layout will
// remain a valid input to dealloc.
// Note(Lokathor): First we record the length and capacity, which don't
// have any secret provenance metadata.
let length: usize = size_of::<A>() * input.len() / size_of::<B>();
let capacity: usize = size_of::<A>() * input.capacity() / size_of::<B>();
// Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
// ManuallyDrop, because if we used `core::mem::forget` after taking the
      // pointer then that would invalidate our pointer. In nightly there's an
      // "into raw parts" method, which we can switch to eventually.
let mut manual_drop_vec = ManuallyDrop::new(input);
let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
let ptr: *mut B = vec_ptr as *mut B;
Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
}
} else {
// Note(Lokathor): First we record the length and capacity, which don't have
// any secret provenance metadata.
let length: usize = input.len();
let capacity: usize = input.capacity();
// Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
// ManuallyDrop, because if we used `core::mem::forget` after taking the
    // pointer then that would invalidate our pointer. In nightly there's an
    // "into raw parts" method, which we can switch to eventually.
let mut manual_drop_vec = ManuallyDrop::new(input);
let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
let ptr: *mut B = vec_ptr as *mut B;
Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
}
}
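// A brief sketch of the `Vec` cast: with matching size and alignment
// (`u32 -> i32`) the existing allocation, length, and capacity carry over
// unchanged.
#[cfg(test)]
#[test]
fn cast_vec_sketch() {
  let v: Vec<u32> = vec![1, 2, 3, 4];
  let cap_before = v.capacity();
  let w: Vec<i32> = cast_vec(v);
  assert_eq!(w, vec![1i32, 2, 3, 4]);
  assert_eq!(w.capacity(), cap_before);
}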
/// This "collects" a slice of pod data into a vec of a different pod type.
///
/// Unlike with [`cast_slice`] and [`cast_slice_mut`], this will always work.
///
/// The output vec will be of a minimal size/capacity to hold the slice given.
///
/// ```rust
/// # use bytemuck::*;
/// let halfwords: [u16; 4] = [5, 6, 7, 8];
/// let vec_of_words: Vec<u32> = pod_collect_to_vec(&halfwords);
/// if cfg!(target_endian = "little") {
/// assert_eq!(&vec_of_words[..], &[0x0006_0005, 0x0008_0007][..])
/// } else {
/// assert_eq!(&vec_of_words[..], &[0x0005_0006, 0x0007_0008][..])
/// }
/// ```
pub fn pod_collect_to_vec<A: NoUninit, B: NoUninit + AnyBitPattern>(
src: &[A],
) -> Vec<B> {
let src_size = size_of_val(src);
// Note(Lokathor): dst_count is rounded up so that the dest will always be at
// least as many bytes as the src.
let dst_count = src_size / size_of::<B>()
+ if src_size % size_of::<B>() != 0 { 1 } else { 0 };
let mut dst = vec![B::zeroed(); dst_count];
let src_bytes: &[u8] = cast_slice(src);
let dst_bytes: &mut [u8] = cast_slice_mut(&mut dst[..]);
dst_bytes[..src_size].copy_from_slice(src_bytes);
dst
}
/// As [`try_cast_rc`](try_cast_rc), but unwraps for you.
#[inline]
pub fn cast_rc<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
input: Rc<A>,
) -> Rc<B> {
try_cast_rc(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of a [`Rc`](alloc::rc::Rc).
///
/// On failure you get back an error along with the starting `Rc`.
///
/// The bounds on this function are the same as [`cast_mut`], because a user
/// could call `Rc::get_mut_unchecked` on the output, which could be observable
/// in the input.
///
/// ## Failure
///
/// * The start and end content type of the `Rc` must have the exact same
/// alignment.
/// * The start and end content type of the `Rc` must have the exact same size.
#[inline]
pub fn try_cast_rc<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
input: Rc<A>,
) -> Result<Rc<B>, (PodCastError, Rc<A>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
Err((PodCastError::SizeMismatch, input))
} else {
// Safety: Rc::from_raw requires size and alignment match, which is met.
let ptr: *const B = Rc::into_raw(input) as *const B;
Ok(unsafe { Rc::from_raw(ptr) })
}
}
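// A small sketch for the `Rc` cast: `u32` and `f32` have identical size and
// alignment, so the cast succeeds without touching the reference count.
#[cfg(test)]
#[test]
fn cast_rc_sketch() {
  let bits: Rc<u32> = Rc::new(0x3F80_0000);
  let float: Rc<f32> = cast_rc(bits);
  assert_eq!(*float, 1.0_f32);
}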
/// As [`try_cast_arc`](try_cast_arc), but unwraps for you.
#[inline]
#[cfg(target_has_atomic = "ptr")]
pub fn cast_arc<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
input: Arc<A>,
) -> Arc<B> {
try_cast_arc(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of an [`Arc`](alloc::sync::Arc).
///
/// On failure you get back an error along with the starting `Arc`.
///
/// The bounds on this function are the same as [`cast_mut`], because a user
/// could call `Arc::get_mut_unchecked` on the output, which could be observable
/// in the input.
///
/// ## Failure
///
/// * The start and end content type of the `Arc` must have the exact same
/// alignment.
/// * The start and end content type of the `Arc` must have the exact same size.
#[inline]
#[cfg(target_has_atomic = "ptr")]
pub fn try_cast_arc<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
input: Arc<A>,
) -> Result<Arc<B>, (PodCastError, Arc<A>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
Err((PodCastError::SizeMismatch, input))
} else {
// Safety: Arc::from_raw requires size and alignment match, which is met.
let ptr: *const B = Arc::into_raw(input) as *const B;
Ok(unsafe { Arc::from_raw(ptr) })
}
}
/// As [`try_cast_slice_rc`](try_cast_slice_rc), but unwraps for you.
#[inline]
pub fn cast_slice_rc<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
input: Rc<[A]>,
) -> Rc<[B]> {
try_cast_slice_rc(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of an `Rc<[T]>`.
///
/// On failure you get back an error along with the starting `Rc<[T]>`.
///
/// The bounds on this function are the same as [`cast_mut`], because a user
/// could call `Rc::get_mut_unchecked` on the output, which could be observable
/// in the input.
///
/// ## Failure
///
/// * The start and end content type of the `Rc<[T]>` must have the exact same
/// alignment.
/// * The start and end content size in bytes of the `Rc<[T]>` must be the exact
/// same.
#[inline]
pub fn try_cast_slice_rc<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
input: Rc<[A]>,
) -> Result<Rc<[B]>, (PodCastError, Rc<[A]>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
if size_of::<A>() * input.len() % size_of::<B>() != 0 {
// If the size in bytes of the underlying buffer does not match an exact
// multiple of the size of B, we cannot cast between them.
Err((PodCastError::SizeMismatch, input))
} else {
// Because the size is an exact multiple, we can now change the length
// of the slice and recreate the Rc
// NOTE: This is a valid operation because according to the docs of
// std::rc::Rc::from_raw(), the type U that was in the original Rc<U>
// acquired from Rc::into_raw() must have the same size alignment and
// size of the type T in the new Rc<T>. So as long as both the size
// and alignment stay the same, the Rc will remain a valid Rc.
let length = size_of::<A>() * input.len() / size_of::<B>();
let rc_ptr: *const A = Rc::into_raw(input) as *const A;
// Must use ptr::slice_from_raw_parts, because we cannot make an
// intermediate const reference, because it has mutable provenance,
// nor an intermediate mutable reference, because it could be aliased.
let ptr = core::ptr::slice_from_raw_parts(rc_ptr as *const B, length);
Ok(unsafe { Rc::<[B]>::from_raw(ptr) })
}
} else {
let rc_ptr: *const [A] = Rc::into_raw(input);
let ptr: *const [B] = rc_ptr as *const [B];
Ok(unsafe { Rc::<[B]>::from_raw(ptr) })
}
}
/// As [`try_cast_slice_arc`](try_cast_slice_arc), but unwraps for you.
#[inline]
#[cfg(target_has_atomic = "ptr")]
pub fn cast_slice_arc<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
input: Arc<[A]>,
) -> Arc<[B]> {
try_cast_slice_arc(input).map_err(|(e, _v)| e).unwrap()
}
/// Attempts to cast the content type of an `Arc<[T]>`.
///
/// On failure you get back an error along with the starting `Arc<[T]>`.
///
/// The bounds on this function are the same as [`cast_mut`], because a user
/// could call `Arc::get_mut_unchecked` on the output, which could be observable
/// in the input.
///
/// ## Failure
///
/// * The start and end content type of the `Arc<[T]>` must have the exact same
/// alignment.
/// * The start and end content size in bytes of the `Arc<[T]>` must be the
/// exact same.
#[inline]
#[cfg(target_has_atomic = "ptr")]
pub fn try_cast_slice_arc<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
input: Arc<[A]>,
) -> Result<Arc<[B]>, (PodCastError, Arc<[A]>)> {
if align_of::<A>() != align_of::<B>() {
Err((PodCastError::AlignmentMismatch, input))
} else if size_of::<A>() != size_of::<B>() {
if size_of::<A>() * input.len() % size_of::<B>() != 0 {
// If the size in bytes of the underlying buffer does not match an exact
// multiple of the size of B, we cannot cast between them.
Err((PodCastError::SizeMismatch, input))
} else {
// Because the size is an exact multiple, we can now change the length
// of the slice and recreate the Arc
// NOTE: This is a valid operation because according to the docs of
// std::sync::Arc::from_raw(), the type U that was in the original Arc<U>
      // acquired from Arc::into_raw() must have the same size and alignment as
      // the type T in the new Arc<T>. So as long as both the size
// and alignment stay the same, the Arc will remain a valid Arc.
let length = size_of::<A>() * input.len() / size_of::<B>();
let arc_ptr: *const A = Arc::into_raw(input) as *const A;
// Must use ptr::slice_from_raw_parts, because we cannot make an
// intermediate const reference, because it has mutable provenance,
// nor an intermediate mutable reference, because it could be aliased.
let ptr = core::ptr::slice_from_raw_parts(arc_ptr as *const B, length);
Ok(unsafe { Arc::<[B]>::from_raw(ptr) })
}
} else {
let arc_ptr: *const [A] = Arc::into_raw(input);
let ptr: *const [B] = arc_ptr as *const [B];
Ok(unsafe { Arc::<[B]>::from_raw(ptr) })
}
}
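// A usage sketch for the `Arc<[T]>` cast (gated, like the function itself, on
// atomic pointer support): grouping `u32`s into `[u32; 2]` pairs keeps the
// alignment identical and halves the length.
#[cfg(test)]
#[cfg(target_has_atomic = "ptr")]
#[test]
fn cast_slice_arc_sketch() {
  let words: Arc<[u32]> = Arc::from(vec![1u32, 2, 3, 4]);
  let pairs: Arc<[[u32; 2]]> = cast_slice_arc(words);
  assert_eq!(&*pairs, &[[1u32, 2], [3, 4]][..]);
}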
/// An extension trait for `TransparentWrapper` and alloc types.
pub trait TransparentWrapperAlloc<Inner: ?Sized>:
TransparentWrapper<Inner>
{
/// Convert a vec of the inner type into a vec of the wrapper type.
fn wrap_vec(s: Vec<Inner>) -> Vec<Self>
where
Self: Sized,
Inner: Sized,
{
let mut s = core::mem::ManuallyDrop::new(s);
let length = s.len();
let capacity = s.capacity();
let ptr = s.as_mut_ptr();
unsafe {
// SAFETY:
// * ptr comes from Vec (and will not be double-dropped)
// * the two types have the identical representation
// * the len and capacity fields are valid
Vec::from_raw_parts(ptr as *mut Self, length, capacity)
}
}
/// Convert a box to the inner type into a box to the wrapper
/// type.
#[inline]
fn wrap_box(s: Box<Inner>) -> Box<Self> {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
unsafe {
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the sizes are unspecified.
//
// SAFETY:
// * The unsafe contract requires that pointers to Inner and Self have
// identical representations
// * Box is guaranteed to have representation identical to a (non-null)
// pointer
// * The pointer comes from a box (and thus satisfies all safety
// requirements of Box)
let inner_ptr: *mut Inner = Box::into_raw(s);
let wrapper_ptr: *mut Self = transmute!(inner_ptr);
Box::from_raw(wrapper_ptr)
}
}
/// Convert an [`Rc`](alloc::rc::Rc) to the inner type into an `Rc` to the
/// wrapper type.
#[inline]
fn wrap_rc(s: Rc<Inner>) -> Rc<Self> {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
unsafe {
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the layout of Rc is unspecified.
//
// SAFETY:
// * The unsafe contract requires that pointers to Inner and Self have
// identical representations, and that the size and alignment of Inner
// and Self are the same, which meets the safety requirements of
// Rc::from_raw
let inner_ptr: *const Inner = Rc::into_raw(s);
let wrapper_ptr: *const Self = transmute!(inner_ptr);
Rc::from_raw(wrapper_ptr)
}
}
/// Convert an [`Arc`](alloc::sync::Arc) to the inner type into an `Arc` to
/// the wrapper type.
#[inline]
#[cfg(target_has_atomic = "ptr")]
fn wrap_arc(s: Arc<Inner>) -> Arc<Self> {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
unsafe {
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the layout of Arc is unspecified.
//
// SAFETY:
// * The unsafe contract requires that pointers to Inner and Self have
// identical representations, and that the size and alignment of Inner
// and Self are the same, which meets the safety requirements of
// Arc::from_raw
let inner_ptr: *const Inner = Arc::into_raw(s);
let wrapper_ptr: *const Self = transmute!(inner_ptr);
Arc::from_raw(wrapper_ptr)
}
}
/// Convert a vec of the wrapper type into a vec of the inner type.
fn peel_vec(s: Vec<Self>) -> Vec<Inner>
where
Self: Sized,
Inner: Sized,
{
let mut s = core::mem::ManuallyDrop::new(s);
let length = s.len();
let capacity = s.capacity();
let ptr = s.as_mut_ptr();
unsafe {
// SAFETY:
// * ptr comes from Vec (and will not be double-dropped)
// * the two types have the identical representation
// * the len and capacity fields are valid
Vec::from_raw_parts(ptr as *mut Inner, length, capacity)
}
}
/// Convert a box to the wrapper type into a box to the inner
/// type.
#[inline]
fn peel_box(s: Box<Self>) -> Box<Inner> {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
unsafe {
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the sizes are unspecified.
//
// SAFETY:
// * The unsafe contract requires that pointers to Inner and Self have
// identical representations
// * Box is guaranteed to have representation identical to a (non-null)
// pointer
// * The pointer comes from a box (and thus satisfies all safety
// requirements of Box)
let wrapper_ptr: *mut Self = Box::into_raw(s);
let inner_ptr: *mut Inner = transmute!(wrapper_ptr);
Box::from_raw(inner_ptr)
}
}
/// Convert an [`Rc`](alloc::rc::Rc) to the wrapper type into an `Rc` to the
/// inner type.
#[inline]
fn peel_rc(s: Rc<Self>) -> Rc<Inner> {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
unsafe {
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the layout of Rc is unspecified.
//
// SAFETY:
// * The unsafe contract requires that pointers to Inner and Self have
// identical representations, and that the size and alignment of Inner
// and Self are the same, which meets the safety requirements of
// Rc::from_raw
let wrapper_ptr: *const Self = Rc::into_raw(s);
let inner_ptr: *const Inner = transmute!(wrapper_ptr);
Rc::from_raw(inner_ptr)
}
}
/// Convert an [`Arc`](alloc::sync::Arc) to the wrapper type into an `Arc` to
/// the inner type.
#[inline]
#[cfg(target_has_atomic = "ptr")]
fn peel_arc(s: Arc<Self>) -> Arc<Inner> {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
unsafe {
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the layout of Arc is unspecified.
//
// SAFETY:
// * The unsafe contract requires that pointers to Inner and Self have
// identical representations, and that the size and alignment of Inner
// and Self are the same, which meets the safety requirements of
// Arc::from_raw
let wrapper_ptr: *const Self = Arc::into_raw(s);
let inner_ptr: *const Inner = transmute!(wrapper_ptr);
Arc::from_raw(inner_ptr)
}
}
}
impl<I: ?Sized, T: ?Sized + TransparentWrapper<I>> TransparentWrapperAlloc<I>
for T
{
}
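// A sketch of the blanket impl above, using a hypothetical
// `#[repr(transparent)]` newtype: wrapping and peeling a `Vec` reinterprets the
// existing allocation rather than copying element by element.
#[cfg(test)]
#[test]
fn transparent_wrapper_alloc_sketch() {
  #[repr(transparent)]
  #[derive(Clone, Copy, PartialEq, Debug)]
  struct Meters(u32);
  // SAFETY: `Meters` is a `repr(transparent)` wrapper around `u32`.
  unsafe impl TransparentWrapper<u32> for Meters {}

  let raw: Vec<u32> = vec![1, 2, 3];
  let wrapped: Vec<Meters> = Meters::wrap_vec(raw);
  assert_eq!(wrapped, vec![Meters(1), Meters(2), Meters(3)]);
  let peeled: Vec<u32> = Meters::peel_vec(wrapped);
  assert_eq!(peeled, vec![1, 2, 3]);
}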
/// As `Box<[u8]>`, but remembers the original alignment.
pub struct BoxBytes {
// SAFETY: `ptr` is owned, was allocated with `layout`, and points to
// `layout.size()` initialized bytes.
ptr: NonNull<u8>,
layout: Layout,
}
impl Deref for BoxBytes {
type Target = [u8];
fn deref(&self) -> &Self::Target {
// SAFETY: See type invariant.
unsafe {
core::slice::from_raw_parts(self.ptr.as_ptr(), self.layout.size())
}
}
}
impl DerefMut for BoxBytes {
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: See type invariant.
unsafe {
core::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.layout.size())
}
}
}
impl Drop for BoxBytes {
fn drop(&mut self) {
// SAFETY: See type invariant.
unsafe { alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout) };
}
}
impl<T: ?Sized + sealed::BoxBytesOf> From<Box<T>> for BoxBytes {
fn from(value: Box<T>) -> Self {
value.box_bytes_of()
}
}
mod sealed {
use crate::{BoxBytes, PodCastError};
use alloc::boxed::Box;
pub trait BoxBytesOf {
fn box_bytes_of(self: Box<Self>) -> BoxBytes;
}
pub trait FromBoxBytes {
fn try_from_box_bytes(
bytes: BoxBytes,
) -> Result<Box<Self>, (PodCastError, BoxBytes)>;
}
}
impl<T: NoUninit> sealed::BoxBytesOf for T {
fn box_bytes_of(self: Box<Self>) -> BoxBytes {
let layout = Layout::new::<T>();
let ptr = Box::into_raw(self) as *mut u8;
// SAFETY: Box::into_raw() returns a non-null pointer.
let ptr = unsafe { NonNull::new_unchecked(ptr) };
BoxBytes { ptr, layout }
}
}
impl<T: NoUninit> sealed::BoxBytesOf for [T] {
fn box_bytes_of(self: Box<Self>) -> BoxBytes {
let layout = Layout::for_value::<[T]>(&self);
let ptr = Box::into_raw(self) as *mut u8;
// SAFETY: Box::into_raw() returns a non-null pointer.
let ptr = unsafe { NonNull::new_unchecked(ptr) };
BoxBytes { ptr, layout }
}
}
impl sealed::BoxBytesOf for str {
fn box_bytes_of(self: Box<Self>) -> BoxBytes {
self.into_boxed_bytes().box_bytes_of()
}
}
impl<T: AnyBitPattern> sealed::FromBoxBytes for T {
fn try_from_box_bytes(
bytes: BoxBytes,
) -> Result<Box<Self>, (PodCastError, BoxBytes)> {
let layout = Layout::new::<T>();
if bytes.layout.align() != layout.align() {
Err((PodCastError::AlignmentMismatch, bytes))
} else if bytes.layout.size() != layout.size() {
Err((PodCastError::SizeMismatch, bytes))
} else {
let (ptr, _) = bytes.into_raw_parts();
// SAFETY: See BoxBytes type invariant.
Ok(unsafe { Box::from_raw(ptr.as_ptr() as *mut T) })
}
}
}
impl<T: AnyBitPattern> sealed::FromBoxBytes for [T] {
fn try_from_box_bytes(
bytes: BoxBytes,
) -> Result<Box<Self>, (PodCastError, BoxBytes)> {
let single_layout = Layout::new::<T>();
if bytes.layout.align() != single_layout.align() {
Err((PodCastError::AlignmentMismatch, bytes))
} else if single_layout.size() == 0 {
Err((PodCastError::SizeMismatch, bytes))
} else if bytes.layout.size() % single_layout.size() != 0 {
Err((PodCastError::OutputSliceWouldHaveSlop, bytes))
} else {
let (ptr, layout) = bytes.into_raw_parts();
let length = layout.size() / single_layout.size();
let ptr =
core::ptr::slice_from_raw_parts_mut(ptr.as_ptr() as *mut T, length);
// SAFETY: See BoxBytes type invariant.
Ok(unsafe { Box::from_raw(ptr) })
}
}
}
/// Re-interprets `Box<T>` as `BoxBytes`.
///
/// `T` must be either [`Sized`] and [`NoUninit`],
/// [`[U]`](slice) where `U: NoUninit`, or [`str`].
#[inline]
pub fn box_bytes_of<T: sealed::BoxBytesOf + ?Sized>(input: Box<T>) -> BoxBytes {
input.box_bytes_of()
}
/// Re-interprets `BoxBytes` as `Box<T>`.
///
/// `T` must be either [`Sized`] + [`AnyBitPattern`], or
/// [`[U]`](slice) where `U: AnyBitPattern`.
///
/// ## Panics
///
/// This is [`try_from_box_bytes`] but will panic on error and the input will be
/// dropped.
#[inline]
pub fn from_box_bytes<T: sealed::FromBoxBytes + ?Sized>(
input: BoxBytes,
) -> Box<T> {
try_from_box_bytes(input).map_err(|(error, _)| error).unwrap()
}
/// Re-interprets `BoxBytes` as `Box<T>`.
///
/// `T` must be either [`Sized`] + [`AnyBitPattern`], or
/// [`[U]`](slice) where `U: AnyBitPattern`.
///
/// Returns `Err`:
/// * If the input isn't aligned for `T`.
/// * If `T: Sized` and the input's length isn't exactly the size of `T`.
/// * If `T = [U]` and the input's length isn't exactly a multiple of the size
/// of `U`.
#[inline]
pub fn try_from_box_bytes<T: sealed::FromBoxBytes + ?Sized>(
input: BoxBytes,
) -> Result<Box<T>, (PodCastError, BoxBytes)> {
T::try_from_box_bytes(input)
}
impl BoxBytes {
/// Constructs a `BoxBytes` from its raw parts.
///
/// # Safety
///
/// The pointer is owned, has been allocated with the provided layout, and
/// points to `layout.size()` initialized bytes.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, layout: Layout) -> Self {
BoxBytes { ptr, layout }
}
/// Deconstructs a `BoxBytes` into its raw parts.
///
  /// The returned pointer is owned, was allocated with the returned layout,
  /// and points to `layout.size()` initialized bytes.
pub fn into_raw_parts(self) -> (NonNull<u8>, Layout) {
let me = ManuallyDrop::new(self);
(me.ptr, me.layout)
}
/// Returns the original layout.
pub fn layout(&self) -> Layout {
self.layout
}
}
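// A round-trip sketch: `box_bytes_of` records the layout of the original
// allocation, so converting back to the same element type recovers the data,
// while a type with a different alignment would be rejected.
#[cfg(test)]
#[test]
fn box_bytes_round_trip_sketch() {
  let original: Box<[u32]> = vec![1u32, 2, 3].into_boxed_slice();
  let bytes: BoxBytes = box_bytes_of(original);
  assert_eq!(bytes.len(), 3 * size_of::<u32>());
  let restored: Box<[u32]> = from_box_bytes(bytes);
  assert_eq!(&*restored, &[1u32, 2, 3][..]);
}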

@ -1,61 +0,0 @@
use crate::{Pod, Zeroable};
/// Marker trait for "plain old data" types that are valid for any bit pattern.
///
/// The requirements for this are very similar to [`Pod`],
/// except that the type can allow uninit (or padding) bytes.
/// This limits what you can do with a type of this kind, but also broadens the
/// included types to `repr(C)` `struct`s that contain padding as well as
/// `union`s. Notably, you can only cast *immutable* references and *owned*
/// values into [`AnyBitPattern`] types, not *mutable* references.
///
/// [`Pod`] is a subset of [`AnyBitPattern`], meaning that any `T: Pod` is also
/// [`AnyBitPattern`] but any `T: AnyBitPattern` is not necessarily [`Pod`].
///
/// [`AnyBitPattern`] is a subset of [`Zeroable`], meaning that any `T:
/// AnyBitPattern` is also [`Zeroable`], but any `T: Zeroable` is not
/// necessarily [`AnyBitPattern`]
///
/// # Derive
///
/// A `#[derive(AnyBitPattern)]` macro is provided under the `derive` feature
/// flag which will automatically validate the requirements of this trait and
/// implement the trait for you for both structs and enums. This is the
/// recommended method for implementing the trait, however it's also possible to
/// do manually. If you implement it manually, you *must* carefully follow the
/// below safety rules.
///
/// * NOTE: even `C-style`, fieldless enums are intentionally **excluded** from
/// this trait, since it is **unsound** for an enum to have a discriminant value
/// that is not one of its defined variants.
///
/// # Safety
///
/// Similar to [`Pod`], except we disregard the rule that it must not contain
/// uninit bytes. Still, this is a quite strong guarantee about a type, so *be
/// careful* when implementing it manually.
///
/// * The type must be inhabited (eg: no
/// [Infallible](core::convert::Infallible)).
/// * The type must be valid for any bit pattern of its backing memory.
/// * Structs need to have all fields also be `AnyBitPattern`.
/// * It is disallowed for types to contain pointer types, `Cell`, `UnsafeCell`,
/// atomics, and any other forms of interior mutability.
/// * More precisely: A shared reference to the type must allow reads, and
/// *only* reads. RustBelt's separation logic is based on the notion that a
/// type is allowed to define a sharing predicate, its own invariant that must
///   hold for shared references, and this predicate is the reasoning that allows
///   it to deal with atomics, cells, etc. We require the sharing predicate to
/// be trivial and permit only read-only access.
/// * There's probably more, don't mess it up (I mean it).
pub unsafe trait AnyBitPattern:
Zeroable + Sized + Copy + 'static
{
}
unsafe impl<T: Pod> AnyBitPattern for T {}
#[cfg(feature = "zeroable_maybe_uninit")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "zeroable_maybe_uninit")))]
unsafe impl<T> AnyBitPattern for core::mem::MaybeUninit<T> where T: AnyBitPattern
{}

522
third_party/rust/bytemuck/src/checked.rs vendored
@ -1,522 +0,0 @@
//! Checked versions of the casting functions exposed in crate root
//! that support [`CheckedBitPattern`] types.
use crate::{
internal::{self, something_went_wrong},
AnyBitPattern, NoUninit,
};
/// A marker trait that allows types that have some invalid bit patterns to be
/// used in places that otherwise require [`AnyBitPattern`] or [`Pod`] types by
/// performing a runtime check on a particular set of bits. This is particularly
/// useful for types like fieldless ('C-style') enums, [`char`], bool, and
/// structs containing them.
///
/// To do this, we define a `Bits` type which is a type with equivalent layout
/// to `Self` other than the invalid bit patterns which disallow `Self` from
/// being [`AnyBitPattern`]. This `Bits` type must itself implement
/// [`AnyBitPattern`]. Then, we implement a function that checks whether a
/// certain instance of the `Bits` is also a valid bit pattern of `Self`. If
/// this check passes, then we can allow casting from the `Bits` to `Self` (and
/// therefore, any type which is able to be cast to `Bits` is also able to be
/// cast to `Self`).
///
/// [`AnyBitPattern`] is a subset of [`CheckedBitPattern`], meaning that any `T:
/// AnyBitPattern` is also [`CheckedBitPattern`]. This means you can also use
/// any [`AnyBitPattern`] type in the checked versions of casting functions in
/// this module. If it's possible, prefer implementing [`AnyBitPattern`] for
/// your type directly instead of [`CheckedBitPattern`] as it gives greater
/// flexibility.
///
/// # Derive
///
/// A `#[derive(CheckedBitPattern)]` macro is provided under the `derive`
/// feature flag which will automatically validate the requirements of this
/// trait and implement the trait for you for both enums and structs. This is
/// the recommended method for implementing the trait, however it's also
/// possible to do manually.
///
/// # Example
///
/// If manually implementing the trait, we can do something like so:
///
/// ```rust
/// use bytemuck::{CheckedBitPattern, NoUninit};
///
/// #[repr(u32)]
/// #[derive(Copy, Clone)]
/// enum MyEnum {
/// Variant0 = 0,
/// Variant1 = 1,
/// Variant2 = 2,
/// }
///
/// unsafe impl CheckedBitPattern for MyEnum {
/// type Bits = u32;
///
/// fn is_valid_bit_pattern(bits: &u32) -> bool {
/// match *bits {
/// 0 | 1 | 2 => true,
/// _ => false,
/// }
/// }
/// }
///
/// // It is often useful to also implement `NoUninit` on our `CheckedBitPattern` types.
/// // This will allow us to do casting of mutable references (and mutable slices).
/// // It is not always possible to do so, but in this case we have no padding so it is.
/// unsafe impl NoUninit for MyEnum {}
/// ```
///
/// We can now use relevant casting functions. For example,
///
/// ```rust
/// # use bytemuck::{CheckedBitPattern, NoUninit};
/// # #[repr(u32)]
/// # #[derive(Copy, Clone, PartialEq, Eq, Debug)]
/// # enum MyEnum {
/// # Variant0 = 0,
/// # Variant1 = 1,
/// # Variant2 = 2,
/// # }
/// # unsafe impl NoUninit for MyEnum {}
/// # unsafe impl CheckedBitPattern for MyEnum {
/// # type Bits = u32;
/// # fn is_valid_bit_pattern(bits: &u32) -> bool {
/// # match *bits {
/// # 0 | 1 | 2 => true,
/// # _ => false,
/// # }
/// # }
/// # }
/// use bytemuck::{bytes_of, bytes_of_mut};
/// use bytemuck::checked;
///
/// let bytes = bytes_of(&2u32);
/// let result = checked::try_from_bytes::<MyEnum>(bytes);
/// assert_eq!(result, Ok(&MyEnum::Variant2));
///
/// // Fails for invalid discriminant
/// let bytes = bytes_of(&100u32);
/// let result = checked::try_from_bytes::<MyEnum>(bytes);
/// assert!(result.is_err());
///
/// // Since we implemented NoUninit, we can also cast mutably from an original type
/// // that is `NoUninit + AnyBitPattern`:
/// let mut my_u32 = 2u32;
/// {
/// let as_enum_mut = checked::cast_mut::<_, MyEnum>(&mut my_u32);
/// assert_eq!(as_enum_mut, &mut MyEnum::Variant2);
/// *as_enum_mut = MyEnum::Variant0;
/// }
/// assert_eq!(my_u32, 0u32);
/// ```
///
/// # Safety
///
/// * `Self` *must* have the same layout as the specified `Bits` except for
/// the possible invalid bit patterns being checked during
/// [`is_valid_bit_pattern`].
/// * This almost certainly means your type must be `#[repr(C)]` or a similar
/// specified repr, but if you think you know better, you probably don't. If
/// you still think you know better, be careful and have fun. And don't mess
/// it up (I mean it).
/// * If [`is_valid_bit_pattern`] returns true, then the bit pattern contained
/// in `bits` must also be valid for an instance of `Self`.
/// * Probably more, don't mess it up (I mean it 2.0)
///
/// [`is_valid_bit_pattern`]: CheckedBitPattern::is_valid_bit_pattern
/// [`Pod`]: crate::Pod
pub unsafe trait CheckedBitPattern: Copy {
/// `Self` *must* have the same layout as the specified `Bits` except for
/// the possible invalid bit patterns being checked during
/// [`is_valid_bit_pattern`].
///
/// [`is_valid_bit_pattern`]: CheckedBitPattern::is_valid_bit_pattern
type Bits: AnyBitPattern;
/// If this function returns true, then it must be valid to reinterpret `bits`
/// as `&Self`.
fn is_valid_bit_pattern(bits: &Self::Bits) -> bool;
}
unsafe impl<T: AnyBitPattern> CheckedBitPattern for T {
type Bits = T;
#[inline(always)]
fn is_valid_bit_pattern(_bits: &T) -> bool {
true
}
}
unsafe impl CheckedBitPattern for char {
type Bits = u32;
#[inline]
fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
core::char::from_u32(*bits).is_some()
}
}
unsafe impl CheckedBitPattern for bool {
type Bits = u8;
#[inline]
fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
match *bits {
0 | 1 => true,
_ => false,
}
}
}
// Rust 1.70.0 documents that NonZero[int] has the same layout as [int].
macro_rules! impl_checked_for_nonzero {
($($nonzero:ty: $primitive:ty),* $(,)?) => {
$(
unsafe impl CheckedBitPattern for $nonzero {
type Bits = $primitive;
#[inline]
fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
*bits != 0
}
}
)*
};
}
impl_checked_for_nonzero! {
core::num::NonZeroU8: u8,
core::num::NonZeroI8: i8,
core::num::NonZeroU16: u16,
core::num::NonZeroI16: i16,
core::num::NonZeroU32: u32,
core::num::NonZeroI32: i32,
core::num::NonZeroU64: u64,
core::num::NonZeroI64: i64,
core::num::NonZeroI128: i128,
core::num::NonZeroU128: u128,
core::num::NonZeroUsize: usize,
core::num::NonZeroIsize: isize,
}
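// A hedged sketch of the NonZero coverage above: the all-zero bit pattern is
// rejected, while any other value converts (uses `try_cast` from further down
// in this module).
#[cfg(test)]
#[test]
fn nonzero_checked_cast_sketch() {
  use core::num::NonZeroU32;
  assert_eq!(try_cast::<u32, NonZeroU32>(5), Ok(NonZeroU32::new(5).unwrap()));
  assert!(try_cast::<u32, NonZeroU32>(0).is_err());
}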
/// The things that can go wrong when casting between [`CheckedBitPattern`] data
/// forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CheckedCastError {
/// An error occurred during a true-[`Pod`] cast
///
/// [`Pod`]: crate::Pod
PodCastError(crate::PodCastError),
/// When casting to a [`CheckedBitPattern`] type, it is possible that the
/// original data contains an invalid bit pattern. If so, the cast will
/// fail and this error will be returned. Will never happen on casts
/// between [`Pod`] types.
///
/// [`Pod`]: crate::Pod
InvalidBitPattern,
}
#[cfg(not(target_arch = "spirv"))]
impl core::fmt::Display for CheckedCastError {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "{:?}", self)
}
}
#[cfg(feature = "extern_crate_std")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_std")))]
impl std::error::Error for CheckedCastError {}
impl From<crate::PodCastError> for CheckedCastError {
fn from(err: crate::PodCastError) -> CheckedCastError {
CheckedCastError::PodCastError(err)
}
}
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Failure
///
/// * If the slice isn't aligned for the new type
/// * If the slice's length isn't exactly the size of the new type
/// * If the slice contains an invalid bit pattern for `T`
#[inline]
pub fn try_from_bytes<T: CheckedBitPattern>(
s: &[u8],
) -> Result<&T, CheckedCastError> {
let pod = crate::try_from_bytes(s)?;
if <T as CheckedBitPattern>::is_valid_bit_pattern(pod) {
Ok(unsafe { &*(pod as *const <T as CheckedBitPattern>::Bits as *const T) })
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Failure
///
/// * If the slice isn't aligned for the new type
/// * If the slice's length isn't exactly the size of the new type
/// * If the slice contains an invalid bit pattern for `T`
#[inline]
pub fn try_from_bytes_mut<T: CheckedBitPattern + NoUninit>(
s: &mut [u8],
) -> Result<&mut T, CheckedCastError> {
let pod = unsafe { internal::try_from_bytes_mut(s) }?;
if <T as CheckedBitPattern>::is_valid_bit_pattern(pod) {
Ok(unsafe { &mut *(pod as *mut <T as CheckedBitPattern>::Bits as *mut T) })
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Reads from the bytes as if they were a `T`.
///
/// ## Failure
/// * If the `bytes` length is not equal to `size_of::<T>()`.
/// * If the slice contains an invalid bit pattern for `T`
#[inline]
pub fn try_pod_read_unaligned<T: CheckedBitPattern>(
bytes: &[u8],
) -> Result<T, CheckedCastError> {
let pod = crate::try_pod_read_unaligned(bytes)?;
if <T as CheckedBitPattern>::is_valid_bit_pattern(&pod) {
Ok(unsafe { transmute!(pod) })
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Try to cast `T` into `U`.
///
/// Note that for this particular type of cast, alignment isn't a factor. The
/// input value is semantically copied into the function and then returned to a
/// new memory location which will have whatever the required alignment of the
/// output type is.
///
/// ## Failure
///
/// * If the types don't have the same size this fails.
/// * If `a` contains an invalid bit pattern for `B` this fails.
#[inline]
pub fn try_cast<A: NoUninit, B: CheckedBitPattern>(
a: A,
) -> Result<B, CheckedCastError> {
let pod = crate::try_cast(a)?;
if <B as CheckedBitPattern>::is_valid_bit_pattern(&pod) {
Ok(unsafe { transmute!(pod) })
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Try to convert a `&T` into `&U`.
///
/// ## Failure
///
/// * If the reference isn't aligned in the new type
/// * If the source type and target type aren't the same size.
/// * If `a` contains an invalid bit pattern for `B` this fails.
#[inline]
pub fn try_cast_ref<A: NoUninit, B: CheckedBitPattern>(
a: &A,
) -> Result<&B, CheckedCastError> {
let pod = crate::try_cast_ref(a)?;
if <B as CheckedBitPattern>::is_valid_bit_pattern(pod) {
Ok(unsafe { &*(pod as *const <B as CheckedBitPattern>::Bits as *const B) })
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Try to convert a `&mut T` into `&mut U`.
///
/// As [`try_cast_ref`], but `mut`.
#[inline]
pub fn try_cast_mut<
A: NoUninit + AnyBitPattern,
B: CheckedBitPattern + NoUninit,
>(
a: &mut A,
) -> Result<&mut B, CheckedCastError> {
let pod = unsafe { internal::try_cast_mut(a) }?;
if <B as CheckedBitPattern>::is_valid_bit_pattern(pod) {
Ok(unsafe { &mut *(pod as *mut <B as CheckedBitPattern>::Bits as *mut B) })
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Try to convert `&[A]` into `&[B]` (possibly with a change in length).
///
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement and the input slice
/// isn't aligned.
/// * If the target element type is a different size from the current element
/// type, and the output slice wouldn't be a whole number of elements when
/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
/// that's a failure).
/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
/// and a non-ZST.
/// * If any element of the converted slice would contain an invalid bit pattern
/// for `B` this fails.
#[inline]
pub fn try_cast_slice<A: NoUninit, B: CheckedBitPattern>(
a: &[A],
) -> Result<&[B], CheckedCastError> {
let pod = crate::try_cast_slice(a)?;
if pod.iter().all(|pod| <B as CheckedBitPattern>::is_valid_bit_pattern(pod)) {
Ok(unsafe {
core::slice::from_raw_parts(pod.as_ptr() as *const B, pod.len())
})
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
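// A small sketch of the element-wise validity check: every byte must be a legal
// `bool` pattern, otherwise the whole slice cast is refused.
#[cfg(test)]
#[test]
fn checked_cast_slice_sketch() {
  let ok_bytes: &[u8] = &[0, 1, 1];
  let bools: &[bool] = try_cast_slice(ok_bytes).unwrap();
  assert_eq!(bools, &[false, true, true][..]);

  let bad_bytes: &[u8] = &[0, 1, 2];
  assert_eq!(
    try_cast_slice::<u8, bool>(bad_bytes),
    Err(CheckedCastError::InvalidBitPattern)
  );
}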
/// Try to convert `&mut [A]` into `&mut [B]` (possibly with a change in
/// length).
///
/// As [`try_cast_slice`], but `&mut`.
#[inline]
pub fn try_cast_slice_mut<
A: NoUninit + AnyBitPattern,
B: CheckedBitPattern + NoUninit,
>(
a: &mut [A],
) -> Result<&mut [B], CheckedCastError> {
let pod = unsafe { internal::try_cast_slice_mut(a) }?;
if pod.iter().all(|pod| <B as CheckedBitPattern>::is_valid_bit_pattern(pod)) {
Ok(unsafe {
core::slice::from_raw_parts_mut(pod.as_mut_ptr() as *mut B, pod.len())
})
} else {
Err(CheckedCastError::InvalidBitPattern)
}
}
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Panics
///
/// This is [`try_from_bytes`] but will panic on error.
#[inline]
pub fn from_bytes<T: CheckedBitPattern>(s: &[u8]) -> &T {
match try_from_bytes(s) {
Ok(t) => t,
Err(e) => something_went_wrong("from_bytes", e),
}
}
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Panics
///
/// This is [`try_from_bytes_mut`] but will panic on error.
#[inline]
pub fn from_bytes_mut<T: NoUninit + CheckedBitPattern>(s: &mut [u8]) -> &mut T {
match try_from_bytes_mut(s) {
Ok(t) => t,
Err(e) => something_went_wrong("from_bytes_mut", e),
}
}
/// Reads the slice into a `T` value.
///
/// ## Panics
/// * This is like `try_pod_read_unaligned` but will panic on failure.
#[inline]
pub fn pod_read_unaligned<T: CheckedBitPattern>(bytes: &[u8]) -> T {
match try_pod_read_unaligned(bytes) {
Ok(t) => t,
Err(e) => something_went_wrong("pod_read_unaligned", e),
}
}
/// Cast `T` into `U`
///
/// ## Panics
///
/// * This is like [`try_cast`], but will panic on a size mismatch.
#[inline]
pub fn cast<A: NoUninit, B: CheckedBitPattern>(a: A) -> B {
match try_cast(a) {
Ok(t) => t,
Err(e) => something_went_wrong("cast", e),
}
}
/// Cast `&mut T` into `&mut U`.
///
/// ## Panics
///
/// This is [`try_cast_mut`] but will panic on error.
#[inline]
pub fn cast_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + CheckedBitPattern,
>(
a: &mut A,
) -> &mut B {
match try_cast_mut(a) {
Ok(t) => t,
Err(e) => something_went_wrong("cast_mut", e),
}
}
/// Cast `&T` into `&U`.
///
/// ## Panics
///
/// This is [`try_cast_ref`] but will panic on error.
#[inline]
pub fn cast_ref<A: NoUninit, B: CheckedBitPattern>(a: &A) -> &B {
match try_cast_ref(a) {
Ok(t) => t,
Err(e) => something_went_wrong("cast_ref", e),
}
}
/// Cast `&[A]` into `&[B]`.
///
/// ## Panics
///
/// This is [`try_cast_slice`] but will panic on error.
#[inline]
pub fn cast_slice<A: NoUninit, B: CheckedBitPattern>(a: &[A]) -> &[B] {
match try_cast_slice(a) {
Ok(t) => t,
Err(e) => something_went_wrong("cast_slice", e),
}
}
/// Cast `&mut [T]` into `&mut [U]`.
///
/// ## Panics
///
/// This is [`try_cast_slice_mut`] but will panic on error.
#[inline]
pub fn cast_slice_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + CheckedBitPattern,
>(
a: &mut [A],
) -> &mut [B] {
match try_cast_slice_mut(a) {
Ok(t) => t,
Err(e) => something_went_wrong("cast_slice_mut", e),
}
}

202
third_party/rust/bytemuck/src/contiguous.rs vendored
@ -1,202 +0,0 @@
use super::*;
/// A trait indicating that:
///
/// 1. A type has an equivalent representation to some known integral type.
/// 2. All instances of this type fall in a fixed range of values.
/// 3. Within that range, there are no gaps.
///
/// This is generally useful for fieldless enums (aka "c-style" enums); however,
/// it's important that it only be used for those with an explicit `#[repr]`, as
/// `#[repr(Rust)]` fieldless enums have an unspecified layout.
///
/// Additionally, you shouldn't assume that all implementations are enums. Any
/// type which meets the requirements above while following the rules under
/// "Safety" below is valid.
///
/// # Example
///
/// ```
/// # use bytemuck::Contiguous;
/// #[repr(u8)]
/// #[derive(Debug, Copy, Clone, PartialEq)]
/// enum Foo {
/// A = 0,
/// B = 1,
/// C = 2,
/// D = 3,
/// E = 4,
/// }
/// unsafe impl Contiguous for Foo {
/// type Int = u8;
/// const MIN_VALUE: u8 = Foo::A as u8;
/// const MAX_VALUE: u8 = Foo::E as u8;
/// }
/// assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
/// assert_eq!(Foo::from_integer(8), None);
/// assert_eq!(Foo::C.into_integer(), 2);
/// ```
/// # Safety
///
/// This is an unsafe trait, and incorrectly implementing it is undefined
/// behavior.
///
/// Informally, by implementing it, you're asserting that `C` is identical to
/// the integral type `C::Int`, and that every `C` falls between `C::MIN_VALUE`
/// and `C::MAX_VALUE` exactly once, without any gaps.
///
/// Precisely, the guarantees you must uphold when implementing `Contiguous` for
/// some type `C` are:
///
/// 1. The size of `C` and `C::Int` must be the same, and neither may be a ZST.
/// (Note: alignment is explicitly allowed to differ)
///
/// 2. `C::Int` must be a primitive integer, and not a wrapper type. In the
/// future, this may be lifted to include cases where the behavior is
/// identical for a relevant set of traits (Ord, arithmetic, ...).
///
/// 3. All `C::Int`s which are in the *inclusive* range between `C::MIN_VALUE`
/// and `C::MAX_VALUE` are bitwise identical to unique valid instances of
/// `C`.
///
/// 4. There exist no instances of `C` such that their bitpatterns, when
/// interpreted as instances of `C::Int`, fall outside of the `MAX_VALUE` /
/// `MIN_VALUE` range -- It is legal for unsafe code to assume that if it
/// gets a `C` that implements `Contiguous`, it is in the appropriate range.
///
/// 5. Finally, you promise not to provide overridden implementations of
/// `Contiguous::from_integer` and `Contiguous::into_integer`.
///
/// For clarity, the following rules could be derived from the above, but are
/// listed explicitly:
///
/// - `C::MAX_VALUE` must be greater or equal to `C::MIN_VALUE` (therefore, `C`
/// must be an inhabited type).
///
/// - There exist no two values between `MIN_VALUE` and `MAX_VALUE` such that
/// when interpreted as a `C` they are considered identical (by, say, match).
pub unsafe trait Contiguous: Copy + 'static {
/// The primitive integer type with an identical representation to this
/// type.
///
/// Contiguous is broadly intended for use with fieldless enums, and for
/// these the correct integer type is easy: The enum should have a
  /// `#[repr(Int)]` or `#[repr(C)]` attribute (if it does not, it is
/// *unsound* to implement `Contiguous`!).
///
/// - For `#[repr(Int)]`, use the listed `Int`. e.g. `#[repr(u8)]` should use
/// `type Int = u8`.
///
/// - For `#[repr(C)]`, use whichever type the C compiler will use to
/// represent the given enum. This is usually `c_int` (from `std::os::raw`
/// or `libc`), but it's up to you to make the determination as the
/// implementer of the unsafe trait.
///
/// For precise rules, see the list under "Safety" above.
type Int: Copy + Ord;
/// The upper *inclusive* bound for valid instances of this type.
const MAX_VALUE: Self::Int;
/// The lower *inclusive* bound for valid instances of this type.
const MIN_VALUE: Self::Int;
/// If `value` is within the range for valid instances of this type,
/// returns `Some(converted_value)`, otherwise, returns `None`.
///
  /// This is a trait method so that you can write `Self::from_integer(value)` in
/// your code. It is a contract of this trait that if you implement
/// `Contiguous` on your type you **must not** override this method.
///
/// # Panics
///
/// We will not panic for any correct implementation of `Contiguous`, but
/// *may* panic if we detect an incorrect one.
///
/// This is undefined behavior regardless, so it could have been the nasal
/// demons at that point anyway ;).
#[inline]
fn from_integer(value: Self::Int) -> Option<Self> {
// Guard against an illegal implementation of Contiguous. Annoyingly we
// can't rely on `transmute` to do this for us (see below), but
// whatever, this gets compiled into nothing in release.
assert!(size_of::<Self>() == size_of::<Self::Int>());
if Self::MIN_VALUE <= value && value <= Self::MAX_VALUE {
// SAFETY: We've checked their bounds (and their size, even though
// they've sworn under the Oath Of Unsafe Rust that that already
// matched) so this is allowed by `Contiguous`'s unsafe contract.
//
// So, the `transmute!`. ideally we'd use transmute here, which
// is more obviously safe. Sadly, we can't, as these types still
// have unspecified sizes.
Some(unsafe { transmute!(value) })
} else {
None
}
}
/// Perform the conversion from `C` into the underlying integral type. This
  /// mostly exists because otherwise generic code would need `unsafe` for the
  /// `value as integer` cast.
///
/// This is a trait method so that you can write `value.into_integer()` in
/// your code. It is a contract of this trait that if you implement
/// `Contiguous` on your type you **must not** override this method.
///
/// # Panics
///
/// We will not panic for any correct implementation of `Contiguous`, but
/// *may* panic if we detect an incorrect one.
///
/// This is undefined behavior regardless, so it could have been the nasal
/// demons at that point anyway ;).
#[inline]
fn into_integer(self) -> Self::Int {
// Guard against an illegal implementation of Contiguous. Annoyingly we
// can't rely on `transmute` to do the size check for us (see
// `from_integer's comment`), but whatever, this gets compiled into
// nothing in release. Note that we don't check the result of cast
assert!(size_of::<Self>() == size_of::<Self::Int>());
// SAFETY: The unsafe contract requires that these have identical
// representations, and that the range be entirely valid. Using
// transmute! instead of transmute here is annoying, but is required
// as `Self` and `Self::Int` have unspecified sizes still.
unsafe { transmute!(self) }
}
}
macro_rules! impl_contiguous {
($($src:ty as $repr:ident in [$min:expr, $max:expr];)*) => {$(
unsafe impl Contiguous for $src {
type Int = $repr;
const MAX_VALUE: $repr = $max;
const MIN_VALUE: $repr = $min;
}
)*};
}
impl_contiguous! {
bool as u8 in [0, 1];
u8 as u8 in [0, u8::max_value()];
u16 as u16 in [0, u16::max_value()];
u32 as u32 in [0, u32::max_value()];
u64 as u64 in [0, u64::max_value()];
u128 as u128 in [0, u128::max_value()];
usize as usize in [0, usize::max_value()];
i8 as i8 in [i8::min_value(), i8::max_value()];
i16 as i16 in [i16::min_value(), i16::max_value()];
i32 as i32 in [i32::min_value(), i32::max_value()];
i64 as i64 in [i64::min_value(), i64::max_value()];
i128 as i128 in [i128::min_value(), i128::max_value()];
isize as isize in [isize::min_value(), isize::max_value()];
NonZeroU8 as u8 in [1, u8::max_value()];
NonZeroU16 as u16 in [1, u16::max_value()];
NonZeroU32 as u32 in [1, u32::max_value()];
NonZeroU64 as u64 in [1, u64::max_value()];
NonZeroU128 as u128 in [1, u128::max_value()];
NonZeroUsize as usize in [1, usize::max_value()];
}
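// A quick sketch exercising the blanket impls above: `bool` is contiguous over
// `u8` in `[0, 1]`, and the NonZero types simply exclude zero.
#[cfg(test)]
#[test]
fn contiguous_impl_sketch() {
  assert_eq!(<bool as Contiguous>::from_integer(1u8), Some(true));
  assert_eq!(<bool as Contiguous>::from_integer(2u8), None);
  assert_eq!(true.into_integer(), 1u8);
  assert_eq!(NonZeroU8::from_integer(0u8), None);
}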

402
third_party/rust/bytemuck/src/internal.rs vendored
@ -1,402 +0,0 @@
//! Internal implementation of casting functions not bound by marker traits
//! and therefore marked as unsafe. This is used so that we don't need to
//! duplicate the business logic contained in these functions between the
//! versions exported in the crate root, `checked`, and `relaxed` modules.
#![allow(unused_unsafe)]
use crate::PodCastError;
use core::{marker::*, mem::*};
/*
Note(Lokathor): We've switched all of the `unwrap` to `match` because there is
apparently a bug: https://github.com/rust-lang/rust/issues/68667
and it doesn't seem to show up in simple godbolt examples but has been reported
as having an impact when there's a cast mixed in with other more complicated
code around it. Rustc/LLVM ends up missing that the `Err` can't ever happen for
particular type combinations, and then it doesn't fully eliminated the panic
possibility code branch.
*/
/// Immediately panics.
#[cfg(not(target_arch = "spirv"))]
#[cold]
#[inline(never)]
pub(crate) fn something_went_wrong<D: core::fmt::Display>(
_src: &str, _err: D,
) -> ! {
// Note(Lokathor): Keeping the panic here makes the panic _formatting_ go
// here too, which helps assembly readability and also helps keep down
// the inline pressure.
panic!("{src}>{err}", src = _src, err = _err);
}
/// Immediately panics.
#[cfg(target_arch = "spirv")]
#[cold]
#[inline(never)]
pub(crate) fn something_went_wrong<D>(_src: &str, _err: D) -> ! {
// Note: On the spirv targets from [rust-gpu](https://github.com/EmbarkStudios/rust-gpu)
  // panic formatting cannot be used. We just give a generic error message.
// The chance that the panicking version of these functions will ever get
// called on spir-v targets with invalid inputs is small, but giving a
// simple error message is better than no error message at all.
panic!("Called a panicing helper from bytemuck which paniced");
}
/// Re-interprets `&T` as `&[u8]`.
///
/// Any ZST becomes an empty slice, and in that case the pointer value of that
/// empty slice might not match the pointer value of the input reference.
#[inline(always)]
pub(crate) unsafe fn bytes_of<T: Copy>(t: &T) -> &[u8] {
if size_of::<T>() == 0 {
&[]
} else {
match try_cast_slice::<T, u8>(core::slice::from_ref(t)) {
Ok(s) => s,
Err(_) => unreachable!(),
}
}
}
/// Re-interprets `&mut T` as `&mut [u8]`.
///
/// Any ZST becomes an empty slice, and in that case the pointer value of that
/// empty slice might not match the pointer value of the input reference.
#[inline]
pub(crate) unsafe fn bytes_of_mut<T: Copy>(t: &mut T) -> &mut [u8] {
if size_of::<T>() == 0 {
&mut []
} else {
match try_cast_slice_mut::<T, u8>(core::slice::from_mut(t)) {
Ok(s) => s,
Err(_) => unreachable!(),
}
}
}
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Panics
///
/// This is [`try_from_bytes`] but will panic on error.
#[inline]
pub(crate) unsafe fn from_bytes<T: Copy>(s: &[u8]) -> &T {
match try_from_bytes(s) {
Ok(t) => t,
Err(e) => something_went_wrong("from_bytes", e),
}
}
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Panics
///
/// This is [`try_from_bytes_mut`] but will panic on error.
#[inline]
pub(crate) unsafe fn from_bytes_mut<T: Copy>(s: &mut [u8]) -> &mut T {
match try_from_bytes_mut(s) {
Ok(t) => t,
Err(e) => something_went_wrong("from_bytes_mut", e),
}
}
/// Reads from the bytes as if they were a `T`.
///
/// ## Failure
/// * If the `bytes` length is not equal to `size_of::<T>()`.
#[inline]
pub(crate) unsafe fn try_pod_read_unaligned<T: Copy>(
bytes: &[u8],
) -> Result<T, PodCastError> {
if bytes.len() != size_of::<T>() {
Err(PodCastError::SizeMismatch)
} else {
Ok(unsafe { (bytes.as_ptr() as *const T).read_unaligned() })
}
}
/// Reads the slice into a `T` value.
///
/// ## Panics
/// * This is like `try_pod_read_unaligned` but will panic on failure.
#[inline]
pub(crate) unsafe fn pod_read_unaligned<T: Copy>(bytes: &[u8]) -> T {
match try_pod_read_unaligned(bytes) {
Ok(t) => t,
Err(e) => something_went_wrong("pod_read_unaligned", e),
}
}
/// Checks if `ptr` is aligned to an `align` memory boundary.
///
/// ## Panics
/// * If `align` is not a power of two. This includes when `align` is zero.
#[inline]
pub(crate) fn is_aligned_to(ptr: *const (), align: usize) -> bool {
#[cfg(feature = "align_offset")]
{
// This is in a way better than `ptr as usize % align == 0`,
// because casting a pointer to an integer has the side effect that it
// exposes the pointer's provenance, which may theoretically inhibit
// some compiler optimizations.
ptr.align_offset(align) == 0
}
#[cfg(not(feature = "align_offset"))]
{
((ptr as usize) % align) == 0
}
}
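// A tiny sketch of the helper above: a reference is always aligned for its own
// type, while an odd address can never satisfy a two-byte alignment.
#[cfg(test)]
#[test]
fn is_aligned_to_sketch() {
  let x = 0u64;
  assert!(is_aligned_to(&x as *const u64 as *const (), align_of::<u64>()));
  assert!(!is_aligned_to(1usize as *const (), 2));
}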
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Failure
///
/// * If the slice isn't aligned for the new type
/// * If the slice's length isn't exactly the size of the new type
#[inline]
pub(crate) unsafe fn try_from_bytes<T: Copy>(
s: &[u8],
) -> Result<&T, PodCastError> {
if s.len() != size_of::<T>() {
Err(PodCastError::SizeMismatch)
} else if !is_aligned_to(s.as_ptr() as *const (), align_of::<T>()) {
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { &*(s.as_ptr() as *const T) })
}
}
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Failure
///
/// * If the slice isn't aligned for the new type
/// * If the slice's length isn't exactly the size of the new type
#[inline]
pub(crate) unsafe fn try_from_bytes_mut<T: Copy>(
s: &mut [u8],
) -> Result<&mut T, PodCastError> {
if s.len() != size_of::<T>() {
Err(PodCastError::SizeMismatch)
} else if !is_aligned_to(s.as_ptr() as *const (), align_of::<T>()) {
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { &mut *(s.as_mut_ptr() as *mut T) })
}
}
/// Cast `T` into `U`
///
/// ## Panics
///
/// * This is like [`try_cast`](try_cast), but will panic on a size mismatch.
#[inline]
pub(crate) unsafe fn cast<A: Copy, B: Copy>(a: A) -> B {
if size_of::<A>() == size_of::<B>() {
unsafe { transmute!(a) }
} else {
something_went_wrong("cast", PodCastError::SizeMismatch)
}
}
/// Cast `&mut T` into `&mut U`.
///
/// ## Panics
///
/// This is [`try_cast_mut`] but will panic on error.
#[inline]
pub(crate) unsafe fn cast_mut<A: Copy, B: Copy>(a: &mut A) -> &mut B {
if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
// Plz mr compiler, just notice that we can't ever hit Err in this case.
match try_cast_mut(a) {
Ok(b) => b,
Err(_) => unreachable!(),
}
} else {
match try_cast_mut(a) {
Ok(b) => b,
Err(e) => something_went_wrong("cast_mut", e),
}
}
}
/// Cast `&T` into `&U`.
///
/// ## Panics
///
/// This is [`try_cast_ref`] but will panic on error.
#[inline]
pub(crate) unsafe fn cast_ref<A: Copy, B: Copy>(a: &A) -> &B {
if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
// Plz mr compiler, just notice that we can't ever hit Err in this case.
match try_cast_ref(a) {
Ok(b) => b,
Err(_) => unreachable!(),
}
} else {
match try_cast_ref(a) {
Ok(b) => b,
Err(e) => something_went_wrong("cast_ref", e),
}
}
}
/// Cast `&[A]` into `&[B]`.
///
/// ## Panics
///
/// This is [`try_cast_slice`] but will panic on error.
#[inline]
pub(crate) unsafe fn cast_slice<A: Copy, B: Copy>(a: &[A]) -> &[B] {
match try_cast_slice(a) {
Ok(b) => b,
Err(e) => something_went_wrong("cast_slice", e),
}
}
/// Cast `&mut [T]` into `&mut [U]`.
///
/// ## Panics
///
/// This is [`try_cast_slice_mut`] but will panic on error.
#[inline]
pub(crate) unsafe fn cast_slice_mut<A: Copy, B: Copy>(a: &mut [A]) -> &mut [B] {
match try_cast_slice_mut(a) {
Ok(b) => b,
Err(e) => something_went_wrong("cast_slice_mut", e),
}
}
/// Try to cast `T` into `U`.
///
/// Note that for this particular type of cast, alignment isn't a factor. The
/// input value is semantically copied into the function and then returned to a
/// new memory location which will have whatever the required alignment of the
/// output type is.
///
/// ## Failure
///
/// * If the types don't have the same size this fails.
#[inline]
pub(crate) unsafe fn try_cast<A: Copy, B: Copy>(
a: A,
) -> Result<B, PodCastError> {
if size_of::<A>() == size_of::<B>() {
Ok(unsafe { transmute!(a) })
} else {
Err(PodCastError::SizeMismatch)
}
}
/// Try to convert a `&T` into `&U`.
///
/// ## Failure
///
/// * If the reference isn't aligned in the new type
/// * If the source type and target type aren't the same size.
#[inline]
pub(crate) unsafe fn try_cast_ref<A: Copy, B: Copy>(
a: &A,
) -> Result<&B, PodCastError> {
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
&& !is_aligned_to(a as *const A as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
Ok(unsafe { &*(a as *const A as *const B) })
} else {
Err(PodCastError::SizeMismatch)
}
}
/// Try to convert a `&mut T` into `&mut U`.
///
/// As [`try_cast_ref`], but `mut`.
#[inline]
pub(crate) unsafe fn try_cast_mut<A: Copy, B: Copy>(
a: &mut A,
) -> Result<&mut B, PodCastError> {
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
&& !is_aligned_to(a as *const A as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
Ok(unsafe { &mut *(a as *mut A as *mut B) })
} else {
Err(PodCastError::SizeMismatch)
}
}
/// Try to convert `&[A]` into `&[B]` (possibly with a change in length).
///
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement and the input slice
/// isn't aligned.
/// * If the target element type is a different size from the current element
/// type, and the output slice wouldn't be a whole number of elements when
/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
/// that's a failure).
/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
/// and a non-ZST.
#[inline]
pub(crate) unsafe fn try_cast_slice<A: Copy, B: Copy>(
a: &[A],
) -> Result<&[B], PodCastError> {
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
&& !is_aligned_to(a.as_ptr() as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
} else if size_of::<A>() == 0 || size_of::<B>() == 0 {
Err(PodCastError::SizeMismatch)
} else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
let new_len = core::mem::size_of_val(a) / size_of::<B>();
Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
} else {
Err(PodCastError::OutputSliceWouldHaveSlop)
}
}
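// Editor's sketch (not part of the vendored upstream file): the length
// arithmetic documented above, exercised through this crate-internal helper.
// `u32` and `u8` satisfy the `Copy` bound and every `u32` is a valid run of
// bytes, so the `unsafe` call is sound here.
#[cfg(test)]
mod try_cast_slice_sketch {
  use super::try_cast_slice;

  #[test]
  fn u32_to_u8_preserves_total_bytes() {
    let words: [u32; 2] = [1, 2];
    let bytes: &[u8] = unsafe { try_cast_slice(&words) }.unwrap();
    // 2 * size_of::<u32>() == 8 * size_of::<u8>()
    assert_eq!(bytes.len(), 8);
  }
}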
/// Try to convert `&mut [A]` into `&mut [B]` (possibly with a change in
/// length).
///
/// As [`try_cast_slice`], but `&mut`.
#[inline]
pub(crate) unsafe fn try_cast_slice_mut<A: Copy, B: Copy>(
a: &mut [A],
) -> Result<&mut [B], PodCastError> {
// Note(Lokathor): everything with `align_of` and `size_of` will optimize away
// after monomorphization.
if align_of::<B>() > align_of::<A>()
&& !is_aligned_to(a.as_ptr() as *const (), align_of::<B>())
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else if size_of::<B>() == size_of::<A>() {
Ok(unsafe {
core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
})
} else if size_of::<A>() == 0 || size_of::<B>() == 0 {
Err(PodCastError::SizeMismatch)
} else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
let new_len = core::mem::size_of_val(a) / size_of::<B>();
Ok(unsafe {
core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
})
} else {
Err(PodCastError::OutputSliceWouldHaveSlop)
}
}

508  third_party/rust/bytemuck/src/lib.rs (vendored)
@@ -1,508 +0,0 @@
#![no_std]
#![warn(missing_docs)]
#![allow(clippy::match_like_matches_macro)]
#![allow(clippy::uninlined_format_args)]
#![cfg_attr(feature = "nightly_docs", feature(doc_cfg))]
#![cfg_attr(feature = "nightly_portable_simd", feature(portable_simd))]
#![cfg_attr(
all(
feature = "nightly_stdsimd",
any(target_arch = "x86_64", target_arch = "x86")
),
feature(stdarch_x86_avx512)
)]
//! This crate gives small utilities for casting between plain data types.
//!
//! ## Basics
//!
//! Data comes in five basic forms in Rust, so we have five basic casting
//! functions:
//!
//! * `T` uses [`cast`]
//! * `&T` uses [`cast_ref`]
//! * `&mut T` uses [`cast_mut`]
//! * `&[T]` uses [`cast_slice`]
//! * `&mut [T]` uses [`cast_slice_mut`]
//!
//! Depending on the function, the [`NoUninit`] and/or [`AnyBitPattern`] traits
//! are used to maintain memory safety.
//!
//! **Historical Note:** When the crate first started the [`Pod`] trait was used
//! instead, and so you may hear people refer to that, but it has the strongest
//! requirements and people eventually wanted the more fine-grained system, so
//! here we are. All types that impl `Pod` have a blanket impl to also support
//! `NoUninit` and `AnyBitPattern`. The traits unfortunately do not have a
//! perfectly clean hierarchy for semver reasons.
//!
//! ## Failures
//!
//! Some casts will never fail, and other casts might fail.
//!
//! * `cast::<u32, f32>` always works (and is equivalent to [`f32::from_bits`]).
//! * `cast_ref::<[u8; 4], u32>` might fail if the specific array reference
//! given at runtime doesn't have alignment 4.
//!
//! In addition to the "normal" forms of each function, which will panic on
//! invalid input, there's also `try_` versions which will return a `Result`.
//!
//! If you would like to statically ensure that a cast will work at runtime you
//! can use the `must_cast` crate feature and the `must_` casting functions. A
//! "must cast" that can't be statically known to be valid will cause a
//! compilation error (and sometimes a very hard to read compilation error).
//!
//! ## Using Your Own Types
//!
//! All the functions listed above are guarded by the [`Pod`] trait, which is a
//! sub-trait of the [`Zeroable`] trait.
//!
//! If you enable the crate's `derive` feature then these traits can be derived
//! on your own types. The derive macros will perform the necessary checks on
//! your type declaration, and trigger an error if your type does not qualify.
//!
//! The derive macros might not cover all edge cases, and sometimes they will
//! error when actually everything is fine. As a last resort you can impl these
//! traits manually. However, these traits are `unsafe`, and you should
//! carefully read the requirements before using a manual implementation.
//!
//! ## Cargo Features
//!
//! The crate supports Rust 1.34 when no features are enabled, and so there are
//! cargo features for things that you might consider "obvious".
//!
//! The cargo features **do not** promise any particular MSRV, and they may
//! increase their MSRV in new versions.
//!
//! * `derive`: Provide derive macros for the various traits.
//! * `extern_crate_alloc`: Provide utilities for `alloc` related types such as
//! Box and Vec.
//! * `zeroable_maybe_uninit` and `zeroable_atomics`: Provide more [`Zeroable`]
//! impls.
//! * `wasm_simd` and `aarch64_simd`: Support more SIMD types.
//! * `min_const_generics`: Provides appropriate impls for arrays of all lengths
//! instead of just for a select list of array lengths.
//! * `must_cast`: Provides the `must_` functions, which will compile error if
//! the requested cast can't be statically verified.
#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
use core::arch::aarch64;
#[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
use core::arch::wasm32;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64;
//
use core::{marker::*, mem::*, num::*, ptr::*};
// Used from macros to ensure we aren't using some locally defined name and
// actually are referencing libcore. This also would allow pre-2018 edition
// crates to use our macros, but I'm not sure how important that is.
#[doc(hidden)]
pub use ::core as __core;
#[cfg(not(feature = "min_const_generics"))]
macro_rules! impl_unsafe_marker_for_array {
( $marker:ident , $( $n:expr ),* ) => {
$(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
}
}
/// A macro to transmute between two types without requiring the sizes to be
/// statically known.
macro_rules! transmute {
($val:expr) => {
::core::mem::transmute_copy(&::core::mem::ManuallyDrop::new($val))
};
}
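// Editor's sketch (not part of the vendored upstream file): `transmute!`
// defers the size check to `transmute_copy`, which is what lets generic code
// such as `cast` compile before the sizes are known. Matching sizes are the
// caller's responsibility, as in this `u32` <-> `[u8; 4]` round trip.
#[cfg(test)]
mod transmute_macro_sketch {
  #[test]
  fn u32_round_trip() {
    let x: u32 = 0xDEAD_BEEF;
    let bytes: [u8; 4] = unsafe { transmute!(x) };
    let back: u32 = unsafe { transmute!(bytes) };
    assert_eq!(back, 0xDEAD_BEEF);
  }
}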
/// A macro to implement marker traits for various simd types.
/// #[allow(unused)] because the impls are only compiled on relevant platforms
/// with relevant cargo features enabled.
#[allow(unused)]
macro_rules! impl_unsafe_marker_for_simd {
($(#[cfg($cfg_predicate:meta)])? unsafe impl $trait:ident for $platform:ident :: {}) => {};
($(#[cfg($cfg_predicate:meta)])? unsafe impl $trait:ident for $platform:ident :: { $first_type:ident $(, $types:ident)* $(,)? }) => {
$( #[cfg($cfg_predicate)] )?
$( #[cfg_attr(feature = "nightly_docs", doc(cfg($cfg_predicate)))] )?
unsafe impl $trait for $platform::$first_type {}
$( #[cfg($cfg_predicate)] )? // To prevent recursion errors if nothing is going to be expanded anyway.
impl_unsafe_marker_for_simd!($( #[cfg($cfg_predicate)] )? unsafe impl $trait for $platform::{ $( $types ),* });
};
}
#[cfg(feature = "extern_crate_std")]
extern crate std;
#[cfg(feature = "extern_crate_alloc")]
extern crate alloc;
#[cfg(feature = "extern_crate_alloc")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_alloc")))]
pub mod allocation;
#[cfg(feature = "extern_crate_alloc")]
pub use allocation::*;
mod anybitpattern;
pub use anybitpattern::*;
pub mod checked;
pub use checked::CheckedBitPattern;
mod internal;
mod zeroable;
pub use zeroable::*;
mod zeroable_in_option;
pub use zeroable_in_option::*;
mod pod;
pub use pod::*;
mod pod_in_option;
pub use pod_in_option::*;
#[cfg(feature = "must_cast")]
mod must;
#[cfg(feature = "must_cast")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "must_cast")))]
pub use must::*;
mod no_uninit;
pub use no_uninit::*;
mod contiguous;
pub use contiguous::*;
mod offset_of;
// ^ no import, the module only has a macro_rules, which are cursed and don't
// follow normal import/export rules.
mod transparent;
pub use transparent::*;
#[cfg(feature = "derive")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "derive")))]
pub use bytemuck_derive::{
AnyBitPattern, ByteEq, ByteHash, CheckedBitPattern, Contiguous, NoUninit,
Pod, TransparentWrapper, Zeroable,
};
/// The things that can go wrong when casting between [`Pod`] data forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PodCastError {
/// You tried to cast a slice to an element type with a higher alignment
/// requirement but the slice wasn't aligned.
TargetAlignmentGreaterAndInputNotAligned,
/// If the element size changes then the output slice changes length
/// accordingly. If the output slice wouldn't be a whole number of elements
/// then the conversion fails.
OutputSliceWouldHaveSlop,
/// When casting a slice you can't convert between ZST elements and non-ZST
/// elements. When casting an individual `T`, `&T`, or `&mut T` value the
/// source size and destination size must be an exact match.
SizeMismatch,
/// For this type of cast the alignments must be exactly the same and they
/// were not so now you're sad.
///
/// This error is generated **only** by operations that cast allocated types
/// (such as `Box` and `Vec`), because in that case the alignment must stay
/// exact.
AlignmentMismatch,
}
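// Editor's sketch (not part of the vendored upstream file): provoking the
// alignment variant deliberately. A byte view of a `u16` array starts
// 2-aligned, so stepping in by one byte is guaranteed to be misaligned.
#[cfg(test)]
mod pod_cast_error_sketch {
  use crate::PodCastError;

  #[test]
  fn misaligned_input_is_reported() {
    let words = [0u16; 3];
    let bytes: &[u8] = crate::cast_slice(&words);
    assert_eq!(
      crate::try_from_bytes::<u16>(&bytes[1..3]),
      Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
    );
  }
}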
#[cfg(not(target_arch = "spirv"))]
impl core::fmt::Display for PodCastError {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
write!(f, "{:?}", self)
}
}
#[cfg(feature = "extern_crate_std")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_std")))]
impl std::error::Error for PodCastError {}
/// Re-interprets `&T` as `&[u8]`.
///
/// Any ZST becomes an empty slice, and in that case the pointer value of that
/// empty slice might not match the pointer value of the input reference.
#[inline]
pub fn bytes_of<T: NoUninit>(t: &T) -> &[u8] {
unsafe { internal::bytes_of(t) }
}
/// Re-interprets `&mut T` as `&mut [u8]`.
///
/// Any ZST becomes an empty slice, and in that case the pointer value of that
/// empty slice might not match the pointer value of the input reference.
#[inline]
pub fn bytes_of_mut<T: NoUninit + AnyBitPattern>(t: &mut T) -> &mut [u8] {
unsafe { internal::bytes_of_mut(t) }
}
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Panics
///
/// This is like [`try_from_bytes`] but will panic on error.
#[inline]
pub fn from_bytes<T: AnyBitPattern>(s: &[u8]) -> &T {
unsafe { internal::from_bytes(s) }
}
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Panics
///
/// This is like [`try_from_bytes_mut`] but will panic on error.
#[inline]
pub fn from_bytes_mut<T: NoUninit + AnyBitPattern>(s: &mut [u8]) -> &mut T {
unsafe { internal::from_bytes_mut(s) }
}
/// Reads from the bytes as if they were a `T`.
///
/// Unlike [`from_bytes`], the slice doesn't need to respect alignment of `T`,
/// only sizes must match.
///
/// ## Failure
/// * If the `bytes` length is not equal to `size_of::<T>()`.
#[inline]
pub fn try_pod_read_unaligned<T: AnyBitPattern>(
bytes: &[u8],
) -> Result<T, PodCastError> {
unsafe { internal::try_pod_read_unaligned(bytes) }
}
/// Reads the slice into a `T` value.
///
/// Unlike [`from_bytes`], the slice doesn't need to respect alignment of `T`,
/// only sizes must match.
///
/// ## Panics
/// * This is like `try_pod_read_unaligned` but will panic on failure.
#[inline]
pub fn pod_read_unaligned<T: AnyBitPattern>(bytes: &[u8]) -> T {
unsafe { internal::pod_read_unaligned(bytes) }
}
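// Editor's sketch (not part of the vendored upstream file): the unaligned
// read is by value, so any window of the right length works; a wrong length
// is rejected by size, never by alignment.
#[cfg(test)]
mod pod_read_unaligned_sketch {
  use crate::PodCastError;

  #[test]
  fn length_is_all_that_matters() {
    let bytes = [0u8; 7];
    // A 4-byte window at an odd offset is fine.
    let value: u32 = crate::pod_read_unaligned(&bytes[1..5]);
    assert_eq!(value, 0);
    // A 3-byte window fails with a size error.
    assert_eq!(
      crate::try_pod_read_unaligned::<u32>(&bytes[..3]),
      Err(PodCastError::SizeMismatch)
    );
  }
}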
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Failure
///
/// * If the slice isn't aligned for the new type
/// * If the slice's length isn't exactly the size of the new type
#[inline]
pub fn try_from_bytes<T: AnyBitPattern>(s: &[u8]) -> Result<&T, PodCastError> {
unsafe { internal::try_from_bytes(s) }
}
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Failure
///
/// * If the slice isn't aligned for the new type
/// * If the slice's length isn't exactly the size of the new type
#[inline]
pub fn try_from_bytes_mut<T: NoUninit + AnyBitPattern>(
s: &mut [u8],
) -> Result<&mut T, PodCastError> {
unsafe { internal::try_from_bytes_mut(s) }
}
/// Cast `T` into `U`
///
/// ## Panics
///
/// * This is like [`try_cast`], but will panic on a size mismatch.
#[inline]
pub fn cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
unsafe { internal::cast(a) }
}
/// Cast `&mut T` into `&mut U`.
///
/// ## Panics
///
/// This is [`try_cast_mut`] but will panic on error.
#[inline]
pub fn cast_mut<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
a: &mut A,
) -> &mut B {
unsafe { internal::cast_mut(a) }
}
/// Cast `&T` into `&U`.
///
/// ## Panics
///
/// This is [`try_cast_ref`] but will panic on error.
#[inline]
pub fn cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
unsafe { internal::cast_ref(a) }
}
/// Cast `&[A]` into `&[B]`.
///
/// ## Panics
///
/// This is [`try_cast_slice`] but will panic on error.
#[inline]
pub fn cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
unsafe { internal::cast_slice(a) }
}
/// Cast `&mut [T]` into `&mut [U]`.
///
/// ## Panics
///
/// This is [`try_cast_slice_mut`] but will panic on error.
#[inline]
pub fn cast_slice_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut [A],
) -> &mut [B] {
unsafe { internal::cast_slice_mut(a) }
}
/// As [`align_to`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to),
/// but safe because of the [`Pod`] bound.
#[inline]
pub fn pod_align_to<T: NoUninit, U: AnyBitPattern>(
vals: &[T],
) -> (&[T], &[U], &[T]) {
unsafe { vals.align_to::<U>() }
}
/// As [`align_to_mut`](https://doc.rust-lang.org/std/primitive.slice.html#method.align_to_mut),
/// but safe because of the [`Pod`] bound.
#[inline]
pub fn pod_align_to_mut<
T: NoUninit + AnyBitPattern,
U: NoUninit + AnyBitPattern,
>(
vals: &mut [T],
) -> (&mut [T], &mut [U], &mut [T]) {
unsafe { vals.align_to_mut::<U>() }
}
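// Editor's sketch (not part of the vendored upstream file): where
// `pod_align_to` splits depends on the buffer's runtime address, so only the
// invariant that no bytes are dropped is asserted.
#[cfg(test)]
mod pod_align_to_sketch {
  #[test]
  fn no_bytes_are_lost() {
    let bytes = [0u8; 13];
    let (prefix, mid, suffix) = crate::pod_align_to::<u8, u32>(&bytes);
    assert_eq!(
      prefix.len() + mid.len() * core::mem::size_of::<u32>() + suffix.len(),
      bytes.len()
    );
  }
}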
/// Try to cast `T` into `U`.
///
/// Note that for this particular type of cast, alignment isn't a factor. The
/// input value is semantically copied into the function and then returned to a
/// new memory location which will have whatever the required alignment of the
/// output type is.
///
/// ## Failure
///
/// * If the types don't have the same size this fails.
#[inline]
pub fn try_cast<A: NoUninit, B: AnyBitPattern>(
a: A,
) -> Result<B, PodCastError> {
unsafe { internal::try_cast(a) }
}
/// Try to convert a `&T` into `&U`.
///
/// ## Failure
///
/// * If the reference isn't aligned in the new type
/// * If the source type and target type aren't the same size.
#[inline]
pub fn try_cast_ref<A: NoUninit, B: AnyBitPattern>(
a: &A,
) -> Result<&B, PodCastError> {
unsafe { internal::try_cast_ref(a) }
}
/// Try to convert a `&mut T` into `&mut U`.
///
/// As [`try_cast_ref`], but `mut`.
#[inline]
pub fn try_cast_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut A,
) -> Result<&mut B, PodCastError> {
unsafe { internal::try_cast_mut(a) }
}
/// Try to convert `&[A]` into `&[B]` (possibly with a change in length).
///
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement and the input slice
/// isn't aligned.
/// * If the target element type is a different size from the current element
/// type, and the output slice wouldn't be a whole number of elements when
/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
/// that's a failure).
/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
/// and a non-ZST.
#[inline]
pub fn try_cast_slice<A: NoUninit, B: AnyBitPattern>(
a: &[A],
) -> Result<&[B], PodCastError> {
unsafe { internal::try_cast_slice(a) }
}
/// Try to convert `&mut [A]` into `&mut [B]` (possibly with a change in
/// length).
///
/// As [`try_cast_slice`], but `&mut`.
#[inline]
pub fn try_cast_slice_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut [A],
) -> Result<&mut [B], PodCastError> {
unsafe { internal::try_cast_slice_mut(a) }
}
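// Editor's sketch (not part of the vendored upstream file): the "slop" failure
// described above, using `[u8; 3]` as the target so its alignment of 1 keeps
// the alignment branch out of the picture and the error is deterministic.
#[cfg(test)]
mod cast_slice_slop_sketch {
  use crate::PodCastError;

  #[test]
  fn leftover_bytes_are_an_error() {
    let words = [0u32; 1];
    // 4 bytes cannot be split into whole 3-byte elements.
    assert_eq!(
      crate::try_cast_slice::<u32, [u8; 3]>(&words),
      Err(PodCastError::OutputSliceWouldHaveSlop)
    );
  }
}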
/// Fill all bytes of `target` with zeroes (see [`Zeroable`]).
///
/// This is similar to `*target = Zeroable::zeroed()`, but guarantees that any
/// padding bytes in `target` are zeroed as well.
///
/// See also [`fill_zeroes`], if you have a slice rather than a single value.
#[inline]
pub fn write_zeroes<T: Zeroable>(target: &mut T) {
struct EnsureZeroWrite<T>(*mut T);
impl<T> Drop for EnsureZeroWrite<T> {
#[inline(always)]
fn drop(&mut self) {
unsafe {
core::ptr::write_bytes(self.0, 0u8, 1);
}
}
}
unsafe {
let guard = EnsureZeroWrite(target);
core::ptr::drop_in_place(guard.0);
drop(guard);
}
}
/// Fill all bytes of `slice` with zeroes (see [`Zeroable`]).
///
/// This is similar to `slice.fill(Zeroable::zeroed())`, but guarantees that any
/// padding bytes in `slice` are zeroed as well.
///
/// See also [`write_zeroes`], which zeroes all bytes of a single value rather
/// than a slice.
#[inline]
pub fn fill_zeroes<T: Zeroable>(slice: &mut [T]) {
if core::mem::needs_drop::<T>() {
// If `T` needs to be dropped then we have to do this one item at a time, in
// case one of the intermediate drops panics.
slice.iter_mut().for_each(write_zeroes);
} else {
// Otherwise we can be really fast and just fill everything with zeros.
let len = core::mem::size_of_val::<[T]>(slice);
unsafe { core::ptr::write_bytes(slice.as_mut_ptr() as *mut u8, 0u8, len) }
}
}
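// Editor's sketch (not part of the vendored upstream file): the two zeroing
// helpers above on a single value and on a slice. For these padding-free
// types the effect matches plain assignment; the byte-level guarantee only
// matters for types with padding.
#[cfg(test)]
mod zeroing_sketch {
  #[test]
  fn zeroes_values_and_slices() {
    let mut x = 0xFFu8;
    crate::write_zeroes(&mut x);
    assert_eq!(x, 0);

    let mut buf = [1u32, 2, 3];
    crate::fill_zeroes(&mut buf);
    assert_eq!(buf, [0u32; 3]);
  }
}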

203  third_party/rust/bytemuck/src/must.rs (vendored)
@@ -1,203 +0,0 @@
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::let_unit_value)]
#![allow(clippy::let_underscore_untyped)]
#![allow(clippy::ptr_as_ptr)]
use crate::{AnyBitPattern, NoUninit};
use core::mem::{align_of, size_of};
struct Cast<A, B>((A, B));
impl<A, B> Cast<A, B> {
const ASSERT_ALIGN_GREATER_THAN_EQUAL: () =
assert!(align_of::<A>() >= align_of::<B>());
const ASSERT_SIZE_EQUAL: () = assert!(size_of::<A>() == size_of::<B>());
const ASSERT_SIZE_MULTIPLE_OF: () = assert!(
(size_of::<A>() == 0) == (size_of::<B>() == 0)
&& (size_of::<A>() % size_of::<B>() == 0)
);
}
// Workaround for https://github.com/rust-lang/miri/issues/2423.
// Miri currently doesn't see post-monomorphization errors until runtime,
// so `compile_fail` tests relying on post-monomorphization errors don't
// actually fail. Instead use `should_panic` under miri as a workaround.
#[cfg(miri)]
macro_rules! post_mono_compile_fail_doctest {
() => {
"```should_panic"
};
}
#[cfg(not(miri))]
macro_rules! post_mono_compile_fail_doctest {
() => {
"```compile_fail,E0080"
};
}
/// Cast `A` into `B` if infallible, or fail to compile.
///
/// Note that for this particular type of cast, alignment isn't a factor. The
/// input value is semantically copied into the function and then returned to a
/// new memory location which will have whatever the required alignment of the
/// output type is.
///
/// ## Failure
///
/// * If the types don't have the same size this fails to compile.
///
/// ## Examples
/// ```
/// // compiles:
/// let bytes: [u8; 2] = bytemuck::must_cast(12_u16);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// // fails to compile (size mismatch):
/// let bytes : [u8; 3] = bytemuck::must_cast(12_u16);
/// ```
#[inline]
pub fn must_cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
unsafe { transmute!(a) }
}
/// Convert `&A` into `&B` if infallible, or fail to compile.
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement.
/// * If the source type and target type aren't the same size.
///
/// ## Examples
/// ```
/// // compiles:
/// let bytes: &[u8; 2] = bytemuck::must_cast_ref(&12_u16);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// // fails to compile (size mismatch):
/// let bytes : &[u8; 3] = bytemuck::must_cast_ref(&12_u16);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// // fails to compile (alignment requirements increased):
/// let bytes : &u16 = bytemuck::must_cast_ref(&[1u8, 2u8]);
/// ```
#[inline]
pub fn must_cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
unsafe { &*(a as *const A as *const B) }
}
/// Convert a `&mut A` into `&mut B` if infallible, or fail to compile.
///
/// As [`must_cast_ref`], but `mut`.
///
/// ## Examples
/// ```
/// let mut i = 12_u16;
/// // compiles:
/// let bytes: &mut [u8; 2] = bytemuck::must_cast_mut(&mut i);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut bytes: &mut [u8; 2] = &mut [1, 2];
/// // fails to compile (alignment requirements increased):
/// let i : &mut u16 = bytemuck::must_cast_mut(bytes);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut i = 12_u16;
/// // fails to compile (size mismatch):
/// let bytes : &mut [u8; 3] = bytemuck::must_cast_mut(&mut i);
/// ```
#[inline]
pub fn must_cast_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut A,
) -> &mut B {
let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
unsafe { &mut *(a as *mut A as *mut B) }
}
/// Convert `&[A]` into `&[B]` (possibly with a change in length) if
/// infallible, or fail to compile.
///
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement.
/// * If the target element type doesn't evenly fit into the current element
/// type (eg: 3 `u16` values is 1.5 `u32` values, so that's a failure).
/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
/// and a non-ZST.
///
/// ## Examples
/// ```
/// let indicies: &[u16] = &[1, 2, 3];
/// // compiles:
/// let bytes: &[u8] = bytemuck::must_cast_slice(indicies);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let bytes : &[u8] = &[1, 0, 2, 0, 3, 0];
/// // fails to compile (bytes.len() might not be a multiple of 2):
/// let byte_pairs : &[[u8; 2]] = bytemuck::must_cast_slice(bytes);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let byte_pairs : &[[u8; 2]] = &[[1, 0], [2, 0], [3, 0]];
/// // fails to compile (alignment requirements increased):
/// let indicies : &[u16] = bytemuck::must_cast_slice(byte_pairs);
/// ```
#[inline]
pub fn must_cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
let new_len = if size_of::<A>() == size_of::<B>() {
a.len()
} else {
a.len() * (size_of::<A>() / size_of::<B>())
};
unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) }
}
/// Convert `&mut [A]` into `&mut [B]` (possibly with a change in length) if
/// infallible, or fail to compile.
///
/// As [`must_cast_slice`], but `&mut`.
///
/// ## Examples
/// ```
/// let mut indicies = [1, 2, 3];
/// let indicies: &mut [u16] = &mut indicies;
/// // compiles:
/// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(indicies);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut bytes = [1, 0, 2, 0, 3, 0];
/// # let bytes : &mut [u8] = &mut bytes[..];
/// // fails to compile (bytes.len() might not be a multiple of 2):
/// let byte_pairs : &mut [[u8; 2]] = bytemuck::must_cast_slice_mut(bytes);
/// ```
#[doc = post_mono_compile_fail_doctest!()]
/// # let mut byte_pairs = [[1, 0], [2, 0], [3, 0]];
/// # let byte_pairs : &mut [[u8; 2]] = &mut byte_pairs[..];
/// // fails to compile (alignment requirements increased):
/// let indicies : &mut [u16] = bytemuck::must_cast_slice_mut(byte_pairs);
/// ```
#[inline]
pub fn must_cast_slice_mut<
A: NoUninit + AnyBitPattern,
B: NoUninit + AnyBitPattern,
>(
a: &mut [A],
) -> &mut [B] {
let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
let new_len = if size_of::<A>() == size_of::<B>() {
a.len()
} else {
a.len() * (size_of::<A>() / size_of::<B>())
};
unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) }
}
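// Editor's sketch (not part of the vendored upstream file): the compile-time
// checked slice widening above. `size_of::<u32>()` is a multiple of
// `size_of::<u8>()`, so the length simply scales by four and no runtime
// check is needed.
#[cfg(test)]
mod must_cast_sketch {
  #[test]
  fn u32_slice_to_bytes() {
    let words = [0u32; 2];
    let bytes: &[u8] = crate::must_cast_slice(&words);
    assert_eq!(bytes.len(), 8);
  }
}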

80  third_party/rust/bytemuck/src/no_uninit.rs (vendored)
@@ -1,80 +0,0 @@
use crate::Pod;
use core::num::{
NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize,
NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
};
/// Marker trait for "plain old data" types with no uninit (or padding) bytes.
///
/// The requirements for this are very similar to [`Pod`],
/// except that it doesn't require that all bit patterns of the type are valid,
/// i.e. it does not require the type to be [`Zeroable`][crate::Zeroable].
/// This limits what you can do with a type of this kind, but also broadens the
/// included types to things like C-style enums. Notably, you can only cast from
/// *immutable* references to a [`NoUninit`] type into *immutable* references of
/// any other type, no casting of mutable references or mutable references to
/// slices etc.
///
/// [`Pod`] is a subset of [`NoUninit`], meaning that any `T: Pod` is also
/// [`NoUninit`] but any `T: NoUninit` is not necessarily [`Pod`]. If possible,
/// prefer implementing [`Pod`] directly. To get more [`Pod`]-like functionality
/// for a type that is only [`NoUninit`], consider also implementing
/// [`CheckedBitPattern`][crate::CheckedBitPattern].
///
/// # Derive
///
/// A `#[derive(NoUninit)]` macro is provided under the `derive` feature flag
/// which will automatically validate the requirements of this trait and
/// implement the trait for you for both enums and structs. This is the
/// recommended method for implementing the trait, however it's also possible to
/// do manually. If you implement it manually, you *must* carefully follow the
/// below safety rules.
///
/// # Safety
///
/// The same as [`Pod`] except we disregard the rule that it must
/// allow any bit pattern (i.e. it does not need to be
/// [`Zeroable`][crate::Zeroable]). Still, this is quite a strong guarantee
/// about a type, so *be careful* when implementing it manually.
///
/// * The type must be inhabited (eg: no
/// [Infallible](core::convert::Infallible)).
/// * The type must not contain any uninit (or padding) bytes, either in the
/// middle or on the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has
/// padding in the middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which
/// has padding on the end).
/// * Structs need to have all fields also be `NoUninit`.
/// * Structs need to be `repr(C)` or `repr(transparent)`. In the case of
/// `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
/// all other rules end up being followed.
/// * Enums need to have an explicit `#[repr(Int)]`
/// * Enums must have only fieldless variants
/// * It is disallowed for types to contain pointer types, `Cell`, `UnsafeCell`,
/// atomics, and any other forms of interior mutability.
/// * More precisely: A shared reference to the type must allow reads, and
/// *only* reads. RustBelt's separation logic is based on the notion that a
/// type is allowed to define a sharing predicate, its own invariant that must
///   hold for shared references, and this predicate is the reasoning that allows
///   it to deal with atomics and cells, etc. We require the sharing predicate to
/// be trivial and permit only read-only access.
/// * There's probably more, don't mess it up (I mean it).
pub unsafe trait NoUninit: Sized + Copy + 'static {}
unsafe impl<T: Pod> NoUninit for T {}
unsafe impl NoUninit for char {}
unsafe impl NoUninit for bool {}
unsafe impl NoUninit for NonZeroU8 {}
unsafe impl NoUninit for NonZeroI8 {}
unsafe impl NoUninit for NonZeroU16 {}
unsafe impl NoUninit for NonZeroI16 {}
unsafe impl NoUninit for NonZeroU32 {}
unsafe impl NoUninit for NonZeroI32 {}
unsafe impl NoUninit for NonZeroU64 {}
unsafe impl NoUninit for NonZeroI64 {}
unsafe impl NoUninit for NonZeroU128 {}
unsafe impl NoUninit for NonZeroI128 {}
unsafe impl NoUninit for NonZeroUsize {}
unsafe impl NoUninit for NonZeroIsize {}
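// Editor's sketch (not part of the vendored upstream file): `Mode` is a
// hypothetical C-style enum. It has no padding, but `2u8` is not a valid bit
// pattern for it, so it can be `NoUninit` without being `Pod`; only immutable
// byte views such as `bytes_of` are possible.
#[cfg(test)]
mod no_uninit_sketch {
  use super::NoUninit;

  #[derive(Clone, Copy)]
  #[repr(u8)]
  enum Mode {
    Off = 0,
    On = 1,
  }
  // Sound: inhabited, fieldless, `#[repr(u8)]`, no uninit bytes.
  unsafe impl NoUninit for Mode {}

  #[test]
  fn enum_to_bytes() {
    assert_eq!(crate::bytes_of(&Mode::Off), &[0]);
    assert_eq!(crate::bytes_of(&Mode::On), &[1]);
  }
}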

135  third_party/rust/bytemuck/src/offset_of.rs (vendored)
@@ -1,135 +0,0 @@
#![forbid(unsafe_code)]
/// Find the offset in bytes of the given `$field` of `$Type`. Requires an
/// already initialized `$instance` value to work with.
///
/// This is similar to the macro from [`memoffset`](https://docs.rs/memoffset),
/// however it uses no `unsafe` code.
///
/// This macro has a 3-argument and 2-argument version.
/// * In the 3-arg version you specify an instance of the type, the type itself,
/// and the field name.
/// * In the 2-arg version the macro will call the [`default`](Default::default)
/// method to make a temporary instance of the type for you.
///
/// The output of this macro is the byte offset of the field (as a `usize`). The
/// calculations of the macro are fixed across the entire program, but if the
/// type used is `repr(Rust)` then they're *not* fixed across compilations or
/// compilers.
///
/// ## Examples
///
/// ### 3-arg Usage
///
/// ```rust
/// # use bytemuck::offset_of;
/// // enums can't derive default, and for this example we don't pick one
/// enum MyExampleEnum {
/// A,
/// B,
/// C,
/// }
///
/// // so now our struct here doesn't have Default
/// #[repr(C)]
/// struct MyNotDefaultType {
/// pub counter: i32,
/// pub some_field: MyExampleEnum,
/// }
///
/// // but we provide an instance of the type and it's all good.
/// let val = MyNotDefaultType { counter: 5, some_field: MyExampleEnum::A };
/// assert_eq!(offset_of!(val, MyNotDefaultType, some_field), 4);
/// ```
///
/// ### 2-arg Usage
///
/// ```rust
/// # use bytemuck::offset_of;
/// #[derive(Default)]
/// #[repr(C)]
/// struct Vertex {
/// pub loc: [f32; 3],
/// pub color: [f32; 3],
/// }
/// // if the type impls Default the macro can make its own default instance.
/// assert_eq!(offset_of!(Vertex, loc), 0);
/// assert_eq!(offset_of!(Vertex, color), 12);
/// ```
///
/// # Usage with `#[repr(packed)]` structs
///
/// Attempting to compute the offset of a `#[repr(packed)]` struct with
/// `bytemuck::offset_of!` requires an `unsafe` block. We hope to relax this in
/// the future, but currently it is required to work around a soundness hole in
/// Rust (See [rust-lang/rust#27060]).
///
/// [rust-lang/rust#27060]: https://github.com/rust-lang/rust/issues/27060
///
/// <p style="background:rgba(255,181,77,0.16);padding:0.75em;">
/// <strong>Warning:</strong> This is only true for versions of bytemuck >
/// 1.4.0. Previous versions of
/// <code style="background:rgba(41,24,0,0.1);">bytemuck::offset_of!</code>
/// will only emit a warning when used on the field of a packed struct in safe
/// code, which can lead to unsoundness.
/// </p>
///
/// For example, the following will fail to compile:
///
/// ```compile_fail
/// #[repr(C, packed)]
/// #[derive(Default)]
/// struct Example {
/// field: u32,
/// }
/// // Doesn't compile:
/// let _offset = bytemuck::offset_of!(Example, field);
/// ```
///
/// While the error message this generates will mention the
/// `safe_packed_borrows` lint, the macro will still fail to compile even if
/// that lint is `#[allow]`ed:
///
/// ```compile_fail
/// # #[repr(C, packed)] #[derive(Default)] struct Example { field: u32 }
/// // Still doesn't compile:
/// #[allow(safe_packed_borrows)]
/// {
/// let _offset = bytemuck::offset_of!(Example, field);
/// }
/// ```
///
/// This *can* be worked around by using `unsafe`, but it is only sound to do so
/// if you can guarantee that taking a reference to the field is sound.
///
/// In practice, this means it only works for fields of align(1) types, or if
/// you know the field's offset in advance (defeating the point of `offset_of`)
/// and can prove that the struct's alignment and the field's offset are enough
/// to prove the field's alignment.
///
/// Once the `raw_ref` macros are available, a future version of this crate will
/// use them to lift the limitations of packed structs. For the duration of the
/// `1.x` version of this crate that will be behind an on-by-default cargo
/// feature (to maintain minimum rust version support).
#[macro_export]
macro_rules! offset_of {
($instance:expr, $Type:path, $field:tt) => {{
#[forbid(safe_packed_borrows)]
{
// This helps us guard against field access going through a Deref impl.
#[allow(clippy::unneeded_field_pattern)]
let $Type { $field: _, .. };
let reference: &$Type = &$instance;
let address = reference as *const _ as usize;
let field_pointer = &reference.$field as *const _ as usize;
// These asserts/unwraps are compiled away at release, and defend against
// the case where somehow a deref impl is still invoked.
let result = field_pointer.checked_sub(address).unwrap();
assert!(result <= $crate::__core::mem::size_of::<$Type>());
result
}
}};
($Type:path, $field:tt) => {{
$crate::offset_of!(<$Type as Default>::default(), $Type, $field)
}};
}

165  third_party/rust/bytemuck/src/pod.rs (vendored)
@@ -1,165 +0,0 @@
use super::*;
/// Marker trait for "plain old data".
///
/// The point of this trait is that once something is marked "plain old data"
/// you can really go to town with the bit fiddling and bit casting. Therefore,
/// it's a relatively strong claim to make about a type. Do not add this to your
/// type casually.
///
/// **Reminder:** The results of casting around bytes between data types are
/// _endian dependent_. Little-endian machines are the most common, but
/// big-endian machines do exist (and big-endian is also used for "network
/// order" bytes).
///
/// ## Safety
///
/// * The type must be inhabited (eg: no
/// [Infallible](core::convert::Infallible)).
/// * The type must allow any bit pattern (eg: no `bool` or `char`, which have
/// illegal bit patterns).
/// * The type must not contain any uninit (or padding) bytes, either in the
/// middle or on the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has
/// padding in the middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which
/// has padding on the end).
/// * The type needs to have all fields also be `Pod`.
/// * The type needs to be `repr(C)` or `repr(transparent)`. In the case of
/// `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
/// all other rules end up being followed.
/// * It is disallowed for types to contain pointer types, `Cell`, `UnsafeCell`,
/// atomics, and any other forms of interior mutability.
/// * More precisely: A shared reference to the type must allow reads, and
/// *only* reads. RustBelt's separation logic is based on the notion that a
/// type is allowed to define a sharing predicate, its own invariant that must
///   hold for shared references, and this predicate is the reasoning that allows
///   it to deal with atomics and cells, etc. We require the sharing predicate to
/// be trivial and permit only read-only access.
pub unsafe trait Pod: Zeroable + Copy + 'static {}
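// Editor's sketch (not part of the vendored upstream file): `Rgba` is a
// hypothetical pixel type. Four `u8` fields in a `#[repr(C)]` struct leave no
// padding and every bit pattern is valid, so a manual impl satisfies the rules
// above (the `derive` feature is the usual route).
#[cfg(test)]
mod pod_sketch {
  use crate::{Pod, Zeroable};

  #[repr(C)]
  #[derive(Clone, Copy)]
  struct Rgba {
    r: u8,
    g: u8,
    b: u8,
    a: u8,
  }
  unsafe impl Zeroable for Rgba {}
  unsafe impl Pod for Rgba {}

  #[test]
  fn pixels_to_bytes() {
    let px = [Rgba { r: 1, g: 2, b: 3, a: 4 }];
    assert_eq!(crate::cast_slice::<Rgba, u8>(&px), &[1, 2, 3, 4]);
  }
}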
unsafe impl Pod for () {}
unsafe impl Pod for u8 {}
unsafe impl Pod for i8 {}
unsafe impl Pod for u16 {}
unsafe impl Pod for i16 {}
unsafe impl Pod for u32 {}
unsafe impl Pod for i32 {}
unsafe impl Pod for u64 {}
unsafe impl Pod for i64 {}
unsafe impl Pod for usize {}
unsafe impl Pod for isize {}
unsafe impl Pod for u128 {}
unsafe impl Pod for i128 {}
unsafe impl Pod for f32 {}
unsafe impl Pod for f64 {}
unsafe impl<T: Pod> Pod for Wrapping<T> {}
#[cfg(feature = "unsound_ptr_pod_impl")]
#[cfg_attr(
feature = "nightly_docs",
doc(cfg(feature = "unsound_ptr_pod_impl"))
)]
unsafe impl<T: 'static> Pod for *mut T {}
#[cfg(feature = "unsound_ptr_pod_impl")]
#[cfg_attr(
feature = "nightly_docs",
doc(cfg(feature = "unsound_ptr_pod_impl"))
)]
unsafe impl<T: 'static> Pod for *const T {}
#[cfg(feature = "unsound_ptr_pod_impl")]
#[cfg_attr(
feature = "nightly_docs",
doc(cfg(feature = "unsound_ptr_pod_impl"))
)]
unsafe impl<T: 'static> PodInOption for NonNull<T> {}
unsafe impl<T: ?Sized + 'static> Pod for PhantomData<T> {}
unsafe impl Pod for PhantomPinned {}
unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
// Note(Lokathor): MaybeUninit can NEVER be Pod.
#[cfg(feature = "min_const_generics")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "min_const_generics")))]
unsafe impl<T, const N: usize> Pod for [T; N] where T: Pod {}
#[cfg(not(feature = "min_const_generics"))]
impl_unsafe_marker_for_array!(
Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
512, 1024, 2048, 4096
);
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
unsafe impl Pod for wasm32::{v128}
);
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
unsafe impl Pod for aarch64::{
float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
uint8x8x3_t, uint8x8x4_t,
}
);
impl_unsafe_marker_for_simd!(
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::{
__m128i, __m128, __m128d,
__m256i, __m256, __m256d,
}
);
impl_unsafe_marker_for_simd!(
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::{
__m128i, __m128, __m128d,
__m256i, __m256, __m256d,
}
);
#[cfg(feature = "nightly_portable_simd")]
#[cfg_attr(
feature = "nightly_docs",
doc(cfg(feature = "nightly_portable_simd"))
)]
unsafe impl<T, const N: usize> Pod for core::simd::Simd<T, N>
where
T: core::simd::SimdElement + Pod,
core::simd::LaneCount<N>: core::simd::SupportedLaneCount,
{
}
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
unsafe impl Pod for x86::{
__m128bh, __m256bh, __m512,
__m512bh, __m512d, __m512i,
}
);
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
unsafe impl Pod for x86_64::{
__m128bh, __m256bh, __m512,
__m512bh, __m512d, __m512i,
}
);

27  third_party/rust/bytemuck/src/pod_in_option.rs (vendored)
@@ -1,27 +0,0 @@
use super::*;
// Note(Lokathor): This is the neat part!!
unsafe impl<T: PodInOption> Pod for Option<T> {}
/// Trait for types which are [Pod](Pod) when wrapped in
/// [Option](core::option::Option).
///
/// ## Safety
///
/// * `Option<T>` must uphold the same invariants as [Pod](Pod).
/// * **Reminder:** pointers are **not** pod! **Do not** mix this trait with a
/// newtype over [NonNull](core::ptr::NonNull).
pub unsafe trait PodInOption: ZeroableInOption + Copy + 'static {}
unsafe impl PodInOption for NonZeroI8 {}
unsafe impl PodInOption for NonZeroI16 {}
unsafe impl PodInOption for NonZeroI32 {}
unsafe impl PodInOption for NonZeroI64 {}
unsafe impl PodInOption for NonZeroI128 {}
unsafe impl PodInOption for NonZeroIsize {}
unsafe impl PodInOption for NonZeroU8 {}
unsafe impl PodInOption for NonZeroU16 {}
unsafe impl PodInOption for NonZeroU32 {}
unsafe impl PodInOption for NonZeroU64 {}
unsafe impl PodInOption for NonZeroU128 {}
unsafe impl PodInOption for NonZeroUsize {}

288  third_party/rust/bytemuck/src/transparent.rs (vendored)
@@ -1,288 +0,0 @@
use super::*;
/// A trait which indicates that a type is a `#[repr(transparent)]` wrapper
/// around the `Inner` value.
///
/// This allows safely copy transmuting between the `Inner` type and the
/// `TransparentWrapper` type. Functions like `wrap_{}` convert from the inner
/// type to the wrapper type and `peel_{}` functions do the inverse conversion
/// from the wrapper type to the inner type. We deliberately do not call the
/// wrapper-removing methods "unwrap" because at this point that word is too
/// strongly tied to the Option/Result methods.
///
/// # Safety
///
/// The safety contract of `TransparentWrapper` is relatively simple:
///
/// For a given `Wrapper` which implements `TransparentWrapper<Inner>`:
///
/// 1. `Wrapper` must be a wrapper around `Inner` with an identical data
///    representation. This means that it must be a
///    `#[repr(transparent)]` struct which contains either a field of type
/// `Inner` (or a field of some other transparent wrapper for `Inner`) as
/// the only non-ZST field.
///
/// 2. Any fields *other* than the `Inner` field must be trivially constructable
/// ZSTs, for example `PhantomData`, `PhantomPinned`, etc. (When deriving
/// `TransparentWrapper` on a type with ZST fields, the ZST fields must be
/// [`Zeroable`]).
///
/// 3. The `Wrapper` may not impose additional alignment requirements over
/// `Inner`.
/// - Note: this is currently guaranteed by `repr(transparent)`, but there
/// have been discussions of lifting it, so it's stated here explicitly.
///
/// 4. All functions on `TransparentWrapper` **may not** be overridden.
///
/// ## Caveats
///
/// If the wrapper imposes additional constraints upon the inner type which are
/// required for safety, it's responsible for ensuring those still hold -- this
/// generally requires preventing access to instances of the inner type, as
/// implementing `TransparentWrapper<U> for T` means anybody can call
/// `T::cast_ref(any_instance_of_u)`.
///
/// For example, it would be invalid for `str` to implement `TransparentWrapper`
/// around `[u8]` because of this.
///
/// # Examples
///
/// ## Basic
///
/// ```
/// use bytemuck::TransparentWrapper;
/// # #[derive(Default)]
/// # struct SomeStruct(u32);
///
/// #[repr(transparent)]
/// struct MyWrapper(SomeStruct);
///
/// unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
///
/// // interpret a reference to &SomeStruct as a &MyWrapper
/// let thing = SomeStruct::default();
/// let inner_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
///
/// // Works with &mut too.
/// let mut mut_thing = SomeStruct::default();
/// let inner_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
///
/// # let _ = (inner_ref, inner_mut); // silence warnings
/// ```
///
/// ## Use with dynamically sized types
///
/// ```
/// use bytemuck::TransparentWrapper;
///
/// #[repr(transparent)]
/// struct Slice<T>([T]);
///
/// unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
///
/// let s = Slice::wrap_ref(&[1u32, 2, 3]);
/// assert_eq!(&s.0, &[1, 2, 3]);
///
/// let mut buf = [1, 2, 3u8];
/// let sm = Slice::wrap_mut(&mut buf);
/// ```
///
/// ## Deriving
///
/// When deriving, the non-wrapped fields must uphold all the normal requirements,
/// and must also be `Zeroable`.
///
#[cfg_attr(feature = "derive", doc = "```")]
#[cfg_attr(
not(feature = "derive"),
doc = "```ignore
// This example requires the `derive` feature."
)]
/// use bytemuck::TransparentWrapper;
/// use std::marker::PhantomData;
///
/// #[derive(TransparentWrapper)]
/// #[repr(transparent)]
/// #[transparent(usize)]
/// struct Wrapper<T: ?Sized>(usize, PhantomData<T>); // PhantomData<T> implements Zeroable for all T
/// ```
///
/// Here, an error will occur, because `MyZst` does not implement `Zeroable`.
///
#[cfg_attr(feature = "derive", doc = "```compile_fail")]
#[cfg_attr(
not(feature = "derive"),
doc = "```ignore
// This example requires the `derive` feature."
)]
/// use bytemuck::TransparentWrapper;
/// struct MyZst;
///
/// #[derive(TransparentWrapper)]
/// #[repr(transparent)]
/// #[transparent(usize)]
/// struct Wrapper(usize, MyZst); // MyZst does not implement Zeroable
/// ```
pub unsafe trait TransparentWrapper<Inner: ?Sized> {
/// Convert the inner type into the wrapper type.
#[inline]
fn wrap(s: Inner) -> Self
where
Self: Sized,
Inner: Sized,
{
// SAFETY: The unsafe contract requires that `Self` and `Inner` have
// identical representations.
unsafe { transmute!(s) }
}
/// Convert a reference to the inner type into a reference to the wrapper
/// type.
#[inline]
fn wrap_ref(s: &Inner) -> &Self {
unsafe {
assert!(size_of::<*const Inner>() == size_of::<*const Self>());
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the sizes are unspecified.
//
// SAFETY: The unsafe contract requires that these two have
// identical representations.
let inner_ptr = s as *const Inner;
let wrapper_ptr: *const Self = transmute!(inner_ptr);
&*wrapper_ptr
}
}
/// Convert a mutable reference to the inner type into a mutable reference to
/// the wrapper type.
#[inline]
fn wrap_mut(s: &mut Inner) -> &mut Self {
unsafe {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the sizes are unspecified.
//
// SAFETY: The unsafe contract requires that these two have
// identical representations.
let inner_ptr = s as *mut Inner;
let wrapper_ptr: *mut Self = transmute!(inner_ptr);
&mut *wrapper_ptr
}
}
/// Convert a slice to the inner type into a slice to the wrapper type.
#[inline]
fn wrap_slice(s: &[Inner]) -> &[Self]
where
Self: Sized,
Inner: Sized,
{
unsafe {
assert!(size_of::<*const Inner>() == size_of::<*const Self>());
assert!(align_of::<*const Inner>() == align_of::<*const Self>());
// SAFETY: The unsafe contract requires that these two have
// identical representations (size and alignment).
core::slice::from_raw_parts(s.as_ptr() as *const Self, s.len())
}
}
/// Convert a mutable slice to the inner type into a mutable slice to the
/// wrapper type.
#[inline]
fn wrap_slice_mut(s: &mut [Inner]) -> &mut [Self]
where
Self: Sized,
Inner: Sized,
{
unsafe {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
assert!(align_of::<*mut Inner>() == align_of::<*mut Self>());
// SAFETY: The unsafe contract requires that these two have
// identical representations (size and alignment).
core::slice::from_raw_parts_mut(s.as_mut_ptr() as *mut Self, s.len())
}
}
/// Convert the wrapper type into the inner type.
#[inline]
fn peel(s: Self) -> Inner
where
Self: Sized,
Inner: Sized,
{
unsafe { transmute!(s) }
}
/// Convert a reference to the wrapper type into a reference to the inner
/// type.
#[inline]
fn peel_ref(s: &Self) -> &Inner {
unsafe {
assert!(size_of::<*const Inner>() == size_of::<*const Self>());
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the sizes are unspecified.
//
// SAFETY: The unsafe contract requires that these two have
// identical representations.
let wrapper_ptr = s as *const Self;
let inner_ptr: *const Inner = transmute!(wrapper_ptr);
&*inner_ptr
}
}
/// Convert a mutable reference to the wrapper type into a mutable reference
/// to the inner type.
#[inline]
fn peel_mut(s: &mut Self) -> &mut Inner {
unsafe {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
// A pointer cast doesn't work here because rustc can't tell that
// the vtables match (because of the `?Sized` restriction relaxation).
// A `transmute` doesn't work because the sizes are unspecified.
//
// SAFETY: The unsafe contract requires that these two have
// identical representations.
let wrapper_ptr = s as *mut Self;
let inner_ptr: *mut Inner = transmute!(wrapper_ptr);
&mut *inner_ptr
}
}
/// Convert a slice to the wrapped type into a slice to the inner type.
#[inline]
fn peel_slice(s: &[Self]) -> &[Inner]
where
Self: Sized,
Inner: Sized,
{
unsafe {
assert!(size_of::<*const Inner>() == size_of::<*const Self>());
assert!(align_of::<*const Inner>() == align_of::<*const Self>());
// SAFETY: The unsafe contract requires that these two have
// identical representations (size and alignment).
core::slice::from_raw_parts(s.as_ptr() as *const Inner, s.len())
}
}
/// Convert a mutable slice to the wrapped type into a mutable slice to the
/// inner type.
#[inline]
fn peel_slice_mut(s: &mut [Self]) -> &mut [Inner]
where
Self: Sized,
Inner: Sized,
{
unsafe {
assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
assert!(align_of::<*mut Inner>() == align_of::<*mut Self>());
// SAFETY: The unsafe contract requires that these two have
// identical representations (size and alignment).
core::slice::from_raw_parts_mut(s.as_mut_ptr() as *mut Inner, s.len())
}
}
}
unsafe impl<T> TransparentWrapper<T> for core::num::Wrapping<T> {}
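// Editor's sketch (not part of the vendored upstream file): the `Wrapping`
// impl directly above in action. `wrap_slice` and `peel_slice` are zero-copy
// because wrapper and inner share one representation.
#[cfg(test)]
mod transparent_wrapper_sketch {
  use super::TransparentWrapper;
  use core::num::Wrapping;

  #[test]
  fn wrap_and_peel_slices() {
    let raw = [1u32, 2, 3];
    let wrapped: &[Wrapping<u32>] =
      <Wrapping<u32> as TransparentWrapper<u32>>::wrap_slice(&raw);
    assert_eq!(wrapped[0], Wrapping(1));
    let peeled: &[u32] =
      <Wrapping<u32> as TransparentWrapper<u32>>::peel_slice(wrapped);
    assert_eq!(peeled, &raw[..]);
  }
}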

245  third_party/rust/bytemuck/src/zeroable.rs (vendored)
@@ -1,245 +0,0 @@
use super::*;
/// Trait for types that can be safely created with
/// [`zeroed`](core::mem::zeroed).
///
/// An all-zeroes value may or may not be the same value as the
/// [Default](core::default::Default) value of the type.
///
/// ## Safety
///
/// * Your type must be inhabited (eg: no
/// [Infallible](core::convert::Infallible)).
/// * Your type must be allowed to be an "all zeroes" bit pattern (eg: no
/// [`NonNull<T>`](core::ptr::NonNull)).
///
/// ## Features
///
/// Some `impl`s are feature gated due to the MSRV policy:
///
/// * `MaybeUninit<T>` was not available in 1.34.0, but is available under the
/// `zeroable_maybe_uninit` feature flag.
/// * `Atomic*` types require Rust 1.60.0 or later to work on certain platforms,
///   but are available under the `zeroable_atomics` feature flag.
/// * `[T; N]` for arbitrary `N` requires the `min_const_generics` feature flag.
pub unsafe trait Zeroable: Sized {
/// Calls [`zeroed`](core::mem::zeroed).
///
/// This is a trait method so that you can write `MyType::zeroed()` in your
/// code. It is a contract of this trait that if you implement it on your type
/// you **must not** override this method.
#[inline]
fn zeroed() -> Self {
unsafe { core::mem::zeroed() }
}
}
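// Editor's sketch (not part of the vendored upstream file): `zeroed()` is just
// `core::mem::zeroed()` behind the trait bound; the bound is what makes the
// call safe for these types.
#[cfg(test)]
mod zeroable_sketch {
  use super::Zeroable;

  #[test]
  fn zeroed_primitives_and_tuples() {
    let pos: [f32; 3] = Zeroable::zeroed();
    let pair: (u32, bool) = Zeroable::zeroed();
    assert_eq!(pos, [0.0f32; 3]);
    assert_eq!(pair, (0u32, false));
  }
}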
unsafe impl Zeroable for () {}
unsafe impl Zeroable for bool {}
unsafe impl Zeroable for char {}
unsafe impl Zeroable for u8 {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u64 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for u128 {}
unsafe impl Zeroable for i128 {}
unsafe impl Zeroable for f32 {}
unsafe impl Zeroable for f64 {}
unsafe impl<T: Zeroable> Zeroable for Wrapping<T> {}
unsafe impl<T: Zeroable> Zeroable for core::cmp::Reverse<T> {}
// Note: we can't implement this for all `T: ?Sized` types because it would
// create NULL pointers for vtables.
// Maybe one day this could be changed to be implemented for
// `T: ?Sized where <T as core::ptr::Pointee>::Metadata: Zeroable`.
unsafe impl<T> Zeroable for *mut T {}
unsafe impl<T> Zeroable for *const T {}
unsafe impl<T> Zeroable for *mut [T] {}
unsafe impl<T> Zeroable for *const [T] {}
unsafe impl Zeroable for *mut str {}
unsafe impl Zeroable for *const str {}
unsafe impl<T: ?Sized> Zeroable for PhantomData<T> {}
unsafe impl Zeroable for PhantomPinned {}
unsafe impl<T: Zeroable> Zeroable for ManuallyDrop<T> {}
unsafe impl<T: Zeroable> Zeroable for core::cell::UnsafeCell<T> {}
unsafe impl<T: Zeroable> Zeroable for core::cell::Cell<T> {}
#[cfg(feature = "zeroable_atomics")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "zeroable_atomics")))]
mod atomic_impls {
use super::Zeroable;
#[cfg(target_has_atomic = "8")]
unsafe impl Zeroable for core::sync::atomic::AtomicBool {}
#[cfg(target_has_atomic = "8")]
unsafe impl Zeroable for core::sync::atomic::AtomicU8 {}
#[cfg(target_has_atomic = "8")]
unsafe impl Zeroable for core::sync::atomic::AtomicI8 {}
#[cfg(target_has_atomic = "16")]
unsafe impl Zeroable for core::sync::atomic::AtomicU16 {}
#[cfg(target_has_atomic = "16")]
unsafe impl Zeroable for core::sync::atomic::AtomicI16 {}
#[cfg(target_has_atomic = "32")]
unsafe impl Zeroable for core::sync::atomic::AtomicU32 {}
#[cfg(target_has_atomic = "32")]
unsafe impl Zeroable for core::sync::atomic::AtomicI32 {}
#[cfg(target_has_atomic = "64")]
unsafe impl Zeroable for core::sync::atomic::AtomicU64 {}
#[cfg(target_has_atomic = "64")]
unsafe impl Zeroable for core::sync::atomic::AtomicI64 {}
#[cfg(target_has_atomic = "ptr")]
unsafe impl Zeroable for core::sync::atomic::AtomicUsize {}
#[cfg(target_has_atomic = "ptr")]
unsafe impl Zeroable for core::sync::atomic::AtomicIsize {}
#[cfg(target_has_atomic = "ptr")]
unsafe impl<T> Zeroable for core::sync::atomic::AtomicPtr<T> {}
}
#[cfg(feature = "zeroable_maybe_uninit")]
#[cfg_attr(
feature = "nightly_docs",
doc(cfg(feature = "zeroable_maybe_uninit"))
)]
unsafe impl<T> Zeroable for core::mem::MaybeUninit<T> {}
unsafe impl<A: Zeroable> Zeroable for (A,) {}
unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
for (A, B, C, D)
{
}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
Zeroable for (A, B, C, D, E)
{
}
unsafe impl<
A: Zeroable,
B: Zeroable,
C: Zeroable,
D: Zeroable,
E: Zeroable,
F: Zeroable,
> Zeroable for (A, B, C, D, E, F)
{
}
unsafe impl<
A: Zeroable,
B: Zeroable,
C: Zeroable,
D: Zeroable,
E: Zeroable,
F: Zeroable,
G: Zeroable,
> Zeroable for (A, B, C, D, E, F, G)
{
}
unsafe impl<
A: Zeroable,
B: Zeroable,
C: Zeroable,
D: Zeroable,
E: Zeroable,
F: Zeroable,
G: Zeroable,
H: Zeroable,
> Zeroable for (A, B, C, D, E, F, G, H)
{
}
#[cfg(feature = "min_const_generics")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "min_const_generics")))]
unsafe impl<T, const N: usize> Zeroable for [T; N] where T: Zeroable {}
#[cfg(not(feature = "min_const_generics"))]
impl_unsafe_marker_for_array!(
Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
512, 1024, 2048, 4096
);
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
unsafe impl Zeroable for wasm32::{v128}
);
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
unsafe impl Zeroable for aarch64::{
float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
uint8x8x3_t, uint8x8x4_t,
}
);
impl_unsafe_marker_for_simd!(
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::{
__m128i, __m128, __m128d,
__m256i, __m256, __m256d,
}
);
impl_unsafe_marker_for_simd!(
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::{
__m128i, __m128, __m128d,
__m256i, __m256, __m256d,
}
);
#[cfg(feature = "nightly_portable_simd")]
#[cfg_attr(
feature = "nightly_docs",
doc(cfg(feature = "nightly_portable_simd"))
)]
unsafe impl<T, const N: usize> Zeroable for core::simd::Simd<T, N>
where
T: core::simd::SimdElement + Zeroable,
core::simd::LaneCount<N>: core::simd::SupportedLaneCount,
{
}
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
unsafe impl Zeroable for x86::{
__m128bh, __m256bh, __m512,
__m512bh, __m512d, __m512i,
}
);
impl_unsafe_marker_for_simd!(
#[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
unsafe impl Zeroable for x86_64::{
__m128bh, __m256bh, __m512,
__m512bh, __m512d, __m512i,
}
);


@ -1,35 +0,0 @@
use super::*;
// Note(Lokathor): This is the neat part!!
unsafe impl<T: ZeroableInOption> Zeroable for Option<T> {}
/// Trait for types which are [Zeroable](Zeroable) when wrapped in
/// [Option](core::option::Option).
///
/// ## Safety
///
/// * `Option<YourType>` must uphold the same invariants as
/// [Zeroable](Zeroable).
pub unsafe trait ZeroableInOption: Sized {}
unsafe impl ZeroableInOption for NonZeroI8 {}
unsafe impl ZeroableInOption for NonZeroI16 {}
unsafe impl ZeroableInOption for NonZeroI32 {}
unsafe impl ZeroableInOption for NonZeroI64 {}
unsafe impl ZeroableInOption for NonZeroI128 {}
unsafe impl ZeroableInOption for NonZeroIsize {}
unsafe impl ZeroableInOption for NonZeroU8 {}
unsafe impl ZeroableInOption for NonZeroU16 {}
unsafe impl ZeroableInOption for NonZeroU32 {}
unsafe impl ZeroableInOption for NonZeroU64 {}
unsafe impl ZeroableInOption for NonZeroU128 {}
unsafe impl ZeroableInOption for NonZeroUsize {}
// Note: this does not create a NULL vtable because we get `None` anyway.
unsafe impl<T: ?Sized> ZeroableInOption for NonNull<T> {}
unsafe impl<T: ?Sized> ZeroableInOption for &'_ T {}
unsafe impl<T: ?Sized> ZeroableInOption for &'_ mut T {}
#[cfg(feature = "extern_crate_alloc")]
#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_alloc")))]
unsafe impl<T: ?Sized> ZeroableInOption for alloc::boxed::Box<T> {}
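// Editor's note: illustrative sketch only, not part of the vendored source.
// Thanks to the blanket impl at the top of this file, `Option<T>` is
// `Zeroable` whenever `T: ZeroableInOption`, and the all-zeroes bit pattern of
// such an `Option` is `None` (e.g. `NonZeroU8` uses 0 as its `None` niche).
#[cfg(test)]
mod editor_zeroable_in_option_sketch {
  use crate::Zeroable;
  use core::num::NonZeroU8;

  #[test]
  fn zeroed_option_is_none() {
    let x: Option<NonZeroU8> = Zeroable::zeroed();
    assert!(x.is_none());
  }
}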


@ -1,12 +0,0 @@
#[test]
pub fn test_cast_array() {
let x = [0u32, 1u32, 2u32];
let _: [u16; 6] = bytemuck::cast(x);
}
#[cfg(feature = "min_const_generics")]
#[test]
pub fn test_cast_long_array() {
let x = [0u32; 65];
let _: [u16; 130] = bytemuck::cast(x);
}


@ -1,197 +0,0 @@
#![allow(clippy::unnecessary_cast)]
#![allow(clippy::manual_slice_size_calculation)]
use core::mem::size_of;
use bytemuck::*;
#[test]
fn test_try_cast_slice() {
// some align4 data
let u32_slice: &[u32] = &[4, 5, 6];
// the same data as align1
let the_bytes: &[u8] = try_cast_slice(u32_slice).unwrap();
assert_eq!(
u32_slice.as_ptr() as *const u32 as usize,
the_bytes.as_ptr() as *const u8 as usize
);
assert_eq!(
u32_slice.len() * size_of::<u32>(),
the_bytes.len() * size_of::<u8>()
);
// by taking one byte off the front, we're definitely mis-aligned for u32.
let mis_aligned_bytes = &the_bytes[1..];
assert_eq!(
try_cast_slice::<u8, u32>(mis_aligned_bytes),
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
);
// by taking one byte off the end, we're aligned but would have slop bytes for
// u32
let the_bytes_len_minus1 = the_bytes.len() - 1;
let slop_bytes = &the_bytes[..the_bytes_len_minus1];
assert_eq!(
try_cast_slice::<u8, u32>(slop_bytes),
Err(PodCastError::OutputSliceWouldHaveSlop)
);
// if we don't mess with it we can up-alignment cast
try_cast_slice::<u8, u32>(the_bytes).unwrap();
}
#[test]
fn test_try_cast_slice_mut() {
// some align4 data
let u32_slice: &mut [u32] = &mut [4, 5, 6];
let u32_len = u32_slice.len();
let u32_ptr = u32_slice.as_ptr();
// the same data as align1
let the_bytes: &mut [u8] = try_cast_slice_mut(u32_slice).unwrap();
let the_bytes_len = the_bytes.len();
let the_bytes_ptr = the_bytes.as_ptr();
assert_eq!(
u32_ptr as *const u32 as usize,
the_bytes_ptr as *const u8 as usize
);
assert_eq!(u32_len * size_of::<u32>(), the_bytes_len * size_of::<u8>());
// by taking one byte off the front, we're definitely mis-aligned for u32.
let mis_aligned_bytes = &mut the_bytes[1..];
assert_eq!(
try_cast_slice_mut::<u8, u32>(mis_aligned_bytes),
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
);
// by taking one byte off the end, we're aligned but would have slop bytes for
// u32
let the_bytes_len_minus1 = the_bytes.len() - 1;
let slop_bytes = &mut the_bytes[..the_bytes_len_minus1];
assert_eq!(
try_cast_slice_mut::<u8, u32>(slop_bytes),
Err(PodCastError::OutputSliceWouldHaveSlop)
);
// if we don't mess with it we can up-alignment cast
try_cast_slice_mut::<u8, u32>(the_bytes).unwrap();
}
#[test]
fn test_types() {
let _: i32 = cast(1.0_f32);
let _: &mut i32 = cast_mut(&mut 1.0_f32);
let _: &i32 = cast_ref(&1.0_f32);
let _: &[i32] = cast_slice(&[1.0_f32]);
let _: &mut [i32] = cast_slice_mut(&mut [1.0_f32]);
//
let _: Result<i32, PodCastError> = try_cast(1.0_f32);
let _: Result<&mut i32, PodCastError> = try_cast_mut(&mut 1.0_f32);
let _: Result<&i32, PodCastError> = try_cast_ref(&1.0_f32);
let _: Result<&[i32], PodCastError> = try_cast_slice(&[1.0_f32]);
let _: Result<&mut [i32], PodCastError> = try_cast_slice_mut(&mut [1.0_f32]);
}
#[test]
fn test_bytes_of() {
assert_eq!(bytes_of(&0xaabbccdd_u32), &0xaabbccdd_u32.to_ne_bytes());
assert_eq!(
bytes_of_mut(&mut 0xaabbccdd_u32),
&mut 0xaabbccdd_u32.to_ne_bytes()
);
let mut a = 0xaabbccdd_u32;
let a_addr = &a as *const _ as usize;
// ensure addresses match.
assert_eq!(bytes_of(&a).as_ptr() as usize, a_addr);
assert_eq!(bytes_of_mut(&mut a).as_ptr() as usize, a_addr);
}
#[test]
fn test_try_from_bytes() {
let u32s = [0xaabbccdd, 0x11223344_u32];
let bytes = bytemuck::cast_slice::<u32, u8>(&u32s);
assert_eq!(try_from_bytes::<u32>(&bytes[..4]), Ok(&u32s[0]));
assert_eq!(
try_from_bytes::<u32>(&bytes[..5]),
Err(PodCastError::SizeMismatch)
);
assert_eq!(
try_from_bytes::<u32>(&bytes[..3]),
Err(PodCastError::SizeMismatch)
);
assert_eq!(
try_from_bytes::<u32>(&bytes[1..5]),
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
);
}
#[test]
fn test_try_from_bytes_mut() {
let mut abcd = 0xaabbccdd;
let mut u32s = [abcd, 0x11223344_u32];
let bytes = bytemuck::cast_slice_mut::<u32, u8>(&mut u32s);
assert_eq!(try_from_bytes_mut::<u32>(&mut bytes[..4]), Ok(&mut abcd));
assert_eq!(try_from_bytes_mut::<u32>(&mut bytes[..4]), Ok(&mut abcd));
assert_eq!(
try_from_bytes_mut::<u32>(&mut bytes[..5]),
Err(PodCastError::SizeMismatch)
);
assert_eq!(
try_from_bytes_mut::<u32>(&mut bytes[..3]),
Err(PodCastError::SizeMismatch)
);
assert_eq!(
try_from_bytes::<u32>(&bytes[1..5]),
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
);
}
#[test]
fn test_from_bytes() {
let abcd = 0xaabbccdd_u32;
let aligned_bytes = bytemuck::bytes_of(&abcd);
assert_eq!(from_bytes::<u32>(aligned_bytes), &abcd);
assert!(core::ptr::eq(from_bytes(aligned_bytes), &abcd));
}
#[test]
fn test_from_bytes_mut() {
let mut a = 0xaabbccdd_u32;
let a_addr = &a as *const _ as usize;
let aligned_bytes = bytemuck::bytes_of_mut(&mut a);
assert_eq!(*from_bytes_mut::<u32>(aligned_bytes), 0xaabbccdd_u32);
assert_eq!(
from_bytes_mut::<u32>(aligned_bytes) as *const u32 as usize,
a_addr
);
}
// like #[should_panic], but can be a part of another test, instead of requiring
// it to be its own test.
macro_rules! should_panic {
($ex:expr) => {
assert!(
std::panic::catch_unwind(|| {
let _ = $ex;
})
.is_err(),
concat!("should have panicked: `", stringify!($ex), "`")
);
};
}
#[test]
fn test_panics() {
should_panic!(cast_slice::<u8, u32>(&[1u8, 2u8]));
should_panic!(cast_slice_mut::<u8, u32>(&mut [1u8, 2u8]));
should_panic!(from_bytes::<u32>(&[1u8, 2]));
should_panic!(from_bytes::<u32>(&[1u8, 2, 3, 4, 5]));
should_panic!(from_bytes_mut::<u32>(&mut [1u8, 2]));
should_panic!(from_bytes_mut::<u32>(&mut [1u8, 2, 3, 4, 5]));
// use cast_slice on some u32s to get some align>=4 bytes, so we can know
// we'll give from_bytes unaligned ones.
let aligned_bytes = bytemuck::cast_slice::<u32, u8>(&[0, 0]);
should_panic!(from_bytes::<u32>(&aligned_bytes[1..5]));
}


@ -1,419 +0,0 @@
#![allow(clippy::unnecessary_cast)]
#![allow(clippy::manual_slice_size_calculation)]
use core::{
mem::size_of,
num::{NonZeroU32, NonZeroU8},
};
use bytemuck::{checked::CheckedCastError, *};
#[test]
fn test_try_cast_slice() {
// some align4 data
let nonzero_u32_slice: &[NonZeroU32] = &[
NonZeroU32::new(4).unwrap(),
NonZeroU32::new(5).unwrap(),
NonZeroU32::new(6).unwrap(),
];
// contains bytes with invalid bitpattern for NonZeroU8
assert_eq!(
checked::try_cast_slice::<NonZeroU32, NonZeroU8>(nonzero_u32_slice),
Err(CheckedCastError::InvalidBitPattern)
);
// the same data as align1
let the_bytes: &[u8] = checked::try_cast_slice(nonzero_u32_slice).unwrap();
assert_eq!(
nonzero_u32_slice.as_ptr() as *const NonZeroU32 as usize,
the_bytes.as_ptr() as *const u8 as usize
);
assert_eq!(
nonzero_u32_slice.len() * size_of::<NonZeroU32>(),
the_bytes.len() * size_of::<u8>()
);
// by taking one byte off the front, we're definitely mis-aligned for
// NonZeroU32.
let mis_aligned_bytes = &the_bytes[1..];
assert_eq!(
checked::try_cast_slice::<u8, NonZeroU32>(mis_aligned_bytes),
Err(CheckedCastError::PodCastError(
PodCastError::TargetAlignmentGreaterAndInputNotAligned
))
);
// by taking one byte off the end, we're aligned but would have slop bytes for
// NonZeroU32
let the_bytes_len_minus1 = the_bytes.len() - 1;
let slop_bytes = &the_bytes[..the_bytes_len_minus1];
assert_eq!(
checked::try_cast_slice::<u8, NonZeroU32>(slop_bytes),
Err(CheckedCastError::PodCastError(PodCastError::OutputSliceWouldHaveSlop))
);
// if we don't mess with it we can up-alignment cast
checked::try_cast_slice::<u8, NonZeroU32>(the_bytes).unwrap();
}
#[test]
fn test_try_cast_slice_mut() {
// some align4 data
let u32_slice: &mut [u32] = &mut [4, 5, 6];
// contains bytes with invalid bitpattern for NonZeroU8
assert_eq!(
checked::try_cast_slice_mut::<u32, NonZeroU8>(u32_slice),
Err(CheckedCastError::InvalidBitPattern)
);
// some align4 data
let u32_slice: &mut [u32] = &mut [0x4444_4444, 0x5555_5555, 0x6666_6666];
let u32_len = u32_slice.len();
let u32_ptr = u32_slice.as_ptr();
// the same data as align1, nonzero bytes
let the_nonzero_bytes: &mut [NonZeroU8] =
checked::try_cast_slice_mut(u32_slice).unwrap();
let the_nonzero_bytes_len = the_nonzero_bytes.len();
let the_nonzero_bytes_ptr = the_nonzero_bytes.as_ptr();
assert_eq!(
u32_ptr as *const u32 as usize,
the_nonzero_bytes_ptr as *const NonZeroU8 as usize
);
assert_eq!(
u32_len * size_of::<u32>(),
the_nonzero_bytes_len * size_of::<NonZeroU8>()
);
// the same data as align1
let the_bytes: &mut [u8] = checked::try_cast_slice_mut(u32_slice).unwrap();
let the_bytes_len = the_bytes.len();
let the_bytes_ptr = the_bytes.as_ptr();
assert_eq!(
u32_ptr as *const u32 as usize,
the_bytes_ptr as *const u8 as usize
);
assert_eq!(
u32_len * size_of::<u32>(),
the_bytes_len * size_of::<NonZeroU8>()
);
// by taking one byte off the front, we're definitely mis-aligned for u32.
let mis_aligned_bytes = &mut the_bytes[1..];
assert_eq!(
checked::try_cast_slice_mut::<u8, NonZeroU32>(mis_aligned_bytes),
Err(CheckedCastError::PodCastError(
PodCastError::TargetAlignmentGreaterAndInputNotAligned
))
);
// by taking one byte off the end, we're aligned but would have slop bytes for
// NonZeroU32
let the_bytes_len_minus1 = the_bytes.len() - 1;
let slop_bytes = &mut the_bytes[..the_bytes_len_minus1];
assert_eq!(
checked::try_cast_slice_mut::<u8, NonZeroU32>(slop_bytes),
Err(CheckedCastError::PodCastError(PodCastError::OutputSliceWouldHaveSlop))
);
// if we don't mess with it we can up-alignment cast, since there are no
// zeroes in the original slice
checked::try_cast_slice_mut::<u8, NonZeroU32>(the_bytes).unwrap();
}
#[test]
fn test_types() {
let _: NonZeroU32 = checked::cast(1.0_f32);
let _: &mut NonZeroU32 = checked::cast_mut(&mut 1.0_f32);
let _: &NonZeroU32 = checked::cast_ref(&1.0_f32);
let _: &[NonZeroU32] = checked::cast_slice(&[1.0_f32]);
let _: &mut [NonZeroU32] = checked::cast_slice_mut(&mut [1.0_f32]);
//
let _: Result<NonZeroU32, CheckedCastError> = checked::try_cast(1.0_f32);
let _: Result<&mut NonZeroU32, CheckedCastError> =
checked::try_cast_mut(&mut 1.0_f32);
let _: Result<&NonZeroU32, CheckedCastError> =
checked::try_cast_ref(&1.0_f32);
let _: Result<&[NonZeroU32], CheckedCastError> =
checked::try_cast_slice(&[1.0_f32]);
let _: Result<&mut [NonZeroU32], CheckedCastError> =
checked::try_cast_slice_mut(&mut [1.0_f32]);
}
#[test]
fn test_try_pod_read_unaligned() {
let u32s = [0xaabbccdd, 0x11223344_u32];
let bytes = bytemuck::checked::cast_slice::<u32, u8>(&u32s);
#[cfg(target_endian = "big")]
assert_eq!(
checked::try_pod_read_unaligned::<NonZeroU32>(&bytes[1..5]),
Ok(NonZeroU32::new(0xbbccdd11).unwrap())
);
#[cfg(target_endian = "little")]
assert_eq!(
checked::try_pod_read_unaligned::<NonZeroU32>(&bytes[1..5]),
Ok(NonZeroU32::new(0x44aabbcc).unwrap())
);
let u32s = [0; 2];
let bytes = bytemuck::checked::cast_slice::<u32, u8>(&u32s);
assert_eq!(
checked::try_pod_read_unaligned::<NonZeroU32>(&bytes[1..5]),
Err(CheckedCastError::InvalidBitPattern)
);
}
#[test]
fn test_try_from_bytes() {
let nonzero_u32s = [
NonZeroU32::new(0xaabbccdd).unwrap(),
NonZeroU32::new(0x11223344).unwrap(),
];
let bytes = bytemuck::checked::cast_slice::<NonZeroU32, u8>(&nonzero_u32s);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[..4]),
Ok(&nonzero_u32s[0])
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[..5]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[..3]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[1..5]),
Err(CheckedCastError::PodCastError(
PodCastError::TargetAlignmentGreaterAndInputNotAligned
))
);
let zero_u32s = [0, 0x11223344_u32];
let bytes = bytemuck::checked::cast_slice::<u32, u8>(&zero_u32s);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[..4]),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[4..]),
Ok(&NonZeroU32::new(zero_u32s[1]).unwrap())
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[..5]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[..3]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[1..5]),
Err(CheckedCastError::PodCastError(
PodCastError::TargetAlignmentGreaterAndInputNotAligned
))
);
}
#[test]
fn test_try_from_bytes_mut() {
let a = 0xaabbccdd_u32;
let b = 0x11223344_u32;
let mut u32s = [a, b];
let bytes = bytemuck::checked::cast_slice_mut::<u32, u8>(&mut u32s);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[..4]),
Ok(&mut NonZeroU32::new(a).unwrap())
);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[4..]),
Ok(&mut NonZeroU32::new(b).unwrap())
);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[..5]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[..3]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[1..5]),
Err(CheckedCastError::PodCastError(
PodCastError::TargetAlignmentGreaterAndInputNotAligned
))
);
let mut u32s = [0, b];
let bytes = bytemuck::checked::cast_slice_mut::<u32, u8>(&mut u32s);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[..4]),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[4..]),
Ok(&mut NonZeroU32::new(b).unwrap())
);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[..5]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes_mut::<NonZeroU32>(&mut bytes[..3]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
assert_eq!(
checked::try_from_bytes::<NonZeroU32>(&bytes[1..5]),
Err(CheckedCastError::PodCastError(
PodCastError::TargetAlignmentGreaterAndInputNotAligned
))
);
}
#[test]
fn test_from_bytes() {
let abcd = 0xaabbccdd_u32;
let aligned_bytes = bytemuck::bytes_of(&abcd);
assert_eq!(
checked::from_bytes::<NonZeroU32>(aligned_bytes),
&NonZeroU32::new(abcd).unwrap()
);
assert!(core::ptr::eq(
checked::from_bytes(aligned_bytes) as *const NonZeroU32 as *const u32,
&abcd
));
}
#[test]
fn test_from_bytes_mut() {
let mut a = 0xaabbccdd_u32;
let a_addr = &a as *const _ as usize;
let aligned_bytes = bytemuck::bytes_of_mut(&mut a);
assert_eq!(
*checked::from_bytes_mut::<NonZeroU32>(aligned_bytes),
NonZeroU32::new(0xaabbccdd).unwrap()
);
assert_eq!(
checked::from_bytes_mut::<NonZeroU32>(aligned_bytes) as *const NonZeroU32
as usize,
a_addr
);
}
// like #[should_panic], but can be a part of another test, instead of requiring
// it to be its own test.
macro_rules! should_panic {
($ex:expr) => {
assert!(
std::panic::catch_unwind(|| {
let _ = $ex;
})
.is_err(),
concat!("should have panicked: `", stringify!($ex), "`")
);
};
}
#[test]
fn test_panics() {
should_panic!(checked::cast::<u32, NonZeroU32>(0));
should_panic!(checked::cast_ref::<u32, NonZeroU32>(&0));
should_panic!(checked::cast_mut::<u32, NonZeroU32>(&mut 0));
should_panic!(checked::cast_slice::<u8, NonZeroU32>(&[1u8, 2u8]));
should_panic!(checked::cast_slice_mut::<u8, NonZeroU32>(&mut [1u8, 2u8]));
should_panic!(checked::from_bytes::<NonZeroU32>(&[1u8, 2]));
should_panic!(checked::from_bytes::<NonZeroU32>(&[1u8, 2, 3, 4, 5]));
should_panic!(checked::from_bytes_mut::<NonZeroU32>(&mut [1u8, 2]));
should_panic!(checked::from_bytes_mut::<NonZeroU32>(&mut [1u8, 2, 3, 4, 5]));
// use cast_slice on some u32s to get some align>=4 bytes, so we can know
// we'll give from_bytes unaligned ones.
let aligned_bytes = bytemuck::cast_slice::<u32, u8>(&[0, 0]);
should_panic!(checked::from_bytes::<NonZeroU32>(aligned_bytes));
should_panic!(checked::from_bytes::<NonZeroU32>(&aligned_bytes[1..5]));
should_panic!(checked::pod_read_unaligned::<NonZeroU32>(
&aligned_bytes[1..5]
));
}
#[test]
fn test_char() {
assert_eq!(checked::try_cast::<u32, char>(0), Ok('\0'));
assert_eq!(checked::try_cast::<u32, char>(0xd7ff), Ok('\u{d7ff}'));
assert_eq!(
checked::try_cast::<u32, char>(0xd800),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(
checked::try_cast::<u32, char>(0xdfff),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(checked::try_cast::<u32, char>(0xe000), Ok('\u{e000}'));
assert_eq!(checked::try_cast::<u32, char>(0x10ffff), Ok('\u{10ffff}'));
assert_eq!(
checked::try_cast::<u32, char>(0x110000),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(
checked::try_cast::<u32, char>(-1i32 as u32),
Err(CheckedCastError::InvalidBitPattern)
);
}
#[test]
fn test_bool() {
assert_eq!(checked::try_cast::<u8, bool>(0), Ok(false));
assert_eq!(checked::try_cast::<u8, bool>(1), Ok(true));
for i in 2..=255 {
assert_eq!(
checked::try_cast::<u8, bool>(i),
Err(CheckedCastError::InvalidBitPattern)
);
}
assert_eq!(checked::try_from_bytes::<bool>(&[1]), Ok(&true));
assert_eq!(
checked::try_from_bytes::<bool>(&[3]),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(
checked::try_from_bytes::<bool>(&[0, 1]),
Err(CheckedCastError::PodCastError(PodCastError::SizeMismatch))
);
}
#[test]
fn test_all_nonzero() {
use core::num::*;
macro_rules! test_nonzero {
($nonzero:ty: $primitive:ty) => {
assert_eq!(
checked::try_cast::<$primitive, $nonzero>(0),
Err(CheckedCastError::InvalidBitPattern)
);
assert_eq!(
checked::try_cast::<$primitive, $nonzero>(1),
Ok(<$nonzero>::new(1).unwrap())
);
};
}
test_nonzero!(NonZeroU8: u8);
test_nonzero!(NonZeroI8: i8);
test_nonzero!(NonZeroU16: u16);
test_nonzero!(NonZeroI16: i16);
test_nonzero!(NonZeroU32: u32);
test_nonzero!(NonZeroI32: i32);
test_nonzero!(NonZeroU64: u64);
test_nonzero!(NonZeroI64: i64);
test_nonzero!(NonZeroU128: u128);
test_nonzero!(NonZeroI128: i128);
test_nonzero!(NonZeroUsize: usize);
test_nonzero!(NonZeroIsize: isize);
}

77
third_party/rust/bytemuck/tests/derive.rs vendored

@ -1,77 +0,0 @@
#![cfg(feature = "derive")]
#![allow(dead_code)]
use bytemuck::{ByteEq, ByteHash, Pod, TransparentWrapper, Zeroable};
use std::marker::PhantomData;
#[derive(Copy, Clone, Pod, Zeroable, ByteEq, ByteHash)]
#[repr(C)]
struct Test {
a: u16,
b: u16,
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
struct TransparentSingle {
a: u16,
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
#[transparent(u16)]
struct TransparentWithZeroSized {
a: u16,
b: (),
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
struct TransparentWithGeneric<T: ?Sized> {
a: T,
}
/// Ensuring that no additional bounds are emitted.
/// See https://github.com/Lokathor/bytemuck/issues/145
fn test_generic<T>(x: T) -> TransparentWithGeneric<T> {
TransparentWithGeneric::wrap(x)
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
#[transparent(T)]
struct TransparentWithGenericAndZeroSized<T: ?Sized> {
a: (),
b: T,
}
/// Ensuring that no additional bounds are emitted.
/// See https://github.com/Lokathor/bytemuck/issues/145
fn test_generic_with_zst<T>(x: T) -> TransparentWithGenericAndZeroSized<T> {
TransparentWithGenericAndZeroSized::wrap(x)
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
struct TransparentUnsized {
a: dyn std::fmt::Debug,
}
type DynDebug = dyn std::fmt::Debug;
#[derive(TransparentWrapper)]
#[repr(transparent)]
#[transparent(DynDebug)]
struct TransparentUnsizedWithZeroSized {
a: (),
b: DynDebug,
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
#[transparent(DynDebug)]
struct TransparentUnsizedWithGenericZeroSizeds<T: ?Sized, U: ?Sized> {
a: PhantomData<T>,
b: PhantomData<U>,
c: DynDebug,
}

124
third_party/rust/bytemuck/tests/doc_tests.rs vendored

@ -1,124 +0,0 @@
#![allow(clippy::disallowed_names)]
#![allow(dead_code)]
//! Cargo miri doesn't run doctests yet, so we duplicate these here. It's
//! probably not that important to sweat keeping these perfectly up to date, but
//! we should try to catch the cases where the primary tests are doctests.
use bytemuck::*;
// Miri doesn't run on doctests, so... copypaste to the rescue.
#[test]
fn test_transparent_slice() {
#[repr(transparent)]
struct Slice<T>([T]);
unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
let s = Slice::wrap_ref(&[1u32, 2, 3]);
assert_eq!(&s.0, &[1, 2, 3]);
let mut buf = [1, 2, 3u8];
let _sm = Slice::wrap_mut(&mut buf);
}
#[test]
fn test_transparent_basic() {
#[derive(Default)]
struct SomeStruct(u32);
#[repr(transparent)]
struct MyWrapper(SomeStruct);
unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
// interpret a reference to &SomeStruct as a &MyWrapper
let thing = SomeStruct::default();
let wrapped_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
// Works with &mut too.
let mut mut_thing = SomeStruct::default();
let wrapped_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
let _ = (wrapped_ref, wrapped_mut);
}
// Work around miri not running doctests
#[test]
fn test_contiguous_doc() {
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq)]
enum Foo {
A = 0,
B = 1,
C = 2,
D = 3,
E = 4,
}
unsafe impl Contiguous for Foo {
type Int = u8;
const MIN_VALUE: u8 = Foo::A as u8;
const MAX_VALUE: u8 = Foo::E as u8;
}
assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
assert_eq!(Foo::from_integer(8), None);
assert_eq!(Foo::C.into_integer(), 2);
assert_eq!(Foo::B.into_integer(), Foo::B as u8);
}
#[test]
fn test_offsetof_vertex() {
#[repr(C)]
struct Vertex {
pos: [f32; 2],
uv: [u16; 2],
color: [u8; 4],
}
unsafe impl Zeroable for Vertex {}
let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
let color = offset_of!(Zeroable::zeroed(), Vertex, color);
assert_eq!(pos, 0);
assert_eq!(uv, 8);
assert_eq!(color, 12);
}
#[test]
fn test_offsetof_nonpod() {
#[derive(Default)]
struct Foo {
a: u8,
b: &'static str,
c: i32,
}
let a_offset = offset_of!(Default::default(), Foo, a);
let b_offset = offset_of!(Default::default(), Foo, b);
let c_offset = offset_of!(Default::default(), Foo, c);
assert_ne!(a_offset, b_offset);
assert_ne!(b_offset, c_offset);
// We can't check against hardcoded values for a repr(Rust) type,
// but prove to ourselves this way.
let foo = Foo::default();
// Note: offsets are in bytes.
let as_bytes = &foo as *const _ as *const u8;
// We're using wrapping_add here because it's not worth
// the unsafe block, but it would be valid to use `add` instead,
// as it cannot overflow.
assert_eq!(
&foo.a as *const _ as usize,
as_bytes.wrapping_add(a_offset) as usize
);
assert_eq!(
&foo.b as *const _ as usize,
as_bytes.wrapping_add(b_offset) as usize
);
assert_eq!(
&foo.c as *const _ as usize,
as_bytes.wrapping_add(c_offset) as usize
);
}


@ -1,60 +0,0 @@
#![allow(clippy::disallowed_names)]
use bytemuck::{offset_of, Zeroable};
#[test]
fn test_offset_of_vertex() {
#[repr(C)]
struct Vertex {
pos: [f32; 2],
uv: [u16; 2],
color: [u8; 4],
}
unsafe impl Zeroable for Vertex {}
let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
let color = offset_of!(Zeroable::zeroed(), Vertex, color);
assert_eq!(pos, 0);
assert_eq!(uv, 8);
assert_eq!(color, 12);
}
#[test]
fn test_offset_of_foo() {
#[derive(Default)]
struct Foo {
a: u8,
b: &'static str,
c: i32,
}
let a_offset = offset_of!(Default::default(), Foo, a);
let b_offset = offset_of!(Default::default(), Foo, b);
let c_offset = offset_of!(Default::default(), Foo, c);
assert_ne!(a_offset, b_offset);
assert_ne!(b_offset, c_offset);
// We can't check against hardcoded values for a repr(Rust) type,
// but prove to ourselves this way.
let foo = Foo::default();
// Note: offsets are in bytes.
let as_bytes = &foo as *const _ as *const u8;
// we're using wrapping_add here because it's not worth
// the unsafe block, but it would be valid to use `add` instead,
// as it cannot overflow.
assert_eq!(
&foo.a as *const _ as usize,
as_bytes.wrapping_add(a_offset) as usize
);
assert_eq!(
&foo.b as *const _ as usize,
as_bytes.wrapping_add(b_offset) as usize
);
assert_eq!(
&foo.c as *const _ as usize,
as_bytes.wrapping_add(c_offset) as usize
);
}

107
third_party/rust/bytemuck/tests/std_tests.rs vendored

@ -1,107 +0,0 @@
#![allow(clippy::uninlined_format_args)]
#![allow(unused_imports)]
//! The integration tests seem to always have `std` linked, so things that would
//! depend on that can go here.
use bytemuck::*;
use core::num::NonZeroU8;
#[test]
fn test_transparent_vtabled() {
use core::fmt::Display;
#[repr(transparent)]
struct DisplayTraitObj(dyn Display);
unsafe impl TransparentWrapper<dyn Display> for DisplayTraitObj {}
impl Display for DisplayTraitObj {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
self.0.fmt(f)
}
}
let v = DisplayTraitObj::wrap_ref(&5i32);
let s = format!("{}", v);
assert_eq!(s, "5");
let mut x = 100i32;
let v_mut = DisplayTraitObj::wrap_mut(&mut x);
let s = format!("{}", v_mut);
assert_eq!(s, "100");
}
#[test]
#[cfg(feature = "extern_crate_alloc")]
fn test_large_box_alloc() {
type SuperPage = [[u8; 4096]; 4096];
let _: Box<SuperPage> = try_zeroed_box().unwrap();
}
#[test]
#[cfg(feature = "extern_crate_alloc")]
fn test_zero_sized_box_alloc() {
#[repr(align(4096))]
struct Empty;
unsafe impl Zeroable for Empty {}
let _: Box<Empty> = try_zeroed_box().unwrap();
}
#[test]
#[cfg(feature = "extern_crate_alloc")]
fn test_try_from_box_bytes() {
// Different layout: target alignment is greater than source alignment.
assert_eq!(
try_from_box_bytes::<u32>(Box::new([0u8; 4]).into()).map_err(|(x, _)| x),
Err(PodCastError::AlignmentMismatch)
);
// Different layout: target alignment is less than source alignment.
assert_eq!(
try_from_box_bytes::<u32>(Box::new(0u64).into()).map_err(|(x, _)| x),
Err(PodCastError::AlignmentMismatch)
);
// Different layout: target size is greater than source size.
assert_eq!(
try_from_box_bytes::<[u32; 2]>(Box::new(0u32).into()).map_err(|(x, _)| x),
Err(PodCastError::SizeMismatch)
);
// Different layout: target size is less than source size.
assert_eq!(
try_from_box_bytes::<u32>(Box::new([0u32; 2]).into()).map_err(|(x, _)| x),
Err(PodCastError::SizeMismatch)
);
// Round trip: alignment is equal to size.
assert_eq!(*from_box_bytes::<u32>(Box::new(1000u32).into()), 1000u32);
// Round trip: alignment is divider of size.
assert_eq!(&*from_box_bytes::<[u8; 5]>(Box::new(*b"hello").into()), b"hello");
// It's ok for T to have uninitialized bytes.
#[cfg(feature = "derive")]
{
#[derive(Debug, Copy, Clone, PartialEq, Eq, AnyBitPattern)]
struct Foo(u8, u16);
assert_eq!(
*from_box_bytes::<Foo>(Box::new([0xc5c5u16; 2]).into()),
Foo(0xc5u8, 0xc5c5u16)
);
}
}
#[test]
#[cfg(feature = "extern_crate_alloc")]
fn test_box_bytes_of() {
assert_eq!(&*box_bytes_of(Box::new(*b"hello")), b"hello");
#[cfg(target_endian = "big")]
assert_eq!(&*box_bytes_of(Box::new(0x12345678)), b"\x12\x34\x56\x78");
#[cfg(target_endian = "little")]
assert_eq!(&*box_bytes_of(Box::new(0x12345678)), b"\x78\x56\x34\x12");
// It's ok for T to have invalid bit patterns.
assert_eq!(&*box_bytes_of(Box::new(NonZeroU8::new(0xc5))), b"\xc5");
}

116
third_party/rust/bytemuck/tests/transparent.rs vendored

@ -1,116 +0,0 @@
// Currently this test doesn't actually check the output of the functions.
// It's only here for miri to check for any potential undefined behaviour.
// TODO: check function results
#[test]
fn test_transparent_wrapper() {
// An external type defined in a different crate.
#[derive(Debug, Copy, Clone, Default)]
struct Foreign(u8);
use bytemuck::TransparentWrapper;
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
struct Wrapper(Foreign);
unsafe impl TransparentWrapper<Foreign> for Wrapper {}
// Traits can be implemented on crate-local wrapper.
unsafe impl bytemuck::Zeroable for Wrapper {}
unsafe impl bytemuck::Pod for Wrapper {}
impl PartialEq<u8> for Foreign {
fn eq(&self, &other: &u8) -> bool {
self.0 == other
}
}
impl PartialEq<u8> for Wrapper {
fn eq(&self, &other: &u8) -> bool {
self.0 == other
}
}
let _: u8 = bytemuck::cast(Wrapper::wrap(Foreign::default()));
let _: Foreign = Wrapper::peel(bytemuck::cast(u8::default()));
let _: &u8 = bytemuck::cast_ref(Wrapper::wrap_ref(&Foreign::default()));
let _: &Foreign = Wrapper::peel_ref(bytemuck::cast_ref(&u8::default()));
let _: &mut u8 =
bytemuck::cast_mut(Wrapper::wrap_mut(&mut Foreign::default()));
let _: &mut Foreign =
Wrapper::peel_mut(bytemuck::cast_mut(&mut u8::default()));
let _: &[u8] =
bytemuck::cast_slice(Wrapper::wrap_slice(&[Foreign::default()]));
let _: &[Foreign] =
Wrapper::peel_slice(bytemuck::cast_slice(&[u8::default()]));
let _: &mut [u8] =
bytemuck::cast_slice_mut(Wrapper::wrap_slice_mut(
&mut [Foreign::default()],
));
let _: &mut [Foreign] =
Wrapper::peel_slice_mut(bytemuck::cast_slice_mut(&mut [u8::default()]));
let _: &[u8] = bytemuck::bytes_of(Wrapper::wrap_ref(&Foreign::default()));
let _: &Foreign = Wrapper::peel_ref(bytemuck::from_bytes(&[u8::default()]));
let _: &mut [u8] =
bytemuck::bytes_of_mut(Wrapper::wrap_mut(&mut Foreign::default()));
let _: &mut Foreign =
Wrapper::peel_mut(bytemuck::from_bytes_mut(&mut [u8::default()]));
// not sure if this is the right usage
let _ =
bytemuck::pod_align_to::<_, u8>(Wrapper::wrap_slice(&[Foreign::default()]));
// counterpart?
// not sure if this is the right usage
let _ = bytemuck::pod_align_to_mut::<_, u8>(Wrapper::wrap_slice_mut(&mut [
Foreign::default(),
]));
// counterpart?
#[cfg(feature = "extern_crate_alloc")]
{
use bytemuck::allocation::TransparentWrapperAlloc;
use std::rc::Rc;
let a: Vec<Foreign> = vec![Foreign::default(); 2];
let b: Vec<Wrapper> = Wrapper::wrap_vec(a);
assert_eq!(b, [0, 0]);
let c: Vec<Foreign> = Wrapper::peel_vec(b);
assert_eq!(c, [0, 0]);
let d: Box<Foreign> = Box::new(Foreign::default());
let e: Box<Wrapper> = Wrapper::wrap_box(d);
assert_eq!(&*e, &0);
let f: Box<Foreign> = Wrapper::peel_box(e);
assert_eq!(&*f, &0);
let g: Rc<Foreign> = Rc::new(Foreign::default());
let h: Rc<Wrapper> = Wrapper::wrap_rc(g);
assert_eq!(&*h, &0);
let i: Rc<Foreign> = Wrapper::peel_rc(h);
assert_eq!(&*i, &0);
#[cfg(target_has_atomic = "ptr")]
{
use std::sync::Arc;
let j: Arc<Foreign> = Arc::new(Foreign::default());
let k: Arc<Wrapper> = Wrapper::wrap_arc(j);
assert_eq!(&*k, &0);
let l: Arc<Foreign> = Wrapper::peel_arc(k);
assert_eq!(&*l, &0);
}
}
}


@ -1,13 +0,0 @@
use bytemuck::TransparentWrapper;
#[repr(transparent)]
struct Wrap(Box<u32>);
// SAFETY: it's #[repr(transparent)]
unsafe impl TransparentWrapper<Box<u32>> for Wrap {}
fn main() {
let value = Box::new(5);
// This used to duplicate the wrapped value, creating a double free :(
Wrap::wrap(value);
}


@ -1 +0,0 @@
{"files":{"Cargo.toml":"c87eab9ea5fc46d5e449ddee9cf271d2e0bfe64a4972a8b593936a5845ebbaf1","LICENSE-APACHE":"870e20c217d15bcfcbe53d7c5867cd8fac44a4ca0b41fc1eb843557e16063eba","LICENSE-MIT":"0b2d108c9c686a74ac312990ee8377902756a2a081a7af3b0f9d68abf0a8f1a1","LICENSE-ZLIB":"682b4c81b85e83ce6cc6e1ace38fdd97aeb4de0e972bd2b44aa0916c54af8c96","README.md":"c44fcbb0a6555b948e7c0b26313ecdc5f3079ebd1ae74aadcc42fd1ba1245540","changelog.md":"70a32751e189a01bab0c2bc03f8cc11c0025469f29f68bd608aaf0f3ff6b90f4","src/lib.rs":"0c2941080a69a9ed6655fa3f94fdc85c5aeb6e7d8af080042cd7a2a9c5b80424","src/traits.rs":"a96d498f9e1d3050df1adfbcdd86a3258a900a325127699759b210bdb16a0617","tests/basic.rs":"980f46ba184d07b25de599e3de1d95a2a21e1270b3c9916a8046c71eabed5baf"},"package":"4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60"}

44
third_party/rust/bytemuck_derive/Cargo.toml vendored

@ -1,44 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "bytemuck_derive"
version = "1.6.0"
authors = ["Lokathor <zefria@gmail.com>"]
description = "derive proc-macros for `bytemuck`"
readme = "README.md"
keywords = [
"transmute",
"bytes",
"casting",
]
categories = [
"encoding",
"no-std",
]
license = "Zlib OR Apache-2.0 OR MIT"
repository = "https://github.com/Lokathor/bytemuck"
[lib]
name = "bytemuck_derive"
proc-macro = true
[dependencies.proc-macro2]
version = "1.0.60"
[dependencies.quote]
version = "1"
[dependencies.syn]
version = "2.0.1"
[dev-dependencies]


@ -1,61 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,9 +0,0 @@
MIT License
Copyright (c) 2019 Daniel "Lokathor" Gee.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

11
third_party/rust/bytemuck_derive/LICENSE-ZLIB vendored

@ -1,11 +0,0 @@
Copyright (c) 2019 Daniel "Lokathor" Gee.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

10
third_party/rust/bytemuck_derive/README.md vendored

@ -1,10 +0,0 @@
# bytemuck_derive
Derive macros for [bytemuck](https://docs.rs/bytemuck) traits.
MSRV: None!
This is an opt-in bonus feature for `bytemuck` that doesn't particularly do
anything you couldn't do yourself, and so MSRV is not a strong consideration for
this crate.
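
A minimal usage sketch (editorial illustration, not part of the upstream
README; it assumes the `derive` feature of the `bytemuck` crate, which
re-exports these macros):

```rust
use bytemuck::{Pod, Zeroable};

#[derive(Copy, Clone, Pod, Zeroable)]
#[repr(C)]
struct Vertex {
  position: [f32; 3],
  uv: [f32; 2],
}

fn main() {
  let v = Vertex::zeroed();
  // A `Pod` type can be viewed as raw bytes.
  let bytes: &[u8] = bytemuck::bytes_of(&v);
  assert_eq!(bytes.len(), core::mem::size_of::<Vertex>());
}
```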

72
third_party/rust/bytemuck_derive/changelog.md vendored

@ -1,72 +0,0 @@
## `bytemuck_derive` changelog
## 1.6.0
* This allows `CheckedBitPattern` to be derived for enums with fields.
The repr must be one of the following:
* `#[repr(C)]`
* `#[repr(C, int)]`
* `#[repr(int)]`
* `#[repr(transparent)]`
## 1.5.0
* The `Zeroable` derive now allows custom bounds. See the rustdoc for an explanation.
## 1.4.1
* Move the `syn` dependency to use version 2.
This should not affect the public API in any way.
## 1.4.0
* `ByteEq` and `ByteHash` derives will make `Eq` and `Hash` impls that treat the
value as a `&[u8]` during equality checks and hashing. This provides a large
codegen improvement for some types.
* Derives of `repr(int)` enums should now accept byte literal values as the
discriminant.
## 1.3.0
* Allow `repr(transparent)` to be used generically in `derive(Pod)`.
## 1.2.1
* Fixed a regression of the `align(N)` attribute that occurred during otherwise
routine cleanup.
## 1.2.0
* Apparently our minimum required version of `syn` went up without anyone
noticing for a while. Because of a bump in our `syn` requirements, we're also
issuing this minor version bump in the `bytemuck_derive` crate. Because it's
possible to *reduce* the minimum required version of a dep in only a patch
release, I'm going to ratchet the required version of `syn` all the way up to
"current" (1.0.99). If absolutely necessary we could probably reduce the
minimum `syn` version again in a patch release for 1.2, but I don't want to
play this dance too much so I'd rather make each jump as big as can possibly
be. [Issue 122](https://github.com/Lokathor/bytemuck/issues/122). **Note:**
While the core `bytemuck` crate continues to keep building on rustc-1.34.0,
the `bytemuck_derive` crate is considered an opt-in bonus feature (which
doesn't do anything you couldn't trivially do yourself) and so it does not
support a specific MSRV.
## 1.1.1
* Adjusted the license files to use full files rather than symlinks.
[PR](https://github.com/Lokathor/bytemuck/pull/118)
The license is unchanged, just no more symlinks.
## 1.1.0
* Updated to work with `bytemuck-1.9.0`
## 1.0.1
* [yanchith](https://github.com/yanchith) fixed the derive checks code to make clippy more happy.
[PR 45](https://github.com/Lokathor/bytemuck/pull/45)
## 1.0.0
* Initial stable release.

632
third_party/rust/bytemuck_derive/src/lib.rs vendored

@ -1,632 +0,0 @@
//! Derive macros for [bytemuck](https://docs.rs/bytemuck) traits.
extern crate proc_macro;
mod traits;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DeriveInput, Result};
use crate::traits::{
bytemuck_crate_name, AnyBitPattern, CheckedBitPattern, Contiguous, Derivable,
NoUninit, Pod, TransparentWrapper, Zeroable,
};
/// Derive the `Pod` trait for a struct
///
/// The macro ensures that the struct follows all the safety requirements
/// for the `Pod` trait.
///
/// The following constraints need to be satisfied for the macro to succeed
///
/// - All fields in the struct must implement `Pod`
/// - The struct must be `#[repr(C)]` or `#[repr(transparent)]`
/// - The struct must not contain any padding bytes
/// - The struct must not contain any generic parameters, unless it is
///   `#[repr(transparent)]`
///
/// ## Examples
///
/// ```rust
/// # use std::marker::PhantomData;
/// # use bytemuck_derive::{Pod, Zeroable};
/// #[derive(Copy, Clone, Pod, Zeroable)]
/// #[repr(C)]
/// struct Test {
/// a: u16,
/// b: u16,
/// }
///
/// #[derive(Copy, Clone, Pod, Zeroable)]
/// #[repr(transparent)]
/// struct Generic<A, B> {
/// a: A,
/// b: PhantomData<B>,
/// }
/// ```
///
/// If the struct is generic, it must be `#[repr(transparent)]` also.
///
/// ```compile_fail
/// # use bytemuck::{Pod, Zeroable};
/// # use std::marker::PhantomData;
/// #[derive(Copy, Clone, Pod, Zeroable)]
/// #[repr(C)] // must be `#[repr(transparent)]`
/// struct Generic<A> {
/// a: A,
/// }
/// ```
///
/// If the struct is generic and `#[repr(transparent)]`, then it is only `Pod`
/// when all of its generics are `Pod`, not just its fields.
///
/// ```
/// # use bytemuck::{Pod, Zeroable};
/// # use std::marker::PhantomData;
/// #[derive(Copy, Clone, Pod, Zeroable)]
/// #[repr(transparent)]
/// struct Generic<A, B> {
/// a: A,
/// b: PhantomData<B>,
/// }
///
/// let _: u32 = bytemuck::cast(Generic { a: 4u32, b: PhantomData::<u32> });
/// ```
///
/// ```compile_fail
/// # use bytemuck::{Pod, Zeroable};
/// # use std::marker::PhantomData;
/// # #[derive(Copy, Clone, Pod, Zeroable)]
/// # #[repr(transparent)]
/// # struct Generic<A, B> {
/// # a: A,
/// # b: PhantomData<B>,
/// # }
/// struct NotPod;
///
/// let _: u32 = bytemuck::cast(Generic { a: 4u32, b: PhantomData::<NotPod> });
/// ```
#[proc_macro_derive(Pod, attributes(bytemuck))]
pub fn derive_pod(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let expanded =
derive_marker_trait::<Pod>(parse_macro_input!(input as DeriveInput));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `AnyBitPattern` trait for a struct
///
/// The macro ensures that the struct follows all the safety requirements
/// for the `AnyBitPattern` trait.
///
/// The following constraints need to be satisfied for the macro to succeed
///
/// - All fields in the struct must implement `AnyBitPattern` (see the sketch
///   below)
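///
/// ## Example
///
/// A minimal illustrative sketch (editorial addition modeled on the crate's
/// own tests; the `Foo` struct is hypothetical):
///
/// ```rust
/// # use bytemuck_derive::AnyBitPattern;
/// #[derive(Copy, Clone, AnyBitPattern)]
/// struct Foo {
///   a: u8,
///   b: u16,
/// }
/// ```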
#[proc_macro_derive(AnyBitPattern, attributes(bytemuck))]
pub fn derive_anybitpattern(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let expanded = derive_marker_trait::<AnyBitPattern>(parse_macro_input!(
input as DeriveInput
));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `Zeroable` trait for a struct
///
/// The macro ensures that the struct follows all the safety requirements
/// for the `Zeroable` trait.
///
/// The following constraints need to be satisfied for the macro to succeed
///
/// - All fields in the struct must implement `Zeroable`
///
/// ## Example
///
/// ```rust
/// # use bytemuck_derive::{Zeroable};
/// #[derive(Copy, Clone, Zeroable)]
/// #[repr(C)]
/// struct Test {
/// a: u16,
/// b: u16,
/// }
/// ```
///
/// # Custom bounds
///
/// Custom bounds for the derived `Zeroable` impl can be given using the
/// `#[zeroable(bound = "")]` helper attribute.
///
/// Using this attribute additionally opts-in to "perfect derive" semantics,
/// where instead of adding bounds for each generic type parameter, bounds are
/// added for each field's type.
///
/// ## Examples
///
/// ```rust
/// # use bytemuck::Zeroable;
/// # use std::marker::PhantomData;
/// #[derive(Clone, Zeroable)]
/// #[zeroable(bound = "")]
/// struct AlwaysZeroable<T> {
/// a: PhantomData<T>,
/// }
///
/// AlwaysZeroable::<std::num::NonZeroU8>::zeroed();
/// ```
///
/// ```rust,compile_fail
/// # use bytemuck::Zeroable;
/// # use std::marker::PhantomData;
/// #[derive(Clone, Zeroable)]
/// #[zeroable(bound = "T: Copy")]
/// struct ZeroableWhenTIsCopy<T> {
/// a: PhantomData<T>,
/// }
///
/// ZeroableWhenTIsCopy::<String>::zeroed();
/// ```
///
/// The restriction that all fields must be Zeroable is still applied, and this
/// is enforced using the mentioned "perfect derive" semantics.
///
/// ```rust
/// # use bytemuck::Zeroable;
/// #[derive(Clone, Zeroable)]
/// #[zeroable(bound = "")]
/// struct ZeroableWhenTIsZeroable<T> {
/// a: T,
/// }
/// ZeroableWhenTIsZeroable::<u32>::zeroed();
/// ```
///
/// ```rust,compile_fail
/// # use bytemuck::Zeroable;
/// # #[derive(Clone, Zeroable)]
/// # #[zeroable(bound = "")]
/// # struct ZeroableWhenTIsZeroable<T> {
/// # a: T,
/// # }
/// ZeroableWhenTIsZeroable::<String>::zeroed();
/// ```
#[proc_macro_derive(Zeroable, attributes(bytemuck, zeroable))]
pub fn derive_zeroable(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let expanded =
derive_marker_trait::<Zeroable>(parse_macro_input!(input as DeriveInput));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `NoUninit` trait for a struct or enum
///
/// The macro ensures that the type follows all the safety requirements
/// for the `NoUninit` trait.
///
/// The following constraints need to be satisfied for the macro to succeed
/// (the rest of the constraints are guaranteed by the `NoUninit` subtrait
/// bounds, i.e. the type must be `Sized + Copy + 'static`):
///
/// If applied to a struct:
/// - All fields in the struct must implement `NoUninit`
/// - The struct must be `#[repr(C)]` or `#[repr(transparent)]`
/// - The struct must not contain any padding bytes
/// - The struct must contain no generic parameters
///
/// If applied to an enum:
/// - The enum must be explicit `#[repr(Int)]`, `#[repr(C)]`, or both
/// - All variants must be fieldless
/// - The enum must contain no generic parameters
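///
/// ## Example
///
/// An illustrative sketch, mirroring the crate's test suite; the struct is
/// `#[repr(C)]`, non-generic, and free of padding, as required above:
///
/// ```rust
/// # use bytemuck_derive::NoUninit;
/// #[derive(Copy, Clone, NoUninit)]
/// #[repr(C)]
/// struct Test {
///   a: u16,
///   b: u16,
/// }
/// ```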
#[proc_macro_derive(NoUninit)]
pub fn derive_no_uninit(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let expanded =
derive_marker_trait::<NoUninit>(parse_macro_input!(input as DeriveInput));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `CheckedBitPattern` trait for a struct or enum.
///
/// The macro ensures that the type follows all the safety requirements
/// for the `CheckedBitPattern` trait and derives the required `Bits` type
/// definition and `is_valid_bit_pattern` method for the type automatically.
///
/// The following constraints need to be satisfied for the macro to succeed:
///
/// If applied to a struct:
/// - All fields must implement `CheckedBitPattern`
/// - The struct must be `#[repr(C)]` or `#[repr(transparent)]`
/// - The struct must contain no generic parameters
///
/// If applied to an enum:
/// - The enum must be explicit `#[repr(Int)]`
/// - All fields in variants must implement `CheckedBitPattern`
/// - The enum must contain no generic parameters
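///
/// ## Example
///
/// An illustrative sketch, adapted from the crate's test suite; the checked
/// cast helpers are assumed to come from the `bytemuck` crate:
///
/// ```rust
/// # use bytemuck_derive::{CheckedBitPattern, NoUninit};
/// #[derive(Debug, Copy, Clone, PartialEq, Eq, NoUninit, CheckedBitPattern)]
/// #[repr(u8)]
/// enum Test {
///   A = 0,
///   B = 1,
///   C = 2,
/// }
///
/// // `2` is a valid bit pattern for `Test`, so the checked cast succeeds.
/// let value = bytemuck::checked::from_bytes::<Test>(&[2u8]);
/// assert_eq!(*value, Test::C);
/// ```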
#[proc_macro_derive(CheckedBitPattern)]
pub fn derive_maybe_pod(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let expanded = derive_marker_trait::<CheckedBitPattern>(parse_macro_input!(
input as DeriveInput
));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `TransparentWrapper` trait for a struct
///
/// The macro ensures that the struct follows all the safety requirements
/// for the `TransparentWrapper` trait.
///
/// The following constraints need to be satisfied for the macro to succeed
///
/// - The struct must be `#[repr(transparent)]`
/// - The struct must contain the `Wrapped` type
/// - Any ZST fields must be [`Zeroable`][derive@Zeroable].
///
/// If the struct only contains a single field, the `Wrapped` type will
/// automatically be determined. If there is more than one field in the struct,
/// you need to specify the `Wrapped` type using `#[transparent(T)]`
///
/// ## Examples
///
/// ```rust
/// # use bytemuck_derive::TransparentWrapper;
/// # use std::marker::PhantomData;
/// #[derive(Copy, Clone, TransparentWrapper)]
/// #[repr(transparent)]
/// #[transparent(u16)]
/// struct Test<T> {
/// inner: u16,
/// extra: PhantomData<T>,
/// }
/// ```
///
/// If the struct contains more than one field, the `Wrapped` type must be
/// explicitly specified.
///
/// ```rust,compile_fail
/// # use bytemuck_derive::TransparentWrapper;
/// # use std::marker::PhantomData;
/// #[derive(Copy, Clone, TransparentWrapper)]
/// #[repr(transparent)]
/// // missing `#[transparent(u16)]`
/// struct Test<T> {
/// inner: u16,
/// extra: PhantomData<T>,
/// }
/// ```
///
/// Any ZST fields must be `Zeroable`.
///
/// ```rust,compile_fail
/// # use bytemuck_derive::TransparentWrapper;
/// # use std::marker::PhantomData;
/// struct NonTransparentSafeZST;
///
/// #[derive(TransparentWrapper)]
/// #[repr(transparent)]
/// #[transparent(u16)]
/// struct Test<T> {
/// inner: u16,
/// extra: PhantomData<T>,
/// another_extra: NonTransparentSafeZST, // not `Zeroable`
/// }
/// ```
#[proc_macro_derive(TransparentWrapper, attributes(bytemuck, transparent))]
pub fn derive_transparent(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let expanded = derive_marker_trait::<TransparentWrapper>(parse_macro_input!(
input as DeriveInput
));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `Contiguous` trait for an enum
///
/// The macro ensures that the enum follows all the safety requirements
/// for the `Contiguous` trait.
///
/// The following constraints need to be satisfied for the macro to succeed
///
/// - The enum must be `#[repr(Int)]`
/// - The enum must be fieldless
/// - The enum discriminants must form a contiguous range
///
/// ## Example
///
/// ```rust
/// # use bytemuck_derive::{Contiguous};
///
/// #[derive(Copy, Clone, Contiguous)]
/// #[repr(u8)]
/// enum Test {
/// A = 0,
/// B = 1,
/// C = 2,
/// }
/// ```
#[proc_macro_derive(Contiguous)]
pub fn derive_contiguous(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let expanded =
derive_marker_trait::<Contiguous>(parse_macro_input!(input as DeriveInput));
proc_macro::TokenStream::from(expanded)
}
/// Derive the `PartialEq` and `Eq` trait for a type
///
/// The macro implements `PartialEq` and `Eq` by casting both sides of the
/// comparison to a byte slice and then compares those.
///
/// ## Warning
///
/// Since this implements a byte wise comparison, the behavior of floating point
/// numbers does not match their usual comparison behavior. Additionally other
/// custom comparison behaviors of the individual fields are also ignored. This
/// also does not implement `StructuralPartialEq` / `StructuralEq` like
/// `PartialEq` / `Eq` would. This means you can't pattern match on the values.
///
/// ## Example
///
/// ```rust
/// # use bytemuck_derive::{ByteEq, NoUninit};
/// #[derive(Copy, Clone, NoUninit, ByteEq)]
/// #[repr(C)]
/// struct Test {
/// a: u32,
/// b: char,
/// c: f32,
/// }
/// ```
#[proc_macro_derive(ByteEq)]
pub fn derive_byte_eq(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let crate_name = bytemuck_crate_name(&input);
let ident = input.ident;
proc_macro::TokenStream::from(quote! {
impl ::core::cmp::PartialEq for #ident {
#[inline]
#[must_use]
fn eq(&self, other: &Self) -> bool {
#crate_name::bytes_of(self) == #crate_name::bytes_of(other)
}
}
impl ::core::cmp::Eq for #ident { }
})
}
/// Derive the `Hash` trait for a type
///
/// The macro implements `Hash` by casting the value to a byte slice and hashing
/// that.
///
/// ## Warning
///
/// The hash does not match the standard library's `Hash` derive.
///
/// ## Example
///
/// ```rust
/// # use bytemuck_derive::{ByteHash, NoUninit};
/// #[derive(Copy, Clone, NoUninit, ByteHash)]
/// #[repr(C)]
/// struct Test {
/// a: u32,
/// b: char,
/// c: f32,
/// }
/// ```
#[proc_macro_derive(ByteHash)]
pub fn derive_byte_hash(
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let crate_name = bytemuck_crate_name(&input);
let ident = input.ident;
proc_macro::TokenStream::from(quote! {
impl ::core::hash::Hash for #ident {
#[inline]
fn hash<H: ::core::hash::Hasher>(&self, state: &mut H) {
::core::hash::Hash::hash_slice(#crate_name::bytes_of(self), state)
}
#[inline]
fn hash_slice<H: ::core::hash::Hasher>(data: &[Self], state: &mut H) {
::core::hash::Hash::hash_slice(#crate_name::cast_slice::<_, u8>(data), state)
}
}
})
}
/// Basic wrapper for error handling
fn derive_marker_trait<Trait: Derivable>(input: DeriveInput) -> TokenStream {
derive_marker_trait_inner::<Trait>(input)
.unwrap_or_else(|err| err.into_compile_error())
}
/// Find `#[name(key = "value")]` helper attributes on the struct, and return
/// their `"value"`s parsed with `parser`.
///
/// Returns an error if any attributes with the given `name` do not match the
/// expected format. Returns `Ok([])` if no attributes with `name` are found.
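///
/// For example, the `Zeroable` derive calls this with `name = "zeroable"` and
/// `key = "bound"`, so that explicit bounds can be written as
/// `#[zeroable(bound = "T: Copy")]` (see the `Zeroable` docs above).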
fn find_and_parse_helper_attributes<P: syn::parse::Parser + Copy>(
attributes: &[syn::Attribute], name: &str, key: &str, parser: P,
example_value: &str, invalid_value_msg: &str,
) -> Result<Vec<P::Output>> {
let invalid_format_msg =
format!("{name} attribute must be `{name}({key} = \"{example_value}\")`",);
let values_to_check = attributes.iter().filter_map(|attr| match &attr.meta {
// If a `Path` matches our `name`, return an error, else ignore it.
// e.g. `#[zeroable]`
syn::Meta::Path(path) => path
.is_ident(name)
.then(|| Err(syn::Error::new_spanned(path, &invalid_format_msg))),
// If a `NameValue` matches our `name`, return an error, else ignore it.
// e.g. `#[zeroable = "hello"]`
syn::Meta::NameValue(namevalue) => {
namevalue.path.is_ident(name).then(|| {
Err(syn::Error::new_spanned(&namevalue.path, &invalid_format_msg))
})
}
// If a `List` matches our `name`, match its contents to our format, else
// ignore it. If its contents match our format, return the value, else
// return an error.
syn::Meta::List(list) => list.path.is_ident(name).then(|| {
let namevalue: syn::MetaNameValue = syn::parse2(list.tokens.clone())
.map_err(|_| {
syn::Error::new_spanned(&list.tokens, &invalid_format_msg)
})?;
if namevalue.path.is_ident(key) {
match namevalue.value {
syn::Expr::Lit(syn::ExprLit {
lit: syn::Lit::Str(strlit), ..
}) => Ok(strlit),
_ => {
Err(syn::Error::new_spanned(&namevalue.path, &invalid_format_msg))
}
}
} else {
Err(syn::Error::new_spanned(&namevalue.path, &invalid_format_msg))
}
}),
});
// Parse each value found with the given parser, and return them if no errors
// occur.
values_to_check
.map(|lit| {
let lit = lit?;
lit.parse_with(parser).map_err(|err| {
syn::Error::new_spanned(&lit, format!("{invalid_value_msg}: {err}"))
})
})
.collect()
}
fn derive_marker_trait_inner<Trait: Derivable>(
mut input: DeriveInput,
) -> Result<TokenStream> {
let crate_name = bytemuck_crate_name(&input);
let trait_ = Trait::ident(&input, &crate_name)?;
// If this trait allows explicit bounds, and any explicit bounds were given,
// then use those explicit bounds. Else, apply the default bounds (bound
// each generic type on this trait).
if let Some(name) = Trait::explicit_bounds_attribute_name() {
// See if any explicit bounds were given in attributes.
let explicit_bounds = find_and_parse_helper_attributes(
&input.attrs,
name,
"bound",
<syn::punctuated::Punctuated<syn::WherePredicate, syn::Token![,]>>::parse_terminated,
"Type: Trait",
"invalid where predicate",
)?;
if !explicit_bounds.is_empty() {
// Explicit bounds were given.
// Enforce explicitly given bounds, and emit "perfect derive" (i.e. add
// bounds for each field's type).
let explicit_bounds = explicit_bounds
.into_iter()
.flatten()
.collect::<Vec<syn::WherePredicate>>();
let predicates = &mut input.generics.make_where_clause().predicates;
predicates.extend(explicit_bounds);
let fields = match &input.data {
syn::Data::Struct(syn::DataStruct { fields, .. }) => fields.clone(),
syn::Data::Union(_) => {
return Err(syn::Error::new_spanned(
trait_,
&"perfect derive is not supported for unions",
));
}
syn::Data::Enum(_) => {
return Err(syn::Error::new_spanned(
trait_,
&"perfect derive is not supported for enums",
));
}
};
for field in fields {
let ty = field.ty;
predicates.push(syn::parse_quote!(
#ty: #trait_
));
}
} else {
// No explicit bounds were given.
// Enforce trait bound on all type generics.
add_trait_marker(&mut input.generics, &trait_);
}
} else {
// This trait does not allow explicit bounds.
// Enforce trait bound on all type generics.
add_trait_marker(&mut input.generics, &trait_);
}
let name = &input.ident;
let (impl_generics, ty_generics, where_clause) =
input.generics.split_for_impl();
Trait::check_attributes(&input.data, &input.attrs)?;
let asserts = Trait::asserts(&input, &crate_name)?;
let (trait_impl_extras, trait_impl) = Trait::trait_impl(&input, &crate_name)?;
let implies_trait = if let Some(implies_trait) =
Trait::implies_trait(&crate_name)
{
quote!(unsafe impl #impl_generics #implies_trait for #name #ty_generics #where_clause {})
} else {
quote!()
};
let where_clause =
if Trait::requires_where_clause() { where_clause } else { None };
Ok(quote! {
#asserts
#trait_impl_extras
unsafe impl #impl_generics #trait_ for #name #ty_generics #where_clause {
#trait_impl
}
#implies_trait
})
}
/// Add a trait marker to the generics if it is not already present
fn add_trait_marker(generics: &mut syn::Generics, trait_name: &syn::Path) {
// Get each generic type parameter.
let type_params = generics
.type_params()
.map(|param| &param.ident)
.map(|param| {
syn::parse_quote!(
#param: #trait_name
)
})
.collect::<Vec<syn::WherePredicate>>();
generics.make_where_clause().predicates.extend(type_params);
}

1265
third_party/rust/bytemuck_derive/src/traits.rs vendored

The diff between the files is not shown because of its large size.


@ -1,452 +0,0 @@
#![allow(dead_code)]
use bytemuck::{
AnyBitPattern, CheckedBitPattern, Contiguous, NoUninit, Pod,
TransparentWrapper, Zeroable, checked::CheckedCastError,
};
use std::marker::{PhantomData, PhantomPinned};
#[derive(Copy, Clone, Pod, Zeroable)]
#[repr(C)]
struct Test {
a: u16,
b: u16,
}
#[derive(Pod, Zeroable)]
#[repr(C, packed)]
struct GenericPackedStruct<T: Pod> {
a: u32,
b: T,
c: u32,
}
impl<T: Pod> Clone for GenericPackedStruct<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T: Pod> Copy for GenericPackedStruct<T> {}
#[derive(Pod, Zeroable)]
#[repr(C, packed(1))]
struct GenericPackedStructExplicitPackedAlignment<T: Pod> {
a: u32,
b: T,
c: u32,
}
impl<T: Pod> Clone for GenericPackedStructExplicitPackedAlignment<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T: Pod> Copy for GenericPackedStructExplicitPackedAlignment<T> {}
#[derive(Zeroable)]
struct ZeroGeneric<T: bytemuck::Zeroable> {
a: T,
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
struct TransparentSingle {
a: u16,
}
#[derive(TransparentWrapper)]
#[repr(transparent)]
#[transparent(u16)]
struct TransparentWithZeroSized<T> {
a: u16,
b: PhantomData<T>,
}
struct MyZst<T>(PhantomData<T>, [u8; 0], PhantomPinned);
unsafe impl<T> Zeroable for MyZst<T> {}
#[derive(TransparentWrapper)]
#[repr(transparent)]
#[transparent(u16)]
struct TransparentTupleWithCustomZeroSized<T>(u16, MyZst<T>);
#[repr(u8)]
#[derive(Clone, Copy, Contiguous)]
enum ContiguousWithValues {
A = 0,
B = 1,
C = 2,
D = 3,
E = 4,
}
#[repr(i8)]
#[derive(Clone, Copy, Contiguous)]
enum ContiguousWithImplicitValues {
A = -10,
B,
C,
D,
E,
}
#[derive(Copy, Clone, NoUninit)]
#[repr(C)]
struct NoUninitTest {
a: u16,
b: u16,
}
#[derive(Copy, Clone, AnyBitPattern)]
#[repr(C)]
union UnionTestAnyBitPattern {
a: u8,
b: u16,
}
#[repr(u8)]
#[derive(Debug, Clone, Copy, NoUninit, CheckedBitPattern, PartialEq, Eq)]
enum CheckedBitPatternEnumWithValues {
A = 0,
B = 1,
C = 2,
D = 3,
E = 4,
}
#[repr(i8)]
#[derive(Clone, Copy, NoUninit, CheckedBitPattern)]
enum CheckedBitPatternEnumWithImplicitValues {
A = -10,
B,
C,
D,
E,
}
#[repr(u8)]
#[derive(Debug, Clone, Copy, NoUninit, CheckedBitPattern, PartialEq, Eq)]
enum CheckedBitPatternEnumNonContiguous {
A = 1,
B = 8,
C = 2,
D = 3,
E = 56,
}
#[repr(u8)]
#[derive(Debug, Clone, Copy, NoUninit, CheckedBitPattern, PartialEq, Eq)]
enum CheckedBitPatternEnumByteLit {
A = b'A',
B = b'B',
C = b'C',
D = b'D',
E = b'E',
}
#[derive(Debug, Copy, Clone, NoUninit, CheckedBitPattern, PartialEq, Eq)]
#[repr(C)]
struct CheckedBitPatternStruct {
a: u8,
b: CheckedBitPatternEnumNonContiguous,
}
#[derive(Debug, Copy, Clone, AnyBitPattern, PartialEq, Eq)]
#[repr(C)]
struct AnyBitPatternTest<A: AnyBitPattern, B: AnyBitPattern> {
a: A,
b: B,
}
#[derive(Clone, Copy, CheckedBitPattern)]
#[repr(C, align(8))]
struct CheckedBitPatternAlignedStruct {
a: u16,
}
#[derive(Debug, Clone, Copy, CheckedBitPattern, PartialEq, Eq)]
#[repr(C)]
enum CheckedBitPatternCDefaultDiscriminantEnumWithFields {
A(u64),
B { c: u64 },
}
#[derive(Debug, Clone, Copy, CheckedBitPattern, PartialEq, Eq)]
#[repr(C, u8)]
enum CheckedBitPatternCEnumWithFields {
A(u32),
B { c: u32 },
}
#[derive(Debug, Clone, Copy, CheckedBitPattern, PartialEq, Eq)]
#[repr(u8)]
enum CheckedBitPatternIntEnumWithFields {
A(u8),
B { c: u32 },
}
#[derive(Debug, Clone, Copy, CheckedBitPattern, PartialEq, Eq)]
#[repr(transparent)]
enum CheckedBitPatternTransparentEnumWithFields {
A { b: u32 },
}
// size 24, align 8.
// first byte always the u8 discriminant, then 7 bytes of padding until the payload union since the align of the payload
// is the greatest of the align of all the variants, which is 8 (from CheckedBitPatternCDefaultDiscriminantEnumWithFields)
#[derive(Debug, Clone, Copy, CheckedBitPattern, PartialEq, Eq)]
#[repr(C, u8)]
enum CheckedBitPatternEnumNested {
A(CheckedBitPatternCEnumWithFields),
B(CheckedBitPatternCDefaultDiscriminantEnumWithFields),
}
/// ```compile_fail
/// use bytemuck::{Pod, Zeroable};
///
/// #[derive(Pod, Zeroable)]
/// #[repr(transparent)]
/// struct TransparentSingle<T>(T);
///
/// struct NotPod(u32);
///
/// let _: u32 = bytemuck::cast(TransparentSingle(NotPod(0u32)));
/// ```
#[derive(
Debug, Copy, Clone, PartialEq, Eq, Pod, Zeroable, TransparentWrapper,
)]
#[repr(transparent)]
struct NewtypeWrapperTest<T>(T);
#[test]
fn fails_cast_contiguous() {
let can_cast = CheckedBitPatternEnumWithValues::is_valid_bit_pattern(&5);
assert!(!can_cast);
}
#[test]
fn passes_cast_contiguous() {
let res =
bytemuck::checked::from_bytes::<CheckedBitPatternEnumWithValues>(&[2u8]);
assert_eq!(*res, CheckedBitPatternEnumWithValues::C);
}
#[test]
fn fails_cast_noncontiguous() {
let can_cast = CheckedBitPatternEnumNonContiguous::is_valid_bit_pattern(&4);
assert!(!can_cast);
}
#[test]
fn passes_cast_noncontiguous() {
let res =
bytemuck::checked::from_bytes::<CheckedBitPatternEnumNonContiguous>(&[
56u8,
]);
assert_eq!(*res, CheckedBitPatternEnumNonContiguous::E);
}
#[test]
fn fails_cast_bytelit() {
let can_cast = CheckedBitPatternEnumByteLit::is_valid_bit_pattern(&b'a');
assert!(!can_cast);
}
#[test]
fn passes_cast_bytelit() {
let res =
bytemuck::checked::cast_slice::<u8, CheckedBitPatternEnumByteLit>(b"CAB");
assert_eq!(
res,
[
CheckedBitPatternEnumByteLit::C,
CheckedBitPatternEnumByteLit::A,
CheckedBitPatternEnumByteLit::B
]
);
}
#[test]
fn fails_cast_struct() {
let pod = [0u8, 24u8];
let res = bytemuck::checked::try_from_bytes::<CheckedBitPatternStruct>(&pod);
assert!(res.is_err());
}
#[test]
fn passes_cast_struct() {
let pod = [0u8, 8u8];
let res = bytemuck::checked::from_bytes::<CheckedBitPatternStruct>(&pod);
assert_eq!(
*res,
CheckedBitPatternStruct { a: 0, b: CheckedBitPatternEnumNonContiguous::B }
);
}
#[test]
fn anybitpattern_implies_zeroable() {
let test = AnyBitPatternTest::<isize, usize>::zeroed();
assert_eq!(test, AnyBitPatternTest { a: 0isize, b: 0usize });
}
#[test]
fn checkedbitpattern_try_pod_read_unaligned() {
let pod = [0u8];
let res = bytemuck::checked::try_pod_read_unaligned::<
CheckedBitPatternEnumWithValues,
>(&pod);
assert!(res.is_ok());
let pod = [5u8];
let res = bytemuck::checked::try_pod_read_unaligned::<
CheckedBitPatternEnumWithValues,
>(&pod);
assert!(res.is_err());
}
#[test]
fn checkedbitpattern_aligned_struct() {
let pod = [0u8; 8];
bytemuck::checked::pod_read_unaligned::<CheckedBitPatternAlignedStruct>(&pod);
}
#[test]
fn checkedbitpattern_c_default_discriminant_enum_with_fields() {
let pod = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0x55,
0x55, 0x55, 0x55, 0xcc,
];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternCDefaultDiscriminantEnumWithFields,
>(&pod);
assert_eq!(
value,
CheckedBitPatternCDefaultDiscriminantEnumWithFields::A(0xcc555555555555cc)
);
let pod = [
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0x55,
0x55, 0x55, 0x55, 0xcc,
];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternCDefaultDiscriminantEnumWithFields,
>(&pod);
assert_eq!(
value,
CheckedBitPatternCDefaultDiscriminantEnumWithFields::B {
c: 0xcc555555555555cc
}
);
}
#[test]
fn checkedbitpattern_c_enum_with_fields() {
let pod = [0x00, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0xcc];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternCEnumWithFields,
>(&pod);
assert_eq!(value, CheckedBitPatternCEnumWithFields::A(0xcc5555cc));
let pod = [0x01, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0xcc];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternCEnumWithFields,
>(&pod);
assert_eq!(value, CheckedBitPatternCEnumWithFields::B { c: 0xcc5555cc });
}
#[test]
fn checkedbitpattern_int_enum_with_fields() {
let pod = [0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternIntEnumWithFields,
>(&pod);
assert_eq!(value, CheckedBitPatternIntEnumWithFields::A(0x55));
let pod = [0x01, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0xcc];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternIntEnumWithFields,
>(&pod);
assert_eq!(value, CheckedBitPatternIntEnumWithFields::B { c: 0xcc5555cc });
}
#[test]
fn checkedbitpattern_nested_enum_with_fields() {
// total size 24 bytes. first byte always the u8 discriminant.
#[repr(C, align(8))]
struct Align8Bytes([u8; 24]);
// first we'll check variantA, nested variant A
let pod = Align8Bytes([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // byte 0 discriminant = 0 = variant A, bytes 1-7 irrelevant padding.
0x00, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0xcc, // bytes 8-15 are the nested CheckedBitPatternCEnumWithFields,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // bytes 16-23 padding
]);
let value = bytemuck::checked::from_bytes::<
CheckedBitPatternEnumNested,
>(&pod.0);
assert_eq!(value, &CheckedBitPatternEnumNested::A(CheckedBitPatternCEnumWithFields::A(0xcc5555cc)));
// next we'll check invalid first discriminant fails
let pod = Align8Bytes([
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // byte 0 discriminant = 2 = invalid, bytes 1-7 padding
0x00, 0x00, 0x00, 0x00, 0xcc, 0x55, 0x55, 0xcc, // bytes 8-15 are the nested CheckedBitPatternCEnumWithFields = A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // bytes 16-23 padding
]);
let result = bytemuck::checked::try_from_bytes::<
CheckedBitPatternEnumNested,
>(&pod.0);
assert_eq!(result, Err(CheckedCastError::InvalidBitPattern));
// next we'll check variant B, nested variant B
let pod = Align8Bytes([
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // byte 0 discriminant = 1 = variant B, bytes 1-7 padding
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // bytes 8-15 are the C int-sized discriminant of CheckedBitPatternCDefaultDiscriminantEnumWithFields; 1 (LE byte order) = variant B
    0xcc, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0xcc, // bytes 16-23 are the data contained in nested variant B
]);
let value = bytemuck::checked::from_bytes::<
CheckedBitPatternEnumNested,
>(&pod.0);
assert_eq!(
value,
&CheckedBitPatternEnumNested::B(CheckedBitPatternCDefaultDiscriminantEnumWithFields::B {
c: 0xcc555555555555cc
})
);
// finally we'll check variant B, nested invalid discriminant
let pod = Align8Bytes([
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 1 discriminant = variant B, bytes 1-7 padding
    0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // bytes 8-15 are the C int-sized discriminant of CheckedBitPatternCDefaultDiscriminantEnumWithFields; 0x08 is invalid
    0xcc, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0xcc, // bytes 16-23 are the data contained in nested variant B
]);
let result = bytemuck::checked::try_from_bytes::<
CheckedBitPatternEnumNested,
>(&pod.0);
assert_eq!(result, Err(CheckedCastError::InvalidBitPattern));
}
#[test]
fn checkedbitpattern_transparent_enum_with_fields() {
let pod = [0xcc, 0x55, 0x55, 0xcc];
let value = bytemuck::checked::pod_read_unaligned::<
CheckedBitPatternTransparentEnumWithFields,
>(&pod);
assert_eq!(
value,
CheckedBitPatternTransparentEnumWithFields::A { b: 0xcc5555cc }
);
}
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C, align(16))]
struct Issue127 {}
use bytemuck as reexport_name;
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable, bytemuck::ByteEq)]
#[bytemuck(crate = "reexport_name")]
#[repr(C)]
struct Issue93 {}


@ -1 +1 @@
{"files":{"CHANGELOG.md":"0d30b4166abd35d7a300478c089a4f0ade53cb81ce3333876b1461151810bd69","Cargo.toml":"0a419871f05f413d27701e336c3c8e9253b980243025be0adf47a6c101d69430","LICENSE.txt":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","README.md":"0970fc9c7d9933e1900a465d95b9300bd68435ef56436a665b998108bf1b0abd","build.rs":"da53087156a235fe65cab1ee34e12d97b6c3e540e2c8e3ae9b2aeac71efcf1ce","build/common.rs":"52fcb086e5cf1f47a528bdc4474206a2299b6d54e536f164e288344904cdd7ed","build/dynamic.rs":"2754e488d48920681351c32b89ead0ebd03a83705b55b36f32eed54e68fa8f73","build/macros.rs":"cdd7553864bf1e5565ea3eb71561718f1485d75f277020ed37af4c272d97b5c8","build/static.rs":"b8b2654504fe509f42091b658b323214030161beb7850bbdb45924959bccc0c2","clippy.toml":"fcf54943ba571514b244cc098ce08671b4117167733e8107e799d533a12a2195","src/lib.rs":"d70eee9a78304b3daaf090471a2ae93b8007ffa181aab654b5631f53eb14475e","src/link.rs":"4d257be78d4fd654133102ec26e855cce9d9376143cff0ba0aea7c2e5b4216f8","src/support.rs":"da7e4e08a5fce844f563943588fca5de3b72972fc61db96c5158b1246737e006","tests/build.rs":"db3df3417906a77aafc8f8b4bf936dbd2cf89f2f6a7b728f14ff967897597f58","tests/header.h":"1b15a686d1c06561960045a26c25a34d840f26c8246f2f5e630f993b69c7492c","tests/lib.rs":"81a459e9f48e6d384b3fd61e7942e685f7ea39af6574bc2b382a0fbe7820ff65"},"package":"77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a"}
{"files":{"CHANGELOG.md":"7a177945b551a62c30b9183f8edaac38216fbeb3fb7f8760e5f2a69916140086","Cargo.toml":"9d896747d0d657eeb8913dc254439b06f77025399c654026db5d58bf655837f6","LICENSE.txt":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"d75bc7b6e957818c2c9c78107eade13c48bd1e9f6a5eca3b04644b21938b0157","build.rs":"f344874bf25f8ec6f852c55edaf262789fc6b883489afb47fa3d7da83f95ad1c","build/common.rs":"0b23893c737a9b60d8d400b5101bf3bcc632e0833d948590c42d189a1f69aeb0","build/dynamic.rs":"79014d1f9c7a197affb018d60218458e8878ccc80e61fc2e5d61b2f291ebbb4b","build/macros.rs":"eac7bffaac5f70728764065145eb1541b0a133c900356a0bcf55f0d89966c84e","build/static.rs":"b3000f872b139b3f3230f49e98a183d05ac18be661192bb5cada896eca853aca","clippy.toml":"acef14b9acffa18d1069ae08a4e8fe824a614f91b0bc71a6b1c68e4d885397e6","src/lib.rs":"3c0fc0c0e3cabc3b81732b5ac784b36f69dfb72abcecc3bf4e5a08c72d9c8ad0","src/link.rs":"13b236714d68483fbcec9df2ee1ae63db21e87176881c2d53a9157a2b38240eb","src/support.rs":"5398f8c35cceae64ca941b9a26ed28f29d34a1301958399d636e599dcfd0d64f","tests/build.rs":"66ed85f6e1baf9fac5c50e486fa43413e40c3f16ce9f503c9bba53e300682a9a","tests/header.h":"b1cf564b21d76db78529d1934e1481a5f0452fdedc6e32954608293c310498b6","tests/lib.rs":"a39e48b2ab3347692f461609e296456850cff870514fa3df8232341318015568"},"package":"67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1"}

1044
third_party/rust/clang-sys/CHANGELOG.md vendored

The diff between the files is not shown because of its large size.

11
third_party/rust/clang-sys/Cargo.toml vendored

@ -11,7 +11,7 @@
[package]
name = "clang-sys"
version = "1.6.0"
version = "1.7.0"
authors = ["Kyle Mayes <kyle@mayeses.com>"]
build = "build.rs"
links = "clang"
@ -23,7 +23,7 @@ repository = "https://github.com/KyleMayes/clang-sys"
[package.metadata.docs.rs]
features = [
"clang_16_0",
"clang_17_0",
"runtime",
]
@ -35,7 +35,7 @@ version = "0.2.39"
default-features = false
[dependencies.libloading]
version = "0.7"
version = "0.8"
optional = true
[dev-dependencies.glob]
@ -44,8 +44,8 @@ version = "0.3"
[dev-dependencies.serial_test]
version = "1"
[dev-dependencies.tempdir]
version = "0.3"
[dev-dependencies.tempfile]
version = "3"
[build-dependencies.glob]
version = "0.3"
@ -58,6 +58,7 @@ clang_13_0 = ["clang_12_0"]
clang_14_0 = ["clang_13_0"]
clang_15_0 = ["clang_14_0"]
clang_16_0 = ["clang_15_0"]
clang_17_0 = ["clang_16_0"]
clang_3_5 = []
clang_3_6 = ["clang_3_5"]
clang_3_7 = ["clang_3_6"]

404
third_party/rust/clang-sys/LICENSE.txt vendored

@ -1,202 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

178
third_party/rust/clang-sys/README.md vendored

@ -1,89 +1,89 @@
# clang-sys
[![Crate](https://img.shields.io/crates/v/clang-sys.svg)](https://crates.io/crates/clang-sys)
[![Documentation](https://docs.rs/clang-sys/badge.svg)](https://docs.rs/clang-sys)
[![CI](https://img.shields.io/github/actions/workflow/status/KyleMayes/clang-sys/ci.yml?branch=master)](https://github.com/KyleMayes/clang-sys/actions?query=workflow%3ACI)
![MSRV](https://img.shields.io/badge/MSRV-1.51.0-blue)
Rust bindings for `libclang`.
If you are interested in a somewhat idiomatic Rust wrapper for these bindings, see [`clang-rs`](https://github.com/KyleMayes/clang-rs).
Released under the Apache License 2.0.
## [Documentation](https://docs.rs/clang-sys)
Note that the documentation on https://docs.rs for this crate assumes usage of the `runtime` Cargo feature as well as the Cargo feature for the latest supported version of `libclang` (e.g., `clang_16_0`), neither of which are enabled by default.
Due to the usage of the `runtime` Cargo feature, this documentation will contain some additional types and functions to manage a dynamically loaded `libclang` instance at runtime.
Due to the usage of the Cargo feature for the latest supported version of `libclang`, this documentation will contain constants and functions that are not available in the oldest supported version of `libclang` (3.5). All of these types and functions have a documentation comment which specifies the minimum `libclang` version required to use the item.
## Supported Versions
To target a version of `libclang`, enable a Cargo feature such as one of the following:
* `clang_3_5` - requires `libclang` 3.5 or later
* `clang_3_6` - requires `libclang` 3.6 or later
* etc...
* `clang_15_0` - requires `libclang` 15.0 or later
* `clang_16_0` - requires `libclang` 16.0 or later
If you do not enable one of these features, the API provided by `libclang` 3.5 will be available by default.
**Note:** If you are using Clang 15.0 or later, you should enable the `clang_15_0` feature or a more recent version feature. Clang 15.0 introduced [a breaking change to the `EntityKind` enum](https://github.com/llvm/llvm-project/commit/bb83f8e70bd1d56152f02307adacd718cd67e312#diff-674613a0e47f4e66cc19061e28e3296d39be2d124dceefb68237b30b8e241e7c) which resulted in a mismatch between the values returned by `libclang` and the values for `EntityKind` defined by this crate in previous versions.
## Dependencies
By default, this crate will attempt to link to `libclang` dynamically. In this case, this crate depends on the `libclang` shared library (`libclang.so` on Linux, `libclang.dylib` on macOS, `libclang.dll` on Windows). If you want to link to `libclang` statically instead, enable the `static` Cargo feature. In this case, this crate depends on the LLVM and Clang static libraries. If you don't want to link to `libclang` at compiletime but instead want to load it at runtime, enable the `runtime` Cargo feature.
These libraries can either be installed as a part of Clang or downloaded [here](http://llvm.org/releases/download.html).
**Note:** The downloads for LLVM and Clang 3.8 and later do not include the `libclang.a` static library. This means you cannot link to any of these versions of `libclang` statically unless you build it from source.
### Versioned Dependencies
This crate supports finding versioned instances of `libclang.so` (e.g.,`libclang-3.9.so`). In the case where there are multiple instances to choose from, this crate will prefer instances with higher versions. For example, the following instances of `libclang.so` are listed in descending order of preference:
1. `libclang-4.0.so`
2. `libclang-4.so`
3. `libclang-3.9.so`
4. `libclang-3.so`
5. `libclang.so`
**Note:** On BSD distributions, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.7.0`) are also included.
**Note:** On Linux distributions when the `runtime` feature is enabled, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.1`) are also included.
## Environment Variables
The following environment variables, if set, are used by this crate to find the required libraries and executables:
* `LLVM_CONFIG_PATH` **(compiletime)** - provides a full path to an `llvm-config` executable (including the executable itself [i.e., `/usr/local/bin/llvm-config-8.0`])
* `LIBCLANG_PATH` **(compiletime)** - provides a path to a directory containing a `libclang` shared library or a full path to a specific `libclang` shared library
* `LIBCLANG_STATIC_PATH` **(compiletime)** - provides a path to a directory containing LLVM and Clang static libraries
* `CLANG_PATH` **(runtime)** - provides a path to a `clang` executable
## Linking
### Dynamic
`libclang` shared libraries will be searched for in the following directories:
* the directory provided by the `LIBCLANG_PATH` environment variable
* the `bin` and `lib` directories in the directory provided by `llvm-config --libdir`
* the directories provided by `LD_LIBRARY_PATH` environment variable
* a list of likely directories for the target platform (e.g., `/usr/local/lib` on Linux)
* **macOS only:** the toolchain directory in the directory provided by `xcode-select --print-path`
On Linux, running an executable that has been dynamically linked to `libclang` may require you to add a path to `libclang.so` to the `LD_LIBRARY_PATH` environment variable. The same is true on OS X, except the `DYLD_LIBRARY_PATH` environment variable is used instead.
On Windows, running an executable that has been dynamically linked to `libclang` requires that `libclang.dll` can be found by the executable at runtime. See [here](https://msdn.microsoft.com/en-us/library/7d83bc18.aspx) for more information.
### Static
The availability of `llvm-config` is not optional for static linking. Ensure that an instance of this executable can be found on your system's path or set the `LLVM_CONFIG_PATH` environment variable. The required LLVM and Clang static libraries will be searched for in the same way as shared libraries are searched for, except the `LIBCLANG_STATIC_PATH` environment variable is used in place of the `LIBCLANG_PATH` environment variable.
### Runtime
The `clang_sys::load` function is used to load a `libclang` shared library for use in the thread in which it is called. The `clang_sys::unload` function will unload the `libclang` shared library. `clang_sys::load` searches for a `libclang` shared library in the same way one is searched for when linking to `libclang` dynamically at compiletime.
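For illustration, a minimal sketch of this workflow (assuming the `runtime` Cargo feature is enabled; error handling is simplified to `expect`, and the exact error types are glossed over):
```rust
fn main() {
    // Search for and load a libclang shared library for this thread,
    // following the same search order used for dynamic linking.
    clang_sys::load().expect("failed to load libclang");

    // ... use the libclang API exposed by clang-sys here ...

    // Unload the shared library once it is no longer needed.
    clang_sys::unload().expect("failed to unload libclang");
}
```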
# clang-sys
[![Crate](https://img.shields.io/crates/v/clang-sys.svg)](https://crates.io/crates/clang-sys)
[![Documentation](https://docs.rs/clang-sys/badge.svg)](https://docs.rs/clang-sys)
[![CI](https://img.shields.io/github/actions/workflow/status/KyleMayes/clang-sys/ci.yml?branch=master)](https://github.com/KyleMayes/clang-sys/actions?query=workflow%3ACI)
![MSRV](https://img.shields.io/badge/MSRV-1.51.0-blue)
Rust bindings for `libclang`.
If you are interested in a somewhat idiomatic Rust wrapper for these bindings, see [`clang-rs`](https://github.com/KyleMayes/clang-rs).
Released under the Apache License 2.0.
## [Documentation](https://docs.rs/clang-sys)
Note that the documentation on https://docs.rs for this crate assumes usage of the `runtime` Cargo feature as well as the Cargo feature for the latest supported version of `libclang` (e.g., `clang_16_0`), neither of which is enabled by default.
Due to the usage of the `runtime` Cargo feature, this documentation will contain some additional types and functions to manage a dynamically loaded `libclang` instance at runtime.
Due to the usage of the Cargo feature for the latest supported version of `libclang`, this documentation will contain constants and functions that are not available in the oldest supported version of `libclang` (3.5). All of these types and functions have a documentation comment which specifies the minimum `libclang` version required to use the item.
## Supported Versions
To target a version of `libclang`, enable a Cargo feature such as one of the following:
* `clang_3_5` - requires `libclang` 3.5 or later
* `clang_3_6` - requires `libclang` 3.6 or later
* etc...
* `clang_15_0` - requires `libclang` 15.0 or later
* `clang_16_0` - requires `libclang` 16.0 or later
If you do not enable one of these features, the API provided by `libclang` 3.5 will be available by default.
**Note:** If you are using Clang 15.0 or later, you should enable the `clang_15_0` feature or a more recent version feature. Clang 15.0 introduced [a breaking change to the `EntityKind` enum](https://github.com/llvm/llvm-project/commit/bb83f8e70bd1d56152f02307adacd718cd67e312#diff-674613a0e47f4e66cc19061e28e3296d39be2d124dceefb68237b30b8e241e7c) which resulted in a mismatch between the values returned by `libclang` and the values for `EntityKind` defined by this crate in previous versions.
## Dependencies
By default, this crate will attempt to link to `libclang` dynamically. In this case, this crate depends on the `libclang` shared library (`libclang.so` on Linux, `libclang.dylib` on macOS, `libclang.dll` on Windows). If you want to link to `libclang` statically instead, enable the `static` Cargo feature. In this case, this crate depends on the LLVM and Clang static libraries. If you don't want to link to `libclang` at compiletime but instead want to load it at runtime, enable the `runtime` Cargo feature.
These libraries can either be installed as part of Clang or downloaded [here](http://llvm.org/releases/download.html).
**Note:** The downloads for LLVM and Clang 3.8 and later do not include the `libclang.a` static library. This means you cannot link to any of these versions of `libclang` statically unless you build it from source.
### Versioned Dependencies
This crate supports finding versioned instances of `libclang.so` (e.g., `libclang-3.9.so`). In the case where there are multiple instances to choose from, this crate will prefer instances with higher versions. For example, the following instances of `libclang.so` are listed in descending order of preference (a small ordering sketch follows the notes below):
1. `libclang-4.0.so`
2. `libclang-4.so`
3. `libclang-3.9.so`
4. `libclang-3.so`
5. `libclang.so`
**Note:** On BSD distributions, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.7.0`) are also included.
**Note:** On Linux distributions, when the `runtime` feature is enabled, versioned instances of `libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.1`) are also included.
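As a rough illustration of this ordering, here is a simplified sketch in the spirit of the `parse_version` helper that appears later in `build/dynamic.rs`; it is not the crate's exact code, only a demonstration of why the filenames above sort as listed:

```rust
// Minimal sketch (not the crate's exact code): extract numeric version
// components from a libclang filename and sort candidates so that higher
// versions are preferred, mirroring the preference order listed above.
fn version_key(filename: &str) -> Vec<u32> {
    filename
        .trim_start_matches("libclang")
        .trim_start_matches('-')
        .trim_end_matches(".so")
        .split('.')
        .filter_map(|part| part.parse().ok())
        .collect()
}

fn main() {
    let mut candidates = vec![
        "libclang.so",
        "libclang-3.so",
        "libclang-4.so",
        "libclang-3.9.so",
        "libclang-4.0.so",
    ];
    // Sort by descending version; the unversioned `libclang.so` sorts last.
    candidates.sort_by(|a, b| version_key(b).cmp(&version_key(a)));
    println!("{:?}", candidates);
}
```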
## Environment Variables
The following environment variables, if set, are used by this crate to find the required libraries and executables (a minimal sketch of reading them follows the list):
* `LLVM_CONFIG_PATH` **(compiletime)** - provides a full path to an `llvm-config` executable (including the executable itself [e.g., `/usr/local/bin/llvm-config-8.0`])
* `LIBCLANG_PATH` **(compiletime)** - provides a path to a directory containing a `libclang` shared library or a full path to a specific `libclang` shared library
* `LIBCLANG_STATIC_PATH` **(compiletime)** - provides a path to a directory containing LLVM and Clang static libraries
* `CLANG_PATH` **(runtime)** - provides a path to a `clang` executable
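A minimal sketch of how a build script might read these variables; the fallback value below is an illustrative assumption, not the crate's default:

```rust
// Hypothetical sketch of consuming these variables; the names mirror the
// list above, the fallback is illustrative only.
use std::env;
use std::path::PathBuf;

fn main() {
    // Full path to an `llvm-config` executable, falling back to whatever is
    // on PATH when the variable is unset.
    let llvm_config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into());

    // Directory containing (or full path to) a `libclang` shared library.
    let libclang_hint = env::var_os("LIBCLANG_PATH").map(PathBuf::from);

    println!("llvm-config: {}", llvm_config);
    println!("libclang search hint: {:?}", libclang_hint);
}
```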
## Linking
### Dynamic
`libclang` shared libraries will be searched for in the following directories:
* the directory provided by the `LIBCLANG_PATH` environment variable
* the `bin` and `lib` directories in the directory provided by `llvm-config --libdir`
* the directories provided by the `LD_LIBRARY_PATH` environment variable
* a list of likely directories for the target platform (e.g., `/usr/local/lib` on Linux)
* **macOS only:** the toolchain directory in the directory provided by `xcode-select --print-path`
On Linux, running an executable that has been dynamically linked to `libclang` may require you to add a path to `libclang.so` to the `LD_LIBRARY_PATH` environment variable. The same is true on macOS, except that the `DYLD_LIBRARY_PATH` environment variable is used instead.
On Windows, running an executable that has been dynamically linked to `libclang` requires that `libclang.dll` can be found by the executable at runtime. See [here](https://msdn.microsoft.com/en-us/library/7d83bc18.aspx) for more information.
### Static
`llvm-config` is required for static linking. Ensure that an instance of this executable can be found on your system's path, or set the `LLVM_CONFIG_PATH` environment variable. The required LLVM and Clang static libraries are searched for in the same way as shared libraries, except that the `LIBCLANG_STATIC_PATH` environment variable is used in place of `LIBCLANG_PATH`.
### Runtime
The `clang_sys::load` function is used to load a `libclang` shared library for use in the thread in which it is called. The `clang_sys::unload` function will unload the `libclang` shared library. `clang_sys::load` searches for a `libclang` shared library in the same way one is searched for when linking to `libclang` dynamically at compiletime.
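For example, with the `runtime` Cargo feature enabled, a consumer might use these functions roughly as follows; the index calls are only there to show that the loaded library is usable:

```rust
// Sketch only: requires clang-sys as a dependency with the `runtime` feature.
fn main() {
    // Locate and load a `libclang` shared library for this thread.
    clang_sys::load().expect("failed to load libclang");

    // The raw bindings are now callable; create and dispose of an index
    // purely to demonstrate that the library is live.
    unsafe {
        let index = clang_sys::clang_createIndex(0, 0);
        clang_sys::clang_disposeIndex(index);
    }

    // Unload the shared library when it is no longer needed.
    clang_sys::unload().expect("failed to unload libclang");
}
```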

158
third_party/rust/clang-sys/build.rs (vendored)

@@ -1,79 +1,79 @@
// SPDX-License-Identifier: Apache-2.0
//! Finds `libclang` static or shared libraries and links to them.
//!
//! # Environment Variables
//!
//! This build script can make use of several environment variables to help it
//! find the required static or shared libraries.
//!
//! * `LLVM_CONFIG_PATH` - provides a path to an `llvm-config` executable
//! * `LIBCLANG_PATH` - provides a path to a directory containing a `libclang`
//! shared library or a path to a specific `libclang` shared library
//! * `LIBCLANG_STATIC_PATH` - provides a path to a directory containing LLVM
//! and Clang static libraries
#![allow(unused_attributes)]
extern crate glob;
use std::path::Path;
#[macro_use]
#[path = "build/macros.rs"]
pub mod macros;
#[path = "build/common.rs"]
pub mod common;
#[path = "build/dynamic.rs"]
pub mod dynamic;
#[path = "build/static.rs"]
pub mod r#static;
/// Copies a file.
#[cfg(feature = "runtime")]
fn copy(source: &str, destination: &Path) {
use std::fs::File;
use std::io::{Read, Write};
let mut string = String::new();
File::open(source)
.unwrap()
.read_to_string(&mut string)
.unwrap();
File::create(destination)
.unwrap()
.write_all(string.as_bytes())
.unwrap();
}
/// Copies the code used to find and link to `libclang` shared libraries into
/// the build output directory so that it may be used when linking at runtime.
#[cfg(feature = "runtime")]
fn main() {
use std::env;
if cfg!(feature = "static") {
panic!("`runtime` and `static` features can't be combined");
}
let out = env::var("OUT_DIR").unwrap();
copy("build/macros.rs", &Path::new(&out).join("macros.rs"));
copy("build/common.rs", &Path::new(&out).join("common.rs"));
copy("build/dynamic.rs", &Path::new(&out).join("dynamic.rs"));
}
/// Finds and links to the required libraries dynamically or statically.
#[cfg(not(feature = "runtime"))]
fn main() {
if cfg!(feature = "static") {
r#static::link();
} else {
dynamic::link();
}
if let Some(output) = common::run_llvm_config(&["--includedir"]) {
let directory = Path::new(output.trim_end());
println!("cargo:include={}", directory.display());
}
}
// SPDX-License-Identifier: Apache-2.0
//! Finds `libclang` static or shared libraries and links to them.
//!
//! # Environment Variables
//!
//! This build script can make use of several environment variables to help it
//! find the required static or shared libraries.
//!
//! * `LLVM_CONFIG_PATH` - provides a path to an `llvm-config` executable
//! * `LIBCLANG_PATH` - provides a path to a directory containing a `libclang`
//! shared library or a path to a specific `libclang` shared library
//! * `LIBCLANG_STATIC_PATH` - provides a path to a directory containing LLVM
//! and Clang static libraries
#![allow(unused_attributes)]
extern crate glob;
use std::path::Path;
#[macro_use]
#[path = "build/macros.rs"]
pub mod macros;
#[path = "build/common.rs"]
pub mod common;
#[path = "build/dynamic.rs"]
pub mod dynamic;
#[path = "build/static.rs"]
pub mod r#static;
/// Copies a file.
#[cfg(feature = "runtime")]
fn copy(source: &str, destination: &Path) {
use std::fs::File;
use std::io::{Read, Write};
let mut string = String::new();
File::open(source)
.unwrap()
.read_to_string(&mut string)
.unwrap();
File::create(destination)
.unwrap()
.write_all(string.as_bytes())
.unwrap();
}
/// Copies the code used to find and link to `libclang` shared libraries into
/// the build output directory so that it may be used when linking at runtime.
#[cfg(feature = "runtime")]
fn main() {
use std::env;
if cfg!(feature = "static") {
panic!("`runtime` and `static` features can't be combined");
}
let out = env::var("OUT_DIR").unwrap();
copy("build/macros.rs", &Path::new(&out).join("macros.rs"));
copy("build/common.rs", &Path::new(&out).join("common.rs"));
copy("build/dynamic.rs", &Path::new(&out).join("dynamic.rs"));
}
/// Finds and links to the required libraries dynamically or statically.
#[cfg(not(feature = "runtime"))]
fn main() {
if cfg!(feature = "static") {
r#static::link();
} else {
dynamic::link();
}
if let Some(output) = common::run_llvm_config(&["--includedir"]) {
let directory = Path::new(output.trim_end());
println!("cargo:include={}", directory.display());
}
}
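For context, the cargo directives that the `link()` helpers invoked above ultimately emit follow a standard shape; here is a hypothetical, heavily simplified build-script sketch (the directory fallback and library name are placeholders, not the crate's real defaults):

```rust
// Hypothetical, stripped-down build.rs: this is only the shape of the output
// that `dynamic::link()` / `r#static::link()` produce, not their logic.
use std::env;

fn main() {
    // Let an environment variable override the search directory, as the real
    // build script does with LIBCLANG_PATH; the fallback path is a placeholder.
    let dir = env::var("LIBCLANG_PATH").unwrap_or_else(|_| "/usr/lib/llvm/lib".into());
    println!("cargo:rustc-link-search={}", dir);
    println!("cargo:rustc-link-lib=dylib=clang");
}
```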

710
third_party/rust/clang-sys/build/common.rs (vendored)

@@ -1,355 +1,355 @@
// SPDX-License-Identifier: Apache-2.0
extern crate glob;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use std::process::Command;
use glob::{MatchOptions, Pattern};
//================================================
// Commands
//================================================
thread_local! {
/// The errors encountered by the build script while executing commands.
static COMMAND_ERRORS: RefCell<HashMap<String, Vec<String>>> = RefCell::default();
}
/// Adds an error encountered by the build script while executing a command.
fn add_command_error(name: &str, path: &str, arguments: &[&str], message: String) {
COMMAND_ERRORS.with(|e| {
e.borrow_mut()
.entry(name.into())
.or_insert_with(Vec::new)
.push(format!(
"couldn't execute `{} {}` (path={}) ({})",
name,
arguments.join(" "),
path,
message,
))
});
}
/// A struct that prints the errors encountered by the build script while
/// executing commands when dropped (unless explicitly discarded).
///
/// This is handy because we only want to print these errors when the build
/// script fails to link to an instance of `libclang`. For example, if
/// `llvm-config` couldn't be executed but an instance of `libclang` was found
/// anyway we don't want to pollute the build output with irrelevant errors.
#[derive(Default)]
pub struct CommandErrorPrinter {
discard: bool,
}
impl CommandErrorPrinter {
pub fn discard(mut self) {
self.discard = true;
}
}
impl Drop for CommandErrorPrinter {
fn drop(&mut self) {
if self.discard {
return;
}
let errors = COMMAND_ERRORS.with(|e| e.borrow().clone());
if let Some(errors) = errors.get("llvm-config") {
println!(
"cargo:warning=could not execute `llvm-config` one or more \
times, if the LLVM_CONFIG_PATH environment variable is set to \
a full path to a valid `llvm-config` executable it will be used \
to try to find an instance of `libclang` on your system: {}",
errors
.iter()
.map(|e| format!("\"{}\"", e))
.collect::<Vec<_>>()
.join("\n "),
)
}
if let Some(errors) = errors.get("xcode-select") {
println!(
"cargo:warning=could not execute `xcode-select` one or more \
times, if a valid instance of this executable is on your PATH \
it will be used to try to find an instance of `libclang` on \
your system: {}",
errors
.iter()
.map(|e| format!("\"{}\"", e))
.collect::<Vec<_>>()
.join("\n "),
)
}
}
}
#[cfg(test)]
pub static RUN_COMMAND_MOCK: std::sync::Mutex<
Option<Box<dyn Fn(&str, &str, &[&str]) -> Option<String> + Send + Sync + 'static>>,
> = std::sync::Mutex::new(None);
/// Executes a command and returns the `stdout` output if the command was
/// successfully executed (errors are added to `COMMAND_ERRORS`).
fn run_command(name: &str, path: &str, arguments: &[&str]) -> Option<String> {
#[cfg(test)]
if let Some(command) = &*RUN_COMMAND_MOCK.lock().unwrap() {
return command(name, path, arguments);
}
let output = match Command::new(path).args(arguments).output() {
Ok(output) => output,
Err(error) => {
let message = format!("error: {}", error);
add_command_error(name, path, arguments, message);
return None;
}
};
if output.status.success() {
Some(String::from_utf8_lossy(&output.stdout).into_owned())
} else {
let message = format!("exit code: {}", output.status);
add_command_error(name, path, arguments, message);
None
}
}
/// Executes the `llvm-config` command and returns the `stdout` output if the
/// command was successfully executed (errors are added to `COMMAND_ERRORS`).
pub fn run_llvm_config(arguments: &[&str]) -> Option<String> {
let path = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into());
run_command("llvm-config", &path, arguments)
}
/// Executes the `xcode-select` command and returns the `stdout` output if the
/// command was successfully executed (errors are added to `COMMAND_ERRORS`).
pub fn run_xcode_select(arguments: &[&str]) -> Option<String> {
run_command("xcode-select", "xcode-select", arguments)
}
//================================================
// Search Directories
//================================================
// These search directories are listed in order of
// preference, so if multiple `libclang` instances
// are found when searching matching directories,
// the `libclang` instances from earlier
// directories will be preferred (though version
// takes precedence over location).
//================================================
/// `libclang` directory patterns for Haiku.
const DIRECTORIES_HAIKU: &[&str] = &[
"/boot/home/config/non-packaged/develop/lib",
"/boot/home/config/non-packaged/lib",
"/boot/system/non-packaged/develop/lib",
"/boot/system/non-packaged/lib",
"/boot/system/develop/lib",
"/boot/system/lib",
];
/// `libclang` directory patterns for Linux (and FreeBSD).
const DIRECTORIES_LINUX: &[&str] = &[
"/usr/local/llvm*/lib*",
"/usr/local/lib*/*/*",
"/usr/local/lib*/*",
"/usr/local/lib*",
"/usr/lib*/*/*",
"/usr/lib*/*",
"/usr/lib*",
];
/// `libclang` directory patterns for macOS.
const DIRECTORIES_MACOS: &[&str] = &[
"/usr/local/opt/llvm*/lib/llvm*/lib",
"/Library/Developer/CommandLineTools/usr/lib",
"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib",
"/usr/local/opt/llvm*/lib",
];
/// `libclang` directory patterns for Windows.
///
/// The boolean indicates whether the directory pattern should be used when
/// compiling for an MSVC target environment.
const DIRECTORIES_WINDOWS: &[(&str, bool)] = &[
// LLVM + Clang can be installed using Scoop (https://scoop.sh).
// Other Windows package managers install LLVM + Clang to other listed
// system-wide directories.
("C:\\Users\\*\\scoop\\apps\\llvm\\current\\lib", true),
("C:\\MSYS*\\MinGW*\\lib", false),
("C:\\Program Files*\\LLVM\\lib", true),
("C:\\LLVM\\lib", true),
// LLVM + Clang can be installed as a component of Visual Studio.
// https://github.com/KyleMayes/clang-sys/issues/121
("C:\\Program Files*\\Microsoft Visual Studio\\*\\BuildTools\\VC\\Tools\\Llvm\\**\\lib", true),
];
/// `libclang` directory patterns for illumos
const DIRECTORIES_ILLUMOS: &[&str] = &[
"/opt/ooce/llvm-*/lib",
"/opt/ooce/clang-*/lib",
];
//================================================
// Searching
//================================================
/// Finds the files in a directory that match one or more filename glob patterns
/// and returns the paths to and filenames of those files.
fn search_directory(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> {
// Escape the specified directory in case it contains characters that have
// special meaning in glob patterns (e.g., `[` or `]`).
let directory = Pattern::escape(directory.to_str().unwrap());
let directory = Path::new(&directory);
// Join the escaped directory to the filename glob patterns to obtain
// complete glob patterns for the files being searched for.
let paths = filenames
.iter()
.map(|f| directory.join(f).to_str().unwrap().to_owned());
// Prevent wildcards from matching path separators to ensure that the search
// is limited to the specified directory.
let mut options = MatchOptions::new();
options.require_literal_separator = true;
paths
.map(|p| glob::glob_with(&p, options))
.filter_map(Result::ok)
.flatten()
.filter_map(|p| {
let path = p.ok()?;
let filename = path.file_name()?.to_str().unwrap();
// The `libclang_shared` library has been renamed to `libclang-cpp`
// in Clang 10. This can cause instances of this library (e.g.,
// `libclang-cpp.so.10`) to be matched by patterns looking for
// instances of `libclang`.
if filename.contains("-cpp.") {
return None;
}
Some((directory.to_owned(), filename.into()))
})
.collect::<Vec<_>>()
}
/// Finds the files in a directory (and any relevant sibling directories) that
/// match one or more filename glob patterns and returns the paths to and
/// filenames of those files.
fn search_directories(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> {
let mut results = search_directory(directory, filenames);
// On Windows, `libclang.dll` is usually found in the LLVM `bin` directory
// while `libclang.lib` is usually found in the LLVM `lib` directory. To
// keep things consistent with other platforms, only LLVM `lib` directories
// are included in the backup search directory globs so we need to search
// the LLVM `bin` directory here.
if target_os!("windows") && directory.ends_with("lib") {
let sibling = directory.parent().unwrap().join("bin");
results.extend(search_directory(&sibling, filenames).into_iter());
}
results
}
/// Finds the `libclang` static or dynamic libraries matching one or more
/// filename glob patterns and returns the paths to and filenames of those files.
pub fn search_libclang_directories(filenames: &[String], variable: &str) -> Vec<(PathBuf, String)> {
// Search only the path indicated by the relevant environment variable
// (e.g., `LIBCLANG_PATH`) if it is set.
if let Ok(path) = env::var(variable).map(|d| Path::new(&d).to_path_buf()) {
// Check if the path is a matching file.
if let Some(parent) = path.parent() {
let filename = path.file_name().unwrap().to_str().unwrap();
let libraries = search_directories(parent, filenames);
if libraries.iter().any(|(_, f)| f == filename) {
return vec![(parent.into(), filename.into())];
}
}
// Check if the path is a directory containing a matching file.
return search_directories(&path, filenames);
}
let mut found = vec![];
// Search the `bin` and `lib` directories in the directory returned by
// `llvm-config --prefix`.
if let Some(output) = run_llvm_config(&["--prefix"]) {
let directory = Path::new(output.lines().next().unwrap()).to_path_buf();
found.extend(search_directories(&directory.join("bin"), filenames));
found.extend(search_directories(&directory.join("lib"), filenames));
found.extend(search_directories(&directory.join("lib64"), filenames));
}
// Search the toolchain directory in the directory returned by
// `xcode-select --print-path`.
if target_os!("macos") {
if let Some(output) = run_xcode_select(&["--print-path"]) {
let directory = Path::new(output.lines().next().unwrap()).to_path_buf();
let directory = directory.join("Toolchains/XcodeDefault.xctoolchain/usr/lib");
found.extend(search_directories(&directory, filenames));
}
}
// Search the directories in the `LD_LIBRARY_PATH` environment variable.
if let Ok(path) = env::var("LD_LIBRARY_PATH") {
for directory in env::split_paths(&path) {
found.extend(search_directories(&directory, filenames));
}
}
// Determine the `libclang` directory patterns.
let directories: Vec<&str> = if target_os!("haiku") {
DIRECTORIES_HAIKU.into()
} else if target_os!("linux") || target_os!("freebsd") {
DIRECTORIES_LINUX.into()
} else if target_os!("macos") {
DIRECTORIES_MACOS.into()
} else if target_os!("windows") {
let msvc = target_env!("msvc");
DIRECTORIES_WINDOWS
.iter()
.filter(|d| d.1 || !msvc)
.map(|d| d.0)
.collect()
} else if target_os!("illumos") {
DIRECTORIES_ILLUMOS.into()
} else {
vec![]
};
// We use temporary directories when testing the build script so we'll
// remove the prefixes that make the directories absolute.
let directories = if test!() {
directories
.iter()
.map(|d| d.strip_prefix('/').or_else(|| d.strip_prefix("C:\\")).unwrap_or(d))
.collect::<Vec<_>>()
} else {
directories.into()
};
// Search the directories provided by the `libclang` directory patterns.
let mut options = MatchOptions::new();
options.case_sensitive = false;
options.require_literal_separator = true;
for directory in directories.iter() {
if let Ok(directories) = glob::glob_with(directory, options) {
for directory in directories.filter_map(Result::ok).filter(|p| p.is_dir()) {
found.extend(search_directories(&directory, filenames));
}
}
}
found
}
// SPDX-License-Identifier: Apache-2.0
extern crate glob;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use std::process::Command;
use glob::{MatchOptions, Pattern};
//================================================
// Commands
//================================================
thread_local! {
/// The errors encountered by the build script while executing commands.
static COMMAND_ERRORS: RefCell<HashMap<String, Vec<String>>> = RefCell::default();
}
/// Adds an error encountered by the build script while executing a command.
fn add_command_error(name: &str, path: &str, arguments: &[&str], message: String) {
COMMAND_ERRORS.with(|e| {
e.borrow_mut()
.entry(name.into())
.or_insert_with(Vec::new)
.push(format!(
"couldn't execute `{} {}` (path={}) ({})",
name,
arguments.join(" "),
path,
message,
))
});
}
/// A struct that prints the errors encountered by the build script while
/// executing commands when dropped (unless explicitly discarded).
///
/// This is handy because we only want to print these errors when the build
/// script fails to link to an instance of `libclang`. For example, if
/// `llvm-config` couldn't be executed but an instance of `libclang` was found
/// anyway we don't want to pollute the build output with irrelevant errors.
#[derive(Default)]
pub struct CommandErrorPrinter {
discard: bool,
}
impl CommandErrorPrinter {
pub fn discard(mut self) {
self.discard = true;
}
}
impl Drop for CommandErrorPrinter {
fn drop(&mut self) {
if self.discard {
return;
}
let errors = COMMAND_ERRORS.with(|e| e.borrow().clone());
if let Some(errors) = errors.get("llvm-config") {
println!(
"cargo:warning=could not execute `llvm-config` one or more \
times, if the LLVM_CONFIG_PATH environment variable is set to \
a full path to a valid `llvm-config` executable it will be used \
to try to find an instance of `libclang` on your system: {}",
errors
.iter()
.map(|e| format!("\"{}\"", e))
.collect::<Vec<_>>()
.join("\n "),
)
}
if let Some(errors) = errors.get("xcode-select") {
println!(
"cargo:warning=could not execute `xcode-select` one or more \
times, if a valid instance of this executable is on your PATH \
it will be used to try to find an instance of `libclang` on \
your system: {}",
errors
.iter()
.map(|e| format!("\"{}\"", e))
.collect::<Vec<_>>()
.join("\n "),
)
}
}
}
#[cfg(test)]
pub static RUN_COMMAND_MOCK: std::sync::Mutex<
Option<Box<dyn Fn(&str, &str, &[&str]) -> Option<String> + Send + Sync + 'static>>,
> = std::sync::Mutex::new(None);
/// Executes a command and returns the `stdout` output if the command was
/// successfully executed (errors are added to `COMMAND_ERRORS`).
fn run_command(name: &str, path: &str, arguments: &[&str]) -> Option<String> {
#[cfg(test)]
if let Some(command) = &*RUN_COMMAND_MOCK.lock().unwrap() {
return command(name, path, arguments);
}
let output = match Command::new(path).args(arguments).output() {
Ok(output) => output,
Err(error) => {
let message = format!("error: {}", error);
add_command_error(name, path, arguments, message);
return None;
}
};
if output.status.success() {
Some(String::from_utf8_lossy(&output.stdout).into_owned())
} else {
let message = format!("exit code: {}", output.status);
add_command_error(name, path, arguments, message);
None
}
}
/// Executes the `llvm-config` command and returns the `stdout` output if the
/// command was successfully executed (errors are added to `COMMAND_ERRORS`).
pub fn run_llvm_config(arguments: &[&str]) -> Option<String> {
let path = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into());
run_command("llvm-config", &path, arguments)
}
/// Executes the `xcode-select` command and returns the `stdout` output if the
/// command was successfully executed (errors are added to `COMMAND_ERRORS`).
pub fn run_xcode_select(arguments: &[&str]) -> Option<String> {
run_command("xcode-select", "xcode-select", arguments)
}
//================================================
// Search Directories
//================================================
// These search directories are listed in order of
// preference, so if multiple `libclang` instances
// are found when searching matching directories,
// the `libclang` instances from earlier
// directories will be preferred (though version
// takes precedence over location).
//================================================
/// `libclang` directory patterns for Haiku.
const DIRECTORIES_HAIKU: &[&str] = &[
"/boot/home/config/non-packaged/develop/lib",
"/boot/home/config/non-packaged/lib",
"/boot/system/non-packaged/develop/lib",
"/boot/system/non-packaged/lib",
"/boot/system/develop/lib",
"/boot/system/lib",
];
/// `libclang` directory patterns for Linux (and FreeBSD).
const DIRECTORIES_LINUX: &[&str] = &[
"/usr/local/llvm*/lib*",
"/usr/local/lib*/*/*",
"/usr/local/lib*/*",
"/usr/local/lib*",
"/usr/lib*/*/*",
"/usr/lib*/*",
"/usr/lib*",
];
/// `libclang` directory patterns for macOS.
const DIRECTORIES_MACOS: &[&str] = &[
"/usr/local/opt/llvm*/lib/llvm*/lib",
"/Library/Developer/CommandLineTools/usr/lib",
"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib",
"/usr/local/opt/llvm*/lib",
];
/// `libclang` directory patterns for Windows.
///
/// The boolean indicates whether the directory pattern should be used when
/// compiling for an MSVC target environment.
const DIRECTORIES_WINDOWS: &[(&str, bool)] = &[
// LLVM + Clang can be installed using Scoop (https://scoop.sh).
// Other Windows package managers install LLVM + Clang to other listed
// system-wide directories.
("C:\\Users\\*\\scoop\\apps\\llvm\\current\\lib", true),
("C:\\MSYS*\\MinGW*\\lib", false),
("C:\\Program Files*\\LLVM\\lib", true),
("C:\\LLVM\\lib", true),
// LLVM + Clang can be installed as a component of Visual Studio.
// https://github.com/KyleMayes/clang-sys/issues/121
("C:\\Program Files*\\Microsoft Visual Studio\\*\\BuildTools\\VC\\Tools\\Llvm\\**\\lib", true),
];
/// `libclang` directory patterns for illumos
const DIRECTORIES_ILLUMOS: &[&str] = &[
"/opt/ooce/llvm-*/lib",
"/opt/ooce/clang-*/lib",
];
//================================================
// Searching
//================================================
/// Finds the files in a directory that match one or more filename glob patterns
/// and returns the paths to and filenames of those files.
fn search_directory(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> {
// Escape the specified directory in case it contains characters that have
// special meaning in glob patterns (e.g., `[` or `]`).
let directory = Pattern::escape(directory.to_str().unwrap());
let directory = Path::new(&directory);
// Join the escaped directory to the filename glob patterns to obtain
// complete glob patterns for the files being searched for.
let paths = filenames
.iter()
.map(|f| directory.join(f).to_str().unwrap().to_owned());
// Prevent wildcards from matching path separators to ensure that the search
// is limited to the specified directory.
let mut options = MatchOptions::new();
options.require_literal_separator = true;
paths
.map(|p| glob::glob_with(&p, options))
.filter_map(Result::ok)
.flatten()
.filter_map(|p| {
let path = p.ok()?;
let filename = path.file_name()?.to_str().unwrap();
// The `libclang_shared` library has been renamed to `libclang-cpp`
// in Clang 10. This can cause instances of this library (e.g.,
// `libclang-cpp.so.10`) to be matched by patterns looking for
// instances of `libclang`.
if filename.contains("-cpp.") {
return None;
}
Some((directory.to_owned(), filename.into()))
})
.collect::<Vec<_>>()
}
/// Finds the files in a directory (and any relevant sibling directories) that
/// match one or more filename glob patterns and returns the paths to and
/// filenames of those files.
fn search_directories(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> {
let mut results = search_directory(directory, filenames);
// On Windows, `libclang.dll` is usually found in the LLVM `bin` directory
// while `libclang.lib` is usually found in the LLVM `lib` directory. To
// keep things consistent with other platforms, only LLVM `lib` directories
// are included in the backup search directory globs so we need to search
// the LLVM `bin` directory here.
if target_os!("windows") && directory.ends_with("lib") {
let sibling = directory.parent().unwrap().join("bin");
results.extend(search_directory(&sibling, filenames).into_iter());
}
results
}
/// Finds the `libclang` static or dynamic libraries matching one or more
/// filename glob patterns and returns the paths to and filenames of those files.
pub fn search_libclang_directories(filenames: &[String], variable: &str) -> Vec<(PathBuf, String)> {
// Search only the path indicated by the relevant environment variable
// (e.g., `LIBCLANG_PATH`) if it is set.
if let Ok(path) = env::var(variable).map(|d| Path::new(&d).to_path_buf()) {
// Check if the path is a matching file.
if let Some(parent) = path.parent() {
let filename = path.file_name().unwrap().to_str().unwrap();
let libraries = search_directories(parent, filenames);
if libraries.iter().any(|(_, f)| f == filename) {
return vec![(parent.into(), filename.into())];
}
}
// Check if the path is a directory containing a matching file.
return search_directories(&path, filenames);
}
let mut found = vec![];
// Search the `bin` and `lib` directories in the directory returned by
// `llvm-config --prefix`.
if let Some(output) = run_llvm_config(&["--prefix"]) {
let directory = Path::new(output.lines().next().unwrap()).to_path_buf();
found.extend(search_directories(&directory.join("bin"), filenames));
found.extend(search_directories(&directory.join("lib"), filenames));
found.extend(search_directories(&directory.join("lib64"), filenames));
}
// Search the toolchain directory in the directory returned by
// `xcode-select --print-path`.
if target_os!("macos") {
if let Some(output) = run_xcode_select(&["--print-path"]) {
let directory = Path::new(output.lines().next().unwrap()).to_path_buf();
let directory = directory.join("Toolchains/XcodeDefault.xctoolchain/usr/lib");
found.extend(search_directories(&directory, filenames));
}
}
// Search the directories in the `LD_LIBRARY_PATH` environment variable.
if let Ok(path) = env::var("LD_LIBRARY_PATH") {
for directory in env::split_paths(&path) {
found.extend(search_directories(&directory, filenames));
}
}
// Determine the `libclang` directory patterns.
let directories: Vec<&str> = if target_os!("haiku") {
DIRECTORIES_HAIKU.into()
} else if target_os!("linux") || target_os!("freebsd") {
DIRECTORIES_LINUX.into()
} else if target_os!("macos") {
DIRECTORIES_MACOS.into()
} else if target_os!("windows") {
let msvc = target_env!("msvc");
DIRECTORIES_WINDOWS
.iter()
.filter(|d| d.1 || !msvc)
.map(|d| d.0)
.collect()
} else if target_os!("illumos") {
DIRECTORIES_ILLUMOS.into()
} else {
vec![]
};
// We use temporary directories when testing the build script so we'll
// remove the prefixes that make the directories absolute.
let directories = if test!() {
directories
.iter()
.map(|d| d.strip_prefix('/').or_else(|| d.strip_prefix("C:\\")).unwrap_or(d))
.collect::<Vec<_>>()
} else {
directories
};
// Search the directories provided by the `libclang` directory patterns.
let mut options = MatchOptions::new();
options.case_sensitive = false;
options.require_literal_separator = true;
for directory in directories.iter() {
if let Ok(directories) = glob::glob_with(directory, options) {
for directory in directories.filter_map(Result::ok).filter(|p| p.is_dir()) {
found.extend(search_directories(&directory, filenames));
}
}
}
found
}

514
third_party/rust/clang-sys/build/dynamic.rs (vendored)

@@ -1,257 +1,257 @@
// SPDX-License-Identifier: Apache-2.0
use std::env;
use std::fs::File;
use std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use super::common;
//================================================
// Validation
//================================================
/// Extracts the ELF class from the ELF header in a shared library.
fn parse_elf_header(path: &Path) -> io::Result<u8> {
let mut file = File::open(path)?;
let mut buffer = [0; 5];
file.read_exact(&mut buffer)?;
if buffer[..4] == [127, 69, 76, 70] {
Ok(buffer[4])
} else {
Err(Error::new(ErrorKind::InvalidData, "invalid ELF header"))
}
}
/// Extracts the magic number from the PE header in a shared library.
fn parse_pe_header(path: &Path) -> io::Result<u16> {
let mut file = File::open(path)?;
// Extract the header offset.
let mut buffer = [0; 4];
let start = SeekFrom::Start(0x3C);
file.seek(start)?;
file.read_exact(&mut buffer)?;
let offset = i32::from_le_bytes(buffer);
// Check the validity of the header.
file.seek(SeekFrom::Start(offset as u64))?;
file.read_exact(&mut buffer)?;
if buffer != [80, 69, 0, 0] {
return Err(Error::new(ErrorKind::InvalidData, "invalid PE header"));
}
// Extract the magic number.
let mut buffer = [0; 2];
file.seek(SeekFrom::Current(20))?;
file.read_exact(&mut buffer)?;
Ok(u16::from_le_bytes(buffer))
}
/// Checks that a `libclang` shared library matches the target platform.
fn validate_library(path: &Path) -> Result<(), String> {
if target_os!("linux") || target_os!("freebsd") {
let class = parse_elf_header(path).map_err(|e| e.to_string())?;
if target_pointer_width!("32") && class != 1 {
return Err("invalid ELF class (64-bit)".into());
}
if target_pointer_width!("64") && class != 2 {
return Err("invalid ELF class (32-bit)".into());
}
Ok(())
} else if target_os!("windows") {
let magic = parse_pe_header(path).map_err(|e| e.to_string())?;
if target_pointer_width!("32") && magic != 267 {
return Err("invalid DLL (64-bit)".into());
}
if target_pointer_width!("64") && magic != 523 {
return Err("invalid DLL (32-bit)".into());
}
Ok(())
} else {
Ok(())
}
}
//================================================
// Searching
//================================================
/// Extracts the version components in a `libclang` shared library filename.
fn parse_version(filename: &str) -> Vec<u32> {
let version = if let Some(version) = filename.strip_prefix("libclang.so.") {
version
} else if filename.starts_with("libclang-") {
&filename[9..filename.len() - 3]
} else {
return vec![];
};
version.split('.').map(|s| s.parse().unwrap_or(0)).collect()
}
/// Finds `libclang` shared libraries and returns the paths to, filenames of,
/// and versions of those shared libraries.
fn search_libclang_directories(runtime: bool) -> Result<Vec<(PathBuf, String, Vec<u32>)>, String> {
let mut files = vec![format!(
"{}clang{}",
env::consts::DLL_PREFIX,
env::consts::DLL_SUFFIX
)];
if target_os!("linux") {
// Some Linux distributions don't create a `libclang.so` symlink, so we
// need to look for versioned files (e.g., `libclang-3.9.so`).
files.push("libclang-*.so".into());
// Some Linux distributions don't create a `libclang.so` symlink and
// don't have versioned files as described above, so we need to look for
// suffix versioned files (e.g., `libclang.so.1`). However, `ld` cannot
// link to these files, so this will only be included when linking at
// runtime.
if runtime {
files.push("libclang.so.*".into());
files.push("libclang-*.so.*".into());
}
}
if target_os!("freebsd") || target_os!("haiku") || target_os!("netbsd") || target_os!("openbsd") {
// Some BSD distributions don't create a `libclang.so` symlink either,
// but use a different naming scheme for versioned files (e.g.,
// `libclang.so.7.0`).
files.push("libclang.so.*".into());
}
if target_os!("windows") {
// The official LLVM build uses `libclang.dll` on Windows instead of
// `clang.dll`. However, unofficial builds such as MinGW use `clang.dll`.
files.push("libclang.dll".into());
}
// Find and validate `libclang` shared libraries and collect the versions.
let mut valid = vec![];
let mut invalid = vec![];
for (directory, filename) in common::search_libclang_directories(&files, "LIBCLANG_PATH") {
let path = directory.join(&filename);
match validate_library(&path) {
Ok(()) => {
let version = parse_version(&filename);
valid.push((directory, filename, version))
}
Err(message) => invalid.push(format!("({}: {})", path.display(), message)),
}
}
if !valid.is_empty() {
return Ok(valid);
}
let message = format!(
"couldn't find any valid shared libraries matching: [{}], set the \
`LIBCLANG_PATH` environment variable to a path where one of these files \
can be found (invalid: [{}])",
files
.iter()
.map(|f| format!("'{}'", f))
.collect::<Vec<_>>()
.join(", "),
invalid.join(", "),
);
Err(message)
}
/// Finds the "best" `libclang` shared library and returns the directory and
/// filename of that library.
pub fn find(runtime: bool) -> Result<(PathBuf, String), String> {
search_libclang_directories(runtime)?
.iter()
// We want to find the `libclang` shared library with the highest
// version number, hence `max_by_key` below.
//
// However, in the case where there are multiple such `libclang` shared
// libraries, we want to use the order in which they appeared in the
// list returned by `search_libclang_directories` as a tiebreaker since
// that function returns `libclang` shared libraries in descending order
// of preference by how they were found.
//
// `max_by_key`, perhaps surprisingly, returns the *last* item with the
// maximum key rather than the first which results in the opposite of
// the tiebreaking behavior we want. This is easily fixed by reversing
// the list first.
.rev()
.max_by_key(|f| &f.2)
.cloned()
.map(|(path, filename, _)| (path, filename))
.ok_or_else(|| "unreachable".into())
}
//================================================
// Linking
//================================================
/// Finds and links to a `libclang` shared library.
#[cfg(not(feature = "runtime"))]
pub fn link() {
let cep = common::CommandErrorPrinter::default();
use std::fs;
let (directory, filename) = find(false).unwrap();
println!("cargo:rustc-link-search={}", directory.display());
if cfg!(all(target_os = "windows", target_env = "msvc")) {
// Find the `libclang` stub static library required for the MSVC
// toolchain.
let lib = if !directory.ends_with("bin") {
directory
} else {
directory.parent().unwrap().join("lib")
};
if lib.join("libclang.lib").exists() {
println!("cargo:rustc-link-search={}", lib.display());
} else if lib.join("libclang.dll.a").exists() {
// MSYS and MinGW use `libclang.dll.a` instead of `libclang.lib`.
// It is linkable with the MSVC linker, but Rust doesn't recognize
// the `.a` suffix, so we need to copy it with a different name.
//
// FIXME: Maybe we can just hardlink or symlink it?
let out = env::var("OUT_DIR").unwrap();
fs::copy(
lib.join("libclang.dll.a"),
Path::new(&out).join("libclang.lib"),
)
.unwrap();
println!("cargo:rustc-link-search=native={}", out);
} else {
panic!(
"using '{}', so 'libclang.lib' or 'libclang.dll.a' must be \
available in {}",
filename,
lib.display(),
);
}
println!("cargo:rustc-link-lib=dylib=libclang");
} else {
let name = filename.trim_start_matches("lib");
// Strip extensions and trailing version numbers (e.g., the `.so.7.0` in
// `libclang.so.7.0`).
let name = match name.find(".dylib").or_else(|| name.find(".so")) {
Some(index) => &name[0..index],
None => name,
};
println!("cargo:rustc-link-lib=dylib={}", name);
}
cep.discard();
}
// SPDX-License-Identifier: Apache-2.0
use std::env;
use std::fs::File;
use std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use super::common;
//================================================
// Validation
//================================================
/// Extracts the ELF class from the ELF header in a shared library.
fn parse_elf_header(path: &Path) -> io::Result<u8> {
let mut file = File::open(path)?;
let mut buffer = [0; 5];
file.read_exact(&mut buffer)?;
if buffer[..4] == [127, 69, 76, 70] {
Ok(buffer[4])
} else {
Err(Error::new(ErrorKind::InvalidData, "invalid ELF header"))
}
}
/// Extracts the magic number from the PE header in a shared library.
fn parse_pe_header(path: &Path) -> io::Result<u16> {
let mut file = File::open(path)?;
// Extract the header offset.
let mut buffer = [0; 4];
let start = SeekFrom::Start(0x3C);
file.seek(start)?;
file.read_exact(&mut buffer)?;
let offset = i32::from_le_bytes(buffer);
// Check the validity of the header.
file.seek(SeekFrom::Start(offset as u64))?;
file.read_exact(&mut buffer)?;
if buffer != [80, 69, 0, 0] {
return Err(Error::new(ErrorKind::InvalidData, "invalid PE header"));
}
// Extract the magic number.
let mut buffer = [0; 2];
file.seek(SeekFrom::Current(20))?;
file.read_exact(&mut buffer)?;
Ok(u16::from_le_bytes(buffer))
}
/// Checks that a `libclang` shared library matches the target platform.
fn validate_library(path: &Path) -> Result<(), String> {
if target_os!("linux") || target_os!("freebsd") {
let class = parse_elf_header(path).map_err(|e| e.to_string())?;
if target_pointer_width!("32") && class != 1 {
return Err("invalid ELF class (64-bit)".into());
}
if target_pointer_width!("64") && class != 2 {
return Err("invalid ELF class (32-bit)".into());
}
Ok(())
} else if target_os!("windows") {
let magic = parse_pe_header(path).map_err(|e| e.to_string())?;
if target_pointer_width!("32") && magic != 267 {
return Err("invalid DLL (64-bit)".into());
}
if target_pointer_width!("64") && magic != 523 {
return Err("invalid DLL (32-bit)".into());
}
Ok(())
} else {
Ok(())
}
}
//================================================
// Searching
//================================================
/// Extracts the version components in a `libclang` shared library filename.
fn parse_version(filename: &str) -> Vec<u32> {
let version = if let Some(version) = filename.strip_prefix("libclang.so.") {
version
} else if filename.starts_with("libclang-") {
&filename[9..filename.len() - 3]
} else {
return vec![];
};
version.split('.').map(|s| s.parse().unwrap_or(0)).collect()
}
/// Finds `libclang` shared libraries and returns the paths to, filenames of,
/// and versions of those shared libraries.
fn search_libclang_directories(runtime: bool) -> Result<Vec<(PathBuf, String, Vec<u32>)>, String> {
let mut files = vec![format!(
"{}clang{}",
env::consts::DLL_PREFIX,
env::consts::DLL_SUFFIX
)];
if target_os!("linux") {
// Some Linux distributions don't create a `libclang.so` symlink, so we
// need to look for versioned files (e.g., `libclang-3.9.so`).
files.push("libclang-*.so".into());
// Some Linux distributions don't create a `libclang.so` symlink and
// don't have versioned files as described above, so we need to look for
// suffix versioned files (e.g., `libclang.so.1`). However, `ld` cannot
// link to these files, so this will only be included when linking at
// runtime.
if runtime {
files.push("libclang.so.*".into());
files.push("libclang-*.so.*".into());
}
}
if target_os!("freebsd") || target_os!("haiku") || target_os!("netbsd") || target_os!("openbsd") {
// Some BSD distributions don't create a `libclang.so` symlink either,
// but use a different naming scheme for versioned files (e.g.,
// `libclang.so.7.0`).
files.push("libclang.so.*".into());
}
if target_os!("windows") {
// The official LLVM build uses `libclang.dll` on Windows instead of
// `clang.dll`. However, unofficial builds such as MinGW use `clang.dll`.
files.push("libclang.dll".into());
}
// Find and validate `libclang` shared libraries and collect the versions.
let mut valid = vec![];
let mut invalid = vec![];
for (directory, filename) in common::search_libclang_directories(&files, "LIBCLANG_PATH") {
let path = directory.join(&filename);
match validate_library(&path) {
Ok(()) => {
let version = parse_version(&filename);
valid.push((directory, filename, version))
}
Err(message) => invalid.push(format!("({}: {})", path.display(), message)),
}
}
if !valid.is_empty() {
return Ok(valid);
}
let message = format!(
"couldn't find any valid shared libraries matching: [{}], set the \
`LIBCLANG_PATH` environment variable to a path where one of these files \
can be found (invalid: [{}])",
files
.iter()
.map(|f| format!("'{}'", f))
.collect::<Vec<_>>()
.join(", "),
invalid.join(", "),
);
Err(message)
}
/// Finds the "best" `libclang` shared library and returns the directory and
/// filename of that library.
pub fn find(runtime: bool) -> Result<(PathBuf, String), String> {
search_libclang_directories(runtime)?
.iter()
// We want to find the `libclang` shared library with the highest
// version number, hence `max_by_key` below.
//
// However, in the case where there are multiple such `libclang` shared
// libraries, we want to use the order in which they appeared in the
// list returned by `search_libclang_directories` as a tiebreaker since
// that function returns `libclang` shared libraries in descending order
// of preference by how they were found.
//
// `max_by_key`, perhaps surprisingly, returns the *last* item with the
// maximum key rather than the first which results in the opposite of
// the tiebreaking behavior we want. This is easily fixed by reversing
// the list first.
.rev()
.max_by_key(|f| &f.2)
.cloned()
.map(|(path, filename, _)| (path, filename))
.ok_or_else(|| "unreachable".into())
}
//================================================
// Linking
//================================================
/// Finds and links to a `libclang` shared library.
#[cfg(not(feature = "runtime"))]
pub fn link() {
let cep = common::CommandErrorPrinter::default();
use std::fs;
let (directory, filename) = find(false).unwrap();
println!("cargo:rustc-link-search={}", directory.display());
if cfg!(all(target_os = "windows", target_env = "msvc")) {
// Find the `libclang` stub static library required for the MSVC
// toolchain.
let lib = if !directory.ends_with("bin") {
directory
} else {
directory.parent().unwrap().join("lib")
};
if lib.join("libclang.lib").exists() {
println!("cargo:rustc-link-search={}", lib.display());
} else if lib.join("libclang.dll.a").exists() {
// MSYS and MinGW use `libclang.dll.a` instead of `libclang.lib`.
// It is linkable with the MSVC linker, but Rust doesn't recognize
// the `.a` suffix, so we need to copy it with a different name.
//
// FIXME: Maybe we can just hardlink or symlink it?
let out = env::var("OUT_DIR").unwrap();
fs::copy(
lib.join("libclang.dll.a"),
Path::new(&out).join("libclang.lib"),
)
.unwrap();
println!("cargo:rustc-link-search=native={}", out);
} else {
panic!(
"using '{}', so 'libclang.lib' or 'libclang.dll.a' must be \
available in {}",
filename,
lib.display(),
);
}
println!("cargo:rustc-link-lib=dylib=libclang");
} else {
let name = filename.trim_start_matches("lib");
// Strip extensions and trailing version numbers (e.g., the `.so.7.0` in
// `libclang.so.7.0`).
let name = match name.find(".dylib").or_else(|| name.find(".so")) {
Some(index) => &name[0..index],
None => name,
};
println!("cargo:rustc-link-lib=dylib={}", name);
}
cep.discard();
}
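The tiebreaking comment in `find` above is easy to check in isolation; the following standalone snippet (unrelated to `libclang`) demonstrates that `max_by_key` keeps the last maximal element, which is why the iterator is reversed first:

```rust
// Standalone demonstration of the tiebreaking behaviour described in `find`:
// `max_by_key` returns the *last* element with the maximum key, so reversing
// the iterator first makes earlier entries win ties.
fn main() {
    let candidates = vec![("a", 9), ("b", 9), ("c", 3)];

    // Without reversing, the later of the two maximal entries wins.
    let last = candidates.iter().max_by_key(|(_, v)| *v).unwrap();
    assert_eq!(last.0, "b");

    // Reversing first restores "first in original order wins ties".
    let first = candidates.iter().rev().max_by_key(|(_, v)| *v).unwrap();
    assert_eq!(first.0, "a");

    println!("tie broken in favour of {:?}", first.0);
}
```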

76
third_party/rust/clang-sys/build/macros.rs (vendored)

@@ -1,38 +1,38 @@
// SPDX-License-Identifier: Apache-2.0
macro_rules! test {
() => (cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok());
}
macro_rules! target_os {
($os:expr) => {
if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
let var = ::std::env::var("_CLANG_SYS_TEST_OS");
var.map_or(false, |v| v == $os)
} else {
cfg!(target_os = $os)
}
};
}
macro_rules! target_pointer_width {
($pointer_width:expr) => {
if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
let var = ::std::env::var("_CLANG_SYS_TEST_POINTER_WIDTH");
var.map_or(false, |v| v == $pointer_width)
} else {
cfg!(target_pointer_width = $pointer_width)
}
};
}
macro_rules! target_env {
($env:expr) => {
if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
let var = ::std::env::var("_CLANG_SYS_TEST_ENV");
var.map_or(false, |v| v == $env)
} else {
cfg!(target_env = $env)
}
};
}
// SPDX-License-Identifier: Apache-2.0
macro_rules! test {
() => (cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok());
}
macro_rules! target_os {
($os:expr) => {
if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
let var = ::std::env::var("_CLANG_SYS_TEST_OS");
var.map_or(false, |v| v == $os)
} else {
cfg!(target_os = $os)
}
};
}
macro_rules! target_pointer_width {
($pointer_width:expr) => {
if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
let var = ::std::env::var("_CLANG_SYS_TEST_POINTER_WIDTH");
var.map_or(false, |v| v == $pointer_width)
} else {
cfg!(target_pointer_width = $pointer_width)
}
};
}
macro_rules! target_env {
($env:expr) => {
if cfg!(test) && ::std::env::var("_CLANG_SYS_TEST").is_ok() {
let var = ::std::env::var("_CLANG_SYS_TEST_ENV");
var.map_or(false, |v| v == $env)
} else {
cfg!(target_env = $env)
}
};
}

280
third_party/rust/clang-sys/build/static.rs (vendored)

@@ -1,140 +1,140 @@
// SPDX-License-Identifier: Apache-2.0
extern crate glob;
use std::path::{Path, PathBuf};
use glob::Pattern;
use super::common;
//================================================
// Searching
//================================================
/// Clang static libraries required to link to `libclang` 3.5 and later.
const CLANG_LIBRARIES: &[&str] = &[
"clang",
"clangAST",
"clangAnalysis",
"clangBasic",
"clangDriver",
"clangEdit",
"clangFrontend",
"clangIndex",
"clangLex",
"clangParse",
"clangRewrite",
"clangSema",
"clangSerialization",
];
/// Gets the name of an LLVM or Clang static library from a path.
fn get_library_name(path: &Path) -> Option<String> {
path.file_stem().map(|p| {
let string = p.to_string_lossy();
if let Some(name) = string.strip_prefix("lib") {
name.to_owned()
} else {
string.to_string()
}
})
}
/// Gets the LLVM static libraries required to link to `libclang`.
fn get_llvm_libraries() -> Vec<String> {
common::run_llvm_config(&["--libs"])
.unwrap()
.split_whitespace()
.filter_map(|p| {
// Depending on the version of `llvm-config` in use, listed
// libraries may be in one of two forms, a full path to the library
// or simply prefixed with `-l`.
if let Some(path) = p.strip_prefix("-l") {
Some(path.into())
} else {
get_library_name(Path::new(p))
}
})
.collect()
}
/// Gets the Clang static libraries required to link to `libclang`.
fn get_clang_libraries<P: AsRef<Path>>(directory: P) -> Vec<String> {
// Escape the directory in case it contains characters that have special
// meaning in glob patterns (e.g., `[` or `]`).
let directory = Pattern::escape(directory.as_ref().to_str().unwrap());
let directory = Path::new(&directory);
let pattern = directory.join("libclang*.a").to_str().unwrap().to_owned();
if let Ok(libraries) = glob::glob(&pattern) {
libraries
.filter_map(|l| l.ok().and_then(|l| get_library_name(&l)))
.collect()
} else {
CLANG_LIBRARIES.iter().map(|l| (*l).to_string()).collect()
}
}
/// Finds a directory containing LLVM and Clang static libraries and returns the
/// path to that directory.
fn find() -> PathBuf {
let name = if target_os!("windows") {
"libclang.lib"
} else {
"libclang.a"
};
let files = common::search_libclang_directories(&[name.into()], "LIBCLANG_STATIC_PATH");
if let Some((directory, _)) = files.into_iter().next() {
directory
} else {
panic!("could not find any static libraries");
}
}
//================================================
// Linking
//================================================
/// Finds and links to `libclang` static libraries.
pub fn link() {
let cep = common::CommandErrorPrinter::default();
let directory = find();
// Specify required Clang static libraries.
println!("cargo:rustc-link-search=native={}", directory.display());
for library in get_clang_libraries(directory) {
println!("cargo:rustc-link-lib=static={}", library);
}
// Determine the shared mode used by LLVM.
let mode = common::run_llvm_config(&["--shared-mode"]).map(|m| m.trim().to_owned());
let prefix = if mode.map_or(false, |m| m == "static") {
"static="
} else {
""
};
// Specify required LLVM static libraries.
println!(
"cargo:rustc-link-search=native={}",
common::run_llvm_config(&["--libdir"]).unwrap().trim_end()
);
for library in get_llvm_libraries() {
println!("cargo:rustc-link-lib={}{}", prefix, library);
}
// Specify required system libraries.
// MSVC doesn't need this, as it tracks dependencies inside `.lib` files.
if cfg!(target_os = "freebsd") {
println!("cargo:rustc-flags=-l ffi -l ncursesw -l c++ -l z");
} else if cfg!(any(target_os = "haiku", target_os = "linux")) {
println!("cargo:rustc-flags=-l ffi -l ncursesw -l stdc++ -l z");
} else if cfg!(target_os = "macos") {
println!("cargo:rustc-flags=-l ffi -l ncurses -l c++ -l z");
}
cep.discard();
}
// SPDX-License-Identifier: Apache-2.0
extern crate glob;
use std::path::{Path, PathBuf};
use glob::Pattern;
use super::common;
//================================================
// Searching
//================================================
/// Clang static libraries required to link to `libclang` 3.5 and later.
const CLANG_LIBRARIES: &[&str] = &[
"clang",
"clangAST",
"clangAnalysis",
"clangBasic",
"clangDriver",
"clangEdit",
"clangFrontend",
"clangIndex",
"clangLex",
"clangParse",
"clangRewrite",
"clangSema",
"clangSerialization",
];
/// Gets the name of an LLVM or Clang static library from a path.
fn get_library_name(path: &Path) -> Option<String> {
path.file_stem().map(|p| {
let string = p.to_string_lossy();
if let Some(name) = string.strip_prefix("lib") {
name.to_owned()
} else {
string.to_string()
}
})
}
/// Gets the LLVM static libraries required to link to `libclang`.
fn get_llvm_libraries() -> Vec<String> {
common::run_llvm_config(&["--libs"])
.unwrap()
.split_whitespace()
.filter_map(|p| {
// Depending on the version of `llvm-config` in use, listed
// libraries may be in one of two forms, a full path to the library
// or simply prefixed with `-l`.
if let Some(path) = p.strip_prefix("-l") {
Some(path.into())
} else {
get_library_name(Path::new(p))
}
})
.collect()
}
/// Gets the Clang static libraries required to link to `libclang`.
fn get_clang_libraries<P: AsRef<Path>>(directory: P) -> Vec<String> {
// Escape the directory in case it contains characters that have special
// meaning in glob patterns (e.g., `[` or `]`).
let directory = Pattern::escape(directory.as_ref().to_str().unwrap());
let directory = Path::new(&directory);
let pattern = directory.join("libclang*.a").to_str().unwrap().to_owned();
if let Ok(libraries) = glob::glob(&pattern) {
libraries
.filter_map(|l| l.ok().and_then(|l| get_library_name(&l)))
.collect()
} else {
CLANG_LIBRARIES.iter().map(|l| (*l).to_string()).collect()
}
}
/// Finds a directory containing LLVM and Clang static libraries and returns the
/// path to that directory.
fn find() -> PathBuf {
let name = if target_os!("windows") {
"libclang.lib"
} else {
"libclang.a"
};
let files = common::search_libclang_directories(&[name.into()], "LIBCLANG_STATIC_PATH");
if let Some((directory, _)) = files.into_iter().next() {
directory
} else {
panic!("could not find any static libraries");
}
}
//================================================
// Linking
//================================================
/// Finds and links to `libclang` static libraries.
pub fn link() {
let cep = common::CommandErrorPrinter::default();
let directory = find();
// Specify required Clang static libraries.
println!("cargo:rustc-link-search=native={}", directory.display());
for library in get_clang_libraries(directory) {
println!("cargo:rustc-link-lib=static={}", library);
}
// Determine the shared mode used by LLVM.
let mode = common::run_llvm_config(&["--shared-mode"]).map(|m| m.trim().to_owned());
let prefix = if mode.map_or(false, |m| m == "static") {
"static="
} else {
""
};
// Specify required LLVM static libraries.
println!(
"cargo:rustc-link-search=native={}",
common::run_llvm_config(&["--libdir"]).unwrap().trim_end()
);
for library in get_llvm_libraries() {
println!("cargo:rustc-link-lib={}{}", prefix, library);
}
// Specify required system libraries.
// MSVC doesn't need this, as it tracks dependencies inside `.lib` files.
if cfg!(target_os = "freebsd") {
println!("cargo:rustc-flags=-l ffi -l ncursesw -l c++ -l z");
} else if cfg!(any(target_os = "haiku", target_os = "linux")) {
println!("cargo:rustc-flags=-l ffi -l ncursesw -l stdc++ -l z");
} else if cfg!(target_os = "macos") {
println!("cargo:rustc-flags=-l ffi -l ncurses -l c++ -l z");
}
cep.discard();
}
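For orientation, `link()` above drives static linking purely through Cargo build-script directives printed to stdout. A minimal standalone sketch of that pattern follows; the directory and library names are illustrative placeholders, not values produced by this script.

// Sketch of the cargo directives a static-linking build script emits.
// The directory and library names here are placeholders.
fn main() {
    // Directory containing the static archives, e.g. from LIBCLANG_STATIC_PATH.
    let libdir = "/usr/lib/llvm/lib";
    println!("cargo:rustc-link-search=native={}", libdir);
    // Clang archives such as `libclangAST.a` are linked as `static=clangAST`.
    for library in ["clangAST", "clangBasic", "clangLex"] {
        println!("cargo:rustc-link-lib=static={}", library);
    }
    // LLVM libraries keep the `static=` prefix only when
    // `llvm-config --shared-mode` reports "static".
    println!("cargo:rustc-link-lib=static=LLVMSupport");
}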

third_party/rust/clang-sys/clippy.toml
@@ -1 +1 @@
doc-valid-idents = ["FreeBSD"]

third_party/rust/clang-sys/src/lib.rs
(diff not shown due to its large size)

third_party/rust/clang-sys/src/link.rs
@@ -1,273 +1,323 @@
// SPDX-License-Identifier: Apache-2.0
//================================================
// Macros
//================================================
#[cfg(feature = "runtime")]
macro_rules! link {
(
@LOAD:
$(#[doc=$doc:expr])*
#[cfg($cfg:meta)]
fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*
) => (
$(#[doc=$doc])*
#[cfg($cfg)]
pub fn $name(library: &mut super::SharedLibrary) {
let symbol = unsafe { library.library.get(stringify!($name).as_bytes()) }.ok();
library.functions.$name = match symbol {
Some(s) => *s,
None => None,
};
}
#[cfg(not($cfg))]
pub fn $name(_: &mut super::SharedLibrary) {}
);
(
@LOAD:
fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*
) => (
link!(@LOAD: #[cfg(feature = "runtime")] fn $name($($pname: $pty), *) $(-> $ret)*);
);
(
$(
$(#[doc=$doc:expr] #[cfg($cfg:meta)])*
pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;
)+
) => (
use std::cell::{RefCell};
use std::sync::{Arc};
use std::path::{Path, PathBuf};
/// The (minimum) version of a `libclang` shared library.
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Version {
V3_5 = 35,
V3_6 = 36,
V3_7 = 37,
V3_8 = 38,
V3_9 = 39,
V4_0 = 40,
V5_0 = 50,
V6_0 = 60,
V7_0 = 70,
V8_0 = 80,
V9_0 = 90,
}
/// The set of functions loaded dynamically.
#[derive(Debug, Default)]
pub struct Functions {
$(
$(#[doc=$doc] #[cfg($cfg)])*
pub $name: Option<unsafe extern fn($($pname: $pty), *) $(-> $ret)*>,
)+
}
/// A dynamically loaded instance of the `libclang` library.
#[derive(Debug)]
pub struct SharedLibrary {
library: libloading::Library,
path: PathBuf,
pub functions: Functions,
}
impl SharedLibrary {
fn new(library: libloading::Library, path: PathBuf) -> Self {
Self { library, path, functions: Functions::default() }
}
/// Returns the path to this `libclang` shared library.
pub fn path(&self) -> &Path {
&self.path
}
/// Returns the (minimum) version of this `libclang` shared library.
///
/// If this returns `None`, it indicates that the version is too old
/// to be supported by this crate (i.e., `3.4` or earlier). If the
/// version of this shared library is more recent than that fully
/// supported by this crate, the most recent fully supported version
/// will be returned.
pub fn version(&self) -> Option<Version> {
macro_rules! check {
($fn:expr, $version:ident) => {
if self.library.get::<unsafe extern fn()>($fn).is_ok() {
return Some(Version::$version);
}
};
}
unsafe {
check!(b"clang_Cursor_isAnonymousRecordDecl", V9_0);
check!(b"clang_Cursor_getObjCPropertyGetterName", V8_0);
check!(b"clang_File_tryGetRealPathName", V7_0);
check!(b"clang_CXIndex_setInvocationEmissionPathOption", V6_0);
check!(b"clang_Cursor_isExternalSymbol", V5_0);
check!(b"clang_EvalResult_getAsLongLong", V4_0);
check!(b"clang_CXXConstructor_isConvertingConstructor", V3_9);
check!(b"clang_CXXField_isMutable", V3_8);
check!(b"clang_Cursor_getOffsetOfField", V3_7);
check!(b"clang_Cursor_getStorageClass", V3_6);
check!(b"clang_Type_getNumTemplateArguments", V3_5);
}
None
}
}
thread_local!(static LIBRARY: RefCell<Option<Arc<SharedLibrary>>> = RefCell::new(None));
/// Returns whether a `libclang` shared library is loaded on this thread.
pub fn is_loaded() -> bool {
LIBRARY.with(|l| l.borrow().is_some())
}
fn with_library<T, F>(f: F) -> Option<T> where F: FnOnce(&SharedLibrary) -> T {
LIBRARY.with(|l| {
match l.borrow().as_ref() {
Some(library) => Some(f(&library)),
_ => None,
}
})
}
$(
#[cfg_attr(feature="cargo-clippy", allow(clippy::missing_safety_doc))]
#[cfg_attr(feature="cargo-clippy", allow(clippy::too_many_arguments))]
$(#[doc=$doc] #[cfg($cfg)])*
pub unsafe fn $name($($pname: $pty), *) $(-> $ret)* {
let f = with_library(|l| {
l.functions.$name.expect(concat!(
"`libclang` function not loaded: `",
stringify!($name),
"`. This crate requires that `libclang` 3.9 or later be installed on your ",
"system. For more information on how to accomplish this, see here: ",
"https://rust-lang.github.io/rust-bindgen/requirements.html#installing-clang-39"))
}).expect("a `libclang` shared library is not loaded on this thread");
f($($pname), *)
}
$(#[doc=$doc] #[cfg($cfg)])*
pub mod $name {
pub fn is_loaded() -> bool {
super::with_library(|l| l.functions.$name.is_some()).unwrap_or(false)
}
}
)+
mod load {
$(link!(@LOAD: $(#[cfg($cfg)])* fn $name($($pname: $pty), *) $(-> $ret)*);)+
}
/// Loads a `libclang` shared library and returns the library instance.
///
/// This function does not attempt to load any functions from the shared library. The caller
/// is responsible for loading the functions they require.
///
/// # Failures
///
/// * a `libclang` shared library could not be found
/// * the `libclang` shared library could not be opened
pub fn load_manually() -> Result<SharedLibrary, String> {
#[allow(dead_code)]
mod build {
include!(concat!(env!("OUT_DIR"), "/macros.rs"));
pub mod common { include!(concat!(env!("OUT_DIR"), "/common.rs")); }
pub mod dynamic { include!(concat!(env!("OUT_DIR"), "/dynamic.rs")); }
}
let (directory, filename) = build::dynamic::find(true)?;
let path = directory.join(filename);
unsafe {
let library = libloading::Library::new(&path).map_err(|e| {
format!(
"the `libclang` shared library at {} could not be opened: {}",
path.display(),
e,
)
});
let mut library = SharedLibrary::new(library?, path);
$(load::$name(&mut library);)+
Ok(library)
}
}
/// Loads a `libclang` shared library for use in the current thread.
///
/// This function attempts to load all the functions in the shared library. Whether a
/// function has been loaded can be tested by calling the `is_loaded` function on the
/// module with the same name as the function (e.g., `clang_createIndex::is_loaded()` for
/// the `clang_createIndex` function).
///
/// # Failures
///
/// * a `libclang` shared library could not be found
/// * the `libclang` shared library could not be opened
#[allow(dead_code)]
pub fn load() -> Result<(), String> {
let library = Arc::new(load_manually()?);
LIBRARY.with(|l| *l.borrow_mut() = Some(library));
Ok(())
}
/// Unloads the `libclang` shared library in use in the current thread.
///
/// # Failures
///
/// * a `libclang` shared library is not in use in the current thread
pub fn unload() -> Result<(), String> {
let library = set_library(None);
if library.is_some() {
Ok(())
} else {
Err("a `libclang` shared library is not in use in the current thread".into())
}
}
/// Returns the library instance stored in TLS.
///
/// This function allows for sharing library instances between threads.
pub fn get_library() -> Option<Arc<SharedLibrary>> {
LIBRARY.with(|l| l.borrow_mut().clone())
}
/// Sets the library instance stored in TLS and returns the previous library.
///
/// This function allows for sharing library instances between threads.
pub fn set_library(library: Option<Arc<SharedLibrary>>) -> Option<Arc<SharedLibrary>> {
LIBRARY.with(|l| mem::replace(&mut *l.borrow_mut(), library))
}
)
}
#[cfg(not(feature = "runtime"))]
macro_rules! link {
(
$(
$(#[doc=$doc:expr] #[cfg($cfg:meta)])*
pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;
)+
) => (
extern {
$(
$(#[doc=$doc] #[cfg($cfg)])*
pub fn $name($($pname: $pty), *) $(-> $ret)*;
)+
}
$(
$(#[doc=$doc] #[cfg($cfg)])*
pub mod $name {
pub fn is_loaded() -> bool { true }
}
)+
)
}
// SPDX-License-Identifier: Apache-2.0
//================================================
// Macros
//================================================
#[cfg(feature = "runtime")]
macro_rules! link {
(
@LOAD:
$(#[doc=$doc:expr])*
#[cfg($cfg:meta)]
fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*
) => (
$(#[doc=$doc])*
#[cfg($cfg)]
pub fn $name(library: &mut super::SharedLibrary) {
let symbol = unsafe { library.library.get(stringify!($name).as_bytes()) }.ok();
library.functions.$name = match symbol {
Some(s) => *s,
None => None,
};
}
#[cfg(not($cfg))]
pub fn $name(_: &mut super::SharedLibrary) {}
);
(
@LOAD:
fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*
) => (
link!(@LOAD: #[cfg(feature = "runtime")] fn $name($($pname: $pty), *) $(-> $ret)*);
);
(
$(
$(#[doc=$doc:expr] #[cfg($cfg:meta)])*
pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;
)+
) => (
use std::cell::{RefCell};
use std::fmt;
use std::sync::{Arc};
use std::path::{Path, PathBuf};
/// The (minimum) version of a `libclang` shared library.
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Version {
V3_5 = 35,
V3_6 = 36,
V3_7 = 37,
V3_8 = 38,
V3_9 = 39,
V4_0 = 40,
V5_0 = 50,
V6_0 = 60,
V7_0 = 70,
V8_0 = 80,
V9_0 = 90,
V11_0 = 110,
V12_0 = 120,
V16_0 = 160,
V17_0 = 170,
}
impl fmt::Display for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use Version::*;
match self {
V3_5 => write!(f, "3.5.x"),
V3_6 => write!(f, "3.6.x"),
V3_7 => write!(f, "3.7.x"),
V3_8 => write!(f, "3.8.x"),
V3_9 => write!(f, "3.9.x"),
V4_0 => write!(f, "4.0.x"),
V5_0 => write!(f, "5.0.x"),
V6_0 => write!(f, "6.0.x"),
V7_0 => write!(f, "7.0.x"),
V8_0 => write!(f, "8.0.x"),
V9_0 => write!(f, "9.0.x - 10.0.x"),
V11_0 => write!(f, "11.0.x"),
V12_0 => write!(f, "12.0.x - 15.0.x"),
V16_0 => write!(f, "16.0.x"),
V17_0 => write!(f, "17.0.x or later"),
}
}
}
/// The set of functions loaded dynamically.
#[derive(Debug, Default)]
pub struct Functions {
$(
$(#[doc=$doc] #[cfg($cfg)])*
pub $name: Option<unsafe extern fn($($pname: $pty), *) $(-> $ret)*>,
)+
}
/// A dynamically loaded instance of the `libclang` library.
#[derive(Debug)]
pub struct SharedLibrary {
library: libloading::Library,
path: PathBuf,
pub functions: Functions,
}
impl SharedLibrary {
fn new(library: libloading::Library, path: PathBuf) -> Self {
Self { library, path, functions: Functions::default() }
}
/// Returns the path to this `libclang` shared library.
pub fn path(&self) -> &Path {
&self.path
}
/// Returns the (minimum) version of this `libclang` shared library.
///
/// If this returns `None`, it indicates that the version is too old
/// to be supported by this crate (i.e., `3.4` or earlier). If the
/// version of this shared library is more recent than that fully
/// supported by this crate, the most recent fully supported version
/// will be returned.
pub fn version(&self) -> Option<Version> {
macro_rules! check {
($fn:expr, $version:ident) => {
if self.library.get::<unsafe extern fn()>($fn).is_ok() {
return Some(Version::$version);
}
};
}
unsafe {
check!(b"clang_CXXMethod_isExplicit", V17_0);
check!(b"clang_CXXMethod_isCopyAssignmentOperator", V16_0);
check!(b"clang_Cursor_getVarDeclInitializer", V12_0);
check!(b"clang_Type_getValueType", V11_0);
check!(b"clang_Cursor_isAnonymousRecordDecl", V9_0);
check!(b"clang_Cursor_getObjCPropertyGetterName", V8_0);
check!(b"clang_File_tryGetRealPathName", V7_0);
check!(b"clang_CXIndex_setInvocationEmissionPathOption", V6_0);
check!(b"clang_Cursor_isExternalSymbol", V5_0);
check!(b"clang_EvalResult_getAsLongLong", V4_0);
check!(b"clang_CXXConstructor_isConvertingConstructor", V3_9);
check!(b"clang_CXXField_isMutable", V3_8);
check!(b"clang_Cursor_getOffsetOfField", V3_7);
check!(b"clang_Cursor_getStorageClass", V3_6);
check!(b"clang_Type_getNumTemplateArguments", V3_5);
}
None
}
}
thread_local!(static LIBRARY: RefCell<Option<Arc<SharedLibrary>>> = RefCell::new(None));
/// Returns whether a `libclang` shared library is loaded on this thread.
pub fn is_loaded() -> bool {
LIBRARY.with(|l| l.borrow().is_some())
}
fn with_library<T, F>(f: F) -> Option<T> where F: FnOnce(&SharedLibrary) -> T {
LIBRARY.with(|l| {
match l.borrow().as_ref() {
Some(library) => Some(f(&library)),
_ => None,
}
})
}
$(
#[cfg_attr(feature="cargo-clippy", allow(clippy::missing_safety_doc))]
#[cfg_attr(feature="cargo-clippy", allow(clippy::too_many_arguments))]
$(#[doc=$doc] #[cfg($cfg)])*
pub unsafe fn $name($($pname: $pty), *) $(-> $ret)* {
let f = with_library(|library| {
if let Some(function) = library.functions.$name {
function
} else {
panic!(
r#"
A `libclang` function was called that is not supported by the loaded `libclang` instance.
called function = `{0}`
loaded `libclang` instance = {1}
This crate only supports `libclang` 3.5 and later.
The minimum `libclang` requirement for this particular function can be found here:
https://docs.rs/clang-sys/latest/clang_sys/{0}/index.html
Instructions for installing `libclang` can be found here:
https://rust-lang.github.io/rust-bindgen/requirements.html
"#,
stringify!($name),
library
.version()
.map(|v| format!("{}", v))
.unwrap_or_else(|| "unsupported version".into()),
);
}
}).expect("a `libclang` shared library is not loaded on this thread");
f($($pname), *)
}
$(#[doc=$doc] #[cfg($cfg)])*
pub mod $name {
pub fn is_loaded() -> bool {
super::with_library(|l| l.functions.$name.is_some()).unwrap_or(false)
}
}
)+
mod load {
$(link!(@LOAD: $(#[cfg($cfg)])* fn $name($($pname: $pty), *) $(-> $ret)*);)+
}
/// Loads a `libclang` shared library and returns the library instance.
///
/// This function does not attempt to load any functions from the shared library. The caller
/// is responsible for loading the functions they require.
///
/// # Failures
///
/// * a `libclang` shared library could not be found
/// * the `libclang` shared library could not be opened
pub fn load_manually() -> Result<SharedLibrary, String> {
#[allow(dead_code)]
mod build {
include!(concat!(env!("OUT_DIR"), "/macros.rs"));
pub mod common { include!(concat!(env!("OUT_DIR"), "/common.rs")); }
pub mod dynamic { include!(concat!(env!("OUT_DIR"), "/dynamic.rs")); }
}
let (directory, filename) = build::dynamic::find(true)?;
let path = directory.join(filename);
unsafe {
let library = libloading::Library::new(&path).map_err(|e| {
format!(
"the `libclang` shared library at {} could not be opened: {}",
path.display(),
e,
)
});
let mut library = SharedLibrary::new(library?, path);
$(load::$name(&mut library);)+
Ok(library)
}
}
/// Loads a `libclang` shared library for use in the current thread.
///
/// This function attempts to load all the functions in the shared library. Whether a
/// function has been loaded can be tested by calling the `is_loaded` function on the
/// module with the same name as the function (e.g., `clang_createIndex::is_loaded()` for
/// the `clang_createIndex` function).
///
/// # Failures
///
/// * a `libclang` shared library could not be found
/// * the `libclang` shared library could not be opened
#[allow(dead_code)]
pub fn load() -> Result<(), String> {
let library = Arc::new(load_manually()?);
LIBRARY.with(|l| *l.borrow_mut() = Some(library));
Ok(())
}
/// Unloads the `libclang` shared library in use in the current thread.
///
/// # Failures
///
/// * a `libclang` shared library is not in use in the current thread
pub fn unload() -> Result<(), String> {
let library = set_library(None);
if library.is_some() {
Ok(())
} else {
Err("a `libclang` shared library is not in use in the current thread".into())
}
}
/// Returns the library instance stored in TLS.
///
/// This function allows for sharing library instances between threads.
pub fn get_library() -> Option<Arc<SharedLibrary>> {
LIBRARY.with(|l| l.borrow_mut().clone())
}
/// Sets the library instance stored in TLS and returns the previous library.
///
/// This function allows for sharing library instances between threads.
pub fn set_library(library: Option<Arc<SharedLibrary>>) -> Option<Arc<SharedLibrary>> {
LIBRARY.with(|l| mem::replace(&mut *l.borrow_mut(), library))
}
)
}
#[cfg(not(feature = "runtime"))]
macro_rules! link {
(
$(
$(#[doc=$doc:expr] #[cfg($cfg:meta)])*
pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;
)+
) => (
extern {
$(
$(#[doc=$doc] #[cfg($cfg)])*
pub fn $name($($pname: $pty), *) $(-> $ret)*;
)+
}
$(
$(#[doc=$doc] #[cfg($cfg)])*
pub mod $name {
pub fn is_loaded() -> bool { true }
}
)+
)
}
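With the `runtime` feature, the generated `load`, `get_library`, and `unload` functions above are exposed at the crate root. A minimal usage sketch follows, assuming a `libclang` shared library is installed on the system; it mirrors the crate's own test further below.

// Minimal sketch of runtime loading with the `runtime` feature enabled.
// Assumes a libclang shared library can be found on this system.
fn main() {
    clang_sys::load().expect("failed to load a `libclang` shared library");
    let library = clang_sys::get_library().expect("no library loaded on this thread");
    println!("libclang {:?} at {}", library.version(), library.path().display());
    unsafe {
        let index = clang_sys::clang_createIndex(0, 0);
        assert!(!index.is_null());
        clang_sys::clang_disposeIndex(index);
    }
    clang_sys::unload().expect("failed to unload `libclang`");
}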

third_party/rust/clang-sys/src/support.rs
@@ -1,236 +1,236 @@
// SPDX-License-Identifier: Apache-2.0
//! Provides helper functionality.
use std::path::{Path, PathBuf};
use std::process::Command;
use std::{env, io};
use glob::{self, Pattern};
use libc::c_int;
use super::CXVersion;
//================================================
// Structs
//================================================
/// A `clang` executable.
#[derive(Clone, Debug)]
pub struct Clang {
/// The path to this `clang` executable.
pub path: PathBuf,
/// The version of this `clang` executable if it could be parsed.
pub version: Option<CXVersion>,
/// The directories searched by this `clang` executable for C headers if
/// they could be parsed.
pub c_search_paths: Option<Vec<PathBuf>>,
/// The directories searched by this `clang` executable for C++ headers if
/// they could be parsed.
pub cpp_search_paths: Option<Vec<PathBuf>>,
}
impl Clang {
fn new(path: impl AsRef<Path>, args: &[String]) -> Self {
Self {
path: path.as_ref().into(),
version: parse_version(path.as_ref()),
c_search_paths: parse_search_paths(path.as_ref(), "c", args),
cpp_search_paths: parse_search_paths(path.as_ref(), "c++", args),
}
}
/// Returns a `clang` executable if one can be found.
///
/// If the `CLANG_PATH` environment variable is set, that is the instance of
/// `clang` used. Otherwise, a series of directories are searched. First, if
/// a path is supplied, that is the first directory searched. Then, the
/// directory returned by `llvm-config --bindir` is searched. On macOS
/// systems, `xcodebuild -find clang` will next be queried. Last, the
/// directories in the system's `PATH` are searched.
///
/// ## Cross-compilation
///
/// If target arguments are provided (e.g., `-target` followed by a target
/// like `x86_64-unknown-linux-gnu`) then this method will prefer a
/// target-prefixed instance of `clang` (e.g.,
/// `x86_64-unknown-linux-gnu-clang` for the above example).
pub fn find(path: Option<&Path>, args: &[String]) -> Option<Clang> {
if let Ok(path) = env::var("CLANG_PATH") {
let p = Path::new(&path);
if p.is_file() && is_executable(&p).unwrap_or(false) {
return Some(Clang::new(p, args));
}
}
// Determine the cross-compilation target, if any.
let mut target = None;
for i in 0..args.len() {
if args[i] == "-target" && i + 1 < args.len() {
target = Some(&args[i + 1]);
}
}
// Collect the paths to search for a `clang` executable in.
let mut paths = vec![];
if let Some(path) = path {
paths.push(path.into());
}
if let Ok(path) = run_llvm_config(&["--bindir"]) {
if let Some(line) = path.lines().next() {
paths.push(line.into());
}
}
if cfg!(target_os = "macos") {
if let Ok((path, _)) = run("xcodebuild", &["-find", "clang"]) {
if let Some(line) = path.lines().next() {
paths.push(line.into());
}
}
}
if let Ok(path) = env::var("PATH") {
paths.extend(env::split_paths(&path));
}
// First, look for a target-prefixed `clang` executable.
if let Some(target) = target {
let default = format!("{}-clang{}", target, env::consts::EXE_SUFFIX);
let versioned = format!("{}-clang-[0-9]*{}", target, env::consts::EXE_SUFFIX);
let patterns = &[&default[..], &versioned[..]];
for path in &paths {
if let Some(path) = find(path, patterns) {
return Some(Clang::new(path, args));
}
}
}
// Otherwise, look for any other `clang` executable.
let default = format!("clang{}", env::consts::EXE_SUFFIX);
let versioned = format!("clang-[0-9]*{}", env::consts::EXE_SUFFIX);
let patterns = &[&default[..], &versioned[..]];
for path in paths {
if let Some(path) = find(&path, patterns) {
return Some(Clang::new(path, args));
}
}
None
}
}
//================================================
// Functions
//================================================
/// Returns the first match to the supplied glob patterns in the supplied
/// directory if there are any matches.
fn find(directory: &Path, patterns: &[&str]) -> Option<PathBuf> {
// Escape the directory in case it contains characters that have special
// meaning in glob patterns (e.g., `[` or `]`).
let directory = if let Some(directory) = directory.to_str() {
Path::new(&Pattern::escape(directory)).to_owned()
} else {
return None;
};
for pattern in patterns {
let pattern = directory.join(pattern).to_string_lossy().into_owned();
if let Some(path) = glob::glob(&pattern).ok()?.filter_map(|p| p.ok()).next() {
if path.is_file() && is_executable(&path).unwrap_or(false) {
return Some(path);
}
}
}
None
}
#[cfg(unix)]
fn is_executable(path: &Path) -> io::Result<bool> {
use std::ffi::CString;
use std::os::unix::ffi::OsStrExt;
let path = CString::new(path.as_os_str().as_bytes())?;
unsafe { Ok(libc::access(path.as_ptr(), libc::X_OK) == 0) }
}
#[cfg(not(unix))]
fn is_executable(_: &Path) -> io::Result<bool> {
Ok(true)
}
/// Attempts to run an executable, returning the `stdout` and `stderr` output if
/// successful.
fn run(executable: &str, arguments: &[&str]) -> Result<(String, String), String> {
Command::new(executable)
.args(arguments)
.output()
.map(|o| {
let stdout = String::from_utf8_lossy(&o.stdout).into_owned();
let stderr = String::from_utf8_lossy(&o.stderr).into_owned();
(stdout, stderr)
})
.map_err(|e| format!("could not run executable `{}`: {}", executable, e))
}
/// Runs `clang`, returning the `stdout` and `stderr` output.
fn run_clang(path: &Path, arguments: &[&str]) -> (String, String) {
run(&path.to_string_lossy().into_owned(), arguments).unwrap()
}
/// Runs `llvm-config`, returning the `stdout` output if successful.
fn run_llvm_config(arguments: &[&str]) -> Result<String, String> {
let config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".to_string());
run(&config, arguments).map(|(o, _)| o)
}
/// Parses a version number if possible, ignoring trailing non-digit characters.
fn parse_version_number(number: &str) -> Option<c_int> {
number
.chars()
.take_while(|c| c.is_digit(10))
.collect::<String>()
.parse()
.ok()
}
/// Parses the version from the output of a `clang` executable if possible.
fn parse_version(path: &Path) -> Option<CXVersion> {
let output = run_clang(path, &["--version"]).0;
let start = output.find("version ")? + 8;
let mut numbers = output[start..].split_whitespace().next()?.split('.');
let major = numbers.next().and_then(parse_version_number)?;
let minor = numbers.next().and_then(parse_version_number)?;
let subminor = numbers.next().and_then(parse_version_number).unwrap_or(0);
Some(CXVersion {
Major: major,
Minor: minor,
Subminor: subminor,
})
}
/// Parses the search paths from the output of a `clang` executable if possible.
fn parse_search_paths(path: &Path, language: &str, args: &[String]) -> Option<Vec<PathBuf>> {
let mut clang_args = vec!["-E", "-x", language, "-", "-v"];
clang_args.extend(args.iter().map(|s| &**s));
let output = run_clang(path, &clang_args).1;
let start = output.find("#include <...> search starts here:")? + 34;
let end = output.find("End of search list.")?;
let paths = output[start..end].replace("(framework directory)", "");
Some(
paths
.lines()
.filter(|l| !l.is_empty())
.map(|l| Path::new(l.trim()).into())
.collect(),
)
}
// SPDX-License-Identifier: Apache-2.0
//! Provides helper functionality.
use std::path::{Path, PathBuf};
use std::process::Command;
use std::{env, io};
use glob::{self, Pattern};
use libc::c_int;
use super::CXVersion;
//================================================
// Structs
//================================================
/// A `clang` executable.
#[derive(Clone, Debug)]
pub struct Clang {
/// The path to this `clang` executable.
pub path: PathBuf,
/// The version of this `clang` executable if it could be parsed.
pub version: Option<CXVersion>,
/// The directories searched by this `clang` executable for C headers if
/// they could be parsed.
pub c_search_paths: Option<Vec<PathBuf>>,
/// The directories searched by this `clang` executable for C++ headers if
/// they could be parsed.
pub cpp_search_paths: Option<Vec<PathBuf>>,
}
impl Clang {
fn new(path: impl AsRef<Path>, args: &[String]) -> Self {
Self {
path: path.as_ref().into(),
version: parse_version(path.as_ref()),
c_search_paths: parse_search_paths(path.as_ref(), "c", args),
cpp_search_paths: parse_search_paths(path.as_ref(), "c++", args),
}
}
/// Returns a `clang` executable if one can be found.
///
/// If the `CLANG_PATH` environment variable is set, that is the instance of
/// `clang` used. Otherwise, a series of directories are searched. First, if
/// a path is supplied, that is the first directory searched. Then, the
/// directory returned by `llvm-config --bindir` is searched. On macOS
/// systems, `xcodebuild -find clang` will next be queried. Last, the
/// directories in the system's `PATH` are searched.
///
/// ## Cross-compilation
///
/// If target arguments are provided (e.g., `-target` followed by a target
/// like `x86_64-unknown-linux-gnu`) then this method will prefer a
/// target-prefixed instance of `clang` (e.g.,
/// `x86_64-unknown-linux-gnu-clang` for the above example).
pub fn find(path: Option<&Path>, args: &[String]) -> Option<Clang> {
if let Ok(path) = env::var("CLANG_PATH") {
let p = Path::new(&path);
if p.is_file() && is_executable(p).unwrap_or(false) {
return Some(Clang::new(p, args));
}
}
// Determine the cross-compilation target, if any.
let mut target = None;
for i in 0..args.len() {
if args[i] == "-target" && i + 1 < args.len() {
target = Some(&args[i + 1]);
}
}
// Collect the paths to search for a `clang` executable in.
let mut paths = vec![];
if let Some(path) = path {
paths.push(path.into());
}
if let Ok(path) = run_llvm_config(&["--bindir"]) {
if let Some(line) = path.lines().next() {
paths.push(line.into());
}
}
if cfg!(target_os = "macos") {
if let Ok((path, _)) = run("xcodebuild", &["-find", "clang"]) {
if let Some(line) = path.lines().next() {
paths.push(line.into());
}
}
}
if let Ok(path) = env::var("PATH") {
paths.extend(env::split_paths(&path));
}
// First, look for a target-prefixed `clang` executable.
if let Some(target) = target {
let default = format!("{}-clang{}", target, env::consts::EXE_SUFFIX);
let versioned = format!("{}-clang-[0-9]*{}", target, env::consts::EXE_SUFFIX);
let patterns = &[&default[..], &versioned[..]];
for path in &paths {
if let Some(path) = find(path, patterns) {
return Some(Clang::new(path, args));
}
}
}
// Otherwise, look for any other `clang` executable.
let default = format!("clang{}", env::consts::EXE_SUFFIX);
let versioned = format!("clang-[0-9]*{}", env::consts::EXE_SUFFIX);
let patterns = &[&default[..], &versioned[..]];
for path in paths {
if let Some(path) = find(&path, patterns) {
return Some(Clang::new(path, args));
}
}
None
}
}
//================================================
// Functions
//================================================
/// Returns the first match to the supplied glob patterns in the supplied
/// directory if there are any matches.
fn find(directory: &Path, patterns: &[&str]) -> Option<PathBuf> {
// Escape the directory in case it contains characters that have special
// meaning in glob patterns (e.g., `[` or `]`).
let directory = if let Some(directory) = directory.to_str() {
Path::new(&Pattern::escape(directory)).to_owned()
} else {
return None;
};
for pattern in patterns {
let pattern = directory.join(pattern).to_string_lossy().into_owned();
if let Some(path) = glob::glob(&pattern).ok()?.filter_map(|p| p.ok()).next() {
if path.is_file() && is_executable(&path).unwrap_or(false) {
return Some(path);
}
}
}
None
}
#[cfg(unix)]
fn is_executable(path: &Path) -> io::Result<bool> {
use std::ffi::CString;
use std::os::unix::ffi::OsStrExt;
let path = CString::new(path.as_os_str().as_bytes())?;
unsafe { Ok(libc::access(path.as_ptr(), libc::X_OK) == 0) }
}
#[cfg(not(unix))]
fn is_executable(_: &Path) -> io::Result<bool> {
Ok(true)
}
/// Attempts to run an executable, returning the `stdout` and `stderr` output if
/// successful.
fn run(executable: &str, arguments: &[&str]) -> Result<(String, String), String> {
Command::new(executable)
.args(arguments)
.output()
.map(|o| {
let stdout = String::from_utf8_lossy(&o.stdout).into_owned();
let stderr = String::from_utf8_lossy(&o.stderr).into_owned();
(stdout, stderr)
})
.map_err(|e| format!("could not run executable `{}`: {}", executable, e))
}
/// Runs `clang`, returning the `stdout` and `stderr` output.
fn run_clang(path: &Path, arguments: &[&str]) -> (String, String) {
run(&path.to_string_lossy(), arguments).unwrap()
}
/// Runs `llvm-config`, returning the `stdout` output if successful.
fn run_llvm_config(arguments: &[&str]) -> Result<String, String> {
let config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".to_string());
run(&config, arguments).map(|(o, _)| o)
}
/// Parses a version number if possible, ignoring trailing non-digit characters.
fn parse_version_number(number: &str) -> Option<c_int> {
number
.chars()
.take_while(|c| c.is_ascii_digit())
.collect::<String>()
.parse()
.ok()
}
/// Parses the version from the output of a `clang` executable if possible.
fn parse_version(path: &Path) -> Option<CXVersion> {
let output = run_clang(path, &["--version"]).0;
let start = output.find("version ")? + 8;
let mut numbers = output[start..].split_whitespace().next()?.split('.');
let major = numbers.next().and_then(parse_version_number)?;
let minor = numbers.next().and_then(parse_version_number)?;
let subminor = numbers.next().and_then(parse_version_number).unwrap_or(0);
Some(CXVersion {
Major: major,
Minor: minor,
Subminor: subminor,
})
}
/// Parses the search paths from the output of a `clang` executable if possible.
fn parse_search_paths(path: &Path, language: &str, args: &[String]) -> Option<Vec<PathBuf>> {
let mut clang_args = vec!["-E", "-x", language, "-", "-v"];
clang_args.extend(args.iter().map(|s| &**s));
let output = run_clang(path, &clang_args).1;
let start = output.find("#include <...> search starts here:")? + 34;
let end = output.find("End of search list.")?;
let paths = output[start..end].replace("(framework directory)", "");
Some(
paths
.lines()
.filter(|l| !l.is_empty())
.map(|l| Path::new(l.trim()).into())
.collect(),
)
}
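As a usage sketch of the helper above, assuming a system `clang` is installed or `CLANG_PATH` points at one:

// Sketch: locate a `clang` executable and report what was parsed from it.
fn main() {
    let clang = clang_sys::support::Clang::find(None, &[])
        .expect("no `clang` executable found");
    println!("clang executable: {}", clang.path.display());
    if let Some(version) = clang.version {
        println!("version: {}.{}.{}", version.Major, version.Minor, version.Subminor);
    }
    if let Some(paths) = clang.c_search_paths {
        println!("C header search paths: {:?}", paths);
    }
}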

third_party/rust/clang-sys/tests/build.rs
@@ -1,281 +1,281 @@
#![allow(dead_code)]
extern crate glob;
extern crate serial_test;
extern crate tempdir;
use std::collections::HashMap;
use std::env;
use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use serial_test::serial;
use tempdir::TempDir;
#[macro_use]
#[path = "../build/macros.rs"]
mod macros;
#[path = "../build/common.rs"]
mod common;
#[path = "../build/dynamic.rs"]
mod dynamic;
#[path = "../build/static.rs"]
mod r#static;
#[derive(Debug, Default)]
struct RunCommandMock {
invocations: Vec<(String, String, Vec<String>)>,
responses: HashMap<Vec<String>, String>,
}
#[derive(Debug)]
struct Env {
os: String,
pointer_width: String,
env: Option<String>,
vars: HashMap<String, (Option<String>, Option<String>)>,
cwd: PathBuf,
tmp: TempDir,
files: Vec<String>,
commands: Arc<Mutex<RunCommandMock>>,
}
impl Env {
fn new(os: &str, pointer_width: &str) -> Self {
Env {
os: os.into(),
pointer_width: pointer_width.into(),
env: None,
vars: HashMap::new(),
cwd: env::current_dir().unwrap(),
tmp: TempDir::new("clang_sys_test").unwrap(),
files: vec![],
commands: Default::default(),
}
.var("CLANG_PATH", None)
.var("LD_LIBRARY_PATH", None)
.var("LIBCLANG_PATH", None)
.var("LIBCLANG_STATIC_PATH", None)
.var("LLVM_CONFIG_PATH", None)
.var("PATH", None)
}
fn env(mut self, env: &str) -> Self {
self.env = Some(env.into());
self
}
fn var(mut self, name: &str, value: Option<&str>) -> Self {
let previous = env::var(name).ok();
self.vars.insert(name.into(), (value.map(|v| v.into()), previous));
self
}
fn dir(mut self, path: &str) -> Self {
self.files.push(path.into());
let path = self.tmp.path().join(path);
fs::create_dir_all(path).unwrap();
self
}
fn file(mut self, path: &str, contents: &[u8]) -> Self {
self.files.push(path.into());
let path = self.tmp.path().join(path);
fs::create_dir_all(path.parent().unwrap()).unwrap();
fs::write(self.tmp.path().join(path), contents).unwrap();
self
}
fn dll(self, path: &str, pointer_width: &str) -> Self {
// PE header.
let mut contents = [0; 64];
contents[0x3C..0x3C + 4].copy_from_slice(&i32::to_le_bytes(10));
contents[10..14].copy_from_slice(&[b'P', b'E', 0, 0]);
let magic = if pointer_width == "64" { 523 } else { 267 };
contents[34..36].copy_from_slice(&u16::to_le_bytes(magic));
self.file(path, &contents)
}
fn so(self, path: &str, pointer_width: &str) -> Self {
// ELF header.
let class = if pointer_width == "64" { 2 } else { 1 };
let contents = [127, 69, 76, 70, class];
self.file(path, &contents)
}
fn command(self, command: &str, args: &[&str], response: &str) -> Self {
let command = command.to_string();
let args = args.iter().map(|a| a.to_string()).collect::<Vec<_>>();
let mut key = vec![command];
key.extend(args);
self.commands.lock().unwrap().responses.insert(key, response.into());
self
}
fn enable(self) -> Self {
env::set_var("_CLANG_SYS_TEST", "yep");
env::set_var("_CLANG_SYS_TEST_OS", &self.os);
env::set_var("_CLANG_SYS_TEST_POINTER_WIDTH", &self.pointer_width);
if let Some(env) = &self.env {
env::set_var("_CLANG_SYS_TEST_ENV", env);
}
for (name, (value, _)) in &self.vars {
if let Some(value) = value {
env::set_var(name, value);
} else {
env::remove_var(name);
}
}
env::set_current_dir(&self.tmp).unwrap();
let commands = self.commands.clone();
let mock = &mut *common::RUN_COMMAND_MOCK.lock().unwrap();
*mock = Some(Box::new(move |command, path, args| {
let command = command.to_string();
let path = path.to_string();
let args = args.iter().map(|a| a.to_string()).collect::<Vec<_>>();
let mut commands = commands.lock().unwrap();
commands.invocations.push((command.clone(), path, args.clone()));
let mut key = vec![command];
key.extend(args);
commands.responses.get(&key).cloned()
}));
self
}
}
impl Drop for Env {
fn drop(&mut self) {
env::remove_var("_CLANG_SYS_TEST");
env::remove_var("_CLANG_SYS_TEST_OS");
env::remove_var("_CLANG_SYS_TEST_POINTER_WIDTH");
env::remove_var("_CLANG_SYS_TEST_ENV");
for (name, (_, previous)) in &self.vars {
if let Some(previous) = previous {
env::set_var(name, previous);
} else {
env::remove_var(name);
}
}
if let Err(error) = env::set_current_dir(&self.cwd) {
println!("Failed to reset working directory: {:?}", error);
}
}
}
//================================================
// Dynamic
//================================================
// Linux -----------------------------------------
#[test]
#[serial]
fn test_linux_directory_preference() {
let _env = Env::new("linux", "64")
.so("usr/lib/libclang.so.1", "64")
.so("usr/local/lib/libclang.so.1", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("usr/local/lib".into(), "libclang.so.1".into())),
);
}
#[test]
#[serial]
fn test_linux_version_preference() {
let _env = Env::new("linux", "64")
.so("usr/lib/libclang-3.so", "64")
.so("usr/lib/libclang-3.5.so", "64")
.so("usr/lib/libclang-3.5.0.so", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("usr/lib".into(), "libclang-3.5.0.so".into())),
);
}
#[test]
#[serial]
fn test_linux_directory_and_version_preference() {
let _env = Env::new("linux", "64")
.so("usr/local/llvm/lib/libclang-3.so", "64")
.so("usr/local/lib/libclang-3.5.so", "64")
.so("usr/lib/libclang-3.5.0.so", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("usr/lib".into(), "libclang-3.5.0.so".into())),
);
}
// Windows ---------------------------------------
#[cfg(target_os = "windows")]
#[test]
#[serial]
fn test_windows_bin_sibling() {
let _env = Env::new("windows", "64")
.dir("Program Files\\LLVM\\lib")
.dll("Program Files\\LLVM\\bin\\libclang.dll", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())),
);
}
#[cfg(target_os = "windows")]
#[test]
#[serial]
fn test_windows_mingw_gnu() {
let _env = Env::new("windows", "64")
.env("gnu")
.dir("MSYS\\MinGW\\lib")
.dll("MSYS\\MinGW\\bin\\clang.dll", "64")
.dir("Program Files\\LLVM\\lib")
.dll("Program Files\\LLVM\\bin\\libclang.dll", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("MSYS\\MinGW\\bin".into(), "clang.dll".into())),
);
}
#[cfg(target_os = "windows")]
#[test]
#[serial]
fn test_windows_mingw_msvc() {
let _env = Env::new("windows", "64")
.env("msvc")
.dir("MSYS\\MinGW\\lib")
.dll("MSYS\\MinGW\\bin\\clang.dll", "64")
.dir("Program Files\\LLVM\\lib")
.dll("Program Files\\LLVM\\bin\\libclang.dll", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())),
);
}
#![allow(dead_code)]
extern crate glob;
extern crate serial_test;
extern crate tempfile;
use std::collections::HashMap;
use std::env;
use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use serial_test::serial;
use tempfile::TempDir;
#[macro_use]
#[path = "../build/macros.rs"]
mod macros;
#[path = "../build/common.rs"]
mod common;
#[path = "../build/dynamic.rs"]
mod dynamic;
#[path = "../build/static.rs"]
mod r#static;
#[derive(Debug, Default)]
struct RunCommandMock {
invocations: Vec<(String, String, Vec<String>)>,
responses: HashMap<Vec<String>, String>,
}
#[derive(Debug)]
struct Env {
os: String,
pointer_width: String,
env: Option<String>,
vars: HashMap<String, (Option<String>, Option<String>)>,
cwd: PathBuf,
tmp: TempDir,
files: Vec<String>,
commands: Arc<Mutex<RunCommandMock>>,
}
impl Env {
fn new(os: &str, pointer_width: &str) -> Self {
Env {
os: os.into(),
pointer_width: pointer_width.into(),
env: None,
vars: HashMap::new(),
cwd: env::current_dir().unwrap(),
tmp: tempfile::Builder::new().prefix("clang_sys_test").tempdir().unwrap(),
files: vec![],
commands: Default::default(),
}
.var("CLANG_PATH", None)
.var("LD_LIBRARY_PATH", None)
.var("LIBCLANG_PATH", None)
.var("LIBCLANG_STATIC_PATH", None)
.var("LLVM_CONFIG_PATH", None)
.var("PATH", None)
}
fn env(mut self, env: &str) -> Self {
self.env = Some(env.into());
self
}
fn var(mut self, name: &str, value: Option<&str>) -> Self {
let previous = env::var(name).ok();
self.vars.insert(name.into(), (value.map(|v| v.into()), previous));
self
}
fn dir(mut self, path: &str) -> Self {
self.files.push(path.into());
let path = self.tmp.path().join(path);
fs::create_dir_all(path).unwrap();
self
}
fn file(mut self, path: &str, contents: &[u8]) -> Self {
self.files.push(path.into());
let path = self.tmp.path().join(path);
fs::create_dir_all(path.parent().unwrap()).unwrap();
fs::write(self.tmp.path().join(path), contents).unwrap();
self
}
fn dll(self, path: &str, pointer_width: &str) -> Self {
// PE header.
let mut contents = [0; 64];
contents[0x3C..0x3C + 4].copy_from_slice(&i32::to_le_bytes(10));
contents[10..14].copy_from_slice(&[b'P', b'E', 0, 0]);
let magic = if pointer_width == "64" { 523 } else { 267 };
contents[34..36].copy_from_slice(&u16::to_le_bytes(magic));
self.file(path, &contents)
}
fn so(self, path: &str, pointer_width: &str) -> Self {
// ELF header.
let class = if pointer_width == "64" { 2 } else { 1 };
let contents = [127, 69, 76, 70, class];
self.file(path, &contents)
}
fn command(self, command: &str, args: &[&str], response: &str) -> Self {
let command = command.to_string();
let args = args.iter().map(|a| a.to_string()).collect::<Vec<_>>();
let mut key = vec![command];
key.extend(args);
self.commands.lock().unwrap().responses.insert(key, response.into());
self
}
fn enable(self) -> Self {
env::set_var("_CLANG_SYS_TEST", "yep");
env::set_var("_CLANG_SYS_TEST_OS", &self.os);
env::set_var("_CLANG_SYS_TEST_POINTER_WIDTH", &self.pointer_width);
if let Some(env) = &self.env {
env::set_var("_CLANG_SYS_TEST_ENV", env);
}
for (name, (value, _)) in &self.vars {
if let Some(value) = value {
env::set_var(name, value);
} else {
env::remove_var(name);
}
}
env::set_current_dir(&self.tmp).unwrap();
let commands = self.commands.clone();
let mock = &mut *common::RUN_COMMAND_MOCK.lock().unwrap();
*mock = Some(Box::new(move |command, path, args| {
let command = command.to_string();
let path = path.to_string();
let args = args.iter().map(|a| a.to_string()).collect::<Vec<_>>();
let mut commands = commands.lock().unwrap();
commands.invocations.push((command.clone(), path, args.clone()));
let mut key = vec![command];
key.extend(args);
commands.responses.get(&key).cloned()
}));
self
}
}
impl Drop for Env {
fn drop(&mut self) {
env::remove_var("_CLANG_SYS_TEST");
env::remove_var("_CLANG_SYS_TEST_OS");
env::remove_var("_CLANG_SYS_TEST_POINTER_WIDTH");
env::remove_var("_CLANG_SYS_TEST_ENV");
for (name, (_, previous)) in &self.vars {
if let Some(previous) = previous {
env::set_var(name, previous);
} else {
env::remove_var(name);
}
}
if let Err(error) = env::set_current_dir(&self.cwd) {
println!("Failed to reset working directory: {:?}", error);
}
}
}
//================================================
// Dynamic
//================================================
// Linux -----------------------------------------
#[test]
#[serial]
fn test_linux_directory_preference() {
let _env = Env::new("linux", "64")
.so("usr/lib/libclang.so.1", "64")
.so("usr/local/lib/libclang.so.1", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("usr/local/lib".into(), "libclang.so.1".into())),
);
}
#[test]
#[serial]
fn test_linux_version_preference() {
let _env = Env::new("linux", "64")
.so("usr/lib/libclang-3.so", "64")
.so("usr/lib/libclang-3.5.so", "64")
.so("usr/lib/libclang-3.5.0.so", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("usr/lib".into(), "libclang-3.5.0.so".into())),
);
}
#[test]
#[serial]
fn test_linux_directory_and_version_preference() {
let _env = Env::new("linux", "64")
.so("usr/local/llvm/lib/libclang-3.so", "64")
.so("usr/local/lib/libclang-3.5.so", "64")
.so("usr/lib/libclang-3.5.0.so", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("usr/lib".into(), "libclang-3.5.0.so".into())),
);
}
// Windows ---------------------------------------
#[cfg(target_os = "windows")]
#[test]
#[serial]
fn test_windows_bin_sibling() {
let _env = Env::new("windows", "64")
.dir("Program Files\\LLVM\\lib")
.dll("Program Files\\LLVM\\bin\\libclang.dll", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())),
);
}
#[cfg(target_os = "windows")]
#[test]
#[serial]
fn test_windows_mingw_gnu() {
let _env = Env::new("windows", "64")
.env("gnu")
.dir("MSYS\\MinGW\\lib")
.dll("MSYS\\MinGW\\bin\\clang.dll", "64")
.dir("Program Files\\LLVM\\lib")
.dll("Program Files\\LLVM\\bin\\libclang.dll", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("MSYS\\MinGW\\bin".into(), "clang.dll".into())),
);
}
#[cfg(target_os = "windows")]
#[test]
#[serial]
fn test_windows_mingw_msvc() {
let _env = Env::new("windows", "64")
.env("msvc")
.dir("MSYS\\MinGW\\lib")
.dll("MSYS\\MinGW\\bin\\clang.dll", "64")
.dir("Program Files\\LLVM\\lib")
.dll("Program Files\\LLVM\\bin\\libclang.dll", "64")
.enable();
assert_eq!(
dynamic::find(true),
Ok(("Program Files\\LLVM\\bin".into(), "libclang.dll".into())),
);
}
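A note on the fake binaries the harness writes: `so()` emits only the ELF identification bytes and `dll()` a minimal PE stub, just enough for the build script's file probing to classify pointer width. A small self-contained check of those constants, for illustration only:

// Illustration of the header bytes fabricated by the test harness above.
fn main() {
    // ELF: magic 0x7F 'E' 'L' 'F', then EI_CLASS (1 = 32-bit, 2 = 64-bit).
    let elf64 = [127u8, b'E', b'L', b'F', 2];
    assert_eq!(&elf64[..4], b"\x7fELF");
    assert_eq!(elf64[4], 2);
    // PE: the optional-header magic distinguishes PE32 (0x10B = 267)
    // from PE32+ (0x20B = 523), which is how pointer width is encoded.
    assert_eq!(0x10Bu16, 267);
    assert_eq!(0x20Bu16, 523);
}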

third_party/rust/clang-sys/tests/header.h
@@ -1,6 +1,6 @@
#ifndef HEADER_H_
#define HEADER_H_
int add(int a, int b);
#endif

third_party/rust/clang-sys/tests/lib.rs
@@ -1,55 +1,55 @@
extern crate clang_sys;
extern crate libc;
use std::ptr;
use clang_sys::*;
use libc::c_char;
fn parse() {
unsafe {
let index = clang_createIndex(0, 0);
assert!(!index.is_null());
let tu = clang_parseTranslationUnit(
index,
"tests/header.h\0".as_ptr() as *const c_char,
ptr::null_mut(),
0,
ptr::null_mut(),
0,
0,
);
assert!(!tu.is_null());
}
}
#[cfg(feature = "runtime")]
#[test]
fn test() {
load().unwrap();
let library = get_library().unwrap();
println!("{:?} ({:?})", library.version(), library.path());
parse();
unload().unwrap();
}
#[cfg(not(feature = "runtime"))]
#[test]
fn test() {
parse();
}
#[test]
fn test_support() {
let clang = support::Clang::find(None, &[]).unwrap();
println!("{:?}", clang);
}
#[test]
fn test_support_target() {
let args = &["-target".into(), "x86_64-unknown-linux-gnu".into()];
let clang = support::Clang::find(None, args).unwrap();
println!("{:?}", clang);
}

@@ -1 +1 @@
{"files":{"Cargo.toml":"b3a6e03dbc48515bbca791eba64096ab43dddafa6c60eec2b824369e2afb31b8","LICENSE":"8c044baa5d883274736eeece0b955249076c2697b826e576fce59496235b2cf5","src/capi.rs":"d9bc993ea8bb51220c5cdc54488bc795f74411fe688332bd80dbb062c41342bb","src/lib.rs":"4ce6db20f88487e99057ebeae0615fa1c99e82cfee0ab0825936da2e108fc73e","src/log.rs":"968b839a8a5a56fe42e372678fdd580e2777e1dba8c4f0bee6fbfe5e3f03b860","src/ops.rs":"e631eba0d1ef8c13287361be32c0806d7869f598f58266bab0a45642521fa07e","src/traits.rs":"45e3e58772dd0ff40bf36d56dd9f1fc682cc08cea570022fa50eae1c34d43a3a","tests/test_capi.rs":"783c8321dceb33a414168ae64e162f934015144f49118d868557009819a20e06"},"package":"b00b0f3b84e315571bd8c4e18794180633066267a413f2f05bca65001adc8410"}
{"files":{"Cargo.toml":"ea3e08eb6e9fd52717bf756cf2a7063afeb3af617df0dee2f14591f45b9e6f62","LICENSE":"8c044baa5d883274736eeece0b955249076c2697b826e576fce59496235b2cf5","src/capi.rs":"d9bc993ea8bb51220c5cdc54488bc795f74411fe688332bd80dbb062c41342bb","src/lib.rs":"4ce6db20f88487e99057ebeae0615fa1c99e82cfee0ab0825936da2e108fc73e","src/log.rs":"968b839a8a5a56fe42e372678fdd580e2777e1dba8c4f0bee6fbfe5e3f03b860","src/ops.rs":"e631eba0d1ef8c13287361be32c0806d7869f598f58266bab0a45642521fa07e","src/traits.rs":"876ea164c03d198c92a61d9b11d2839c3a257ae48042b9cd2d9ececcf00d373a","tests/test_capi.rs":"783c8321dceb33a414168ae64e162f934015144f49118d868557009819a20e06"},"package":"67361fe9b49b4599e2a230ce322529b6ddd91df14897c872dcede716f8fbca81"}

third_party/rust/cubeb-backend/Cargo.toml
@@ -11,7 +11,7 @@
[package]
name = "cubeb-backend"
version = "0.12.0"
version = "0.13.0"
authors = ["Dan Glastonbury <dglastonbury@mozilla.com>"]
description = """
Bindings to libcubeb internals to facilitate implementing cubeb backends in rust.
@@ -23,7 +23,7 @@ license = "ISC"
repository = "https://github.com/mozilla/cubeb-rs"
[dependencies.cubeb-core]
version = "0.12.0"
version = "0.13.0"
[features]
gecko-in-tree = ["cubeb-core/gecko-in-tree"]

@@ -24,7 +24,7 @@ pub trait ContextOps {
collection: &DeviceCollectionRef,
) -> Result<()>;
fn device_collection_destroy(&mut self, collection: &mut DeviceCollectionRef) -> Result<()>;
#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
#[allow(clippy::too_many_arguments)]
fn stream_init(
&mut self,
stream_name: Option<&CStr>,

@@ -1 +1 @@
{"files":{"Cargo.toml":"9f1c5d546cc66c991eafd6369bb8799f6021e677217b86f092d08c6a1e6fee6a","LICENSE":"8c044baa5d883274736eeece0b955249076c2697b826e576fce59496235b2cf5","src/builders.rs":"87adbbdd63fde848eeaae4a79a04859ed02ca982fb644f6b8b5fe7c158a8944c","src/call.rs":"cf8949c2cf53f5418d8e5222f570b9301a98a476a18c734fc2ebbc8b53ac0165","src/channel.rs":"c88b8846e53bbec6b125a4fa6a3787c48312be958bd08cc5ee773b218d93d683","src/context.rs":"ac5937037e69f32bb3ef5985d1921f53a2651d0068c12f90762eb0e18489e93e","src/device.rs":"86fd507bf1cd97b13b8cf3e92519b3be11904be68d4997658b276d5054162cd7","src/device_collection.rs":"f86535ffeee73b889bdbac1c5a3432b01cca86df819a7cc26eaa0c983d30cbbe","src/error.rs":"3693bf575e9504b9b1ab114de3e2ce7132039acf27ec68967dae66c2ce1581aa","src/ffi_types.rs":"71948d0949675df876e18c4340f4e693fc6a14c4de9d63a3885450c95577596b","src/format.rs":"7162c1550be53f5fe94b0bba4c71fea2f0304462108657c62a20ea47207ca413","src/lib.rs":"57ecf793ab1cc052c021ccbf9ad011ea52d1303e2dbc54adea90a95b5d7a36cf","src/log.rs":"4bef74d7a7fd9ffdf249be5ef95781969f00a7ecff2d3a190d5b1536d1635a73","src/stream.rs":"9fbfc65f77fb1009f8578c530744276828a6d6778f53b96decb698d937c2b098","src/util.rs":"308cfbaacd615ff600e74415c52daeef007fff34a4a0648a73c0042f6067f84f"},"package":"2380c03a7df0ea3744f6a210d6340f423935e53cbf2fd68ada84b5e808e46ac7"}
{"files":{"Cargo.toml":"23878f9ab656986033932729656b52e131f3fc186be68fa7bbef288d6d7590d8","LICENSE":"8c044baa5d883274736eeece0b955249076c2697b826e576fce59496235b2cf5","src/builders.rs":"584d3e911d55746ab6ff9aed4c5901a73a9518ac14fa2e589fe169541ec78e33","src/call.rs":"cf8949c2cf53f5418d8e5222f570b9301a98a476a18c734fc2ebbc8b53ac0165","src/channel.rs":"c88b8846e53bbec6b125a4fa6a3787c48312be958bd08cc5ee773b218d93d683","src/context.rs":"f58f8562599d27ab4a9bf76d429a5b7dc971449c20245803fc857ddb6e3090cc","src/device.rs":"86fd507bf1cd97b13b8cf3e92519b3be11904be68d4997658b276d5054162cd7","src/device_collection.rs":"f86535ffeee73b889bdbac1c5a3432b01cca86df819a7cc26eaa0c983d30cbbe","src/error.rs":"3693bf575e9504b9b1ab114de3e2ce7132039acf27ec68967dae66c2ce1581aa","src/ffi_types.rs":"71948d0949675df876e18c4340f4e693fc6a14c4de9d63a3885450c95577596b","src/format.rs":"7162c1550be53f5fe94b0bba4c71fea2f0304462108657c62a20ea47207ca413","src/lib.rs":"57ecf793ab1cc052c021ccbf9ad011ea52d1303e2dbc54adea90a95b5d7a36cf","src/log.rs":"4bef74d7a7fd9ffdf249be5ef95781969f00a7ecff2d3a190d5b1536d1635a73","src/stream.rs":"9fbfc65f77fb1009f8578c530744276828a6d6778f53b96decb698d937c2b098","src/util.rs":"308cfbaacd615ff600e74415c52daeef007fff34a4a0648a73c0042f6067f84f"},"package":"ac08d314dd1ec6d41d9ccdeec70899c98ed3b89845367000dd6096099481bc73"}

third_party/rust/cubeb-core/Cargo.toml
@@ -11,7 +11,7 @@
[package]
name = "cubeb-core"
version = "0.12.0"
version = "0.13.0"
authors = ["Dan Glastonbury <dglastonbury@mozilla.com>"]
description = """
Common types and definitions for cubeb rust and C bindings. Not intended for direct use.
@@ -26,7 +26,7 @@ repository = "https://github.com/mozilla/cubeb-rs"
version = "1.2.0"
[dependencies.cubeb-sys]
version = "0.12.0"
version = "0.13"
[features]
gecko-in-tree = ["cubeb-sys/gecko-in-tree"]

third_party/rust/cubeb-core/src/builders.rs
@@ -6,7 +6,6 @@
use ffi;
use {ChannelLayout, SampleFormat, StreamParams, StreamPrefs};
///
#[derive(Debug)]
pub struct StreamParamsBuilder(ffi::cubeb_stream_params);

third_party/rust/cubeb-core/src/context.rs
@@ -95,7 +95,7 @@ impl ContextRef {
///
/// This function is unsafe because it dereferences the given `data_callback`, `state_callback`, and `user_ptr` pointers.
/// The caller should ensure those pointers are valid.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
#[allow(clippy::too_many_arguments)]
pub unsafe fn stream_init(
&self,
stream_name: Option<&CStr>,

@@ -1 +1 @@
{"files":{".circleci/config.yml":"7f3dc865105ca8f33965a7958b1fe2e627ae2d5a703f3b2a4ab6e2e796018597",".editorconfig":"4e53b182bcc78b83d7e1b5c03efa14d22d4955c4ed2514d1ba4e99c1eb1a50ba",".githooks/pre-push":"8b8b26544cd56f54c0c33812551f786bb25cb08c86dbfeb6bf3daad881c826a1",".github/workflows/test.yml":"ac8f4cf5b7631b5c738d50c0cf78113bd395940b9e76593904bbaf2d02d16a70",".travis.yml":"dc07bac53f70f16c9bdf52264bdc58500ae6018c1b4c567bc7642f6b4ca3cc35","Cargo.toml":"a16b883e4fb41bdbbe5f68158040f181aeeffb4573ab0d493e9452f7c6f00541","LICENSE":"6e6f56aff5bbf3cbc60747e152fb1a719bd0716aaf6d711c554f57d92e96297c","README.md":"0007782a05a5330f739ad789c19c82562c82e32386b0447000fc72c0d48405bc","build-audiounit-rust-in-cubeb.sh":"d228a05985dcd02ec1ecac66a2b64dae5a530804a25a7054ccc95905aedfb7ef","install_git_hook.sh":"d38c8e51e636f6b90b489621ac34ccd1d1b1f40dccce3d178ed1da1c5068f16d","install_rustfmt_clippy.sh":"4ae90d8dcb9757cb3ae4ae142ef80e5377c0dde61c63f4a3c32418646e80ca7b","run_device_tests.sh":"90c2542fa3ff8a35fed894fae3a1aa0157117b7f0e28df14b8e6f7b1f1f43797","run_sanitizers.sh":"84e93a0da137803018f37403511e8c92760be730426bf6cea34419d93d1a7ff8","run_tests.sh":"bae82f66dd47a060b6fdcc238520084aec1079d5b1b1d66d103baa1ffaa8773d","src/backend/aggregate_device.rs":"db7d644358090b1d65ff2d53ad854369790ae4ad7dfa12b79888c0002c1b4950","src/backend/auto_release.rs":"050fdcee74cf46b9a8a85a877e166d72a853d33220f59cf734cbb6ea09daa441","src/backend/buffer_manager.rs":"e9bcf964347daa8952f98caa2746e34a31ea8908375204896593f56e4b6147ca","src/backend/device_property.rs":"a7622feaa41db1cd76fd35a85a022e44f4894e396a104a59008d5b8757d2ab4e","src/backend/mixer.rs":"c4d09291598cbffb2217b551770ec590f34b6dd6b461dd99b019d5bb70f0eef3","src/backend/mod.rs":"d75e116a58d63c6a7cb281d160066f48c8c449702dad58c762ad50d9512d7bd3","src/backend/resampler.rs":"48bf8f56ae8d60dbabca6417b768000619abee8731ac3902164b45651ac08a4d","src/backend/tests/aggregate_device.rs":"770cf90f32b5ab2203476031c1fbc8379b713baa97bec36f7fd0d77fef1efd60","src/backend/tests/api.rs":"773e88b506efccf0eacbf408d34dea1fb2c5a8500e7fe8a494a97f15f1ea41fc","src/backend/tests/backlog.rs":"3b189a7e036543c467cc242af0ed3332721179ee2b1c8847a6db563546f1ac52","src/backend/tests/device_change.rs":"babf50326fb38db24fe80f24f546e1b6ad04319ae8835bb372d893fc9b3038a2","src/backend/tests/device_property.rs":"73c25f579a995e8a59c9b7d391813afb75f739b5e2f825480cba04499a1d46e8","src/backend/tests/interfaces.rs":"654333cd6d6023e72ba392d98872d33bc55f8f052205a9f701aec72069449e24","src/backend/tests/manual.rs":"e550cc8bb7619bb80b68e49bf7f475c029e0f1b34323d1d30edcbe322cf4efc7","src/backend/tests/mod.rs":"8dba770023d7f9c4228f0e11915347f0e07da5fd818e3ee4478c4b197af9aa2a","src/backend/tests/parallel.rs":"a7ebd579339c40ca64c0757cc9da6baec641e670f226e1b2ec5049894700bd7a","src/backend/tests/tone.rs":"b028c67777b6453a26190b6a49785dfe28556adcbe179cb10862ce0d47ee8509","src/backend/tests/utils.rs":"80d7e4ebc06b23c63a4d2867e0c80e0bfe05449fa55edd21e785ed2c089bf7d5","src/backend/utils.rs":"6c3ffbcd602e6cc9f56deb9ecb07b2eef2e6f074ef924178e466f380aae5c595","src/capi.rs":"21b66b70545bf04ec719928004d1d9adb45b24ced51288f5b2993d79aaf78f5f","src/lib.rs":"5e586d45cd6b3722f0a6736d9252593299269817a153eef1930a5fb9bfbb56f5","todo.md":"efc1f012eb9a331a040cad4ac03aa79307f25885f71b6fb38f3ad7af8d7d515c"},"package":null}
{"files":{".circleci/config.yml":"7f3dc865105ca8f33965a7958b1fe2e627ae2d5a703f3b2a4ab6e2e796018597",".editorconfig":"4e53b182bcc78b83d7e1b5c03efa14d22d4955c4ed2514d1ba4e99c1eb1a50ba",".githooks/pre-push":"8b8b26544cd56f54c0c33812551f786bb25cb08c86dbfeb6bf3daad881c826a1",".github/workflows/test.yml":"cf6ebe6d41b022897360866b526d19ba8843aa82ae99a1d28393985576b6a782",".travis.yml":"dc07bac53f70f16c9bdf52264bdc58500ae6018c1b4c567bc7642f6b4ca3cc35","Cargo.toml":"2698cf87581d8d551ed3ac5875564720ed23d7b788e8d145d4281c8026203cd2","LICENSE":"6e6f56aff5bbf3cbc60747e152fb1a719bd0716aaf6d711c554f57d92e96297c","README.md":"0007782a05a5330f739ad789c19c82562c82e32386b0447000fc72c0d48405bc","build-audiounit-rust-in-cubeb.sh":"d228a05985dcd02ec1ecac66a2b64dae5a530804a25a7054ccc95905aedfb7ef","install_git_hook.sh":"d38c8e51e636f6b90b489621ac34ccd1d1b1f40dccce3d178ed1da1c5068f16d","install_rustfmt_clippy.sh":"4ae90d8dcb9757cb3ae4ae142ef80e5377c0dde61c63f4a3c32418646e80ca7b","run_device_tests.sh":"90c2542fa3ff8a35fed894fae3a1aa0157117b7f0e28df14b8e6f7b1f1f43797","run_sanitizers.sh":"84e93a0da137803018f37403511e8c92760be730426bf6cea34419d93d1a7ff8","run_tests.sh":"bae82f66dd47a060b6fdcc238520084aec1079d5b1b1d66d103baa1ffaa8773d","src/backend/aggregate_device.rs":"6e94c36c09081a728b1ab748b460fe8f538cf5f50bc62fd47171a393fe2d609a","src/backend/auto_release.rs":"050fdcee74cf46b9a8a85a877e166d72a853d33220f59cf734cbb6ea09daa441","src/backend/buffer_manager.rs":"e9bcf964347daa8952f98caa2746e34a31ea8908375204896593f56e4b6147ca","src/backend/device_property.rs":"0714b90c3187b0b1709f5e4b7757e1b434659276e00db48a3f3270fbfd429640","src/backend/mixer.rs":"c4d09291598cbffb2217b551770ec590f34b6dd6b461dd99b019d5bb70f0eef3","src/backend/mod.rs":"cfda5e4d5f7d3f6fda65fbcbf19bb114cdd2d9b6750c03967a4432bd1bfb788e","src/backend/resampler.rs":"48bf8f56ae8d60dbabca6417b768000619abee8731ac3902164b45651ac08a4d","src/backend/tests/aggregate_device.rs":"48e291b355a7c0c643fc58e9d238ed00234b4f1ac0f4c26737cc74862d4f2ac8","src/backend/tests/api.rs":"ef3babcd3410394b8d5bcdaf0ea526486b14d8e42f33211997aafe179430bf4a","src/backend/tests/backlog.rs":"3b189a7e036543c467cc242af0ed3332721179ee2b1c8847a6db563546f1ac52","src/backend/tests/device_change.rs":"babf50326fb38db24fe80f24f546e1b6ad04319ae8835bb372d893fc9b3038a2","src/backend/tests/device_property.rs":"73c25f579a995e8a59c9b7d391813afb75f739b5e2f825480cba04499a1d46e8","src/backend/tests/interfaces.rs":"cd58614435574444d8a1f039dc201cf371cccacd58efbae8ed8fbff919550d0a","src/backend/tests/manual.rs":"16dca201d7a7c6d37186aafdee277d437fc2ce5bbd215f33e660c6cb971395de","src/backend/tests/mod.rs":"8dba770023d7f9c4228f0e11915347f0e07da5fd818e3ee4478c4b197af9aa2a","src/backend/tests/parallel.rs":"a7ebd579339c40ca64c0757cc9da6baec641e670f226e1b2ec5049894700bd7a","src/backend/tests/tone.rs":"b028c67777b6453a26190b6a49785dfe28556adcbe179cb10862ce0d47ee8509","src/backend/tests/utils.rs":"21c8e7f6f18da0f8d33733ad0fc981041b43586db6a637c3f7aec7e7b3936aed","src/backend/utils.rs":"6c3ffbcd602e6cc9f56deb9ecb07b2eef2e6f074ef924178e466f380aae5c595","src/capi.rs":"21b66b70545bf04ec719928004d1d9adb45b24ced51288f5b2993d79aaf78f5f","src/lib.rs":"5e586d45cd6b3722f0a6736d9252593299269817a153eef1930a5fb9bfbb56f5","todo.md":"efc1f012eb9a331a040cad4ac03aa79307f25885f71b6fb38f3ad7af8d7d515c"},"package":null}


@ -33,10 +33,13 @@ jobs:
rustc --version
cargo --version
- name: Setup Audio
- name: Setup switchaudio
if: ${{ matrix.os == 'macos-13' || matrix.os == 'macos-14' }}
run: |
brew install switchaudio-osx
- name: Setup blackhole-2ch
run: |
brew install blackhole-2ch
SwitchAudioSource -s "BlackHole 2ch" -t input
SwitchAudioSource -s "BlackHole 2ch" -t output

third_party/rust/cubeb-coreaudio/Cargo.toml (vendored)

@ -29,7 +29,7 @@ crate-type = [
atomic = "0.4"
audio-mixer = "0.2"
bitflags = "2"
cubeb-backend = "0.12.0"
cubeb-backend = "0.13"
float-cmp = "0.6"
lazy_static = "1.2"
libc = "0.2"


@ -149,11 +149,12 @@ impl AggregateDevice {
pub fn create_blank_device_sync(
plugin_id: AudioObjectID,
) -> std::result::Result<AudioObjectID, Error> {
debug_assert_running_serially();
let waiting_time = Duration::new(5, 0);
let condvar_pair = Arc::new((Mutex::new(Vec::<AudioObjectID>::new()), Condvar::new()));
let condvar_pair = Arc::new((Mutex::new(()), Condvar::new()));
let mut cloned_condvar_pair = condvar_pair.clone();
let data_ptr = &mut cloned_condvar_pair as *mut Arc<(Mutex<Vec<AudioObjectID>>, Condvar)>;
let data_ptr = &mut cloned_condvar_pair as *mut Arc<(Mutex<()>, Condvar)>;
let address = get_property_address(
Property::HardwareDevices,
@ -182,18 +183,18 @@ impl AggregateDevice {
// Wait until the aggregate is created.
let (lock, cvar) = &*condvar_pair;
let devices = lock.lock().unwrap();
if !devices.contains(&device) {
let (devs, timeout_res) = cvar.wait_timeout(devices, waiting_time).unwrap();
if timeout_res.timed_out() {
cubeb_log!(
"Time out for waiting the creation of aggregate device {}!",
device
);
}
if !devs.contains(&device) {
return Err(Error::from(waiting_time));
}
let guard = lock.lock().unwrap();
let (_guard, timeout_res) = cvar
.wait_timeout_while(guard, waiting_time, |()| {
!audiounit_get_devices().contains(&device)
})
.unwrap();
if timeout_res.timed_out() {
cubeb_log!(
"Time out for waiting the creation of aggregate device {}!",
device
);
return Err(Error::from(waiting_time));
}
extern "C" fn devices_changed_callback(
@ -203,10 +204,9 @@ impl AggregateDevice {
data: *mut c_void,
) -> OSStatus {
assert_eq!(id, kAudioObjectSystemObject);
let pair = unsafe { &mut *(data as *mut Arc<(Mutex<Vec<AudioObjectID>>, Condvar)>) };
let pair = unsafe { &mut *(data as *mut Arc<(Mutex<()>, Condvar)>) };
let (lock, cvar) = &**pair;
let mut devices = lock.lock().unwrap();
*devices = audiounit_get_devices();
let _guard = lock.lock().unwrap();
cvar.notify_one();
NO_ERR
}
@ -218,6 +218,7 @@ impl AggregateDevice {
plugin_id: AudioObjectID,
) -> std::result::Result<AudioObjectID, Error> {
assert_ne!(plugin_id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = AudioObjectPropertyAddress {
mSelector: kAudioPlugInCreateAggregateDevice,
@ -306,6 +307,7 @@ impl AggregateDevice {
input_id: AudioDeviceID,
output_id: AudioDeviceID,
) -> std::result::Result<(), Error> {
debug_assert_running_serially();
let address = AudioObjectPropertyAddress {
mSelector: kAudioAggregateDevicePropertyFullSubDeviceList,
mScope: kAudioObjectPropertyScopeGlobal,
@ -392,6 +394,7 @@ impl AggregateDevice {
assert_ne!(input_id, kAudioObjectUnknown);
assert_ne!(output_id, kAudioObjectUnknown);
assert_ne!(input_id, output_id);
debug_assert_running_serially();
let output_sub_devices = Self::get_sub_devices(output_id)?;
let input_sub_devices = Self::get_sub_devices(input_id)?;
@ -431,6 +434,7 @@ impl AggregateDevice {
device_id: AudioDeviceID,
) -> std::result::Result<Vec<AudioObjectID>, Error> {
assert_ne!(device_id, kAudioObjectUnknown);
debug_assert_running_serially();
let mut sub_devices = Vec::new();
let address = AudioObjectPropertyAddress {
@ -468,6 +472,7 @@ impl AggregateDevice {
}
pub fn get_master_device_uid(device_id: AudioDeviceID) -> std::result::Result<String, Error> {
debug_assert_running_serially();
let address = AudioObjectPropertyAddress {
mSelector: kAudioAggregateDevicePropertyMainSubDevice,
mScope: kAudioObjectPropertyScopeGlobal,
@ -495,6 +500,7 @@ impl AggregateDevice {
) -> std::result::Result<(), Error> {
assert_ne!(device_id, kAudioObjectUnknown);
assert_ne!(primary_id, kAudioObjectUnknown);
debug_assert_running_serially();
cubeb_log!(
"Set master device of the aggregate device {} to device {}",
@ -526,6 +532,7 @@ impl AggregateDevice {
device_id: AudioObjectID,
) -> std::result::Result<(), Error> {
assert_ne!(device_id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = AudioObjectPropertyAddress {
mSelector: kAudioObjectPropertyOwnedObjects,
mScope: kAudioObjectPropertyScopeGlobal,
@ -609,6 +616,7 @@ impl AggregateDevice {
) -> std::result::Result<(), Error> {
assert_ne!(plugin_id, kAudioObjectUnknown);
assert_ne!(device_id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = AudioObjectPropertyAddress {
mSelector: kAudioPlugInDestroyAggregateDevice,
@ -640,6 +648,7 @@ impl AggregateDevice {
assert_ne!(input_id, kAudioObjectUnknown);
assert_ne!(output_id, kAudioObjectUnknown);
assert_ne!(input_id, output_id);
debug_assert_running_serially();
let label = get_device_label(input_id, DeviceType::INPUT)?;
let input_label = label.into_string();
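
The hunk above replaces the Vec-backed condvar handshake with Condvar::wait_timeout_while, re-checking audiounit_get_devices() under a unit mutex until the new aggregate device appears or the five-second timeout elapses. A minimal standalone sketch of that wait pattern, with an illustrative device_is_present closure standing in for the real device query:

use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;

// Sketch of the wait pattern used above. The waiter holds a unit mutex and
// re-evaluates an external condition (here the illustrative `device_is_present`)
// each time the condvar is notified, until the condition holds or the timeout
// elapses. Returns true if the condition became true in time.
fn wait_until_present(
    pair: &Arc<(Mutex<()>, Condvar)>,
    waiting_time: Duration,
    device_is_present: impl Fn() -> bool,
) -> bool {
    let (lock, cvar) = &**pair;
    let guard = lock.lock().unwrap();
    let (_guard, timeout_res) = cvar
        .wait_timeout_while(guard, waiting_time, |_| !device_is_present())
        .unwrap();
    !timeout_res.timed_out()
}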


@ -5,6 +5,7 @@ pub fn get_device_uid(
devtype: DeviceType,
) -> std::result::Result<StringRef, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceUID, devtype);
let mut size = mem::size_of::<CFStringRef>();
@ -22,6 +23,7 @@ pub fn get_device_model_uid(
devtype: DeviceType,
) -> std::result::Result<StringRef, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::ModelUID, devtype);
let mut size = mem::size_of::<CFStringRef>();
@ -39,6 +41,7 @@ pub fn get_device_transport_type(
devtype: DeviceType,
) -> std::result::Result<u32, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::TransportType, devtype);
let mut size = mem::size_of::<u32>();
@ -56,6 +59,7 @@ pub fn get_device_source(
devtype: DeviceType,
) -> std::result::Result<u32, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceSource, devtype);
let mut size = mem::size_of::<u32>();
@ -73,6 +77,7 @@ pub fn get_device_source_name(
devtype: DeviceType,
) -> std::result::Result<StringRef, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let mut source: u32 = get_device_source(id, devtype)?;
let address = get_property_address(Property::DeviceSourceName, devtype);
@ -97,6 +102,7 @@ pub fn get_device_name(
devtype: DeviceType,
) -> std::result::Result<StringRef, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceName, devtype);
let mut size = mem::size_of::<CFStringRef>();
@ -114,6 +120,7 @@ pub fn get_device_manufacturer(
devtype: DeviceType,
) -> std::result::Result<StringRef, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceManufacturer, devtype);
let mut size = mem::size_of::<CFStringRef>();
@ -131,6 +138,7 @@ pub fn get_device_buffer_frame_size_range(
devtype: DeviceType,
) -> std::result::Result<AudioValueRange, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceBufferFrameSizeRange, devtype);
let mut size = mem::size_of::<AudioValueRange>();
@ -148,6 +156,7 @@ pub fn get_device_latency(
devtype: DeviceType,
) -> std::result::Result<u32, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceLatency, devtype);
let mut size = mem::size_of::<u32>();
@ -165,6 +174,7 @@ pub fn get_device_streams(
devtype: DeviceType,
) -> std::result::Result<Vec<AudioStreamID>, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceStreams, devtype);
@ -188,6 +198,7 @@ pub fn get_device_sample_rate(
devtype: DeviceType,
) -> std::result::Result<f64, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceSampleRate, devtype);
let mut size = mem::size_of::<f64>();
@ -205,6 +216,7 @@ pub fn get_ranges_of_device_sample_rate(
devtype: DeviceType,
) -> std::result::Result<Vec<AudioValueRange>, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::DeviceSampleRates, devtype);
@ -225,6 +237,7 @@ pub fn get_ranges_of_device_sample_rate(
pub fn get_stream_latency(id: AudioStreamID) -> std::result::Result<u32, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(
Property::StreamLatency,
@ -242,6 +255,7 @@ pub fn get_stream_latency(id: AudioStreamID) -> std::result::Result<u32, OSStatu
pub fn get_stream_terminal_type(id: AudioStreamID) -> std::result::Result<u32, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(
Property::StreamTerminalType,
@ -261,6 +275,7 @@ pub fn get_stream_virtual_format(
id: AudioStreamID,
) -> std::result::Result<AudioStreamBasicDescription, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(
Property::StreamVirtualFormat,
@ -281,6 +296,7 @@ pub fn get_clock_domain(
devtype: DeviceType,
) -> std::result::Result<u32, OSStatus> {
assert_ne!(id, kAudioObjectUnknown);
debug_assert_running_serially();
let address = get_property_address(Property::ClockDomain, devtype);
let mut size = mem::size_of::<u32>();
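
Every property getter in this file now opens with debug_assert_running_serially(), a debug-only check that the caller is on the backend's serial task queue. The crate's real guard inspects the dispatch queue; the thread-local below is only an illustrative stand-in for the same idea, not the actual implementation:

use std::cell::Cell;

thread_local! {
    // Illustrative flag only; the real guard tracks the serial dispatch
    // queue rather than a thread-local boolean.
    static ON_SERIAL_QUEUE: Cell<bool> = Cell::new(false);
}

fn debug_assert_running_serially_sketch() {
    debug_assert!(
        ON_SERIAL_QUEUE.with(|flag| flag.get()),
        "must be called from the serial queue"
    );
}

fn run_serially_sketch<T>(work: impl FnOnce() -> T) -> T {
    ON_SERIAL_QUEUE.with(|flag| flag.set(true));
    let result = work();
    ON_SERIAL_QUEUE.with(|flag| flag.set(false));
    result
}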


@ -33,7 +33,7 @@ use self::mixer::*;
use self::resampler::*;
use self::utils::*;
use backend::ringbuf::RingBuffer;
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
use cubeb_backend::ffi::cubeb_audio_dump_stream_t;
use cubeb_backend::{
ffi, ChannelLayout, Context, ContextOps, DeviceCollectionRef, DeviceId, DeviceRef, DeviceType,
@ -116,7 +116,7 @@ lazy_static! {
};
}
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
fn dump_audio(stream: cubeb_audio_dump_stream_t, audio_samples: *mut c_void, count: u32) {
unsafe {
let rv = ffi::cubeb_audio_dump_write(stream, audio_samples, count);
@ -569,7 +569,7 @@ extern "C" fn audiounit_input_callback(
} else {
assert_eq!(status, NO_ERR);
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
dump_audio(
stm.core_stream_data.audio_dump_input,
@ -628,15 +628,15 @@ extern "C" fn audiounit_input_callback(
0,
);
if outframes < 0 {
stm.stopped.store(true, Ordering::SeqCst);
stm.notify_state_changed(State::Error);
let queue = stm.queue.clone();
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
queue.run_async(move || {
stm.core_stream_data.stop_audiounits();
});
return handle;
if !stm.stopped.swap(true, Ordering::SeqCst) {
stm.notify_state_changed(State::Error);
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
stm.queue.clone().run_async(move || {
stm.core_stream_data.stop_audiounits();
});
}
return ErrorHandle::Return(status);
}
if outframes < total_input_frames {
stm.draining.store(true, Ordering::SeqCst);
@ -654,15 +654,16 @@ extern "C" fn audiounit_input_callback(
// If the input (input-only stream) is drained, cancel this callback. Whenever an output
// is involved, the output callback handles stopping all units and notifying of state.
if stm.core_stream_data.output_unit.is_null() && stm.draining.load(Ordering::SeqCst) {
stm.stopped.store(true, Ordering::SeqCst);
if stm.core_stream_data.output_unit.is_null()
&& stm.draining.load(Ordering::SeqCst)
&& !stm.stopped.swap(true, Ordering::SeqCst)
{
cubeb_alog!("({:p}) Input-only drained.", stm as *const AudioUnitStream);
stm.notify_state_changed(State::Drained);
let queue = stm.queue.clone();
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
let stm_ptr = user_ptr as usize;
queue.run_async(move || {
stm.queue.clone().run_async(move || {
let stm = unsafe { &mut *(stm_ptr as *mut AudioUnitStream) };
stm.core_stream_data.stop_audiounits();
});
@ -735,7 +736,7 @@ extern "C" fn audiounit_output_callback(
if stm.stopped.load(Ordering::SeqCst) {
cubeb_alog!("({:p}) output stopped.", stm as *const AudioUnitStream);
audiounit_make_silent(&buffers[0]);
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
dump_audio(
stm.core_stream_data.audio_dump_output,
@ -749,12 +750,8 @@ extern "C" fn audiounit_output_callback(
if stm.draining.load(Ordering::SeqCst) {
// Cancel all callbacks. For input-only streams, the input callback handles
// cancelling itself.
stm.stopped.store(true, Ordering::SeqCst);
cubeb_alog!("({:p}) output drained.", stm as *const AudioUnitStream);
stm.notify_state_changed(State::Drained);
let queue = stm.queue.clone();
audiounit_make_silent(&buffers[0]);
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
dump_audio(
stm.core_stream_data.audio_dump_output,
@ -762,11 +759,15 @@ extern "C" fn audiounit_output_callback(
output_frames * stm.core_stream_data.output_dev_desc.mChannelsPerFrame,
);
}
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
queue.run_async(move || {
stm.core_stream_data.stop_audiounits();
});
if !stm.stopped.swap(true, Ordering::SeqCst) {
cubeb_alog!("({:p}) output drained.", stm as *const AudioUnitStream);
stm.notify_state_changed(State::Drained);
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
stm.queue.clone().run_async(move || {
stm.core_stream_data.stop_audiounits();
});
}
return NO_ERR;
}
@ -886,12 +887,9 @@ extern "C" fn audiounit_output_callback(
);
if outframes < 0 || outframes > i64::from(output_frames) {
stm.stopped.store(true, Ordering::SeqCst);
stm.notify_state_changed(State::Error);
let queue = stm.queue.clone();
audiounit_make_silent(&buffers[0]);
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
dump_audio(
stm.core_stream_data.audio_dump_output,
@ -899,11 +897,14 @@ extern "C" fn audiounit_output_callback(
output_frames * stm.core_stream_data.output_dev_desc.mChannelsPerFrame,
);
}
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
queue.run_async(move || {
stm.core_stream_data.stop_audiounits();
});
if !stm.stopped.swap(true, Ordering::SeqCst) {
stm.notify_state_changed(State::Error);
// Use a new thread, through the queue, to avoid deadlock when calling
// AudioOutputUnitStop method from inside render callback
stm.queue.clone().run_async(move || {
stm.core_stream_data.stop_audiounits();
});
}
return NO_ERR;
}
@ -951,7 +952,7 @@ extern "C" fn audiounit_output_callback(
);
}
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
dump_audio(
stm.core_stream_data.audio_dump_output,
@ -1001,15 +1002,16 @@ extern "C" fn audiounit_property_listener_callback(
// Handle the events
if explicit_device_dead {
cubeb_log!("The user-selected input or output device is dead, entering error state");
stm.stopped.store(true, Ordering::SeqCst);
if !stm.stopped.swap(true, Ordering::SeqCst) {
cubeb_log!("The user-selected input or output device is dead, entering error state");
// Use a different thread, through the queue, to avoid deadlock when calling
// Get/SetProperties method from inside notify callback
stm.queue.clone().run_async(move || {
stm.core_stream_data.stop_audiounits();
stm.close_on_error();
});
// Use a different thread, through the queue, to avoid deadlock when calling
// Get/SetProperties method from inside notify callback
stm.queue.clone().run_async(move || {
stm.core_stream_data.stop_audiounits();
stm.close_on_error();
});
}
return NO_ERR;
}
{
@ -1026,6 +1028,7 @@ extern "C" fn audiounit_property_listener_callback(
}
fn get_default_device(devtype: DeviceType) -> Option<AudioObjectID> {
debug_assert_running_serially();
match get_default_device_id(devtype) {
Err(e) => {
cubeb_log!("Cannot get default {:?} device. Error: {}", devtype, e);
@ -1040,6 +1043,7 @@ fn get_default_device(devtype: DeviceType) -> Option<AudioObjectID> {
}
fn get_default_device_id(devtype: DeviceType) -> std::result::Result<AudioObjectID, OSStatus> {
debug_assert_running_serially();
let address = get_property_address(
match devtype {
DeviceType::INPUT => Property::HardwareDefaultInputDevice,
@ -1087,6 +1091,7 @@ fn audiounit_convert_channel_layout(layout: &AudioChannelLayout) -> Result<Vec<m
}
fn audiounit_get_preferred_channel_layout(output_unit: AudioUnit) -> Result<Vec<mixer::Channel>> {
debug_assert_running_serially();
let mut rv = NO_ERR;
let mut size: usize = 0;
rv = audio_unit_get_property_info(
@ -1129,6 +1134,7 @@ fn audiounit_get_preferred_channel_layout(output_unit: AudioUnit) -> Result<Vec<
// This is for output AudioUnit only. Calling this by input-only AudioUnit is prone
// to crash intermittently.
fn audiounit_get_current_channel_layout(output_unit: AudioUnit) -> Result<Vec<mixer::Channel>> {
debug_assert_running_serially();
let mut rv = NO_ERR;
let mut size: usize = 0;
rv = audio_unit_get_property_info(
@ -1169,6 +1175,7 @@ fn audiounit_get_current_channel_layout(output_unit: AudioUnit) -> Result<Vec<mi
}
fn get_channel_layout(output_unit: AudioUnit) -> Result<Vec<mixer::Channel>> {
debug_assert_running_serially();
audiounit_get_current_channel_layout(output_unit)
.or_else(|_| {
// The kAudioUnitProperty_AudioChannelLayout property isn't known before
@ -1208,6 +1215,7 @@ fn create_audiounit(device: &device_info) -> Result<AudioUnit> {
assert!(!device
.flags
.contains(device_flags::DEV_INPUT | device_flags::DEV_OUTPUT));
debug_assert_running_serially();
let unit = create_blank_audiounit()?;
let mut bus = AU_OUT_BUS;
@ -1610,6 +1618,7 @@ fn get_channel_count(
devtype: DeviceType,
) -> std::result::Result<u32, OSStatus> {
assert_ne!(devid, kAudioObjectUnknown);
debug_assert_running_serially();
let mut streams = get_device_streams(devid, devtype)?;
let model_uid =
@ -1691,6 +1700,7 @@ fn get_range_of_sample_rates(
devid: AudioObjectID,
devtype: DeviceType,
) -> std::result::Result<(f64, f64), String> {
debug_assert_running_serially();
let result = get_ranges_of_device_sample_rate(devid, devtype);
if let Err(e) = result {
return Err(format!("status {}", e));
@ -1712,6 +1722,7 @@ fn get_range_of_sample_rates(
}
fn get_fixed_latency(devid: AudioObjectID, devtype: DeviceType) -> u32 {
debug_assert_running_serially();
let device_latency = match get_device_latency(devid, devtype) {
Ok(latency) => latency,
Err(e) => {
@ -1754,6 +1765,7 @@ fn get_device_group_id(
id: AudioDeviceID,
devtype: DeviceType,
) -> std::result::Result<CString, OSStatus> {
debug_assert_running_serially();
match get_device_transport_type(id, devtype) {
Ok(kAudioDeviceTransportTypeBuiltIn) => {
cubeb_log!(
@ -1789,6 +1801,8 @@ fn get_device_group_id(
}
fn get_custom_group_id(id: AudioDeviceID, devtype: DeviceType) -> Option<CString> {
debug_assert_running_serially();
const IMIC: u32 = 0x696D_6963; // "imic" (internal microphone)
const ISPK: u32 = 0x6973_706B; // "ispk" (internal speaker)
const EMIC: u32 = 0x656D_6963; // "emic" (external microphone)
@ -1830,10 +1844,12 @@ fn get_device_label(
id: AudioDeviceID,
devtype: DeviceType,
) -> std::result::Result<StringRef, OSStatus> {
debug_assert_running_serially();
get_device_source_name(id, devtype).or_else(|_| get_device_name(id, devtype))
}
fn get_device_global_uid(id: AudioDeviceID) -> std::result::Result<StringRef, OSStatus> {
debug_assert_running_serially();
get_device_uid(id, DeviceType::INPUT | DeviceType::OUTPUT)
}
@ -2016,6 +2032,7 @@ fn destroy_cubeb_device_info(device: &mut ffi::cubeb_device_info) {
}
fn audiounit_get_devices() -> Vec<AudioObjectID> {
debug_assert_running_serially();
let mut size: usize = 0;
let address = get_property_address(
Property::HardwareDevices,
@ -2042,6 +2059,7 @@ fn audiounit_get_devices() -> Vec<AudioObjectID> {
fn audiounit_get_devices_of_type(devtype: DeviceType) -> Vec<AudioObjectID> {
assert!(devtype.intersects(DeviceType::INPUT | DeviceType::OUTPUT));
debug_assert_running_serially();
let mut devices = audiounit_get_devices();
@ -2708,17 +2726,21 @@ impl ContextOps for AudioUnitContext {
}
#[cfg(not(target_os = "ios"))]
fn max_channel_count(&mut self) -> Result<u32> {
let device = match get_default_device(DeviceType::OUTPUT) {
None => {
cubeb_log!("Could not get default output device");
return Err(Error::error());
}
Some(id) => id,
};
get_channel_count(device, DeviceType::OUTPUT).map_err(|e| {
cubeb_log!("Cannot get the channel count. Error: {}", e);
Error::error()
})
self.serial_queue
.run_sync(|| {
let device = match get_default_device(DeviceType::OUTPUT) {
None => {
cubeb_log!("Could not get default output device");
return Err(Error::error());
}
Some(id) => id,
};
get_channel_count(device, DeviceType::OUTPUT).map_err(|e| {
cubeb_log!("Cannot get the channel count. Error: {}", e);
Error::error()
})
})
.unwrap()
}
#[cfg(target_os = "ios")]
fn min_latency(&mut self, _params: StreamParams) -> Result<u32> {
@ -2726,21 +2748,25 @@ impl ContextOps for AudioUnitContext {
}
#[cfg(not(target_os = "ios"))]
fn min_latency(&mut self, _params: StreamParams) -> Result<u32> {
let device = match get_default_device(DeviceType::OUTPUT) {
None => {
cubeb_log!("Could not get default output device");
return Err(Error::error());
}
Some(id) => id,
};
self.serial_queue
.run_sync(|| {
let device = match get_default_device(DeviceType::OUTPUT) {
None => {
cubeb_log!("Could not get default output device");
return Err(Error::error());
}
Some(id) => id,
};
let range =
get_device_buffer_frame_size_range(device, DeviceType::OUTPUT).map_err(|e| {
cubeb_log!("Could not get acceptable latency range. Error: {}", e);
Error::error()
})?;
let range = get_device_buffer_frame_size_range(device, DeviceType::OUTPUT)
.map_err(|e| {
cubeb_log!("Could not get acceptable latency range. Error: {}", e);
Error::error()
})?;
Ok(cmp::max(range.mMinimum as u32, SAFE_MIN_LATENCY_FRAMES))
Ok(cmp::max(range.mMinimum as u32, SAFE_MIN_LATENCY_FRAMES))
})
.unwrap()
}
#[cfg(target_os = "ios")]
fn preferred_sample_rate(&mut self) -> Result<u32> {
@ -2748,21 +2774,25 @@ impl ContextOps for AudioUnitContext {
}
#[cfg(not(target_os = "ios"))]
fn preferred_sample_rate(&mut self) -> Result<u32> {
let device = match get_default_device(DeviceType::OUTPUT) {
None => {
cubeb_log!("Could not get default output device");
return Err(Error::error());
}
Some(id) => id,
};
let rate = get_device_sample_rate(device, DeviceType::OUTPUT).map_err(|e| {
cubeb_log!(
"Cannot get the sample rate of the default output device. Error: {}",
e
);
Error::error()
})?;
Ok(rate as u32)
self.serial_queue
.run_sync(|| {
let device = match get_default_device(DeviceType::OUTPUT) {
None => {
cubeb_log!("Could not get default output device");
return Err(Error::error());
}
Some(id) => id,
};
let rate = get_device_sample_rate(device, DeviceType::OUTPUT).map_err(|e| {
cubeb_log!(
"Cannot get the sample rate of the default output device. Error: {}",
e
);
Error::error()
})?;
Ok(rate as u32)
})
.unwrap()
}
fn supported_input_processing_params(&mut self) -> Result<InputProcessingParams> {
Ok(InputProcessingParams::ECHO_CANCELLATION
@ -2774,19 +2804,27 @@ impl ContextOps for AudioUnitContext {
devtype: DeviceType,
collection: &DeviceCollectionRef,
) -> Result<()> {
let mut device_infos = Vec::new();
let dev_types = [DeviceType::INPUT, DeviceType::OUTPUT];
for dev_type in dev_types.iter() {
if !devtype.contains(*dev_type) {
continue;
}
let devices = audiounit_get_devices_of_type(*dev_type);
for device in devices {
if let Ok(info) = create_cubeb_device_info(device, *dev_type) {
device_infos.push(info);
let device_infos = self
.serial_queue
.run_sync(|| {
let mut dev_types = vec![DeviceType::INPUT, DeviceType::OUTPUT];
dev_types.retain(|&dt| devtype.contains(dt));
let device_ids: Vec<(DeviceType, Vec<AudioObjectID>)> = dev_types
.iter()
.map(|&dt| (dt, audiounit_get_devices_of_type(dt)))
.collect();
let count = device_ids.iter().map(|(_dt, ids)| ids.len()).sum();
let mut device_infos = Vec::with_capacity(count);
for (dt, dev_ids) in device_ids {
for dev_id in dev_ids {
if let Ok(info) = create_cubeb_device_info(dev_id, dt) {
device_infos.push(info);
}
}
}
}
}
device_infos
})
.unwrap();
let (ptr, len) = if device_infos.is_empty() {
(ptr::null_mut(), 0)
} else {
@ -3071,13 +3109,13 @@ struct CoreStreamData<'ctx> {
output_alive_listener: Option<device_property_listener>,
output_source_listener: Option<device_property_listener>,
input_logging: Option<InputCallbackLogger>,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_session: ffi::cubeb_audio_dump_session_t,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_session_running: bool,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_input: ffi::cubeb_audio_dump_stream_t,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_output: ffi::cubeb_audio_dump_stream_t,
}
@ -3119,13 +3157,13 @@ impl<'ctx> Default for CoreStreamData<'ctx> {
output_alive_listener: None,
output_source_listener: None,
input_logging: None,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_session: ptr::null_mut(),
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_session_running: false,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_input: ptr::null_mut(),
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_output: ptr::null_mut(),
}
}
@ -3174,13 +3212,13 @@ impl<'ctx> CoreStreamData<'ctx> {
output_alive_listener: None,
output_source_listener: None,
input_logging: None,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_session: ptr::null_mut(),
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_session_running: false,
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_input: ptr::null_mut(),
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
audio_dump_output: ptr::null_mut(),
}
}
@ -3539,7 +3577,7 @@ impl<'ctx> CoreStreamData<'ctx> {
assert!(!self.stm_ptr.is_null());
let stream = unsafe { &(*self.stm_ptr) };
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
unsafe {
ffi::cubeb_audio_dump_init(&mut self.audio_dump_session);
}
@ -3584,7 +3622,7 @@ impl<'ctx> CoreStreamData<'ctx> {
let r = audio_unit_get_property(
self.input_unit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kAudioUnitScope_Output,
AU_IN_BUS,
&mut input_hw_desc,
&mut size,
@ -3601,9 +3639,6 @@ impl<'ctx> CoreStreamData<'ctx> {
self.stm_ptr,
input_hw_desc
);
// In some cases with VPIO the stream format's mChannelsPerFrame is higher than
// expected. Use get_channel_count as source of truth.
input_hw_desc.mChannelsPerFrame = device_channel_count;
// Notice: when we are using aggregate device, the input_hw_desc.mChannelsPerFrame is
// the total of all the input channel count of the devices added in the aggregate device.
// Due to our aggregate device settings, the data captured by the output device's input
@ -3613,12 +3648,7 @@ impl<'ctx> CoreStreamData<'ctx> {
// channels to the audio callback.
let params = unsafe {
let mut p = *self.input_stream_params.as_ptr();
p.channels = if using_voice_processing_unit {
// VPIO is always MONO.
1
} else {
input_hw_desc.mChannelsPerFrame
};
p.channels = input_hw_desc.mChannelsPerFrame;
// Input AudioUnit must be configured with device's sample rate.
// we will resample inside input callback.
p.rate = input_hw_desc.mSampleRate as _;
@ -3633,7 +3663,7 @@ impl<'ctx> CoreStreamData<'ctx> {
e
})?;
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
let name = format!("input-{:p}.wav", self.stm_ptr);
let cname = CString::new(name).expect("OK");
@ -3789,7 +3819,7 @@ impl<'ctx> CoreStreamData<'ctx> {
let r = audio_unit_get_property(
self.output_unit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kAudioUnitScope_Input,
AU_OUT_BUS,
&mut output_hw_desc,
&mut size,
@ -3807,11 +3837,6 @@ impl<'ctx> CoreStreamData<'ctx> {
output_hw_desc
);
// In some cases with (other streams using) VPIO the stream format's mChannelsPerFrame
// is higher than expected. Use get_channel_count as source of truth.
output_hw_desc.mChannelsPerFrame =
get_channel_count(self.output_device.id, DeviceType::OUTPUT).unwrap_or(0);
// This has been observed in the wild.
if output_hw_desc.mChannelsPerFrame == 0 {
cubeb_log!(
@ -3827,12 +3852,7 @@ impl<'ctx> CoreStreamData<'ctx> {
// channels will be appended at the end of the raw data given by the output callback.
let params = unsafe {
let mut p = *self.output_stream_params.as_ptr();
p.channels = if using_voice_processing_unit {
// VPIO is always MONO.
1
} else {
output_hw_desc.mChannelsPerFrame
};
p.channels = output_hw_desc.mChannelsPerFrame;
if using_voice_processing_unit {
// VPIO will always use the sample rate of the input hw for both input and output,
// as reported to us. (We can override it but we cannot improve quality this way).
@ -3849,7 +3869,7 @@ impl<'ctx> CoreStreamData<'ctx> {
e
})?;
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
let name = format!("output-{:p}.wav", self.stm_ptr);
let cname = CString::new(name).expect("OK");
@ -4038,7 +4058,7 @@ impl<'ctx> CoreStreamData<'ctx> {
self.input_logging = Some(InputCallbackLogger::new());
}
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
unsafe { ffi::cubeb_audio_dump_start(self.audio_dump_session) };
self.audio_dump_session_running = true;
@ -4217,7 +4237,7 @@ impl<'ctx> CoreStreamData<'ctx> {
// Return the VPIO unit if present.
self.voiceprocessing_unit_handle = None;
#[cfg(feature = "audio_dump")]
#[cfg(feature = "audio-dump")]
{
if !self.audio_dump_session.is_null() {
unsafe {
@ -4682,6 +4702,11 @@ impl<'ctx> AudioUnitStream<'ctx> {
self.core_stream_data.stop_audiounits();
}
if self.stopped.load(Ordering::SeqCst) {
// Something stopped the stream, we must not reinit.
return Ok(());
}
debug_assert!(
!self.core_stream_data.input_unit.is_null()
|| !self.core_stream_data.output_unit.is_null()
@ -4840,9 +4865,8 @@ impl<'ctx> AudioUnitStream<'ctx> {
// which locks a mutex inside CoreAudio framework, then this call will block the current
// thread until the callback is finished since this call asks to lock a mutex inside
// CoreAudio framework that is used by the data callback.
if !self.stopped.load(Ordering::SeqCst) {
if !self.stopped.swap(true, Ordering::SeqCst) {
self.core_stream_data.stop_audiounits();
self.stopped.store(true, Ordering::SeqCst);
}
self.destroy_internal();
@ -4886,18 +4910,18 @@ impl<'ctx> StreamOps for AudioUnitStream<'ctx> {
Ok(())
}
fn stop(&mut self) -> Result<()> {
self.stopped.store(true, Ordering::SeqCst);
if !self.stopped.swap(true, Ordering::SeqCst) {
// Execute stop in serial queue to avoid racing with destroy or reinit.
self.queue
.run_sync(|| self.core_stream_data.stop_audiounits());
// Execute stop in serial queue to avoid racing with destroy or reinit.
self.queue
.run_sync(|| self.core_stream_data.stop_audiounits());
self.notify_state_changed(State::Stopped);
self.notify_state_changed(State::Stopped);
cubeb_log!(
"Cubeb stream ({:p}) stopped successfully.",
self as *const AudioUnitStream
);
cubeb_log!(
"Cubeb stream ({:p}) stopped successfully.",
self as *const AudioUnitStream
);
}
Ok(())
}
fn position(&mut self) -> Result<u64> {
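
Several hunks above change stopped.store(true, ...) into if !stopped.swap(true, ...), so whichever path flips the flag first is the only one that notifies state and schedules stop_audiounits(). A minimal sketch of that idiom, with an illustrative notify closure:

use std::sync::atomic::{AtomicBool, Ordering};

// Sketch of the stop idiom used above: `swap` returns the previous value,
// so only the caller that transitions the flag from false to true runs the
// one-time notification/shutdown path; later callers see `true` and skip it.
fn stop_once(stopped: &AtomicBool, notify: impl FnOnce()) {
    if !stopped.swap(true, Ordering::SeqCst) {
        notify();
    }
}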


@ -352,7 +352,7 @@ fn test_aggregate_activate_clock_drift_compensation() {
assert_eq!(devices.len(), compensations.len());
for (device, compensation) in zip(devices, compensations) {
let uid = get_device_uid(device);
let uid = run_serially(|| get_device_uid(device));
assert_eq!(
compensation,
if uid == master_device_uid {


@ -1123,7 +1123,11 @@ fn test_get_channel_count_of_unknwon_type() {
fn test_channel_count(scope: Scope) {
if let Some(device) = test_get_default_device(scope.clone()) {
assert!(get_channel_count(device, DeviceType::UNKNOWN).is_err());
assert!(run_serially_forward_panics(|| get_channel_count(
device,
DeviceType::UNKNOWN
)
.is_err()));
} else {
panic!("Panic by default: No device for {:?}.", scope);
}
@ -1181,7 +1185,8 @@ fn test_get_device_presentation_latency() {
fn test_get_device_presentation_latencies_in_scope(scope: Scope) {
if let Some(device) = test_get_default_device(scope.clone()) {
// TODO: The latencies vary from device to device. Check nothing here.
let latency = run_serially(|| get_fixed_latency(device, scope.clone().into()));
let latency =
run_serially_forward_panics(|| get_fixed_latency(device, scope.clone().into()));
println!(
"present latency on the device {} in scope {:?}: {}",
device, scope, latency
@ -1197,7 +1202,7 @@ fn test_get_device_presentation_latency() {
#[test]
fn test_get_device_group_id() {
if let Some(device) = test_get_default_device(Scope::Input) {
match run_serially(|| get_device_group_id(device, DeviceType::INPUT)) {
match run_serially_forward_panics(|| get_device_group_id(device, DeviceType::INPUT)) {
Ok(id) => println!("input group id: {:?}", id),
Err(e) => println!("No input group id. Error: {}", e),
}
@ -1206,7 +1211,7 @@ fn test_get_device_group_id() {
}
if let Some(device) = test_get_default_device(Scope::Output) {
match run_serially(|| get_device_group_id(device, DeviceType::OUTPUT)) {
match run_serially_forward_panics(|| get_device_group_id(device, DeviceType::OUTPUT)) {
Ok(id) => println!("output group id: {:?}", id),
Err(e) => println!("No output group id. Error: {}", e),
}
@ -1229,8 +1234,10 @@ fn test_get_same_group_id_for_builtin_device_pairs() {
let mut input_group_ids = HashMap::<u32, String>::new();
let input_devices = test_get_devices_in_scope(Scope::Input);
for device in input_devices.iter() {
match run_serially(|| get_device_source(*device, DeviceType::INPUT)) {
Ok(source) => match run_serially(|| get_device_group_id(*device, DeviceType::INPUT)) {
match run_serially_forward_panics(|| get_device_source(*device, DeviceType::INPUT)) {
Ok(source) => match run_serially_forward_panics(|| {
get_device_group_id(*device, DeviceType::INPUT)
}) {
Ok(id) => assert!(input_group_ids
.insert(source, id.into_string().unwrap())
.is_none()),
@ -1245,8 +1252,10 @@ fn test_get_same_group_id_for_builtin_device_pairs() {
let mut output_group_ids = HashMap::<u32, String>::new();
let output_devices = test_get_devices_in_scope(Scope::Output);
for device in output_devices.iter() {
match run_serially(|| get_device_source(*device, DeviceType::OUTPUT)) {
Ok(source) => match run_serially(|| get_device_group_id(*device, DeviceType::OUTPUT)) {
match run_serially_forward_panics(|| get_device_source(*device, DeviceType::OUTPUT)) {
Ok(source) => match run_serially_forward_panics(|| {
get_device_group_id(*device, DeviceType::OUTPUT)
}) {
Ok(id) => assert!(output_group_ids
.insert(source, id.into_string().unwrap())
.is_none()),
@ -1286,14 +1295,16 @@ fn test_get_device_group_id_by_unknown_device() {
#[test]
fn test_get_device_label() {
if let Some(device) = test_get_default_device(Scope::Input) {
let name = run_serially(|| get_device_label(device, DeviceType::INPUT)).unwrap();
let name =
run_serially_forward_panics(|| get_device_label(device, DeviceType::INPUT)).unwrap();
println!("input device label: {}", name.into_string());
} else {
println!("No input device.");
}
if let Some(device) = test_get_default_device(Scope::Output) {
let name = run_serially(|| get_device_label(device, DeviceType::OUTPUT)).unwrap();
let name =
run_serially_forward_panics(|| get_device_label(device, DeviceType::OUTPUT)).unwrap();
println!("output device label: {}", name.into_string());
} else {
println!("No output device.");
@ -1316,14 +1327,14 @@ fn test_get_device_label_by_unknown_device() {
fn test_get_device_global_uid() {
// Input device.
if let Some(input) = test_get_default_device(Scope::Input) {
let uid = run_serially(|| get_device_global_uid(input)).unwrap();
let uid = run_serially_forward_panics(|| get_device_global_uid(input)).unwrap();
let uid = uid.into_string();
assert!(!uid.is_empty());
}
// Output device.
if let Some(output) = test_get_default_device(Scope::Output) {
let uid = run_serially(|| get_device_global_uid(output)).unwrap();
let uid = run_serially_forward_panics(|| get_device_global_uid(output)).unwrap();
let uid = uid.into_string();
assert!(!uid.is_empty());
}
@ -1333,7 +1344,7 @@ fn test_get_device_global_uid() {
#[should_panic]
fn test_get_device_global_uid_by_unknwon_device() {
// Unknown device.
assert!(get_device_global_uid(kAudioObjectUnknown).is_err());
assert!(run_serially_forward_panics(|| get_device_global_uid(kAudioObjectUnknown)).is_err());
}
// create_cubeb_device_info
@ -1381,7 +1392,9 @@ fn test_create_cubeb_device_info() {
let dev_types = [DeviceType::INPUT, DeviceType::OUTPUT];
let mut results = VecDeque::new();
for dev_type in dev_types.iter() {
results.push_back(run_serially(|| create_cubeb_device_info(id, *dev_type)));
results.push_back(run_serially_forward_panics(|| {
create_cubeb_device_info(id, *dev_type)
}));
}
results
}
@ -1441,9 +1454,11 @@ fn test_create_device_info_with_unknown_type() {
fn test_create_device_info_with_unknown_type_by_scope(scope: Scope) {
if let Some(device) = test_get_default_device(scope.clone()) {
assert!(
run_serially(|| create_cubeb_device_info(device, DeviceType::UNKNOWN)).is_err()
);
assert!(run_serially_forward_panics(|| create_cubeb_device_info(
device,
DeviceType::UNKNOWN
))
.is_err());
}
}
}
@ -1475,7 +1490,7 @@ fn test_create_device_from_hwdev_with_inout_type() {
fn test_create_device_from_hwdev_with_inout_type_by_scope(scope: Scope) {
if let Some(device) = test_get_default_device(scope.clone()) {
// Get a kAudioHardwareUnknownPropertyError in get_channel_count actually.
assert!(run_serially(|| create_cubeb_device_info(
assert!(run_serially_forward_panics(|| create_cubeb_device_info(
device,
DeviceType::INPUT | DeviceType::OUTPUT
))
@ -1492,10 +1507,13 @@ fn test_create_device_from_hwdev_with_inout_type() {
fn test_get_devices_of_type() {
use std::collections::HashSet;
let all_devices =
run_serially(|| audiounit_get_devices_of_type(DeviceType::INPUT | DeviceType::OUTPUT));
let input_devices = run_serially(|| audiounit_get_devices_of_type(DeviceType::INPUT));
let output_devices = run_serially(|| audiounit_get_devices_of_type(DeviceType::OUTPUT));
let all_devices = run_serially_forward_panics(|| {
audiounit_get_devices_of_type(DeviceType::INPUT | DeviceType::OUTPUT)
});
let input_devices =
run_serially_forward_panics(|| audiounit_get_devices_of_type(DeviceType::INPUT));
let output_devices =
run_serially_forward_panics(|| audiounit_get_devices_of_type(DeviceType::OUTPUT));
let mut expected_all = test_get_all_devices(DeviceFilter::ExcludeCubebAggregateAndVPIO);
expected_all.sort();


@ -2,9 +2,9 @@ extern crate itertools;
use self::itertools::iproduct;
use super::utils::{
draining_data_callback, get_devices_info_in_scope, noop_data_callback,
draining_data_callback, get_devices_info_in_scope, noop_data_callback, state_tracking_cb,
test_device_channels_in_scope, test_get_default_device, test_ops_context_operation,
test_ops_stream_operation, test_ops_stream_operation_on_context, Scope,
test_ops_stream_operation, test_ops_stream_operation_on_context, Scope, StateCallbackData,
};
use super::*;
use std::thread;
@ -1177,6 +1177,77 @@ fn test_ops_stream_device_destroy() {
});
}
pub extern "C" fn reiniting_and_erroring_data_callback(
stream: *mut ffi::cubeb_stream,
_user_ptr: *mut c_void,
_input_buffer: *const c_void,
output_buffer: *mut c_void,
nframes: i64,
) -> i64 {
assert!(!stream.is_null());
let stm = unsafe { &mut *(stream as *mut AudioUnitStream) };
// Feed silence data to output buffer
if !output_buffer.is_null() {
let channels = stm.core_stream_data.output_stream_params.channels();
let samples = nframes as usize * channels as usize;
let sample_size = cubeb_sample_size(stm.core_stream_data.output_stream_params.format());
unsafe {
ptr::write_bytes(output_buffer, 0, samples * sample_size);
}
}
// Trigger an async reinit before the backend handles the error below.
// This scenario could happen in the backend's internal input callback.
stm.reinit_async();
ffi::CUBEB_ERROR.into()
}
#[test]
fn test_ops_stream_racy_reinit() {
// Make sure the parameters meet the requirements of AudioUnitContext::stream_init
// (in the comments).
let mut input_params = ffi::cubeb_stream_params::default();
input_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
input_params.rate = 48000;
input_params.channels = 1;
input_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
input_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
let mut output_params = ffi::cubeb_stream_params::default();
output_params.format = ffi::CUBEB_SAMPLE_FLOAT32NE;
output_params.rate = 44100;
output_params.channels = 2;
output_params.layout = ffi::CUBEB_LAYOUT_UNDEFINED;
output_params.prefs = ffi::CUBEB_STREAM_PREF_NONE;
let mut data = StateCallbackData::default();
test_ops_stream_operation(
"stream: racy reinit",
ptr::null_mut(), // Use default input device.
&mut input_params,
ptr::null_mut(), // Use default output device.
&mut output_params,
4096, // TODO: Get latency by get_min_latency instead ?
Some(reiniting_and_erroring_data_callback),
Some(state_tracking_cb),
&mut data as *mut StateCallbackData as *mut c_void,
|stream| {
assert_eq!(unsafe { OPS.stream_start.unwrap()(stream) }, ffi::CUBEB_OK);
while data.error_cnt() == 0 && data.stopped_cnt() == 0 {
thread::sleep(Duration::from_millis(1));
}
assert_eq!(unsafe { OPS.stream_stop.unwrap()(stream) }, ffi::CUBEB_OK);
},
);
assert_eq!(data.started_cnt(), 1);
assert_eq!(data.stopped_cnt(), 0);
assert_eq!(data.drained_cnt(), 0);
assert_eq!(data.error_cnt(), 1);
}
#[test]
fn test_ops_stream_register_device_changed_callback() {
extern "C" fn callback(_: *mut c_void) {}


@ -441,7 +441,7 @@ fn test_stream_tester() {
return;
}
let mut params = InputProcessingParams::NONE;
{
run_serially(|| {
let mut bypass = u32::from(true);
let mut size: usize = mem::size_of::<u32>();
assert_eq!(
@ -477,7 +477,7 @@ fn test_stream_tester() {
if agc == 1 {
params.set(InputProcessingParams::AUTOMATIC_GAIN_CONTROL, true);
}
}
});
let mut done = false;
while !done {
println!(


@ -48,6 +48,56 @@ pub extern "C" fn draining_data_callback(
nframes - 1
}
#[derive(Default)]
pub struct StateCallbackData {
started_cnt: AtomicU32,
stopped_cnt: AtomicU32,
drained_cnt: AtomicU32,
error_cnt: AtomicU32,
}
impl StateCallbackData {
pub fn started_cnt(&self) -> u32 {
self.started_cnt.load(Ordering::SeqCst)
}
pub fn stopped_cnt(&self) -> u32 {
self.stopped_cnt.load(Ordering::SeqCst)
}
pub fn drained_cnt(&self) -> u32 {
self.drained_cnt.load(Ordering::SeqCst)
}
pub fn error_cnt(&self) -> u32 {
self.error_cnt.load(Ordering::SeqCst)
}
}
pub extern "C" fn state_tracking_cb(
stream: *mut ffi::cubeb_stream,
_usr_ptr: *mut c_void,
state: u32,
) {
let data = unsafe { (_usr_ptr as *mut StateCallbackData).as_mut() }.unwrap();
match state {
ffi::CUBEB_STATE_STARTED => {
data.started_cnt.fetch_add(1, Ordering::SeqCst);
cubeb_log!("({:p}) state is now started", stream);
}
ffi::CUBEB_STATE_STOPPED => {
data.stopped_cnt.fetch_add(1, Ordering::SeqCst);
cubeb_log!("({:p}) state is now stopped", stream);
}
ffi::CUBEB_STATE_DRAINED => {
data.drained_cnt.fetch_add(1, Ordering::SeqCst);
cubeb_log!("({:p}) state is now drained", stream);
}
ffi::CUBEB_STATE_ERROR => {
data.error_cnt.fetch_add(1, Ordering::SeqCst);
cubeb_log!("({:p}) state is now error", stream);
}
_ => unreachable!("unknown state"),
};
}
#[derive(Clone, Debug, PartialEq)]
pub enum Scope {
Input,
@ -357,14 +407,14 @@ impl TestDeviceInfo {
}
fn get_label(id: AudioObjectID, scope: Scope) -> String {
match get_device_uid(id, scope.into()) {
match run_serially_forward_panics(|| get_device_uid(id, scope.into())) {
Ok(uid) => uid.into_string(),
Err(status) => format!("Unknow. Error: {}", status).to_string(),
}
}
fn get_uid(id: AudioObjectID, scope: Scope) -> String {
match get_device_label(id, scope.into()) {
match run_serially_forward_panics(|| get_device_label(id, scope.into())) {
Ok(label) => label.into_string(),
Err(status) => format!("Unknown. Error: {}", status).to_string(),
}


@ -1 +1 @@
{"files":{".editorconfig":"bf047bd1da10cabb99eea666d1e57c321eba4716dccb3e4ed0e2c5fe3ca53858",".github/workflows/build.yml":"477366d58c9dc059dbe4a158a6e910f23a3e9ecac7411f73616e06375583b764","AUTHORS":"0e0ac930a68ce2f6b876126b195add177f0d3886facb9260f4d9b69f1988f0cc","Cargo.toml":"4bdd3962429c676eda59de96cad567478bb20b01c785c9ce3ba03b2e7837dd67","LICENSE":"44c6b5ae5ec3fe2fbc608b00e6f4896f4d2d5c7e525fcbaa3eaa3cf2f3d5a983","README.md":"0079450bb4b013bac065ed1750851e461a3710ebad1f323817da1cb82db0bc4f","src/backend/context.rs":"51dbf887e78aed5cd5e2255ac98f50c287960d8949ea3f66f2b05214b76800d8","src/backend/cork_state.rs":"4a0f1afc7d9f333dac89218cc56d7d32fbffb487cd48c1c9a4e03d79cb3b5e28","src/backend/intern.rs":"11ca424e4eb77f8eb9fd5a6717d1e791facf9743156a8534f0016fcf64d57b0f","src/backend/mod.rs":"dfb30ec497d6215e4535e936fea8fe3a407ef24dc1cec43b52c0ffa923d9229c","src/backend/stream.rs":"b3dcaa382981cbaa3af3c445b0ed6b5012b61d420d2e52a5ebd3cb0dd09a917c","src/capi.rs":"fa0fa020f0d0efe55aa0fc3596405e8407bbe2cbe6c7a558345304e6da87994e","src/lib.rs":"b41bbdc562cbfb130ed7c1e53fe69944774f515705341d8ce48a2f82c8c0c2c5"},"package":null}
{"files":{".editorconfig":"bf047bd1da10cabb99eea666d1e57c321eba4716dccb3e4ed0e2c5fe3ca53858",".github/workflows/build.yml":"477366d58c9dc059dbe4a158a6e910f23a3e9ecac7411f73616e06375583b764","AUTHORS":"0e0ac930a68ce2f6b876126b195add177f0d3886facb9260f4d9b69f1988f0cc","Cargo.toml":"8a0a450ae4990e1df322464867212e48587b474dfdc7f8c270fac06980be176a","LICENSE":"44c6b5ae5ec3fe2fbc608b00e6f4896f4d2d5c7e525fcbaa3eaa3cf2f3d5a983","README.md":"0079450bb4b013bac065ed1750851e461a3710ebad1f323817da1cb82db0bc4f","src/backend/context.rs":"c0db5f2447de1d6df5aa2812fa342a085e73156a072c221c7379b9a6a9b86786","src/backend/cork_state.rs":"4a0f1afc7d9f333dac89218cc56d7d32fbffb487cd48c1c9a4e03d79cb3b5e28","src/backend/intern.rs":"11ca424e4eb77f8eb9fd5a6717d1e791facf9743156a8534f0016fcf64d57b0f","src/backend/mod.rs":"dfb30ec497d6215e4535e936fea8fe3a407ef24dc1cec43b52c0ffa923d9229c","src/backend/stream.rs":"dfe5b747e100cae4aeae36cf2ebb9dc4715b411b4116721a40eec2944eb0ec23","src/capi.rs":"fa0fa020f0d0efe55aa0fc3596405e8407bbe2cbe6c7a558345304e6da87994e","src/lib.rs":"b41bbdc562cbfb130ed7c1e53fe69944774f515705341d8ce48a2f82c8c0c2c5"},"package":null}

third_party/rust/cubeb-pulse/Cargo.toml (vendored)

@ -24,7 +24,7 @@ crate-type = [
]
[dependencies]
cubeb-backend = "0.12.0"
cubeb-backend = "0.13"
ringbuf = "0.2"
semver = "1.0"


@ -515,7 +515,7 @@ impl ContextOps for PulseContext {
Ok(())
}
#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
#[allow(clippy::too_many_arguments)]
fn stream_init(
&mut self,
stream_name: Option<&CStr>,


@ -283,7 +283,7 @@ pub struct PulseStream<'ctx> {
}
impl<'ctx> PulseStream<'ctx> {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
#[allow(clippy::too_many_arguments)]
pub fn new(
context: &'ctx PulseContext,
stream_name: Option<&CStr>,
@ -447,9 +447,9 @@ impl<'ctx> PulseStream<'ctx> {
latency_frames * stm.output_sample_spec.frame_size() as u32;
let battr = pa_buffer_attr {
maxlength: u32::max_value(),
prebuf: u32::max_value(),
fragsize: u32::max_value(),
maxlength: u32::MAX,
prebuf: u32::MAX,
fragsize: u32::MAX,
tlength: buffer_size_bytes * 2,
minreq: buffer_size_bytes / 4,
};
@ -490,8 +490,8 @@ impl<'ctx> PulseStream<'ctx> {
let buffer_size_bytes =
latency_frames * stm.input_sample_spec.frame_size() as u32;
let battr = pa_buffer_attr {
maxlength: u32::max_value(),
prebuf: u32::max_value(),
maxlength: u32::MAX,
prebuf: u32::MAX,
fragsize: buffer_size_bytes,
tlength: buffer_size_bytes,
minreq: buffer_size_bytes,
@ -1033,7 +1033,7 @@ impl<'ctx> PulseStream<'ctx> {
true
}
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
#[allow(clippy::cognitive_complexity)]
fn trigger_user_callback(&mut self, input_data: *const c_void, nbytes: usize) {
fn drained_cb(
a: &pulse::MainloopApi,
@ -1074,7 +1074,7 @@ impl<'ctx> PulseStream<'ctx> {
read_offset
);
let read_ptr = unsafe { (input_data as *const u8).add(read_offset) };
#[cfg_attr(feature = "cargo-clippy", allow(clippy::unnecessary_cast))]
#[allow(clippy::unnecessary_cast)]
let mut got = unsafe {
self.data_callback.unwrap()(
self as *const _ as *mut _,

File differences are hidden because one or more lines are too long.

third_party/rust/cubeb-sys/Cargo.toml (vendored)

@ -11,7 +11,7 @@
[package]
name = "cubeb-sys"
version = "0.12.0"
version = "0.13.0"
authors = ["Dan Glastonbury <dglastonbury@mozilla.com>"]
build = "build.rs"
links = "cubeb"


@ -78,6 +78,7 @@ endif()
add_library(cubeb
src/cubeb.c
src/cubeb_audio_dump.cpp
src/cubeb_mixer.cpp
src/cubeb_resampler.cpp
src/cubeb_log.cpp
@ -264,7 +265,7 @@ if(USE_WASAPI)
target_sources(cubeb PRIVATE
src/cubeb_wasapi.cpp)
target_compile_definitions(cubeb PRIVATE USE_WASAPI)
target_link_libraries(cubeb PRIVATE avrt ole32 ksuser)
target_link_libraries(cubeb PRIVATE ole32 ksuser)
endif()
check_include_files("windows.h;mmsystem.h" USE_WINMM)
@ -406,6 +407,7 @@ if(BUILD_TESTS)
cubeb_add_test(duplex)
cubeb_add_test(logging)
cubeb_add_test(triple_buffer)
cubeb_add_test(audio_dump)
if (USE_WASAPI)
cubeb_add_test(overload_callback)


@ -274,7 +274,7 @@ typedef enum {
typedef struct {
cubeb_sample_format format; /**< Requested sample format. One of
#cubeb_sample_format. */
uint32_t rate; /**< Requested sample rate. Valid range is [1000, 192000]. */
uint32_t rate; /**< Requested sample rate. Valid range is [1000, 384000]. */
uint32_t channels; /**< Requested channel count. Valid range is [1, 8]. */
cubeb_channel_layout
layout; /**< Requested channel layout. This must be consistent with the


@ -95,7 +95,7 @@ validate_stream_params(cubeb_stream_params * input_stream_params,
XASSERT(input_stream_params || output_stream_params);
if (output_stream_params) {
if (output_stream_params->rate < 1000 ||
output_stream_params->rate > 192000 ||
output_stream_params->rate > 768000 ||
output_stream_params->channels < 1 ||
output_stream_params->channels > UINT8_MAX) {
return CUBEB_ERROR_INVALID_FORMAT;
@ -103,7 +103,7 @@ validate_stream_params(cubeb_stream_params * input_stream_params,
}
if (input_stream_params) {
if (input_stream_params->rate < 1000 ||
input_stream_params->rate > 192000 ||
input_stream_params->rate > 768000 ||
input_stream_params->channels < 1 ||
input_stream_params->channels > UINT8_MAX) {
return CUBEB_ERROR_INVALID_FORMAT;
@ -533,7 +533,7 @@ int
cubeb_stream_set_input_processing_params(cubeb_stream * stream,
cubeb_input_processing_params params)
{
if (!stream || !params) {
if (!stream) {
return CUBEB_ERROR_INVALID_PARAMETER;
}


@ -244,13 +244,24 @@ shutdown_with_error(cubeb_stream * stm)
}
int64_t poll_frequency_ns = NS_PER_S * stm->out_frame_size / stm->sample_rate;
int rv;
if (stm->istream) {
wait_for_state_change(stm->istream, AAUDIO_STREAM_STATE_STOPPED,
poll_frequency_ns);
rv = wait_for_state_change(stm->istream, AAUDIO_STREAM_STATE_STOPPED,
poll_frequency_ns);
if (rv != CUBEB_OK) {
LOG("Failure when waiting for stream change on the input side when "
"shutting down in error");
// Not much we can do, carry on
}
}
if (stm->ostream) {
wait_for_state_change(stm->ostream, AAUDIO_STREAM_STATE_STOPPED,
poll_frequency_ns);
rv = wait_for_state_change(stm->ostream, AAUDIO_STREAM_STATE_STOPPED,
poll_frequency_ns);
if (rv != CUBEB_OK) {
LOG("Failure when waiting for stream change on the output side when "
"shutting down in error");
// Not much we can do, carry on
}
}
assert(!stm->in_data_callback.load());
@ -920,7 +931,7 @@ aaudio_error_cb(AAudioStream * astream, void * user_data, aaudio_result_t error)
assert(stm->ostream == astream || stm->istream == astream);
// Device change -- reinitialize on the new default device.
if (error == AAUDIO_ERROR_DISCONNECTED) {
if (error == AAUDIO_ERROR_DISCONNECTED || error == AAUDIO_ERROR_TIMEOUT) {
LOG("Audio device change, reinitializing stream");
reinitialize_stream(stm);
return;

third_party/rust/cubeb-sys/libcubeb/src/cubeb_audio_dump.cpp (vendored, new file)

@ -0,0 +1,231 @@
/*
* Copyright © 2023 Mozilla Foundation
*
* This program is made available under an ISC-style license. See the
* accompanying file LICENSE for details.
*/
#define NOMINMAX
#include "cubeb_audio_dump.h"
#include "cubeb/cubeb.h"
#include "cubeb_ringbuffer.h"
#include <chrono>
#include <limits>
#include <thread>
#include <vector>
using std::thread;
using std::vector;
uint32_t
bytes_per_sample(cubeb_stream_params params)
{
switch (params.format) {
case CUBEB_SAMPLE_S16LE:
case CUBEB_SAMPLE_S16BE:
return sizeof(int16_t);
case CUBEB_SAMPLE_FLOAT32LE:
case CUBEB_SAMPLE_FLOAT32BE:
return sizeof(float);
};
}
struct cubeb_audio_dump_stream {
public:
explicit cubeb_audio_dump_stream(cubeb_stream_params params)
: sample_size(bytes_per_sample(params)),
ringbuffer(
static_cast<int>(params.rate * params.channels * sample_size))
{
}
int open(const char * name)
{
file = fopen(name, "wb");
if (!file) {
return CUBEB_ERROR;
}
return CUBEB_OK;
}
int close()
{
if (fclose(file)) {
return CUBEB_ERROR;
}
return CUBEB_OK;
}
// Directly write to the file. Useful to write the header.
size_t write(uint8_t * data, uint32_t count)
{
return fwrite(data, count, 1, file);
}
size_t write_all()
{
size_t written = 0;
const int buf_sz = 16 * 1024;
uint8_t buf[buf_sz];
while (int rv = ringbuffer.dequeue(buf, buf_sz)) {
written += fwrite(buf, rv, 1, file);
}
return written;
}
int dump(void * samples, uint32_t count)
{
int bytes = static_cast<int>(count * sample_size);
int rv = ringbuffer.enqueue(static_cast<uint8_t *>(samples), bytes);
return rv == bytes;
}
private:
uint32_t sample_size;
FILE * file{};
lock_free_queue<uint8_t> ringbuffer;
};
struct cubeb_audio_dump_session {
public:
cubeb_audio_dump_session() = default;
~cubeb_audio_dump_session()
{
assert(streams.empty());
session_thread.join();
}
cubeb_audio_dump_session(const cubeb_audio_dump_session &) = delete;
cubeb_audio_dump_session &
operator=(const cubeb_audio_dump_session &) = delete;
cubeb_audio_dump_session & operator=(cubeb_audio_dump_session &&) = delete;
cubeb_audio_dump_stream_t create_stream(cubeb_stream_params params,
const char * name)
{
if (running) {
return nullptr;
}
auto * stream = new cubeb_audio_dump_stream(params);
streams.push_back(stream);
int rv = stream->open(name);
if (rv != CUBEB_OK) {
delete stream;
return nullptr;
}
struct riff_header {
char chunk_id[4] = {'R', 'I', 'F', 'F'};
int32_t chunk_size = 0;
char format[4] = {'W', 'A', 'V', 'E'};
char subchunk_id_1[4] = {'f', 'm', 't', 0x20};
int32_t subchunk_1_size = 16;
int16_t audio_format{};
int16_t num_channels{};
int32_t sample_rate{};
int32_t byte_rate{};
int16_t block_align{};
int16_t bits_per_sample{};
char subchunk_id_2[4] = {'d', 'a', 't', 'a'};
int32_t subchunkd_2_size = std::numeric_limits<int32_t>::max();
};
riff_header header;
// 1 is integer PCM, 3 is float PCM
header.audio_format = bytes_per_sample(params) == 2 ? 1 : 3;
header.num_channels = params.channels;
header.sample_rate = params.rate;
header.byte_rate = bytes_per_sample(params) * params.rate * params.channels;
header.block_align = params.channels * bytes_per_sample(params);
header.bits_per_sample = bytes_per_sample(params) * 8;
stream->write(reinterpret_cast<uint8_t *>(&header), sizeof(riff_header));
return stream;
}
int delete_stream(cubeb_audio_dump_stream * stream)
{
assert(!running);
stream->close();
streams.erase(std::remove(streams.begin(), streams.end(), stream),
streams.end());
return CUBEB_OK;
}
int start()
{
assert(!running);
running = true;
session_thread = std::thread([this] {
while (running) {
for (auto * stream : streams) {
stream->write_all();
}
const int DUMP_INTERVAL = 10;
std::this_thread::sleep_for(std::chrono::milliseconds(DUMP_INTERVAL));
}
});
return CUBEB_OK;
}
int stop()
{
assert(running);
running = false;
return CUBEB_OK;
}
private:
thread session_thread;
vector<cubeb_audio_dump_stream_t> streams{};
std::atomic<bool> running = false;
};
int
cubeb_audio_dump_init(cubeb_audio_dump_session_t * session)
{
*session = new cubeb_audio_dump_session;
return CUBEB_OK;
}
int
cubeb_audio_dump_shutdown(cubeb_audio_dump_session_t session)
{
delete session;
return CUBEB_OK;
}
int
cubeb_audio_dump_stream_init(cubeb_audio_dump_session_t session,
cubeb_audio_dump_stream_t * stream,
cubeb_stream_params stream_params,
const char * name)
{
*stream = session->create_stream(stream_params, name);
// create_stream returns nullptr if the session is already running or if the
// file could not be opened; report that as an error instead of CUBEB_OK.
return *stream ? CUBEB_OK : CUBEB_ERROR;
}
int
cubeb_audio_dump_stream_shutdown(cubeb_audio_dump_session_t session,
cubeb_audio_dump_stream_t stream)
{
return session->delete_stream(stream);
}
int
cubeb_audio_dump_start(cubeb_audio_dump_session_t session)
{
return session->start();
}
int
cubeb_audio_dump_stop(cubeb_audio_dump_session_t session)
{
return session->stop();
}
int
cubeb_audio_dump_write(cubeb_audio_dump_stream_t stream, void * audio_samples,
uint32_t count)
{
stream->dump(audio_samples, count);
return CUBEB_OK;
}

108
third_party/rust/cubeb-sys/libcubeb/src/cubeb_audio_dump.h vendored Normal file

@@ -0,0 +1,108 @@
/*
* Copyright © 2023 Mozilla Foundation
*
* This program is made available under an ISC-style license. See the
* accompanying file LICENSE for details.
*/
#ifndef CUBEB_AUDIO_DUMP
#define CUBEB_AUDIO_DUMP
#include "cubeb/cubeb.h"
#if defined(__cplusplus)
extern "C" {
#endif
typedef struct cubeb_audio_dump_stream * cubeb_audio_dump_stream_t;
typedef struct cubeb_audio_dump_session * cubeb_audio_dump_session_t;
// Start audio dumping session
// This can only be called if the other API functions
// aren't currently being called: synchronized externally.
// This is not real-time safe.
//
// This is generally called when deciding to start logging some audio.
//
// Returns 0 in case of success.
int
cubeb_audio_dump_init(cubeb_audio_dump_session_t * session);
// End audio dumping session
// This can only be called if the other API functions
// aren't currently being called: synchronized externally.
//
// This is generally called when deciding to stop logging some audio.
//
// This is not real-time safe.
// Returns 0 in case of success.
int
cubeb_audio_dump_shutdown(cubeb_audio_dump_session_t session);
// Register a stream for dumping to a file
// This can only be called if cubeb_audio_dump_write
// isn't currently being called: synchronized externally.
//
// This is generally called when setting up a system-level stream side (either
// input or output).
//
// This is not real-time safe.
// Returns 0 in case of success.
int
cubeb_audio_dump_stream_init(cubeb_audio_dump_session_t session,
cubeb_audio_dump_stream_t * stream,
cubeb_stream_params stream_params,
const char * name);
// Unregister a stream for dumping to a file
// This can only be called if cubeb_audio_dump_write
// isn't currently being called: synchronized externally.
//
// This is generally called when a system-level audio stream side
// (input/output) has been stopped and drained, and the audio callback isn't
// going to be called.
//
// This is not real-time safe.
// Returns 0 in case of success.
int
cubeb_audio_dump_stream_shutdown(cubeb_audio_dump_session_t session,
cubeb_audio_dump_stream_t stream);
// Start dumping.
// cubeb_audio_dump_write can now be called.
//
// This starts dumping the audio to disk. Generally this is called when
// cubeb_stream_start is called, but it can also be called at the beginning of
// the application.
//
// This is not real-time safe.
// Returns 0 in case of success.
int
cubeb_audio_dump_start(cubeb_audio_dump_session_t session);
// Stop dumping.
// cubeb_audio_dump_write can't be called at this point.
//
// This stops dumping the audio to disk. Generally this is called when
// cubeb_stream_stop is called, but it can also be deferred until just before
// exiting the application.
//
// This is not real-time safe.
// Returns 0 in case of success.
int
cubeb_audio_dump_stop(cubeb_audio_dump_session_t session);
// Dump some audio samples to the given audio dump stream. `count` is the
// number of samples (not frames or bytes) in `audio_samples`.
//
// This is generally called from the real-time audio callback.
//
// This is real-time safe.
// Returns 0 in case of success.
int
cubeb_audio_dump_write(cubeb_audio_dump_stream_t stream, void * audio_samples,
uint32_t count);
#if defined(__cplusplus)
}
#endif
#endif
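For orientation, here is a minimal usage sketch showing the call order the
comments above require: init, stream_init, start, write from the audio
callback, stop, stream_shutdown, shutdown. It is illustrative only and not
part of the vendored sources; the 48 kHz stereo float parameters, the
"dump.wav" filename, the fill_with_tone() helper and the 10 ms pacing loop
are assumptions made for the example.

#include "cubeb_audio_dump.h"

#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <thread>
#include <vector>

// Hypothetical signal source standing in for a real audio callback: fills an
// interleaved buffer with a 440 Hz sine so the resulting file has content.
static void
fill_with_tone(std::vector<float> & interleaved, uint32_t channels)
{
  static double phase = 0.0;
  const double step = 2.0 * 3.14159265358979 * 440.0 / 48000.0;
  for (size_t i = 0; i < interleaved.size(); i += channels) {
    const float sample = static_cast<float>(std::sin(phase));
    for (uint32_t c = 0; c < channels; c++) {
      interleaved[i + c] = sample;
    }
    phase += step;
  }
}

int
main()
{
  cubeb_stream_params params = {};
  params.format = CUBEB_SAMPLE_FLOAT32LE;
  params.rate = 48000;
  params.channels = 2;

  cubeb_audio_dump_session_t session = nullptr;
  cubeb_audio_dump_stream_t stream = nullptr;
  if (cubeb_audio_dump_init(&session) != 0 ||
      cubeb_audio_dump_stream_init(session, &stream, params, "dump.wav") != 0 ||
      cubeb_audio_dump_start(session) != 0) {
    return 1;
  }

  // Simulate one second of 10 ms callbacks: 480 frames * 2 channels == 960
  // samples per write. Pacing the loop lets the session thread drain the
  // ring buffer in the background.
  std::vector<float> buffer(480 * params.channels);
  for (int i = 0; i < 100; i++) {
    fill_with_tone(buffer, params.channels);
    cubeb_audio_dump_write(stream, buffer.data(),
                           static_cast<uint32_t>(buffer.size()));
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }

  // Tear down in the order the header requires: stop dumping, unregister the
  // stream, then end the session.
  cubeb_audio_dump_stop(session);
  cubeb_audio_dump_stream_shutdown(session, stream);
  cubeb_audio_dump_shutdown(session);
  return 0;
}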

Some files were not shown because too many files changed in this diff.