Bug 1629359 - Update parking_lot to 0.10 r=kats

Differential Revision: https://phabricator.services.mozilla.com/D70646

--HG--
rename : third_party/rust/parking_lot/src/mutex.rs => third_party/rust/parking_lot/src/fair_mutex.rs
extra : moz-landing-system : lando
This commit is contained in:
Dzmitry Malyshau 2020-04-12 19:14:43 +00:00
Родитель a8922cc7ff
Коммит 540f1bea3f
62 изменённых файлов: 11430 добавлений и 10018 удалений

24
Cargo.lock сгенерированный
Просмотреть файл

@ -1536,9 +1536,9 @@ dependencies = [
[[package]]
name = "gfx-backend-dx11"
version = "0.4.2"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c66c77836ff26cf9916e5c8745715a22eae1fc61d994ffa0bea8a7dbd708ece2"
checksum = "d7527cfcd7d1eec6b99f81891293bdd2a41d044ace009717264e5f3b10ce5b86"
dependencies = [
"bitflags",
"gfx-auxil",
@ -1584,9 +1584,9 @@ dependencies = [
[[package]]
name = "gfx-backend-metal"
version = "0.4.4"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b19262dc17be65f2c5b5c72fe5199ab2017faf51f9e0e353c4a68b63a88f933b"
checksum = "05b6130b9a72129ebb5c91d3d75a142a7fa54dcc112603231582e3fdc0b84247"
dependencies = [
"arrayvec",
"bitflags",
@ -2324,9 +2324,9 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.3.1"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8912e782533a93a167888781b836336a6ca5da6175c05944c86cf28c31104dc"
checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"
dependencies = [
"scopeguard",
]
@ -3111,27 +3111,25 @@ checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865"
[[package]]
name = "parking_lot"
version = "0.9.0"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"
dependencies = [
"lock_api",
"parking_lot_core",
"rustc_version",
]
[[package]]
name = "parking_lot_core"
version = "0.6.2"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
checksum = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb"
dependencies = [
"cfg-if",
"cloudabi",
"libc",
"redox_syscall",
"rustc_version",
"smallvec 0.6.10",
"smallvec 1.2.0",
"winapi 0.3.7",
]

118
gfx/wgpu/Cargo.lock сгенерированный
Просмотреть файл

@ -79,13 +79,13 @@ dependencies = [
[[package]]
name = "cocoa"
version = "0.19.1"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
"core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
"foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -110,11 +110,25 @@ dependencies = [
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "core-foundation"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "core-foundation-sys"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "core-foundation-sys"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "core-graphics"
version = "0.17.3"
@ -126,6 +140,17 @@ dependencies = [
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "core-graphics"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "d3d12"
version = "0.3.0"
@ -169,7 +194,7 @@ dependencies = [
[[package]]
name = "gfx-backend-dx11"
version = "0.4.3"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -177,7 +202,7 @@ dependencies = [
"gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
@ -214,23 +239,23 @@ dependencies = [
[[package]]
name = "gfx-backend-metal"
version = "0.4.1"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"cocoa 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cocoa 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
"foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"metal 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
"metal 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)",
"objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"range-alloc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
@ -309,7 +334,7 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.3.2"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -346,13 +371,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "metal"
version = "0.17.1"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"cocoa 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)",
"cocoa 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)",
"foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -397,25 +422,23 @@ dependencies = [
[[package]]
name = "parking_lot"
version = "0.9.0"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "parking_lot_core"
version = "0.6.2"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -510,32 +533,11 @@ dependencies = [
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "scopeguard"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde"
version = "1.0.103"
@ -596,7 +598,7 @@ name = "storage-map"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -696,14 +698,14 @@ dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-dx11 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-dx11 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-dx12 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-empty 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-metal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-metal 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)",
"rendy-descriptor 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rendy-memory 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -721,7 +723,7 @@ dependencies = [
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-window-handle 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu-core 0.1.0",
"wgpu-types 0.1.0",
@ -732,7 +734,7 @@ name = "wgpu-remote"
version = "0.1.0"
dependencies = [
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
"wgpu-core 0.1.0",
"wgpu-types 0.1.0",
]
@ -795,21 +797,24 @@ dependencies = [
"checksum cc 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "f52a465a666ca3d838ebbf08b241383421412fe7ebb463527bba275526d89f76"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
"checksum cocoa 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f29f7768b2d1be17b96158e3285951d366b40211320fb30826a76cb7a0da6400"
"checksum cocoa 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0a4736c86d51bd878b474400d9ec888156f4037015f5d09794fab9f26eab1ad4"
"checksum colorful 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bca1619ff57dd7a56b58a8e25ef4199f123e78e503fe1653410350a1b98ae65"
"checksum copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127"
"checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d"
"checksum core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171"
"checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b"
"checksum core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac"
"checksum core-graphics 0.17.3 (registry+https://github.com/rust-lang/crates.io-index)" = "56790968ab1c8a1202a102e6de05fc6e1ec87da99e4e93e9a7d13efbfc1e95a9"
"checksum core-graphics 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "59e78b2e0aaf43f08e7ae0d6bc96895ef72ff0921c7d4ff4762201b2dba376dd"
"checksum d3d12 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc7ed48e89905e5e146bcc1951cc3facb9e44aea9adf5dc01078cda1bd24b662"
"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
"checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
"checksum gfx-auxil 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "572eee952a9a23c99cfe3e4fd95d277784058a89ac3c77ff6fa3d80a4e321919"
"checksum gfx-backend-dx11 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b2cdc732e8cead82f5bfc8ce147ee0a2d8a425342aa7944f1c8f734e53ca3e6b"
"checksum gfx-backend-dx11 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d7527cfcd7d1eec6b99f81891293bdd2a41d044ace009717264e5f3b10ce5b86"
"checksum gfx-backend-dx12 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6e913cc800fb12eaba2c420091a02aca9aafbefd672600dfc5b52654343d341"
"checksum gfx-backend-empty 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d383e6bc48867cb37d298a20139fd1eec298f8f6d594690cd1c50ef25470cc7"
"checksum gfx-backend-metal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "15b8aa3d56d78283546ce51adb3db2826b64232ccea961f1d5c55ce986518632"
"checksum gfx-backend-metal 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "05b6130b9a72129ebb5c91d3d75a142a7fa54dcc112603231582e3fdc0b84247"
"checksum gfx-backend-vulkan 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "62538fedd66a78968a162e8e1a29d085ffbc97f8782634684b2f7da7aea59207"
"checksum gfx-hal 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7c88981665c780447bb08eb099e1ded330754a7246719bab927ee4a949c0ba7f"
"checksum hibitset 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "47e7292fd9f7fe89fa35c98048f2d0a69b79ed243604234d18f6f8a1aa6f408d"
@ -818,18 +823,18 @@ dependencies = [
"checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f"
"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558"
"checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753"
"checksum lock_api 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e57b3997725d2b60dbec1297f6c2e2957cc383db1cebd6be812163f969c7d586"
"checksum lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
"checksum mach 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "86dd2487cdfea56def77b88438a2c915fb45113c5319bfe7e14306ca4cd0b0e1"
"checksum malloc_buf 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
"checksum metal 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f83c7dcc2038e12f68493fa3de44235df27b2497178e257185b4b5b5d028a1e4"
"checksum metal 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e198a0ee42bdbe9ef2c09d0b9426f3b2b47d90d93a4a9b0395c4cea605e92dc0"
"checksum nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3b2e0b4f3320ed72aaedb9a5ac838690a8047c7b275da22711fddff4f8a14229"
"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
"checksum objc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1"
"checksum objc_exception 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ad970fb455818ad6cba4c122ad012fae53ae8b4795f86378bce65e4f6bab2ca4"
"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
"checksum parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"
"checksum parking_lot_core 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb"
"checksum peek-poke 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)" = "<none>"
"checksum peek-poke-derive 0.2.0 (git+https://github.com/kvark/peek-poke?rev=969bd7fe2be1a83f87916dc8b388c63cfd457075)" = "<none>"
"checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677"
@ -841,10 +846,7 @@ dependencies = [
"checksum relevant 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bbc232e13d37f4547f5b9b42a5efc380cabe5dbc1807f8b893580640b2ab0308"
"checksum rendy-descriptor 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f475bcc0505946e998590f1f0545c52ef4b559174a1b353a7ce6638def8b621e"
"checksum rendy-memory 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ed492161a819feae7f27f418bb16035276ac20649c60d756699152cb5c1960ec"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum serde 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "1217f97ab8e8904b57dd22eb61cde455fa7446a9c1cf43966066da047c1f3702"
"checksum serde_derive 1.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "a8c6faef9a2e64b0064f48570289b4bf8823b7581f1d6157c1b52152306651d0"
"checksum shared_library 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"

Просмотреть файл

@ -29,7 +29,7 @@ fxhash = "0.2"
log = "0.4"
hal = { package = "gfx-hal", version = "0.4" }
gfx-backend-empty = { version = "0.4" }
parking_lot = "0.9"
parking_lot = "0.10"
peek-poke = { git = "https://github.com/kvark/peek-poke", rev = "969bd7fe2be1a83f87916dc8b388c63cfd457075" }
rendy-memory = "0.5"
rendy-descriptor = "0.5"

Просмотреть файл

@ -33,7 +33,7 @@ version = "0.1"
[dependencies]
arrayvec = "0.5"
lazy_static = "1.1"
parking_lot = "0.9"
parking_lot = "0.10"
raw-window-handle = "0.3"
libc = {version="0.2", features=[]}

Просмотреть файл

@ -29,4 +29,4 @@ features = ["serde"]
[dependencies]
log = "0.4"
parking_lot = { version = "0.9" }
parking_lot = { version = "0.10" }

Просмотреть файл

@ -56,7 +56,7 @@ num-integer = "0.1"
num-traits = "0.2"
num-derive = "0.3"
owning_ref = "0.4"
parking_lot = "0.9"
parking_lot = "0.10"
precomputed-hash = "0.1.1"
rayon = "1"
selectors = { path = "../selectors" }

Просмотреть файл

@ -23,7 +23,7 @@ log = {version = "0.4", features = ["release_max_level_info"]}
malloc_size_of = {path = "../../components/malloc_size_of"}
nsstring = {path = "../../../xpcom/rust/nsstring/"}
num-traits = "0.2"
parking_lot = "0.9"
parking_lot = "0.10"
selectors = {path = "../../components/selectors"}
servo_arc = {path = "../../components/servo_arc"}
smallvec = "1.0"

Просмотреть файл

@ -15,7 +15,7 @@ app_units = "0.7"
cssparser = "0.27"
euclid = "0.20"
html5ever = "0.22"
parking_lot = "0.9"
parking_lot = "0.10"
rayon = "1"
serde_json = "1.0"
selectors = {path = "../../../components/selectors"}

Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"d1a212ec3097c36853d5a06f59ce7b863611b308160c6dcba97a2fcaf5a2a10e","README.md":"aa7ff84146655d3957c043b5f71dc439712392c3a18c8c397d8c179bc43f75c0","shaders/blit.hlsl":"a00c57d25b6704a57cd17923c5b7a47608b3ab17b96e7e2ab1172283dc841194","shaders/clear.hlsl":"9b6747a76dabe37ff8e069cdbb8a9c22f6cf71a6d3041d358cd1569d1bb8e10f","shaders/copy.hlsl":"0a164e64b28e62e1d8895159c13e5aa9c74891f61d54939c0f79b08a2a5223c9","src/conv.rs":"253cfbbc8c5ccc02a4d72afe139819b059047b6743f2d7facec844454d9101d4","src/debug.rs":"6da6b8c5172a6b061e2f3db13473c6013d12c6467ddd073f4de8ee3112c69c8c","src/device.rs":"c446408ce389a10c3e02ff198e1e56966082c1b8cb2fdc5b27d09fc6b2904ae0","src/dxgi.rs":"4923fe1333cae98be33718dfb9a3a57470e6c9a0a6be9f7c738006cab9a3bc93","src/internal.rs":"d7b25c6e8b96a333531d525217723cc0be5def8788ef2c3084e29a31dc8e626d","src/lib.rs":"f36e568ec0b5ee132439b1ee344305aaf8c909b25feccbf90476e4bda061d692","src/shader.rs":"8830e96b702601a621c65b2560a58c752b5d8e123569f73a5017abcc2df7889a"},"package":"c66c77836ff26cf9916e5c8745715a22eae1fc61d994ffa0bea8a7dbd708ece2"}
{"files":{"Cargo.toml":"9395d7e87f0c91d4900088cdbeb0ca8a0d4526911ece1de9cfc746974a9eec4e","README.md":"5c66af7bc110525a57c757859b9b93468ae54222e6ce9ce5ffd55b2a6ca596b9","shaders/blit.hlsl":"92a8b404ee956ceff2728ec8dd68969fba4c32a79f4d879f069a294f245a867c","shaders/clear.hlsl":"b715a0d8ccebd858531de845fdb3f1b31f25d3f62266238cd1d417006a07957c","shaders/copy.hlsl":"13ca6a1826eb5d252332d2bd75cc7f2e13c029a72006d141118d10ea5fb9856b","src/conv.rs":"69845245e8100921c14d3c9ac7b66e734cb167f16d397b15d723e42b93f9ecf0","src/debug.rs":"f51aa06d7eec0eb352ca7d401c474a075a10a01bbc31ea7e9c62973d7105112f","src/device.rs":"c41ba79b013c8dccd6e37fd0e713341066e4e4d16b337fdc108b4c6c09d55898","src/dxgi.rs":"8587c85d6d4223d23143b1b2dcf52d22615ad93852957bc0a0d13dd9bf057d25","src/internal.rs":"d2440eb4734f0765b86c6a3f6ef82005af4b998c1b449a48bd262a76b62845b4","src/lib.rs":"5f25d95df828ce8c3c3a463d80b39a4c981ae0ba50496a51229fa9e9894e9026","src/shader.rs":"58f9ccb451eb9e0db4b27b3b2901c7a9008e95279abb341a0bd30fdf1b45879c"},"package":"d7527cfcd7d1eec6b99f81891293bdd2a41d044ace009717264e5f3b10ce5b86"}

Просмотреть файл

@ -12,7 +12,7 @@
[package]
name = "gfx-backend-dx11"
version = "0.4.2"
version = "0.4.6"
authors = ["The Gfx-rs Developers"]
description = "DirectX-11 API backend for gfx-rs"
homepage = "https://github.com/gfx-rs/gfx"
@ -43,7 +43,7 @@ version = "0.5"
version = "0.4"
[dependencies.parking_lot]
version = "0.9"
version = "0.10"
[dependencies.range-alloc]
version = "0.1"

Просмотреть файл

@ -33,9 +33,11 @@ use {
CommandBuffer,
CommandPool,
ComputePipeline,
Descriptor,
DescriptorContent,
DescriptorIndex,
DescriptorPool,
DescriptorSet,
DescriptorSetInfo,
DescriptorSetLayout,
Fence,
Framebuffer,
@ -45,14 +47,14 @@ use {
InternalBuffer,
InternalImage,
Memory,
MemoryHeapFlags,
PipelineBinding,
MultiStageData,
PipelineLayout,
QueryPool,
RawFence,
RegisterMapping,
RegisterRemapping,
RegisterData,
RegisterAccumulator,
RenderPass,
ResourceIndex,
Sampler,
Semaphore,
ShaderModule,
@ -64,6 +66,10 @@ use {
use {conv, internal, shader};
//TODO: expose coherent type 0x2 when it's properly supported
const BUFFER_TYPE_MASK: u64 = 0x1 | 0x4;
struct InputLayout {
raw: ComPtr<d3d11::ID3D11InputLayout>,
required_bindings: u32,
@ -76,7 +82,6 @@ pub struct Device {
raw: ComPtr<d3d11::ID3D11Device>,
pub(crate) context: ComPtr<d3d11::ID3D11DeviceContext>,
memory_properties: MemoryProperties,
memory_heap_flags: [MemoryHeapFlags; 3],
pub(crate) internal: internal::Internal,
}
@ -113,11 +118,6 @@ impl Device {
raw: device.clone(),
context,
memory_properties,
memory_heap_flags: [
MemoryHeapFlags::DEVICE_LOCAL,
MemoryHeapFlags::HOST_COHERENT,
MemoryHeapFlags::HOST_VISIBLE,
],
internal: internal::Internal::new(&device),
}
}
@ -770,13 +770,12 @@ impl device::Device<Backend> for Device {
) -> Result<Memory, device::AllocationError> {
let vec = Vec::with_capacity(size as usize);
Ok(Memory {
ty: self.memory_heap_flags[mem_type.0],
properties: self.memory_properties.memory_types[mem_type.0].properties,
size,
mapped_ptr: vec.as_ptr() as *mut _,
host_visible: Some(RefCell::new(vec)),
local_buffers: RefCell::new(Vec::new()),
local_images: RefCell::new(Vec::new()),
_local_images: RefCell::new(Vec::new()),
})
}
@ -853,170 +852,20 @@ impl device::Device<Backend> for Device {
IR: IntoIterator,
IR::Item: Borrow<(pso::ShaderStageFlags, Range<u32>)>,
{
use pso::DescriptorType::*;
let mut set_bindings = Vec::new();
let mut set_remapping = Vec::new();
// since we remapped the bindings in our descriptor set layouts to their own local space
// (starting from register 0), we need to combine all the registers when creating our
// pipeline layout. we do this by simply offsetting all the registers by the amount of
// registers in the previous descriptor set layout
let mut s_offset = 0;
let mut t_offset = 0;
let mut c_offset = 0;
let mut u_offset = 0;
fn get_descriptor_offset(ty: pso::DescriptorType, s: u32, t: u32, c: u32, u: u32) -> u32 {
match ty {
Sampler => s,
SampledImage | UniformTexelBuffer => t,
UniformBuffer | UniformBufferDynamic => c,
StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic
| StorageImage => u,
CombinedImageSampler => unreachable!(),
}
}
for layout in set_layouts {
let layout = layout.borrow();
let bindings = &layout.bindings;
let stages = [
pso::ShaderStageFlags::VERTEX,
pso::ShaderStageFlags::HULL,
pso::ShaderStageFlags::DOMAIN,
pso::ShaderStageFlags::GEOMETRY,
pso::ShaderStageFlags::FRAGMENT,
pso::ShaderStageFlags::COMPUTE,
];
let mut optimized_bindings = Vec::new();
// for every shader stage we get a range of descriptor handles that can be bound with
// PS/VS/CSSetXX()
for &stage in &stages {
let mut state = None;
for binding in bindings {
if !binding.stage.contains(stage) {
continue;
}
state = match state {
None => {
if binding.stage.contains(stage) {
let offset = binding.handle_offset;
Some((
binding.ty,
binding.binding_range.start,
binding.binding_range.end,
offset,
offset,
))
} else {
None
}
}
Some((
mut ty,
mut start,
mut end,
mut start_offset,
mut current_offset,
)) => {
// if we encounter another type or the binding/handle
// range is broken, push our current descriptor range
// and begin a new one.
if ty != binding.ty
|| end != binding.binding_range.start
|| current_offset + 1 != binding.handle_offset
{
let register_offset = get_descriptor_offset(
ty, s_offset, t_offset, c_offset, u_offset,
);
optimized_bindings.push(PipelineBinding {
stage,
ty,
binding_range: (register_offset + start)
.. (register_offset + end),
handle_offset: start_offset,
let mut res_offsets = MultiStageData::<RegisterData<RegisterAccumulator>>::default();
let mut sets = Vec::new();
for set_layout in set_layouts {
let layout = set_layout.borrow();
sets.push(DescriptorSetInfo {
bindings: Arc::clone(&layout.bindings),
registers: res_offsets.advance(&layout.pool_mapping),
});
};
if binding.stage.contains(stage) {
ty = binding.ty;
start = binding.binding_range.start;
end = binding.binding_range.end;
start_offset = binding.handle_offset;
current_offset = binding.handle_offset;
Some((ty, start, end, start_offset, current_offset))
} else {
None
}
} else {
end += 1;
current_offset += 1;
Some((ty, start, end, start_offset, current_offset))
}
}
}
}
// catch trailing descriptors
if let Some((ty, start, end, start_offset, _)) = state {
let register_offset =
get_descriptor_offset(ty, s_offset, t_offset, c_offset, u_offset);
optimized_bindings.push(PipelineBinding {
stage,
ty,
binding_range: (register_offset + start) .. (register_offset + end),
handle_offset: start_offset,
});
}
}
let offset_mappings = layout
.register_remap
.mapping
.iter()
.map(|register| {
let register_offset =
get_descriptor_offset(register.ty, s_offset, t_offset, c_offset, u_offset);
RegisterMapping {
ty: register.ty,
spirv_binding: register.spirv_binding,
hlsl_register: register.hlsl_register + register_offset as u8,
combined: register.combined,
}
})
.collect();
set_bindings.push(optimized_bindings);
set_remapping.push(RegisterRemapping {
mapping: offset_mappings,
num_s: layout.register_remap.num_s,
num_t: layout.register_remap.num_t,
num_c: layout.register_remap.num_c,
num_u: layout.register_remap.num_u,
});
s_offset += layout.register_remap.num_s as u32;
t_offset += layout.register_remap.num_t as u32;
c_offset += layout.register_remap.num_c as u32;
u_offset += layout.register_remap.num_u as u32;
}
//TODO: assert that res_offsets are within supported range
Ok(PipelineLayout {
set_bindings,
set_remapping,
sets,
})
}
@ -1027,7 +876,7 @@ impl device::Device<Backend> for Device {
Ok(())
}
unsafe fn get_pipeline_cache_data(&self, cache: &()) -> Result<Vec<u8>, device::OutOfMemory> {
unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result<Vec<u8>, device::OutOfMemory> {
//empty
Ok(Vec::new())
}
@ -1223,14 +1072,14 @@ impl device::Device<Backend> for Device {
uav: None,
usage,
},
ty: MemoryHeapFlags::empty(),
properties: memory::Properties::empty(),
bound_range: 0 .. 0,
host_ptr: ptr::null_mut(),
bind,
requirements: memory::Requirements {
size,
alignment: 1,
type_mask: MemoryHeapFlags::all().bits(),
type_mask: BUFFER_TYPE_MASK,
},
})
}
@ -1269,8 +1118,7 @@ impl device::Device<Backend> for Device {
SysMemSlicePitch: 0,
});
let raw = match memory.ty {
MemoryHeapFlags::DEVICE_LOCAL => {
let raw = if memory.properties.contains(memory::Properties::DEVICE_LOCAL) {
// device local memory
let desc = d3d11::D3D11_BUFFER_DESC {
ByteWidth: buffer.requirements.size as _,
@ -1305,8 +1153,7 @@ impl device::Device<Backend> for Device {
}
ComPtr::from_raw(buffer)
}
MemoryHeapFlags::HOST_VISIBLE | MemoryHeapFlags::HOST_COHERENT => {
} else {
let desc = d3d11::D3D11_BUFFER_DESC {
ByteWidth: buffer.requirements.size as _,
// TODO: dynamic?
@ -1341,8 +1188,6 @@ impl device::Device<Backend> for Device {
}
ComPtr::from_raw(buffer)
}
_ => unimplemented!(),
};
let disjoint_cb = if buffer.internal.disjoint_cb.is_some() {
@ -1439,7 +1284,7 @@ impl device::Device<Backend> for Device {
uav,
usage: buffer.internal.usage,
};
let range = offset .. buffer.requirements.size;
let range = offset .. offset + buffer.requirements.size;
memory.bind_buffer(range.clone(), internal.clone());
@ -1450,7 +1295,7 @@ impl device::Device<Backend> for Device {
};
buffer.internal = internal;
buffer.ty = memory.ty;
buffer.properties = memory.properties;
buffer.host_ptr = host_ptr;
buffer.bound_range = range;
@ -1471,7 +1316,7 @@ impl device::Device<Backend> for Device {
kind: image::Kind,
mip_levels: image::Level,
format: format::Format,
tiling: image::Tiling,
_tiling: image::Tiling,
usage: image::Usage,
view_caps: image::ViewCapabilities,
) -> Result<Image, image::CreationError> {
@ -1523,13 +1368,12 @@ impl device::Device<Backend> for Device {
mip_levels,
format,
usage,
tiling,
view_caps,
bind,
requirements: memory::Requirements {
size: size,
alignment: 1,
type_mask: MemoryHeapFlags::DEVICE_LOCAL.bits(),
type_mask: 0x1, // device-local only
},
})
}
@ -1964,7 +1808,6 @@ impl device::Device<Backend> for Device {
})
}
// TODO: make use of `max_sets`
unsafe fn create_descriptor_pool<I>(
&self,
_max_sets: usize,
@ -1975,19 +1818,15 @@ impl device::Device<Backend> for Device {
I: IntoIterator,
I::Item: Borrow<pso::DescriptorRangeDesc>,
{
let count = ranges
.into_iter()
.map(|r| {
let r = r.borrow();
r.count
* match r.ty {
pso::DescriptorType::CombinedImageSampler => 2,
_ => 1,
let mut total = RegisterData::default();
for range in ranges {
let r = range.borrow();
let content = DescriptorContent::from(r.ty);
total.add_content_many(content, r.count as DescriptorIndex);
}
})
.sum::<usize>();
let max_stages = 6;
let count = total.sum() * max_stages;
Ok(DescriptorPool::with_capacity(count))
}
@ -2002,155 +1841,26 @@ impl device::Device<Backend> for Device {
J: IntoIterator,
J::Item: Borrow<Sampler>,
{
use pso::DescriptorType::*;
let mut total = MultiStageData::<RegisterData<_>>::default();
let mut bindings = layout_bindings
.into_iter()
.map(|b| b.borrow().clone())
.collect::<Vec<_>>();
let mut bindings = Vec::new();
let mut mapping = Vec::new();
let mut num_t = 0;
let mut num_s = 0;
let mut num_c = 0;
let mut num_u = 0;
// we check how many hlsl registers we should use
for binding in layout_bindings {
let binding = binding.borrow();
let hlsl_reg = match binding.ty {
Sampler => {
num_s += 1;
num_s
for binding in bindings.iter() {
let content = DescriptorContent::from(binding.ty);
total.add_content(content, binding.stage_flags);
}
CombinedImageSampler => {
num_t += 1;
num_s += 1;
num_t
}
SampledImage | UniformTexelBuffer => {
num_t += 1;
num_t
}
UniformBuffer | UniformBufferDynamic => {
num_c += 1;
num_c
}
StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic
| StorageImage => {
num_u += 1;
num_u
}
} - 1;
// we decompose combined image samplers into a separate sampler and image internally
if binding.ty == pso::DescriptorType::CombinedImageSampler {
// TODO: for now we have to make combined image samplers share registers since
// spirv-cross doesn't support setting the register of the sampler/texture
// pair to separate values (only one `DescriptorSet` decorator)
let shared_reg = num_s.max(num_t);
bindings.sort_by_key(|a| a.binding);
num_s = shared_reg;
num_t = shared_reg;
let sampler_reg = num_s - 1;
let image_reg = num_t - 1;
mapping.push(RegisterMapping {
ty: pso::DescriptorType::Sampler,
spirv_binding: binding.binding,
hlsl_register: sampler_reg as u8,
combined: true,
let accum = total.map_register(|count| RegisterAccumulator {
res_index: *count as ResourceIndex,
});
mapping.push(RegisterMapping {
ty: pso::DescriptorType::SampledImage,
spirv_binding: binding.binding,
hlsl_register: image_reg as u8,
combined: true,
});
bindings.push(PipelineBinding {
stage: binding.stage_flags,
ty: pso::DescriptorType::Sampler,
binding_range: sampler_reg .. (sampler_reg + 1),
handle_offset: 0,
});
bindings.push(PipelineBinding {
stage: binding.stage_flags,
ty: pso::DescriptorType::SampledImage,
binding_range: image_reg .. (image_reg + 1),
handle_offset: 0,
});
} else {
mapping.push(RegisterMapping {
ty: binding.ty,
spirv_binding: binding.binding,
hlsl_register: hlsl_reg as u8,
combined: false,
});
bindings.push(PipelineBinding {
stage: binding.stage_flags,
ty: binding.ty,
binding_range: hlsl_reg .. (hlsl_reg + 1),
handle_offset: 0,
});
}
}
// we sort the internal descriptor's handle (the actual dx interface) by some categories to
// make it easier to group api calls together
bindings.sort_unstable_by(|a, b| {
(b.ty as u32)
.cmp(&(a.ty as u32))
.then(a.binding_range.start.cmp(&b.binding_range.start))
.then(a.stage.cmp(&b.stage))
});
// we assign the handle (interface ptr) offset according to what register type the
// descriptor is. the final layout of the handles should look like this:
//
// 0..num_s num_s..num_t num_t..num_c num_c..handle_len
// +----------+----------------+-------------+------------------+
// | | | | |
// +----------+----------------+-------------+------------------+
// 0 handle_len
//
let mut s = 0;
let mut t = 0;
let mut c = 0;
let mut u = 0;
for mut binding in bindings.iter_mut() {
match binding.ty {
Sampler => {
binding.handle_offset = s;
s += 1;
}
SampledImage | UniformTexelBuffer => {
binding.handle_offset = num_s + t;
t += 1;
}
UniformBuffer | UniformBufferDynamic => {
binding.handle_offset = num_s + num_t + c;
c += 1;
}
StorageTexelBuffer | StorageBuffer | InputAttachment | StorageBufferDynamic
| StorageImage => {
binding.handle_offset = num_s + num_t + num_c + u;
u += 1;
}
CombinedImageSampler => unreachable!(),
}
}
Ok(DescriptorSetLayout {
bindings,
handle_count: num_s + num_t + num_c + num_u,
register_remap: RegisterRemapping {
mapping,
num_s: num_s as _,
num_t: num_t as _,
num_c: num_c as _,
num_u: num_u as _,
},
bindings: Arc::new(bindings),
pool_mapping: accum.to_mapping(),
})
}
@ -2161,72 +1871,72 @@ impl device::Device<Backend> for Device {
J::Item: Borrow<pso::Descriptor<'a, Backend>>,
{
for write in write_iter {
//println!("WriteDescriptorSets({:?})", write.set.handles);
let target_binding = write.binding;
let (ty, first_offset, second_offset) = write.set.get_handle_offset(target_binding);
assert!((first_offset as usize) < write.set.len);
assert!((second_offset as usize) < write.set.len);
for descriptor in write.descriptors {
let handle = write.set.handles.offset(first_offset as isize);
let second_handle = write.set.handles.offset(second_offset as isize);
//println!(" Write(offset={}, handle={:?}) <= {:?}", first_offset, handle, ty);
match *descriptor.borrow() {
pso::Descriptor::Buffer(buffer, ref _range) => match ty {
pso::DescriptorType::UniformBuffer
| pso::DescriptorType::UniformBufferDynamic => {
if buffer.ty == MemoryHeapFlags::HOST_COHERENT {
let old_buffer = (*handle).0 as *mut _;
write.set.add_flush(old_buffer, buffer);
let mut mapping = write.set.layout.pool_mapping
.map_register(|mapping| mapping.offset);
let binding_start = write.set.layout.bindings
.iter()
.position(|binding| binding.binding == write.binding)
.unwrap();
for binding in &write.set.layout.bindings[.. binding_start] {
let content = DescriptorContent::from(binding.ty);
mapping.add_content(content, binding.stage_flags);
}
*handle = if let Some(buffer) = buffer.internal.disjoint_cb {
Descriptor(buffer as *mut _)
} else {
Descriptor(buffer.internal.raw as *mut _)
for (binding, descriptor) in write.set.layout.bindings[binding_start ..]
.iter()
.zip(write.descriptors)
{
let handles = match *descriptor.borrow() {
pso::Descriptor::Buffer(buffer, ref _range) => RegisterData {
c: match buffer.internal.disjoint_cb {
Some(dj_buf) => dj_buf as *mut _,
None => buffer.internal.raw as *mut _,
},
t: buffer.internal.srv.map_or(ptr::null_mut(), |p| p as *mut _),
u: buffer.internal.uav.map_or(ptr::null_mut(), |p| p as *mut _),
s: ptr::null_mut(),
},
pso::Descriptor::Image(image, _layout) => RegisterData {
c: ptr::null_mut(),
t: image.srv_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _),
u: image.uav_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _),
s: ptr::null_mut(),
},
pso::Descriptor::Sampler(sampler) => RegisterData {
c: ptr::null_mut(),
t: ptr::null_mut(),
u: ptr::null_mut(),
s: sampler.sampler_handle.as_raw() as *mut _,
},
pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => RegisterData {
c: ptr::null_mut(),
t: image.srv_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _),
u: image.uav_handle.clone().map_or(ptr::null_mut(), |h| h.as_raw() as *mut _),
s: sampler.sampler_handle.as_raw() as *mut _,
},
pso::Descriptor::UniformTexelBuffer(_buffer_view) => unimplemented!(),
pso::Descriptor::StorageTexelBuffer(_buffer_view) => unimplemented!(),
};
}
pso::DescriptorType::StorageBuffer => {
if buffer.ty == MemoryHeapFlags::HOST_COHERENT {
let old_buffer = (*handle).0 as *mut _;
write.set.add_flush(old_buffer, buffer);
write.set.add_invalidate(old_buffer, buffer);
}
let content = DescriptorContent::from(binding.ty);
if content.contains(DescriptorContent::CBV) {
let offsets = mapping.map_other(|map| map.c);
write.set.assign_stages(&offsets, binding.stage_flags, handles.c);
};
if content.contains(DescriptorContent::SRV) {
let offsets = mapping.map_other(|map| map.t);
write.set.assign_stages(&offsets, binding.stage_flags, handles.t);
};
if content.contains(DescriptorContent::UAV) {
let offsets = mapping.map_other(|map| map.u);
write.set.assign_stages(&offsets, binding.stage_flags, handles.u);
};
if content.contains(DescriptorContent::SAMPLER) {
let offsets = mapping.map_other(|map| map.s);
write.set.assign_stages(&offsets, binding.stage_flags, handles.s);
};
*handle = Descriptor(buffer.internal.uav.unwrap() as *mut _);
}
_ => unreachable!(),
},
pso::Descriptor::Image(image, _layout) => match ty {
pso::DescriptorType::SampledImage => {
*handle =
Descriptor(image.srv_handle.clone().unwrap().as_raw() as *mut _);
}
pso::DescriptorType::StorageImage => {
*handle =
Descriptor(image.uav_handle.clone().unwrap().as_raw() as *mut _);
}
pso::DescriptorType::InputAttachment => {
*handle =
Descriptor(image.srv_handle.clone().unwrap().as_raw() as *mut _);
}
_ => unreachable!(),
},
pso::Descriptor::Sampler(sampler) => {
*handle = Descriptor(sampler.sampler_handle.as_raw() as *mut _);
}
pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => {
*handle = Descriptor(sampler.sampler_handle.as_raw() as *mut _);
*second_handle =
Descriptor(image.srv_handle.clone().unwrap().as_raw() as *mut _);
}
pso::Descriptor::UniformTexelBuffer(_buffer_view) => {}
pso::Descriptor::StorageTexelBuffer(_buffer_view) => {}
}
mapping.add_content(content, binding.stage_flags);
}
}
}
@ -2237,8 +1947,9 @@ impl device::Device<Backend> for Device {
I::Item: Borrow<pso::DescriptorSetCopy<'a, Backend>>,
{
for copy in copy_iter {
let copy = copy.borrow();
let _copy = copy.borrow();
//TODO
/*
for offset in 0 .. copy.count {
let (dst_ty, dst_handle_offset, dst_second_handle_offset) = copy
.dst_set
@ -2267,7 +1978,7 @@ impl device::Device<Backend> for Device {
}
_ => *dst_handle = *src_handle,
}
}
}*/
}
}
@ -2397,15 +2108,15 @@ impl device::Device<Backend> for Device {
unimplemented!()
}
unsafe fn get_event_status(&self, event: &()) -> Result<bool, device::OomOrDeviceLost> {
unsafe fn get_event_status(&self, _event: &()) -> Result<bool, device::OomOrDeviceLost> {
unimplemented!()
}
unsafe fn set_event(&self, event: &()) -> Result<(), device::OutOfMemory> {
unsafe fn set_event(&self, _event: &()) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn reset_event(&self, event: &()) -> Result<(), device::OutOfMemory> {
unsafe fn reset_event(&self, _event: &()) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
@ -2565,11 +2276,10 @@ impl device::Device<Backend> for Device {
bind: 0, // TODO: ?
requirements: memory::Requirements {
// values don't really matter
size: 1,
alignment: 1,
type_mask: MemoryHeapFlags::DEVICE_LOCAL.bits(),
size: 0,
alignment: 0,
type_mask: 0,
},
tiling: image::Tiling::Optimal,
}
})
.collect();

48
third_party/rust/gfx-backend-dx11/src/dxgi.rs поставляемый
Просмотреть файл

@ -1,9 +1,18 @@
use hal::adapter::{AdapterInfo, DeviceType};
use winapi::shared::guiddef::{GUID, REFIID};
use winapi::shared::{dxgi, dxgi1_2, dxgi1_3, dxgi1_4, dxgi1_5, winerror};
use winapi::um::unknwnbase::IUnknown;
use winapi::Interface;
use winapi::{
shared::{
dxgi,
dxgi1_2,
dxgi1_3,
dxgi1_4,
dxgi1_5,
guiddef::{GUID, REFIID},
winerror,
},
um::unknwnbase::IUnknown,
Interface,
};
use wio::com::ComPtr;
@ -67,14 +76,16 @@ pub(crate) enum DxgiVersion {
Dxgi1_5,
}
type DxgiFun = extern "system" fn(REFIID, *mut *mut winapi::ctypes::c_void) -> winerror::HRESULT;
type DxgiFun =
unsafe extern "system" fn(REFIID, *mut *mut winapi::ctypes::c_void) -> winerror::HRESULT;
fn create_dxgi_factory1(
func: &DxgiFun, guid: &GUID
func: &DxgiFun,
guid: &GUID,
) -> Result<ComPtr<dxgi::IDXGIFactory>, winerror::HRESULT> {
let mut factory: *mut IUnknown = ptr::null_mut();
let hr = func(guid, &mut factory as *mut *mut _ as *mut *mut _);
let hr = unsafe { func(guid, &mut factory as *mut *mut _ as *mut *mut _) };
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(factory as *mut _) })
@ -84,37 +95,36 @@ fn create_dxgi_factory1(
}
pub(crate) fn get_dxgi_factory(
) -> Result<(ComPtr<dxgi::IDXGIFactory>, DxgiVersion), winerror::HRESULT> {
let library = libloading::Library::new("dxgi.dll")
.map_err(|_| -1)?;
let func: libloading::Symbol<DxgiFun> = unsafe {
library.get(b"CreateDXGIFactory1")
}.map_err(|_| -1)?;
) -> Result<(libloading::Library, ComPtr<dxgi::IDXGIFactory>, DxgiVersion), winerror::HRESULT> {
// The returned Com-pointer is only safe to use for the lifetime of the Library.
let library = libloading::Library::new("dxgi.dll").map_err(|_| -1)?;
let func: libloading::Symbol<DxgiFun> =
unsafe { library.get(b"CreateDXGIFactory1") }.map_err(|_| -1)?;
// TODO: do we even need `create_dxgi_factory2`?
if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_5::IDXGIFactory5::uuidof()) {
return Ok((factory, DxgiVersion::Dxgi1_5));
return Ok((library, factory, DxgiVersion::Dxgi1_5));
}
if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_4::IDXGIFactory4::uuidof()) {
return Ok((factory, DxgiVersion::Dxgi1_4));
return Ok((library, factory, DxgiVersion::Dxgi1_4));
}
if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_3::IDXGIFactory3::uuidof()) {
return Ok((factory, DxgiVersion::Dxgi1_3));
return Ok((library, factory, DxgiVersion::Dxgi1_3));
}
if let Ok(factory) = create_dxgi_factory1(&func, &dxgi1_2::IDXGIFactory2::uuidof()) {
return Ok((factory, DxgiVersion::Dxgi1_2));
return Ok((library, factory, DxgiVersion::Dxgi1_2));
}
if let Ok(factory) = create_dxgi_factory1(&func, &dxgi::IDXGIFactory1::uuidof()) {
return Ok((factory, DxgiVersion::Dxgi1_0));
return Ok((library, factory, DxgiVersion::Dxgi1_0));
}
// TODO: any reason why above would fail and this wouldnt?
match create_dxgi_factory1(&func, &dxgi::IDXGIFactory::uuidof()) {
Ok(factory) => Ok((factory, DxgiVersion::Dxgi1_0)),
Ok(factory) => Ok((library, factory, DxgiVersion::Dxgi1_0)),
Err(hr) => Err(hr),
}
}

740
third_party/rust/gfx-backend-dx11/src/lib.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -39,9 +39,10 @@ pub(crate) fn compile_spirv_entrypoint(
let mut ast = parse_spirv(raw_data)?;
spirv_cross_specialize_ast(&mut ast, &source.specialization)?;
patch_spirv_resources(&mut ast, layout)?;
patch_spirv_resources(&mut ast, stage, layout)?;
let shader_model = hlsl::ShaderModel::V5_0;
let shader_code = translate_spirv(&mut ast, shader_model, layout, stage)?;
log::debug!("Generated {:?} shader:\n{:?}", stage, shader_code.replace("\n", "\r\n"));
let real_name = ast
.get_cleansed_entry_point_name(source.entry, conv::map_stage(stage))
@ -71,7 +72,7 @@ pub(crate) fn compile_hlsl_shader(
entry: &str,
code: &[u8],
) -> Result<*mut d3dcommon::ID3DBlob, device::ShaderError> {
let stage_to_str = |stage, shader_model| {
let stage_str = {
let stage = match stage {
pso::Stage::Vertex => "vs",
pso::Stage::Fragment => "ps",
@ -102,7 +103,7 @@ pub(crate) fn compile_hlsl_shader(
ptr::null(),
ptr::null_mut(),
entry.as_ptr() as *const _,
stage_to_str(stage, shader_model).as_ptr() as *const i8,
stage_str.as_ptr() as *const i8,
1,
0,
&mut blob as *mut *mut _,
@ -139,6 +140,7 @@ fn parse_spirv(raw_data: &[u32]) -> Result<spirv::Ast<hlsl::Target>, device::Sha
fn patch_spirv_resources(
ast: &mut spirv::Ast<hlsl::Target>,
stage: pso::Stage,
layout: &PipelineLayout,
) -> Result<(), device::ShaderError> {
// we remap all `layout(binding = n, set = n)` to a flat space which we get from our
@ -152,16 +154,13 @@ fn patch_spirv_resources(
let binding = ast
.get_decoration(image.id, spirv::Decoration::Binding)
.map_err(gen_query_error)?;
let mapping = layout.set_remapping[set]
.mapping
.iter()
.find(|&mapping| binding == mapping.spirv_binding)
.unwrap();
let (_content, res_index) = layout.sets[set]
.find_register(stage, binding);
ast.set_decoration(
image.id,
spirv::Decoration::Binding,
mapping.hlsl_register as u32,
res_index.t as u32,
)
.map_err(gen_unexpected_error)?;
}
@ -173,16 +172,13 @@ fn patch_spirv_resources(
let binding = ast
.get_decoration(uniform_buffer.id, spirv::Decoration::Binding)
.map_err(gen_query_error)?;
let mapping = layout.set_remapping[set]
.mapping
.iter()
.find(|&mapping| binding == mapping.spirv_binding)
.unwrap();
let (_content, res_index) = layout.sets[set]
.find_register(stage, binding);
ast.set_decoration(
uniform_buffer.id,
spirv::Decoration::Binding,
mapping.hlsl_register as u32,
res_index.c as u32,
)
.map_err(gen_unexpected_error)?;
}
@ -194,16 +190,13 @@ fn patch_spirv_resources(
let binding = ast
.get_decoration(storage_buffer.id, spirv::Decoration::Binding)
.map_err(gen_query_error)?;
let mapping = layout.set_remapping[set]
.mapping
.iter()
.find(|&mapping| binding == mapping.spirv_binding)
.unwrap();
let (_content, res_index) = layout.sets[set]
.find_register(stage, binding);
ast.set_decoration(
storage_buffer.id,
spirv::Decoration::Binding,
mapping.hlsl_register as u32,
res_index.u as u32, //TODO: also decorate `res_index.t`
)
.map_err(gen_unexpected_error)?;
}
@ -215,16 +208,13 @@ fn patch_spirv_resources(
let binding = ast
.get_decoration(image.id, spirv::Decoration::Binding)
.map_err(gen_query_error)?;
let mapping = layout.set_remapping[set]
.mapping
.iter()
.find(|&mapping| binding == mapping.spirv_binding)
.unwrap();
let (_content, res_index) = layout.sets[set]
.find_register(stage, binding);
ast.set_decoration(
image.id,
spirv::Decoration::Binding,
mapping.hlsl_register as u32,
res_index.u as u32, //TODO: also decorate `res_index.t`
)
.map_err(gen_unexpected_error)?;
}
@ -236,16 +226,13 @@ fn patch_spirv_resources(
let binding = ast
.get_decoration(sampler.id, spirv::Decoration::Binding)
.map_err(gen_query_error)?;
let mapping = layout.set_remapping[set]
.mapping
.iter()
.find(|&mapping| binding == mapping.spirv_binding)
.unwrap();
let (_content, res_index) = layout.sets[set]
.find_register(stage, binding);
ast.set_decoration(
sampler.id,
spirv::Decoration::Binding,
mapping.hlsl_register as u32,
res_index.s as u32,
)
.map_err(gen_unexpected_error)?;
}
@ -257,16 +244,13 @@ fn patch_spirv_resources(
let binding = ast
.get_decoration(image.id, spirv::Decoration::Binding)
.map_err(gen_query_error)?;
let mapping = layout.set_remapping[set]
.mapping
.iter()
.find(|&mapping| binding == mapping.spirv_binding)
.unwrap();
let (_content, res_index) = layout.sets[set]
.find_register(stage, binding);
ast.set_decoration(
image.id,
spirv::Decoration::Binding,
mapping.hlsl_register as u32,
res_index.t as u32,
)
.map_err(gen_unexpected_error)?;
}

Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"9160602c7e183274efa52c346003522912cc285e7065a32a2f7fefc9de7c908e","README.md":"0b5008f38b9cf1bda9de72f8ca467c399404df0e75daf3b1e5796f4d1fd7568f","shaders/blit.metal":"b243873ac0d7ded37b199d17d1a7b53d5332b4a57bfa22f99dcf60273730be45","shaders/clear.metal":"796a612c1cb48e46fc94b7227feaab993d7ddeed293b69e9f09b2dd88e6a1189","shaders/fill.metal":"2642b5df62f8eb2246a442137d083010d2a3132110d9be4eb25b479123098d25","shaders/gfx-shaders-ios.metallib":"b93c70027cf196548eac31a3cf5f37947ee2b13655445bc03c68c8224dad9613","shaders/gfx-shaders-macos.metallib":"cc7e8a6ad0a0d99197bdd9c65939e3a4d9960fa8aa181467363aa3578d68af54","shaders/macros.h":"a4550ac7c180935c2edb57aa7a5f8442b53f1f3dc65df8cc800d0afb8289cdeb","src/command.rs":"06578d5f260ea9c440e81b7239f14a4a0216664a1afbc786a3df5f6687609484","src/conversions.rs":"ab9daf8e97b7d28bea3b8e6773afc287b3441d148a1cc12822c646cdbba2a37f","src/device.rs":"1d475ace1313b3c82a5f1e122ab7c818ca746f45d82ecd6e24932021cb743167","src/internal.rs":"93039ce3266f771c40d186f887f434bbef403e3deef02bc606b7a1a2f6031db1","src/lib.rs":"43bc34e00352819340e92b48516fa101fb1d20b2e6f8d275df9d0a0a31c95a49","src/native.rs":"516229d72433df23296f11b1490278f080d5a90646e7961f0e928da036f7f28d","src/soft.rs":"795767c3756a95b5a1e3bf28d2d4ce3eb85fb358ef098a4fbe0af893509e3941","src/window.rs":"cebbe53f2fb45dbdfcf03ba18ca181fa966997665cec65ae1a1d77d0c193f20b"},"package":"b19262dc17be65f2c5b5c72fe5199ab2017faf51f9e0e353c4a68b63a88f933b"}
{"files":{"Cargo.toml":"f180d9eace35977c44464b71590db85cc2b47e9ffa53687afc20d44dd7eb39ee","README.md":"0b5008f38b9cf1bda9de72f8ca467c399404df0e75daf3b1e5796f4d1fd7568f","shaders/blit.metal":"b243873ac0d7ded37b199d17d1a7b53d5332b4a57bfa22f99dcf60273730be45","shaders/clear.metal":"796a612c1cb48e46fc94b7227feaab993d7ddeed293b69e9f09b2dd88e6a1189","shaders/fill.metal":"2642b5df62f8eb2246a442137d083010d2a3132110d9be4eb25b479123098d25","shaders/gfx-shaders-ios.metallib":"b93c70027cf196548eac31a3cf5f37947ee2b13655445bc03c68c8224dad9613","shaders/gfx-shaders-macos.metallib":"cc7e8a6ad0a0d99197bdd9c65939e3a4d9960fa8aa181467363aa3578d68af54","shaders/macros.h":"a4550ac7c180935c2edb57aa7a5f8442b53f1f3dc65df8cc800d0afb8289cdeb","src/command.rs":"06578d5f260ea9c440e81b7239f14a4a0216664a1afbc786a3df5f6687609484","src/conversions.rs":"ab9daf8e97b7d28bea3b8e6773afc287b3441d148a1cc12822c646cdbba2a37f","src/device.rs":"1d475ace1313b3c82a5f1e122ab7c818ca746f45d82ecd6e24932021cb743167","src/internal.rs":"93039ce3266f771c40d186f887f434bbef403e3deef02bc606b7a1a2f6031db1","src/lib.rs":"43bc34e00352819340e92b48516fa101fb1d20b2e6f8d275df9d0a0a31c95a49","src/native.rs":"516229d72433df23296f11b1490278f080d5a90646e7961f0e928da036f7f28d","src/soft.rs":"795767c3756a95b5a1e3bf28d2d4ce3eb85fb358ef098a4fbe0af893509e3941","src/window.rs":"cebbe53f2fb45dbdfcf03ba18ca181fa966997665cec65ae1a1d77d0c193f20b"},"package":"05b6130b9a72129ebb5c91d3d75a142a7fa54dcc112603231582e3fdc0b84247"}

Просмотреть файл

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "gfx-backend-metal"
version = "0.4.4"
version = "0.4.5"
authors = ["The Gfx-rs Developers"]
description = "Metal API backend for gfx-rs"
homepage = "https://github.com/gfx-rs/gfx"
@ -74,7 +74,7 @@ features = ["private"]
version = "0.2.5"
[dependencies.parking_lot]
version = "0.9"
version = "0.10"
[dependencies.range-alloc]
version = "0.1"

Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"62073f7e3f8dd1c6996e2d2310af31b3f0c4a9c6a58cbfd1296d5d4335329c03","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"d9ed1f911f058d066ebfd024940da8a5c1ebbab6cfd65a633dfbc613573dd823","src/mutex.rs":"179f232bbbe8279365af427287566f1e1382ddbee13d611deccbce34705c447b","src/remutex.rs":"29e724285529bc4fdff5be3a1d3066253a6da493bdcebf024c4ccbdfdd94457c","src/rwlock.rs":"5661564ab948f3a71be008bf1abb5c40d5d5660ca2f6a7c57ae73e51f31ababf"},"package":"f8912e782533a93a167888781b836336a6ca5da6175c05944c86cf28c31104dc"}
{"files":{"Cargo.toml":"6b931fdc231a35953748d244fc3ed1b6cd4d95c95c618058955df508f2e7e738","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"3541bfbc3b6121af8fbeb4683ab28412ca46a9b9bdd83a9348778db76e7ea18f","src/mutex.rs":"d010fba6b466937cbc8c16ed4131c7a16753c61362e4be038c1748c2b9431340","src/remutex.rs":"541735f5675c78117cdec802b53df6ac5b7a834d18e0616cff073b7acc6cf02b","src/rwlock.rs":"992394f38f0bc5211fa1f4d7b7af3a1cc9afcec4d48734ded3b248897c7902d9"},"package":"c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"}

2
third_party/rust/lock_api/Cargo.toml поставляемый
Просмотреть файл

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "lock_api"
version = "0.3.1"
version = "0.3.4"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
keywords = ["mutex", "rwlock", "lock", "no_std"]

4
third_party/rust/lock_api/src/lib.rs поставляемый
Просмотреть файл

@ -47,7 +47,9 @@
//! }
//!
//! fn try_lock(&self) -> bool {
//! self.0.swap(true, Ordering::Acquire)
//! self.0
//! .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
//! .is_ok()
//! }
//!
//! fn unlock(&self) {

135
third_party/rust/lock_api/src/mutex.rs поставляемый
Просмотреть файл

@ -28,6 +28,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// exclusive: a lock can't be acquired while the mutex is already locked.
pub unsafe trait RawMutex {
/// Initial value for an unlocked mutex.
// A “non-constant” const item is a legacy way to supply an initialized value to downstream
// static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self;
/// Marker type which determines whether a lock guard should be `Send`. Use
@ -37,7 +40,8 @@ pub unsafe trait RawMutex {
/// Acquires this mutex, blocking the current thread until it is able to do so.
fn lock(&self);
/// Attempts to acquire this mutex without blocking.
/// Attempts to acquire this mutex without blocking. Returns `true`
/// if the lock was successfully acquired and `false` otherwise.
fn try_lock(&self) -> bool;
/// Unlocks this mutex.
@ -91,40 +95,11 @@ pub unsafe trait RawMutexTimed: RawMutex {
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
pub struct Mutex<R: RawMutex, T: ?Sized> {
pub struct Mutex<R, T: ?Sized> {
raw: R,
data: UnsafeCell<T>,
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for Mutex<R, T>
where
R: RawMutex,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for Mutex<R, T>
where
R: RawMutex,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(Mutex::new)
}
}
unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {}
unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {}
@ -133,21 +108,39 @@ impl<R: RawMutex, T> Mutex<R, T> {
#[cfg(feature = "nightly")]
#[inline]
pub const fn new(val: T) -> Mutex<R, T> {
Mutex { data: UnsafeCell::new(val), raw: R::INIT }
Mutex {
raw: R::INIT,
data: UnsafeCell::new(val),
}
}
/// Creates a new mutex in an unlocked state ready for use.
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn new(val: T) -> Mutex<R, T> {
Mutex { data: UnsafeCell::new(val), raw: R::INIT }
Mutex {
raw: R::INIT,
data: UnsafeCell::new(val),
}
}
/// Consumes this mutex, returning the underlying data.
#[inline]
#[allow(unused_unsafe)]
pub fn into_inner(self) -> T {
unsafe { self.data.into_inner() }
self.data.into_inner()
}
}
impl<R, T> Mutex<R, T> {
/// Creates a new mutex based on a pre-existing raw mutex.
///
/// This allows creating a mutex in a constant context on stable Rust.
#[inline]
pub const fn const_new(raw_mutex: R, val: T) -> Mutex<R, T> {
Mutex {
raw: raw_mutex,
data: UnsafeCell::new(val),
}
}
}
@ -157,7 +150,10 @@ impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
/// The lock must be held when calling this method.
#[inline]
unsafe fn guard(&self) -> MutexGuard<'_, R, T> {
MutexGuard { mutex: self, marker: PhantomData }
MutexGuard {
mutex: self,
marker: PhantomData,
}
}
/// Acquires a mutex, blocking the current thread until it is able to do so.
@ -309,12 +305,43 @@ impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
}
}
f.debug_struct("Mutex").field("data", &LockedPlaceholder).finish()
f.debug_struct("Mutex")
.field("data", &LockedPlaceholder)
.finish()
}
}
}
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for Mutex<R, T>
where
R: RawMutex,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for Mutex<R, T>
where
R: RawMutex,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(Mutex::new)
}
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
@ -350,17 +377,21 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
let raw = &s.mutex.raw;
let data = f(unsafe { &mut *s.mutex.data.get() });
mem::forget(s);
MappedMutexGuard { raw, data, marker: PhantomData }
MappedMutexGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedMutexGuard` for a component of the
/// locked data. The original guard is return if the closure returns `None`.
/// locked data. The original guard is returned if the closure returns `None`.
///
/// This operation cannot fail as the `MutexGuard` passed
/// in already locked the mutex.
///
/// This is an associated function that needs to be
/// used as `MutexGuard::map(...)`. A method would interfere with methods of
/// used as `MutexGuard::try_map(...)`. A method would interfere with methods of
/// the same name on the contents of the locked data.
#[inline]
pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self>
@ -373,7 +404,11 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
None => return Err(s),
};
mem::forget(s);
Ok(MappedMutexGuard { raw, data, marker: PhantomData })
Ok(MappedMutexGuard {
raw,
data,
marker: PhantomData,
})
}
/// Temporarily unlocks the mutex to execute the given function.
@ -412,7 +447,7 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
/// Temporarily unlocks the mutex to execute the given function.
///
/// The mutex is unlocked a fair unlock protocol.
/// The mutex is unlocked using a fair unlock protocol.
///
/// This is safe because `&mut` guarantees that there exist no other
/// references to the data protected by the mutex.
@ -514,17 +549,21 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
let raw = s.raw;
let data = f(unsafe { &mut *s.data });
mem::forget(s);
MappedMutexGuard { raw, data, marker: PhantomData }
MappedMutexGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedMutexGuard` for a component of the
/// locked data. The original guard is return if the closure returns `None`.
/// locked data. The original guard is returned if the closure returns `None`.
///
/// This operation cannot fail as the `MappedMutexGuard` passed
/// in already locked the mutex.
///
/// This is an associated function that needs to be
/// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of
/// used as `MappedMutexGuard::try_map(...)`. A method would interfere with methods of
/// the same name on the contents of the locked data.
#[inline]
pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self>
@ -537,7 +576,11 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
None => return Err(s),
};
mem::forget(s);
Ok(MappedMutexGuard { raw, data, marker: PhantomData })
Ok(MappedMutexGuard {
raw,
data,
marker: PhantomData,
})
}
}

135
third_party/rust/lock_api/src/remutex.rs поставляемый
Просмотреть файл

@ -37,6 +37,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// re-used since that thread is no longer active.
pub unsafe trait GetThreadId {
/// Initial value.
// A “non-constant” const item is a legacy way to supply an initialized value to downstream
// static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self;
/// Returns a non-zero thread ID which identifies the current thread of
@ -44,7 +47,7 @@ pub unsafe trait GetThreadId {
fn nonzero_thread_id(&self) -> NonZeroUsize;
}
struct RawReentrantMutex<R: RawMutex, G: GetThreadId> {
struct RawReentrantMutex<R, G> {
owner: AtomicUsize,
lock_count: Cell<usize>,
mutex: R,
@ -57,7 +60,10 @@ impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
let id = self.get_thread_id.nonzero_thread_id().get();
if self.owner.load(Ordering::Relaxed) == id {
self.lock_count.set(
self.lock_count.get().checked_add(1).expect("ReentrantMutex lock count overflow"),
self.lock_count
.get()
.checked_add(1)
.expect("ReentrantMutex lock count overflow"),
);
} else {
if !try_lock() {
@ -139,42 +145,11 @@ impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
///
/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
/// primitive.
pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
pub struct ReentrantMutex<R, G, T: ?Sized> {
raw: RawReentrantMutex<R, G>,
data: UnsafeCell<T>,
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
}
}
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
for ReentrantMutex<R, G, T>
{
@ -217,9 +192,28 @@ impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
/// Consumes this mutex, returning the underlying data.
#[inline]
#[allow(unused_unsafe)]
pub fn into_inner(self) -> T {
unsafe { self.data.into_inner() }
self.data.into_inner()
}
}
impl<R, G, T> ReentrantMutex<R, G, T> {
/// Creates a new reentrant mutex based on a pre-existing raw mutex and a
/// helper to get the thread ID.
///
/// This allows creating a reentrant mutex in a constant context on stable
/// Rust.
#[inline]
pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
ReentrantMutex {
data: UnsafeCell::new(val),
raw: RawReentrantMutex {
owner: AtomicUsize::new(0),
lock_count: Cell::new(0),
mutex: raw_mutex,
get_thread_id,
},
}
}
}
@ -229,7 +223,10 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
/// The lock must be held when calling this method.
#[inline]
unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
ReentrantMutexGuard { remutex: &self, marker: PhantomData }
ReentrantMutexGuard {
remutex: &self,
marker: PhantomData,
}
}
/// Acquires a reentrant mutex, blocking the current thread until it is able
@ -373,7 +370,10 @@ impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.try_lock() {
Some(guard) => f.debug_struct("ReentrantMutex").field("data", &&*guard).finish(),
Some(guard) => f
.debug_struct("ReentrantMutex")
.field("data", &&*guard)
.finish(),
None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
@ -382,12 +382,45 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for Reentra
}
}
f.debug_struct("ReentrantMutex").field("data", &LockedPlaceholder).finish()
f.debug_struct("ReentrantMutex")
.field("data", &LockedPlaceholder)
.finish()
}
}
}
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
}
}
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
@ -426,7 +459,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu
let raw = &s.remutex.raw;
let data = f(unsafe { &*s.remutex.data.get() });
mem::forget(s);
MappedReentrantMutexGuard { raw, data, marker: PhantomData }
MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
@ -452,7 +489,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu
None => return Err(s),
};
mem::forget(s);
Ok(MappedReentrantMutexGuard { raw, data, marker: PhantomData })
Ok(MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
})
}
/// Temporarily unlocks the mutex to execute the given function.
@ -597,7 +638,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
let raw = s.raw;
let data = f(unsafe { &*s.data });
mem::forget(s);
MappedReentrantMutexGuard { raw, data, marker: PhantomData }
MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
@ -623,7 +668,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
None => return Err(s),
};
mem::forget(s);
Ok(MappedReentrantMutexGuard { raw, data, marker: PhantomData })
Ok(MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
})
}
}

142
third_party/rust/lock_api/src/rwlock.rs поставляемый
Просмотреть файл

@ -30,6 +30,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// exists.
pub unsafe trait RawRwLock {
/// Initial value for an unlocked `RwLock`.
// A “non-constant” const item is a legacy way to supply an initialized value to downstream
// static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self;
/// Marker type which determines whether a lock guard should be `Send`. Use
@ -228,7 +231,7 @@ pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained of the lock.
pub struct RwLock<R: RawRwLock, T: ?Sized> {
pub struct RwLock<R, T: ?Sized> {
raw: R,
data: UnsafeCell<T>,
}
@ -270,14 +273,20 @@ impl<R: RawRwLock, T> RwLock<R, T> {
#[cfg(feature = "nightly")]
#[inline]
pub const fn new(val: T) -> RwLock<R, T> {
RwLock { data: UnsafeCell::new(val), raw: R::INIT }
RwLock {
data: UnsafeCell::new(val),
raw: R::INIT,
}
}
/// Creates a new instance of an `RwLock<T>` which is unlocked.
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn new(val: T) -> RwLock<R, T> {
RwLock { data: UnsafeCell::new(val), raw: R::INIT }
RwLock {
data: UnsafeCell::new(val),
raw: R::INIT,
}
}
/// Consumes this `RwLock`, returning the underlying data.
@ -288,13 +297,31 @@ impl<R: RawRwLock, T> RwLock<R, T> {
}
}
impl<R, T> RwLock<R, T> {
/// Creates a new new instance of an `RwLock<T>` based on a pre-existing
/// `RawRwLock<T>`.
///
/// This allows creating a `RwLock<T>` in a constant context on stable
/// Rust.
#[inline]
pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
RwLock {
data: UnsafeCell::new(val),
raw: raw_rwlock,
}
}
}
impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
/// # Safety
///
/// The lock must be held when calling this method.
#[inline]
unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
RwLockReadGuard { rwlock: self, marker: PhantomData }
RwLockReadGuard {
rwlock: self,
marker: PhantomData,
}
}
/// # Safety
@ -302,7 +329,10 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
/// The lock must be held when calling this method.
#[inline]
unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
RwLockWriteGuard { rwlock: self, marker: PhantomData }
RwLockWriteGuard {
rwlock: self,
marker: PhantomData,
}
}
/// Locks this `RwLock` with shared read access, blocking the current thread
@ -623,7 +653,10 @@ impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
/// The lock must be held when calling this method.
#[inline]
unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
RwLockUpgradableReadGuard { rwlock: self, marker: PhantomData }
RwLockUpgradableReadGuard {
rwlock: self,
marker: PhantomData,
}
}
/// Locks this `RwLock` with upgradable read access, blocking the current thread
@ -726,7 +759,9 @@ impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
}
}
f.debug_struct("RwLock").field("data", &LockedPlaceholder).finish()
f.debug_struct("RwLock")
.field("data", &LockedPlaceholder)
.finish()
}
}
}
@ -764,7 +799,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
let raw = &s.rwlock.raw;
let data = f(unsafe { &*s.rwlock.data.get() });
mem::forget(s);
MappedRwLockReadGuard { raw, data, marker: PhantomData }
MappedRwLockReadGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedRwLockReadGuard` for a component of the
@ -787,7 +826,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
None => return Err(s),
};
mem::forget(s);
Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData })
Ok(MappedRwLockReadGuard {
raw,
data,
marker: PhantomData,
})
}
/// Temporarily unlocks the `RwLock` to execute the given function.
@ -917,7 +960,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
let raw = &s.rwlock.raw;
let data = f(unsafe { &mut *s.rwlock.data.get() });
mem::forget(s);
MappedRwLockWriteGuard { raw, data, marker: PhantomData }
MappedRwLockWriteGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
@ -940,7 +987,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
None => return Err(s),
};
mem::forget(s);
Ok(MappedRwLockWriteGuard { raw, data, marker: PhantomData })
Ok(MappedRwLockWriteGuard {
raw,
data,
marker: PhantomData,
})
}
/// Temporarily unlocks the `RwLock` to execute the given function.
@ -969,7 +1020,10 @@ impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T>
s.rwlock.raw.downgrade();
let rwlock = s.rwlock;
mem::forget(s);
RwLockReadGuard { rwlock, marker: PhantomData }
RwLockReadGuard {
rwlock,
marker: PhantomData,
}
}
}
@ -984,7 +1038,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a,
s.rwlock.raw.downgrade_to_upgradable();
let rwlock = s.rwlock;
mem::forget(s);
RwLockUpgradableReadGuard { rwlock, marker: PhantomData }
RwLockUpgradableReadGuard {
rwlock,
marker: PhantomData,
}
}
}
@ -1112,7 +1169,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
s.rwlock.raw.upgrade();
let rwlock = s.rwlock;
mem::forget(s);
RwLockWriteGuard { rwlock, marker: PhantomData }
RwLockWriteGuard {
rwlock,
marker: PhantomData,
}
}
/// Tries to atomically upgrade an upgradable read lock into a exclusive write lock.
@ -1122,7 +1182,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
if s.rwlock.raw.try_upgrade() {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard { rwlock, marker: PhantomData })
Ok(RwLockWriteGuard {
rwlock,
marker: PhantomData,
})
} else {
Err(s)
}
@ -1187,7 +1250,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableRead
s.rwlock.raw.downgrade_upgradable();
let rwlock = s.rwlock;
mem::forget(s);
RwLockReadGuard { rwlock, marker: PhantomData }
RwLockReadGuard {
rwlock,
marker: PhantomData,
}
}
}
@ -1204,7 +1270,10 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar
if s.rwlock.raw.try_upgrade_for(timeout) {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard { rwlock, marker: PhantomData })
Ok(RwLockWriteGuard {
rwlock,
marker: PhantomData,
})
} else {
Err(s)
}
@ -1223,7 +1292,10 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar
if s.rwlock.raw.try_upgrade_until(timeout) {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard { rwlock, marker: PhantomData })
Ok(RwLockWriteGuard {
rwlock,
marker: PhantomData,
})
} else {
Err(s)
}
@ -1304,7 +1376,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
let raw = s.raw;
let data = f(unsafe { &*s.data });
mem::forget(s);
MappedRwLockReadGuard { raw, data, marker: PhantomData }
MappedRwLockReadGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedRwLockReadGuard` for a component of the
@ -1327,7 +1403,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
None => return Err(s),
};
mem::forget(s);
Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData })
Ok(MappedRwLockReadGuard {
raw,
data,
marker: PhantomData,
})
}
}
@ -1428,7 +1508,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
let raw = s.raw;
let data = f(unsafe { &mut *s.data });
mem::forget(s);
MappedRwLockWriteGuard { raw, data, marker: PhantomData }
MappedRwLockWriteGuard {
raw,
data,
marker: PhantomData,
}
}
/// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
@ -1451,7 +1535,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
None => return Err(s),
};
mem::forget(s);
Ok(MappedRwLockWriteGuard { raw, data, marker: PhantomData })
Ok(MappedRwLockWriteGuard {
raw,
data,
marker: PhantomData,
})
}
}
@ -1462,12 +1550,20 @@ impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a,
/// Note that if there are any writers currently waiting to take the lock
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
#[deprecated(
since = "0.3.3",
note = "This function is unsound and will be removed in the future, see issue #198"
)]
pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
s.raw.downgrade();
let raw = s.raw;
let data = s.data;
mem::forget(s);
MappedRwLockReadGuard { raw, data, marker: PhantomData }
MappedRwLockReadGuard {
raw,
data,
marker: PhantomData,
}
}
}

2
third_party/rust/nom/.cargo-checksum.json поставляемый

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

101
third_party/rust/nom/.travis.yml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,101 @@
language: rust
# sudo is required to enable kcov to use the personality syscall
sudo: required
dist: trusty
cache: cargo
rust:
- nightly
- beta
- stable
- 1.31.0
env:
matrix:
- FEATURES='--features "regexp regexp_macros"'
before_script:
- eval git pull --rebase https://github.com/Geal/nom master
- eval git log --pretty=oneline HEAD~5..HEAD
matrix:
include:
- rust: nightly
env: FEATURES='--no-default-features'
- rust: nightly
env: FEATURES='--no-default-features --features "alloc"'
- rust: stable
env: FEATURES=''
- rust: nightly
env: DOC_FEATURES='--features "std lexical regexp regexp_macros" --no-default-features'
before_script:
- export PATH=$HOME/.cargo/bin:$PATH
script:
- eval cargo doc --verbose $DOC_FEATURES
- rust: nightly
env: FEATURES=''
before_script:
- export PATH=$HOME/.cargo/bin:$PATH
- cargo install cargo-update || echo "cargo-update already installed"
- cargo install cargo-travis || echo "cargo-travis already installed"
- cargo install-update -a
- mkdir -p target/kcov-master
script:
cargo coveralls --verbose --all-features
allow_failures:
- rust: stable
env: FEATURES=''
before_script:
- export PATH=$HOME/.cargo/bin:$PATH
- rustup component add rustfmt-preview
script:
- eval cargo fmt -- --write-mode=diff
notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/9c035a194ac4fd4cc061
on_success: change
on_failure: always
on_start: false
addons:
apt:
packages:
- libcurl4-openssl-dev
- libelf-dev
- libdw-dev
- binutils-dev
- cmake
sources:
- kalakris-cmake
cache:
directories:
- /home/travis/.cargo
before_cache:
- rm -rf /home/travis/.cargo/registry
script:
- eval cargo build --verbose $FEATURES
- eval cargo test --verbose $FEATURES
after_success: |
case "$TRAVIS_RUST_VERSION" in
nightly)
if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
git fetch &&
git checkout master &&
cargo bench --verbose
fi
if [ "$FEATURES" == '--features "regexp regexp_macros"' ]; then
cargo bench --verbose
fi
;;
*)
;;
esac

Просмотреть файл

@ -1 +1 @@
{"files":{"CHANGELOG.md":"d338e1af7a55941072de9e34e2537da86351b55e09f483f4214f63606f1dcad6","Cargo.toml":"d14378cc93973e6b478ee13195f149a2549791bcc61dbd26df45577e1f86cdf9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"e9af35881a3de707d5e60a15918119e88f7716a09dabe9da7960aa3191e6e7d9","appveyor.yml":"fa41673db7146f34d601a5977d77fe81fd29da706b5981cfd68ce79affd7a667","build.rs":"4ed00d73d71057bcdf6c186559468927fc130fd65cfd806ee5d46d28540bc653","src/condvar.rs":"275f05affa456117f255ccc3de3277c1174e470f307563da0d166915d4a2f68e","src/deadlock.rs":"081dbf009539b113f67ad0a1abd7af889dad684a47aa1a7dc00ae91f08975ef6","src/elision.rs":"ee8735e8695dc90ccc16002a229d5b64ba4e1c867c77f551b8715a1958faaeac","src/lib.rs":"eda1ae488e72f1d514cb7bc19600d3948cb69423a9d68738acd12565346461ec","src/mutex.rs":"e3a48933b7e19d26eab4b5f44ed4e9bcb069b57cdd4a0569d1e65f6c3839b766","src/once.rs":"155694841d62c54b8b489916f14cad887a86138000f3a6925c8d70a4a5711283","src/raw_mutex.rs":"5797de689e5c89eae2b45a4bf15bd42a01345aed0770c56f65846daee083588a","src/raw_rwlock.rs":"f13ff54a30d2fb53f95ab565db4e478f20f0a2b85b2b75f4392dc80e34f5f270","src/remutex.rs":"b62e72028b6d168650a9c3fb9375b3690225126b055a8874a7989b5f8dcb6605","src/rwlock.rs":"b0c92f2c602d13213a5e03f16ecda70dec7ea1d256cc99e4a3d6e2adad1afdd4","src/util.rs":"35f1c1930fb30fca0ceab5e0d68d8c418c7f0bb5b6ac6f21d6019986a3046cca"},"package":"f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"}
{"files":{"CHANGELOG.md":"18a3b6c2a59fb59450362712afae444070b23c2697cf20aa9ee3911dd9f6d981","Cargo.toml":"c0d17dd8decba5afb1495577e0ded39c0228eeb2eb14be4f4d4d5d9c96ebc0c3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"0f1b45638eb0d2b3f142baec830319a5790f3f2004eac5bc5f3e95bc632bdaed","appveyor.yml":"e2416e65e27442dd8e17c773deee1e45ee96157221bc81b03d9a6d25bfa570e2","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","src/condvar.rs":"510f96e94b56f0cb0d200a8e94b0487d0799e13b9e126b9b416f22d8dc11b643","src/deadlock.rs":"7d3ebb5b4f63658435df277bb983e352e4bc651a92c4fd48ae68bf103e452d0d","src/elision.rs":"9aceb0b27fd3cdaf4ef76bda63435a96ec2fdef24be098b9e4edbc39db000765","src/fair_mutex.rs":"d0a032e8207919da04b85f1422dfb14aa2af7aad78843c708d2fe3e0478e401a","src/lib.rs":"3d89619878f3c8b6190261321a4e430e0514c97b65e8c911c0764ea57c0605f2","src/mutex.rs":"afc25db5c45da63c743029ee3cb22e262ea7a32b533245b441c0a5835f9f525f","src/once.rs":"a1c38a5d87077e3d112d57e065ee126a24ab19f04fba9cb1f2cb43bc82caf33c","src/raw_fair_mutex.rs":"a7415aa6cbc040a2f886d06dd6c0c0b3be9963936a31f60f1494e718c9d18acb","src/raw_mutex.rs":"f3507478c34b49bd725dfaed6bf4847fc3aec28700960a7823af9e15b06b5e24","src/raw_rwlock.rs":"f8ce7c4f92299cf64cb6e7b69cd46d9ddefd1211535729b6455e82f7c4eb3eae","src/remutex.rs":"7a0de55161cd57497bb52d3aecca69a89eff2e71cdb2d762df53579e0607b489","src/rwlock.rs":"1a782ab4fafc0c542d1c42151b98475829c96da168d2d0e8947181b7f2d7cb07","src/util.rs":"37a2c8b5c9254df83e8f3a5cd831558c1045061a76c2571bdc4d78eb86e467f2","tests/issue_203.rs":"5fbdf6ec63f391d86457df949678c203a1e81e8aa32d4e10037fa76e768702c0"},"package":"d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"}

34
third_party/rust/parking_lot/CHANGELOG.md поставляемый
Просмотреть файл

@ -1,8 +1,36 @@
## parking_lot 0.10.2 (2020-04-10)
- Update minimum version of `lock_api`.
## parking_lot 0.10.1, parking_lot_core 0.7.1, lock_api 0.3.4 (2020-04-10)
- Add methods to construct `Mutex`, `RwLock`, etc in a `const` context. (#217)
- Add `FairMutex` which always uses fair unlocking. (#204)
- Fixed panic with deadlock detection on macOS. (#203)
- Fixed incorrect synchronization in `create_hashtable`. (#210)
- Use `llvm_asm!` instead of the deprecated `asm!`. (#223)
## lock_api 0.3.3 (2020-01-04)
- Deprecate unsound `MappedRwLockWriteGuard::downgrade` (#198)
## parking_lot 0.10.0, parking_lot_core 0.7.0, lock_api 0.3.2 (2019-11-25)
- Upgrade smallvec dependency to 1.0 in parking_lot_core.
- Replace all usage of `mem::uninitialized` with `mem::MaybeUninit`.
- The minimum required Rust version is bumped to 1.36. Because of the above two changes.
- Make methods on `WaitTimeoutResult` and `OnceState` take `self` by value instead of reference.
## parking_lot_core 0.6.2 (2019-07-22)
- Fixed compile error on Windows with old cfg_if version. (#164)
## parking_lot_core 0.6.1 (2019-07-17)
- Fixed Android build. (#163)
## parking_lot 0.9.0, parking_lot_core 0.6.0, lock_api 0.3.1 (2019-07-14)
- The minimum supported rust version (MSRV) is now 1.32. This was primarily
increased for testing with the latest _rand_ crate. Rust 1.31 may continue to
work for normal use of these releases.
- Re-export lock_api (0.3.1) from parking_lot (#150)
- Removed (non-dev) dependency on rand crate for fairness mechanism, by
including a simple xorshift PRNG in core (#144)

11
third_party/rust/parking_lot/Cargo.toml поставляемый
Просмотреть файл

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "parking_lot"
version = "0.9.0"
version = "0.10.2"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "More compact and efficient implementations of the standard synchronization primitives."
readme = "README.md"
@ -22,20 +22,15 @@ categories = ["concurrency"]
license = "Apache-2.0/MIT"
repository = "https://github.com/Amanieu/parking_lot"
[dependencies.lock_api]
version = "0.3.1"
version = "0.3.4"
[dependencies.parking_lot_core]
version = "0.6"
version = "0.7.1"
[dev-dependencies.bincode]
version = "1.1.3"
[dev-dependencies.lazy_static]
version = "1.0"
[dev-dependencies.rand]
version = "0.7"
[build-dependencies.rustc_version]
version = "0.2"
[features]
deadlock_detection = ["parking_lot_core/deadlock_detection"]

18
third_party/rust/parking_lot/README.md поставляемый
Просмотреть файл

@ -87,9 +87,9 @@ lock.
There are a few restrictions when using this library on stable Rust:
- `Mutex` and `Once` will use 1 word of space instead of 1 byte.
- You will have to use `lazy_static!` to statically initialize `Mutex`,
`Condvar` and `RwLock` types instead of `const fn`.
- You will have to use the `const_*` functions (e.g. `const_mutex(val)`) to
statically initialize the locking primitives. Using e.g. `Mutex::new(val)`
does not work on stable Rust yet.
- `RwLock` will not be able to take advantage of hardware lock elision for
readers, which improves performance when there are multiple readers.
@ -102,20 +102,14 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
parking_lot = "0.9"
```
and this to your crate root:
```rust
extern crate parking_lot;
parking_lot = "0.10"
```
To enable nightly-only features, add this to your `Cargo.toml` instead:
```toml
[dependencies]
parking_lot = {version = "0.9", features = ["nightly"]}
parking_lot = { version = "0.10", features = ["nightly"] }
```
The experimental deadlock detector can be enabled with the
@ -127,7 +121,7 @@ changes to the core API do not cause breaking changes for users of `parking_lot`
## Minimum Rust version
The current minimum required Rust version is 1.32. Any change to this is
The current minimum required Rust version is 1.36. Any change to this is
considered a breaking change and will require a major version bump.
## License

8
third_party/rust/parking_lot/appveyor.yml поставляемый
Просмотреть файл

@ -25,19 +25,19 @@ environment:
- TARGET: x86_64-pc-windows-msvc
MSYSTEM: MINGW64
CPU: x86_64
TOOLCHAIN: 1.32.0
TOOLCHAIN: 1.36.0
- TARGET: i686-pc-windows-msvc
MSYSTEM: MINGW32
CPU: i686
TOOLCHAIN: 1.32.0
TOOLCHAIN: 1.36.0
- TARGET: x86_64-pc-windows-gnu
MSYSTEM: MINGW64
CPU: x86_64
TOOLCHAIN: 1.32.0
TOOLCHAIN: 1.36.0
- TARGET: i686-pc-windows-gnu
MSYSTEM: MINGW32
CPU: i686
TOOLCHAIN: 1.32.0
TOOLCHAIN: 1.36.0
install:
- set PATH=C:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%

3
third_party/rust/parking_lot/bors.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,3 @@
status = [
"continuous-integration/travis-ci/push",
]

8
third_party/rust/parking_lot/build.rs поставляемый
Просмотреть файл

@ -1,8 +0,0 @@
use rustc_version::{version, Version};
fn main() {
if version().unwrap() >= Version::parse("1.34.0").unwrap() {
println!("cargo:rustc-cfg=has_sized_atomics");
println!("cargo:rustc-cfg=has_checked_instant");
}
}

385
third_party/rust/parking_lot/src/condvar.rs поставляемый
Просмотреть файл

@ -12,7 +12,7 @@ use core::{
fmt, ptr,
sync::atomic::{AtomicPtr, Ordering},
};
use lock_api::RawMutex as RawMutexTrait;
use lock_api::RawMutex as RawMutex_;
use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
use std::time::{Duration, Instant};
@ -24,7 +24,7 @@ pub struct WaitTimeoutResult(bool);
impl WaitTimeoutResult {
/// Returns whether the wait was known to have timed out.
#[inline]
pub fn timed_out(&self) -> bool {
pub fn timed_out(self) -> bool {
self.0
}
}
@ -78,9 +78,13 @@ impl WaitTimeoutResult {
/// // wait for the thread to start up
/// let &(ref lock, ref cvar) = &*pair;
/// let mut started = lock.lock();
/// while !*started {
/// if !*started {
/// cvar.wait(&mut started);
/// }
/// // Note that we used an if instead of a while loop above. This is only
/// // possible because parking_lot's Condvar will never spuriously wake up.
/// // This means that wait() will only return after notify_one or notify_all is
/// // called.
/// ```
pub struct Condvar {
state: AtomicPtr<RawMutex>,
@ -91,7 +95,9 @@ impl Condvar {
/// notified.
#[inline]
pub const fn new() -> Condvar {
Condvar { state: AtomicPtr::new(ptr::null_mut()) }
Condvar {
state: AtomicPtr::new(ptr::null_mut()),
}
}
/// Wakes up one blocked thread on this condvar.
@ -282,7 +288,10 @@ impl Condvar {
mutex_guard: &mut MutexGuard<'_, T>,
timeout: Instant,
) -> WaitTimeoutResult {
self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, Some(timeout))
self.wait_until_internal(
unsafe { MutexGuard::mutex(mutex_guard).raw() },
Some(timeout),
)
}
// This is a non-generic function to reduce the monomorphization cost of
@ -573,8 +582,10 @@ mod tests {
let _g = m2.lock();
c2.notify_one();
});
let timeout_res =
c.wait_until(&mut g, Instant::now() + Duration::from_millis(u32::max_value() as u64));
let timeout_res = c.wait_until(
&mut g,
Instant::now() + Duration::from_millis(u32::max_value() as u64),
);
assert!(!timeout_res.timed_out());
drop(g);
}
@ -607,7 +618,7 @@ mod tests {
rx.recv().unwrap();
let _g = m.lock();
let _guard = PanicGuard(&*c);
let _ = c.wait(&mut m3.lock());
c.wait(&mut m3.lock());
}
#[test]
@ -681,3 +692,361 @@ mod tests {
}
}
}
/// This module contains an integration test that is heavily inspired from WebKit's own integration
/// tests for its own Condvar.
#[cfg(test)]
mod webkit_queue_test {
    use crate::{Condvar, Mutex, MutexGuard};
    use std::{collections::VecDeque, sync::Arc, thread, time::Duration};

    /// How a waiter blocks on its condition variable.
    #[derive(Clone, Copy)]
    enum Timeout {
        /// Wait with a bound; the caller's `wait` loop retries until the
        /// predicate holds, so a timed-out wait is simply re-attempted.
        Bounded(Duration),
        /// Block until notified.
        Forever,
    }

    /// Which notification primitive the test exercises.
    #[derive(Clone, Copy)]
    enum NotifyStyle {
        One,
        All,
    }

    /// Shared producer/consumer queue guarded by a `Mutex` in the tests below.
    struct Queue {
        items: VecDeque<usize>,
        // Set to false by the driver once all producers have finished, so
        // consumers drain the queue and exit.
        should_continue: bool,
    }

    impl Queue {
        fn new() -> Self {
            Self {
                items: VecDeque::new(),
                should_continue: true,
            }
        }
    }

    /// Block on `condition` until `predicate` holds, re-checking after every
    /// wakeup (handles spurious wakeups and bounded-timeout expiry alike).
    fn wait<T: ?Sized>(
        condition: &Condvar,
        lock: &mut MutexGuard<'_, T>,
        predicate: impl Fn(&mut MutexGuard<'_, T>) -> bool,
        timeout: &Timeout,
    ) {
        while !predicate(lock) {
            match timeout {
                Timeout::Forever => condition.wait(lock),
                Timeout::Bounded(bound) => {
                    condition.wait_for(lock, *bound);
                }
            }
        }
    }

    /// Notify waiters. With `NotifyStyle::All` the notification is elided
    /// unless `should_notify` says a state transition actually happened
    /// (empty->non-empty or full->non-full); `notify_one` is always issued.
    fn notify(style: NotifyStyle, condition: &Condvar, should_notify: bool) {
        match style {
            NotifyStyle::One => {
                condition.notify_one();
            }
            NotifyStyle::All => {
                if should_notify {
                    condition.notify_all();
                }
            }
        }
    }

    /// Drive `num_producers` x `num_consumers` threads through a bounded
    /// queue and verify that every message from every producer is consumed
    /// exactly once.
    fn run_queue_test(
        num_producers: usize,
        num_consumers: usize,
        max_queue_size: usize,
        messages_per_producer: usize,
        notify_style: NotifyStyle,
        timeout: Timeout,
        delay: Duration,
    ) {
        let input_queue = Arc::new(Mutex::new(Queue::new()));
        // Signalled when the queue becomes non-empty (wakes consumers).
        let empty_condition = Arc::new(Condvar::new());
        // Signalled when the queue becomes non-full (wakes producers).
        let full_condition = Arc::new(Condvar::new());
        let output_vec = Arc::new(Mutex::new(vec![]));
        let consumers = (0..num_consumers)
            .map(|_| {
                consumer_thread(
                    input_queue.clone(),
                    empty_condition.clone(),
                    full_condition.clone(),
                    timeout,
                    notify_style,
                    output_vec.clone(),
                    max_queue_size,
                )
            })
            .collect::<Vec<_>>();
        let producers = (0..num_producers)
            .map(|_| {
                producer_thread(
                    messages_per_producer,
                    input_queue.clone(),
                    empty_condition.clone(),
                    full_condition.clone(),
                    timeout,
                    notify_style,
                    max_queue_size,
                )
            })
            .collect::<Vec<_>>();
        thread::sleep(delay);
        for producer in producers.into_iter() {
            producer.join().expect("Producer thread panicked");
        }
        // All producers are done: tell the consumers to drain and exit.
        {
            let mut input_queue = input_queue.lock();
            input_queue.should_continue = false;
        }
        empty_condition.notify_all();
        for consumer in consumers.into_iter() {
            consumer.join().expect("Consumer thread panicked");
        }
        // Every producer sends 0..messages_per_producer, so after sorting,
        // each message index must appear exactly `num_producers` times.
        let mut output_vec = output_vec.lock();
        assert_eq!(output_vec.len(), num_producers * messages_per_producer);
        output_vec.sort();
        for msg_idx in 0..messages_per_producer {
            for producer_idx in 0..num_producers {
                assert_eq!(msg_idx, output_vec[msg_idx * num_producers + producer_idx]);
            }
        }
    }

    /// Spawn a consumer: pop items until the queue is empty AND the driver
    /// has cleared `should_continue`; record consumed items in `output_queue`.
    fn consumer_thread(
        input_queue: Arc<Mutex<Queue>>,
        empty_condition: Arc<Condvar>,
        full_condition: Arc<Condvar>,
        timeout: Timeout,
        notify_style: NotifyStyle,
        output_queue: Arc<Mutex<Vec<usize>>>,
        max_queue_size: usize,
    ) -> thread::JoinHandle<()> {
        thread::spawn(move || loop {
            let (should_notify, result) = {
                let mut queue = input_queue.lock();
                wait(
                    &*empty_condition,
                    &mut queue,
                    |state| -> bool { !state.items.is_empty() || !state.should_continue },
                    &timeout,
                );
                if queue.items.is_empty() && !queue.should_continue {
                    return;
                }
                // Popping from a full queue makes room: producers may need waking.
                let should_notify = queue.items.len() == max_queue_size;
                let result = queue.items.pop_front();
                // Release the queue lock before notifying/pushing output.
                std::mem::drop(queue);
                (should_notify, result)
            };
            notify(notify_style, &*full_condition, should_notify);
            if let Some(result) = result {
                output_queue.lock().push(result);
            }
        })
    }

    /// Spawn a producer: push 0..num_messages into the bounded queue,
    /// waiting while the queue is full.
    fn producer_thread(
        num_messages: usize,
        queue: Arc<Mutex<Queue>>,
        empty_condition: Arc<Condvar>,
        full_condition: Arc<Condvar>,
        timeout: Timeout,
        notify_style: NotifyStyle,
        max_queue_size: usize,
    ) -> thread::JoinHandle<()> {
        thread::spawn(move || {
            for message in 0..num_messages {
                let should_notify = {
                    let mut queue = queue.lock();
                    wait(
                        &*full_condition,
                        &mut queue,
                        |state| state.items.len() < max_queue_size,
                        &timeout,
                    );
                    // Pushing onto an empty queue: consumers may need waking.
                    let should_notify = queue.items.is_empty();
                    queue.items.push_back(message);
                    std::mem::drop(queue);
                    should_notify
                };
                notify(notify_style, &*empty_condition, should_notify);
            }
        })
    }

    /// Expands each entry into a `#[test]` fn that runs `run_queue_test`
    /// with the given parameters.
    macro_rules! run_queue_tests {
        ( $( $name:ident(
            num_producers: $num_producers:expr,
            num_consumers: $num_consumers:expr,
            max_queue_size: $max_queue_size:expr,
            messages_per_producer: $messages_per_producer:expr,
            notification_style: $notification_style:expr,
            timeout: $timeout:expr,
            delay_seconds: $delay_seconds:expr);
        )* ) => {
            $(#[test]
            fn $name() {
                let delay = Duration::from_secs($delay_seconds);
                run_queue_test(
                    $num_producers,
                    $num_consumers,
                    $max_queue_size,
                    $messages_per_producer,
                    $notification_style,
                    $timeout,
                    delay,
                );
            })*
        };
    }

    // NOTE(review): `sanity_check_queue` uses a Bounded timeout while
    // `sanity_check_queue_timeout` uses Forever — the names look swapped,
    // but this matches the upstream parking_lot sources; confirm upstream
    // before renaming.
    run_queue_tests! {
        sanity_check_queue(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Bounded(Duration::from_secs(1)),
            delay_seconds: 0
        );
        sanity_check_queue_timeout(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        new_test_without_timeout_5(
            num_producers: 1,
            num_consumers: 5,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_one_consumer_one_slot(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_one_consumer_one_slot_timeout(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 1
        );
        one_producer_one_consumer_hundred_slots(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 100,
            messages_per_producer: 1_000_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_one_consumer_one_slot(
            num_producers: 10,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_one_consumer_hundred_slots_notify_all(
            num_producers: 10,
            num_consumers: 1,
            max_queue_size: 100,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_one_consumer_hundred_slots_notify_one(
            num_producers: 10,
            num_consumers: 1,
            max_queue_size: 100,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::One,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_ten_consumers_one_slot(
            num_producers: 1,
            num_consumers: 10,
            max_queue_size: 1,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_ten_consumers_hundred_slots_notify_all(
            num_producers: 1,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_ten_consumers_hundred_slots_notify_one(
            num_producers: 1,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::One,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_ten_consumers_one_slot(
            num_producers: 10,
            num_consumers: 10,
            max_queue_size: 1,
            messages_per_producer: 50000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_ten_consumers_hundred_slots_notify_all(
            num_producers: 10,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 50000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_ten_consumers_hundred_slots_notify_one(
            num_producers: 10,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 50000,
            notification_style: NotifyStyle::One,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
    }
}

Просмотреть файл

@ -46,9 +46,7 @@ mod tests {
use std::time::Duration;
// We need to serialize these tests since deadlock detection uses global state
lazy_static::lazy_static! {
static ref DEADLOCK_DETECTION_LOCK: Mutex<()> = Mutex::new(());
}
static DEADLOCK_DETECTION_LOCK: Mutex<()> = crate::const_mutex(());
fn check_deadlock() -> bool {
use parking_lot_core::deadlock::check_deadlock;

25
third_party/rust/parking_lot/src/elision.rs поставляемый
Просмотреть файл

@ -25,7 +25,10 @@ pub trait AtomicElisionExt {
// Indicates whether the target architecture supports lock elision
#[inline]
pub fn have_elision() -> bool {
cfg!(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"),))
cfg!(all(
feature = "nightly",
any(target_arch = "x86", target_arch = "x86_64"),
))
}
// This implementation is never actually called because it is guarded by
@ -54,12 +57,16 @@ impl AtomicElisionExt for AtomicUsize {
fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xacquire; lock; cmpxchgl $2, $1"
llvm_asm!("xacquire; lock; cmpxchgl $2, $1"
: "={eax}" (prev), "+*m" (self)
: "r" (new), "{eax}" (current)
: "memory"
: "volatile");
if prev == current { Ok(prev) } else { Err(prev) }
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
#[cfg(target_pointer_width = "64")]
@ -67,12 +74,16 @@ impl AtomicElisionExt for AtomicUsize {
fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xacquire; lock; cmpxchgq $2, $1"
llvm_asm!("xacquire; lock; cmpxchgq $2, $1"
: "={rax}" (prev), "+*m" (self)
: "r" (new), "{rax}" (current)
: "memory"
: "volatile");
if prev == current { Ok(prev) } else { Err(prev) }
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
@ -81,7 +92,7 @@ impl AtomicElisionExt for AtomicUsize {
fn elision_fetch_sub_release(&self, val: usize) -> usize {
unsafe {
let prev: usize;
asm!("xrelease; lock; xaddl $2, $1"
llvm_asm!("xrelease; lock; xaddl $2, $1"
: "=r" (prev), "+*m" (self)
: "0" (val.wrapping_neg())
: "memory"
@ -94,7 +105,7 @@ impl AtomicElisionExt for AtomicUsize {
fn elision_fetch_sub_release(&self, val: usize) -> usize {
unsafe {
let prev: usize;
asm!("xrelease; lock; xaddq $2, $1"
llvm_asm!("xrelease; lock; xaddq $2, $1"
: "=r" (prev), "+*m" (self)
: "0" (val.wrapping_neg())
: "memory"

278
third_party/rust/parking_lot/src/fair_mutex.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,278 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_fair_mutex::RawFairMutex;
use lock_api;
/// A mutual exclusive primitive that is always fair, useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can also be statically initialized or created via a `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// The regular mutex provided by `parking_lot` uses eventual locking fairness
/// (after some time it will default to the fair algorithm), but eventual
/// fairness does not provide the same guarantees an always-fair method would.
/// Fair mutexes are generally slower, but sometimes needed. This wrapper was
/// created to avoid accidentally using an unfair protocol when it is forbidden.
///
/// In a fair mutex the lock is granted to whichever thread asked first:
/// waiters form a queue and the lock is handed out in first-in first-out
/// order. This means no thread can "steal" the lock ahead of the queue to
/// boost throughput; fairness trades some throughput for bounded latency,
/// since no waiter can be starved by faster but later arrivals.
///
/// A fair mutex may not be interesting if threads have different priorities (this is known as
/// priority inversion).
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `FairMutex` due to platform limitations.
/// - Can be statically constructed (requires the `const_fn` nightly feature).
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
///
/// # Examples
///
/// ```
/// use parking_lot::FairMutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(FairMutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;
/// Creates a new fair mutex in an unlocked state ready for use.
///
/// This allows creating a fair mutex in a constant context on stable Rust.
pub const fn const_fair_mutex<T>(val: T) -> FairMutex<T> {
    // Fully-qualified syntax: `INIT` is the associated const of the
    // `lock_api::RawMutex` trait implemented for `RawFairMutex`.
    FairMutex::const_new(<RawFairMutex as lock_api::RawMutex>::INIT, val)
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;

/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread while the guard points into it.
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;
#[cfg(test)]
mod tests {
    use crate::FairMutex;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    // Non-Copy payload so tests exercise move semantics of the mutex.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Locking and re-locking after release must both succeed.
    #[test]
    fn smoke() {
        let m = FairMutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    // Many threads hammering one counter: the final value must show no
    // lost updates.
    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(FairMutex::new(0));

        fn inc(m: &FairMutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        // Two spawned threads per loop iteration above.
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = FairMutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = FairMutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // `into_inner` must hand back the value without dropping it early.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = FairMutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = FairMutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = Arc::new(FairMutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    // A panicking thread must release the lock (no poisoning), and locks
    // taken during unwinding must work.
    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<FairMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    // The mutex works for unsized (slice) payloads.
    #[test]
    fn test_mutex_unsized() {
        let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    // Compile-time check that the guard is Sync.
    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = FairMutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = FairMutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    // Round-trip through serde must preserve the protected value.
    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = FairMutex::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: FairMutex<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
        assert_eq!(contents, *(deserialized.lock()));
    }
}

15
third_party/rust/parking_lot/src/lib.rs поставляемый
Просмотреть файл

@ -11,12 +11,14 @@
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![cfg_attr(feature = "nightly", feature(asm))]
#![cfg_attr(feature = "nightly", feature(llvm_asm))]
mod condvar;
mod elision;
mod fair_mutex;
mod mutex;
mod once;
mod raw_fair_mutex;
mod raw_mutex;
mod raw_rwlock;
mod remutex;
@ -28,16 +30,19 @@ pub mod deadlock;
#[cfg(not(feature = "deadlock_detection"))]
mod deadlock;
pub use ::lock_api as lock_api;
pub use self::condvar::{Condvar, WaitTimeoutResult};
pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard};
pub use self::fair_mutex::{const_fair_mutex, FairMutex, FairMutexGuard, MappedFairMutexGuard};
pub use self::mutex::{const_mutex, MappedMutexGuard, Mutex, MutexGuard};
pub use self::once::{Once, OnceState};
pub use self::raw_fair_mutex::RawFairMutex;
pub use self::raw_mutex::RawMutex;
pub use self::raw_rwlock::RawRwLock;
pub use self::remutex::{
MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard,
const_reentrant_mutex, MappedReentrantMutexGuard, RawThreadId, ReentrantMutex,
ReentrantMutexGuard,
};
pub use self::rwlock::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
const_rwlock, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
RwLockUpgradableReadGuard, RwLockWriteGuard,
};
pub use ::lock_api;

12
third_party/rust/parking_lot/src/mutex.rs поставляемый
Просмотреть файл

@ -53,10 +53,9 @@ use lock_api;
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use parking_lot::Mutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
/// use std::sync::mpsc::channel;
///
/// const N: usize = 10;
///
@ -87,6 +86,13 @@ use lock_api;
/// ```
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
/// Creates a new mutex in an unlocked state ready for use.
///
/// This allows creating a mutex in a constant context on stable Rust.
pub const fn const_mutex<T>(val: T) -> Mutex<T> {
Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, val)
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
@ -245,7 +251,7 @@ mod tests {
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(Mutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<Mutex<i32>>,
}

41
third_party/rust/parking_lot/src/once.rs поставляемый
Просмотреть файл

@ -6,25 +6,16 @@
// copied, modified, or distributed except according to those terms.
use crate::util::UncheckedOptionExt;
#[cfg(has_sized_atomics)]
use core::sync::atomic::AtomicU8;
#[cfg(not(has_sized_atomics))]
use core::sync::atomic::AtomicUsize as AtomicU8;
use core::{
fmt, mem,
sync::atomic::{fence, Ordering},
sync::atomic::{fence, AtomicU8, Ordering},
};
use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
#[cfg(has_sized_atomics)]
type U8 = u8;
#[cfg(not(has_sized_atomics))]
type U8 = usize;
const DONE_BIT: U8 = 1;
const POISON_BIT: U8 = 2;
const LOCKED_BIT: U8 = 4;
const PARKED_BIT: U8 = 8;
const DONE_BIT: u8 = 1;
const POISON_BIT: u8 = 2;
const LOCKED_BIT: u8 = 4;
const PARKED_BIT: u8 = 8;
/// Current state of a `Once`.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
@ -48,8 +39,8 @@ impl OnceState {
/// Once an initialization routine for a `Once` has panicked it will forever
/// indicate to future forced initialization routines that it is poisoned.
#[inline]
pub fn poisoned(&self) -> bool {
match *self {
pub fn poisoned(self) -> bool {
match self {
OnceState::Poisoned => true,
_ => false,
}
@ -58,8 +49,8 @@ impl OnceState {
/// Returns whether the associated `Once` has successfully executed a
/// closure.
#[inline]
pub fn done(&self) -> bool {
match *self {
pub fn done(self) -> bool {
match self {
OnceState::Done => true,
_ => false,
}
@ -194,7 +185,9 @@ impl Once {
}
let mut f = Some(f);
self.call_once_slow(true, &mut |state| unsafe { f.take().unchecked_unwrap()(state) });
self.call_once_slow(true, &mut |state| unsafe {
f.take().unchecked_unwrap()(state)
});
}
// This is a non-generic function to reduce the monomorphization cost of
@ -303,7 +296,11 @@ impl Once {
// At this point we have the lock, so run the closure. Make sure we
// properly clean up if the closure panicks.
let guard = PanicGuard(self);
let once_state = if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New };
let once_state = if state & POISON_BIT != 0 {
OnceState::Poisoned
} else {
OnceState::New
};
f(once_state);
mem::forget(guard);
@ -327,7 +324,9 @@ impl Default for Once {
impl fmt::Debug for Once {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Once").field("state", &self.state()).finish()
f.debug_struct("Once")
.field("state", &self.state())
.finish()
}
}

60
third_party/rust/parking_lot/src/raw_fair_mutex.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,60 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_mutex::RawMutex;
use lock_api::RawMutexFair;
/// Raw fair mutex type backed by the parking lot.
///
/// A thin newtype over `RawMutex` whose only behavioral difference is that
/// plain `unlock` always takes the fair unlock path.
pub struct RawFairMutex(RawMutex);

unsafe impl lock_api::RawMutex for RawFairMutex {
    const INIT: Self = RawFairMutex(<RawMutex as lock_api::RawMutex>::INIT);

    type GuardMarker = <RawMutex as lock_api::RawMutex>::GuardMarker;

    #[inline]
    fn lock(&self) {
        self.0.lock()
    }

    #[inline]
    fn try_lock(&self) -> bool {
        self.0.try_lock()
    }

    #[inline]
    fn unlock(&self) {
        // The whole point of this type: a regular unlock is forwarded to the
        // fair unlock of the underlying mutex (FIFO handoff to waiters).
        self.unlock_fair()
    }
}

unsafe impl lock_api::RawMutexFair for RawFairMutex {
    #[inline]
    fn unlock_fair(&self) {
        self.0.unlock_fair()
    }

    #[inline]
    fn bump(&self) {
        self.0.bump()
    }
}

unsafe impl lock_api::RawMutexTimed for RawFairMutex {
    // Reuse the underlying mutex's time types (Duration/Instant).
    type Duration = <RawMutex as lock_api::RawMutexTimed>::Duration;
    type Instant = <RawMutex as lock_api::RawMutexTimed>::Instant;

    #[inline]
    fn try_lock_until(&self, timeout: Self::Instant) -> bool {
        self.0.try_lock_until(timeout)
    }

    #[inline]
    fn try_lock_for(&self, timeout: Self::Duration) -> bool {
        self.0.try_lock_for(timeout)
    }
}

82
third_party/rust/parking_lot/src/raw_mutex.rs поставляемый
Просмотреть файл

@ -6,20 +6,14 @@
// copied, modified, or distributed except according to those terms.
use crate::{deadlock, util};
#[cfg(has_sized_atomics)]
use core::sync::atomic::AtomicU8;
#[cfg(not(has_sized_atomics))]
use core::sync::atomic::AtomicUsize as AtomicU8;
use core::{sync::atomic::Ordering, time::Duration};
use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
use core::{
sync::atomic::{AtomicU8, Ordering},
time::Duration,
};
use lock_api::{GuardNoSend, RawMutex as RawMutex_};
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
use std::time::Instant;
#[cfg(has_sized_atomics)]
type U8 = u8;
#[cfg(not(has_sized_atomics))]
type U8 = usize;
// UnparkToken used to indicate that that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
@ -28,16 +22,43 @@ pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
// thread directly without unlocking it.
pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
const LOCKED_BIT: U8 = 1;
const PARKED_BIT: U8 = 2;
/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread.
const LOCKED_BIT: u8 = 0b01;
/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being
/// parked if it wants to lock the mutex, but it is currently being held by some other thread.
const PARKED_BIT: u8 = 0b10;
/// Raw mutex type backed by the parking lot.
pub struct RawMutex {
/// This atomic integer holds the current state of the mutex instance. Only the two lowest bits
/// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits.
///
/// # State table:
///
/// PARKED_BIT | LOCKED_BIT | Description
/// 0 | 0 | The mutex is not locked, nor is anyone waiting for it.
/// -----------+------------+------------------------------------------------------------------
/// 0 | 1 | The mutex is locked by exactly one thread. No other thread is
/// | | waiting for it.
/// -----------+------------+------------------------------------------------------------------
/// 1 | 0 | The mutex is not locked. One or more thread is parked or about to
/// | | park. At least one of the parked threads are just about to be
/// | | unparked, or a thread heading for parking might abort the park.
/// -----------+------------+------------------------------------------------------------------
/// 1 | 1 | The mutex is locked by exactly one thread. One or more thread is
/// | | parked or about to park, waiting for the lock to become available.
/// | | In this state, PARKED_BIT is only ever cleared when a bucket lock
/// | | is held (i.e. in a parking_lot_core callback). This ensures that
/// | | we never end up in a situation where there are parked threads but
/// | | PARKED_BIT is not set (which would result in those threads
/// | | potentially never getting woken up).
state: AtomicU8,
}
unsafe impl RawMutexTrait for RawMutex {
const INIT: RawMutex = RawMutex { state: AtomicU8::new(0) };
unsafe impl lock_api::RawMutex for RawMutex {
const INIT: RawMutex = RawMutex {
state: AtomicU8::new(0),
};
type GuardMarker = GuardNoSend;
@ -78,7 +99,10 @@ unsafe impl RawMutexTrait for RawMutex {
#[inline]
fn unlock(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
if self.state.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok()
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
@ -86,11 +110,14 @@ unsafe impl RawMutexTrait for RawMutex {
}
}
unsafe impl RawMutexFair for RawMutex {
unsafe impl lock_api::RawMutexFair for RawMutex {
#[inline]
fn unlock_fair(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
if self.state.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok()
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
@ -105,7 +132,7 @@ unsafe impl RawMutexFair for RawMutex {
}
}
unsafe impl RawMutexTimed for RawMutex {
unsafe impl lock_api::RawMutexTimed for RawMutex {
type Duration = Duration;
type Instant = Instant;
@ -212,7 +239,6 @@ impl RawMutex {
}
// Park our thread until we are woken up by an unlock
unsafe {
let addr = self as *const _ as usize;
let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
let before_sleep = || {};
@ -222,14 +248,20 @@ impl RawMutex {
self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
}
};
match parking_lot_core::park(
// SAFETY:
// * `addr` is an address we control.
// * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
// * `before_sleep` does not call `park`, nor does it panic.
match unsafe {
parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
timeout,
) {
)
} {
// The thread that unparked us passed the lock on to us
// directly without unlocking it.
ParkResult::Unparked(TOKEN_HANDOFF) => return true,
@ -243,7 +275,6 @@ impl RawMutex {
// Timeout expired
ParkResult::TimedOut => return false,
}
}
// Loop back and try locking again
spinwait.reset();
@ -255,7 +286,6 @@ impl RawMutex {
fn unlock_slow(&self, force_fair: bool) {
// Unpark one thread and leave the parked bit set if there might
// still be parked threads on this address.
unsafe {
let addr = self as *const _ as usize;
let callback = |result: UnparkResult| {
// If we are using a fair unlock then we should keep the
@ -278,6 +308,10 @@ impl RawMutex {
}
TOKEN_NORMAL
};
// SAFETY:
// * `addr` is an address we control.
// * `callback` does not panic or call into any function of `parking_lot`.
unsafe {
parking_lot_core::unpark_one(addr, callback);
}
}

252
third_party/rust/parking_lot/src/raw_rwlock.rs поставляемый
Просмотреть файл

@ -12,11 +12,7 @@ use core::{
cell::Cell,
sync::atomic::{AtomicUsize, Ordering},
};
use lock_api::{
GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair,
RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade,
RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed,
};
use lock_api::{GuardNoSend, RawRwLock as RawRwLock_, RawRwLockUpgrade};
use parking_lot_core::{
self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken,
};
@ -60,8 +56,10 @@ pub struct RawRwLock {
state: AtomicUsize,
}
unsafe impl RawRwLockTrait for RawRwLock {
const INIT: RawRwLock = RawRwLock { state: AtomicUsize::new(0) };
unsafe impl lock_api::RawRwLock for RawRwLock {
const INIT: RawRwLock = RawRwLock {
state: AtomicUsize::new(0),
};
type GuardMarker = GuardNoSend;
@ -80,7 +78,10 @@ unsafe impl RawRwLockTrait for RawRwLock {
#[inline]
fn try_lock_exclusive(&self) -> bool {
if self.state.compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed).is_ok()
if self
.state
.compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
self.deadlock_acquire();
true
@ -92,7 +93,10 @@ unsafe impl RawRwLockTrait for RawRwLock {
#[inline]
fn unlock_exclusive(&self) {
self.deadlock_release();
if self.state.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok()
if self
.state
.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
@ -110,8 +114,11 @@ unsafe impl RawRwLockTrait for RawRwLock {
#[inline]
fn try_lock_shared(&self) -> bool {
let result =
if self.try_lock_shared_fast(false) { true } else { self.try_lock_shared_slow(false) };
let result = if self.try_lock_shared_fast(false) {
true
} else {
self.try_lock_shared_slow(false)
};
if result {
self.deadlock_acquire();
}
@ -132,7 +139,7 @@ unsafe impl RawRwLockTrait for RawRwLock {
}
}
unsafe impl RawRwLockFair for RawRwLock {
unsafe impl lock_api::RawRwLockFair for RawRwLock {
#[inline]
fn unlock_shared_fair(&self) {
// Shared unlocking is always fair in this implementation.
@ -142,7 +149,10 @@ unsafe impl RawRwLockFair for RawRwLock {
#[inline]
fn unlock_exclusive_fair(&self) {
self.deadlock_release();
if self.state.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok()
if self
.state
.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
@ -166,10 +176,12 @@ unsafe impl RawRwLockFair for RawRwLock {
}
}
unsafe impl RawRwLockDowngrade for RawRwLock {
unsafe impl lock_api::RawRwLockDowngrade for RawRwLock {
#[inline]
fn downgrade(&self) {
let state = self.state.fetch_add(ONE_READER - WRITER_BIT, Ordering::Release);
let state = self
.state
.fetch_add(ONE_READER - WRITER_BIT, Ordering::Release);
// Wake up parked shared and upgradable threads if there are any
if state & PARKED_BIT != 0 {
@ -178,7 +190,7 @@ unsafe impl RawRwLockDowngrade for RawRwLock {
}
}
unsafe impl RawRwLockTimed for RawRwLock {
unsafe impl lock_api::RawRwLockTimed for RawRwLock {
type Duration = Duration;
type Instant = Instant;
@ -243,7 +255,7 @@ unsafe impl RawRwLockTimed for RawRwLock {
}
}
unsafe impl RawRwLockRecursive for RawRwLock {
unsafe impl lock_api::RawRwLockRecursive for RawRwLock {
#[inline]
fn lock_shared_recursive(&self) {
if !self.try_lock_shared_fast(true) {
@ -255,8 +267,11 @@ unsafe impl RawRwLockRecursive for RawRwLock {
#[inline]
fn try_lock_shared_recursive(&self) -> bool {
let result =
if self.try_lock_shared_fast(true) { true } else { self.try_lock_shared_slow(true) };
let result = if self.try_lock_shared_fast(true) {
true
} else {
self.try_lock_shared_slow(true)
};
if result {
self.deadlock_acquire();
}
@ -264,7 +279,7 @@ unsafe impl RawRwLockRecursive for RawRwLock {
}
}
unsafe impl RawRwLockRecursiveTimed for RawRwLock {
unsafe impl lock_api::RawRwLockRecursiveTimed for RawRwLock {
#[inline]
fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
let result = if self.try_lock_shared_fast(true) {
@ -292,7 +307,7 @@ unsafe impl RawRwLockRecursiveTimed for RawRwLock {
}
}
unsafe impl RawRwLockUpgrade for RawRwLock {
unsafe impl lock_api::RawRwLockUpgrade for RawRwLock {
#[inline]
fn lock_upgradable(&self) {
if !self.try_lock_upgradable_fast() {
@ -304,8 +319,11 @@ unsafe impl RawRwLockUpgrade for RawRwLock {
#[inline]
fn try_lock_upgradable(&self) -> bool {
let result =
if self.try_lock_upgradable_fast() { true } else { self.try_lock_upgradable_slow() };
let result = if self.try_lock_upgradable_fast() {
true
} else {
self.try_lock_upgradable_slow()
};
if result {
self.deadlock_acquire();
}
@ -335,8 +353,10 @@ unsafe impl RawRwLockUpgrade for RawRwLock {
#[inline]
fn upgrade(&self) {
let state =
self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed);
let state = self.state.fetch_sub(
(ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
Ordering::Relaxed,
);
if state & READERS_MASK != ONE_READER {
let result = self.upgrade_slow(None);
debug_assert!(result);
@ -362,7 +382,7 @@ unsafe impl RawRwLockUpgrade for RawRwLock {
}
}
unsafe impl RawRwLockUpgradeFair for RawRwLock {
unsafe impl lock_api::RawRwLockUpgradeFair for RawRwLock {
#[inline]
fn unlock_upgradable_fair(&self) {
self.deadlock_release();
@ -392,7 +412,7 @@ unsafe impl RawRwLockUpgradeFair for RawRwLock {
}
}
unsafe impl RawRwLockUpgradeDowngrade for RawRwLock {
unsafe impl lock_api::RawRwLockUpgradeDowngrade for RawRwLock {
#[inline]
fn downgrade_upgradable(&self) {
let state = self.state.fetch_sub(UPGRADABLE_BIT, Ordering::Relaxed);
@ -405,8 +425,10 @@ unsafe impl RawRwLockUpgradeDowngrade for RawRwLock {
#[inline]
fn downgrade_to_upgradable(&self) {
let state =
self.state.fetch_add((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Release);
let state = self.state.fetch_add(
(ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
Ordering::Release,
);
// Wake up parked shared threads if there are any
if state & PARKED_BIT != 0 {
@ -415,7 +437,7 @@ unsafe impl RawRwLockUpgradeDowngrade for RawRwLock {
}
}
unsafe impl RawRwLockUpgradeTimed for RawRwLock {
unsafe impl lock_api::RawRwLockUpgradeTimed for RawRwLock {
#[inline]
fn try_lock_upgradable_until(&self, timeout: Instant) -> bool {
let result = if self.try_lock_upgradable_fast() {
@ -444,15 +466,23 @@ unsafe impl RawRwLockUpgradeTimed for RawRwLock {
#[inline]
fn try_upgrade_until(&self, timeout: Instant) -> bool {
let state =
self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed);
if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(Some(timeout)) }
let state = self.state.fetch_sub(
(ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
Ordering::Relaxed,
);
if state & READERS_MASK == ONE_READER {
true
} else {
self.upgrade_slow(Some(timeout))
}
}
#[inline]
fn try_upgrade_for(&self, timeout: Duration) -> bool {
let state =
self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed);
let state = self.state.fetch_sub(
(ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
Ordering::Relaxed,
);
if state & READERS_MASK == ONE_READER {
true
} else {
@ -481,7 +511,9 @@ impl RawRwLock {
// readers try to acquire the lock. We only do this if the lock is
// completely empty since elision handles conflicts poorly.
if have_elision() && state == 0 {
self.state.elision_compare_exchange_acquire(0, ONE_READER).is_ok()
self.state
.elision_compare_exchange_acquire(0, ONE_READER)
.is_ok()
} else if let Some(new_state) = state.checked_add(ONE_READER) {
self.state
.compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
@ -509,7 +541,9 @@ impl RawRwLock {
} else {
match self.state.compare_exchange_weak(
state,
state.checked_add(ONE_READER).expect("RwLock reader count overflow"),
state
.checked_add(ONE_READER)
.expect("RwLock reader count overflow"),
Ordering::Acquire,
Ordering::Relaxed,
) {
@ -564,11 +598,7 @@ impl RawRwLock {
#[cold]
fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
// Step 1: grab exclusive ownership of WRITER_BIT
let timed_out = !self.lock_common(
timeout,
TOKEN_EXCLUSIVE,
|state| {
let try_lock = |state: &mut usize| {
loop {
if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
return false;
@ -585,8 +615,14 @@ impl RawRwLock {
Err(x) => *state = x,
}
}
},
|state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0,
};
// Step 1: grab exclusive ownership of WRITER_BIT
let timed_out = !self.lock_common(
timeout,
TOKEN_EXCLUSIVE,
try_lock,
WRITER_BIT | UPGRADABLE_BIT,
);
if timed_out {
return false;
@ -618,15 +654,15 @@ impl RawRwLock {
TOKEN_NORMAL
}
};
// SAFETY: `callback` does not panic or call into any function of `parking_lot`.
unsafe {
self.wake_parked_threads(0, callback);
}
}
#[cold]
fn lock_shared_slow(&self, recursive: bool, timeout: Option<Instant>) -> bool {
self.lock_common(
timeout,
TOKEN_SHARED,
|state| {
let try_lock = |state: &mut usize| {
let mut spinwait_shared = SpinWait::new();
loop {
// Use hardware lock elision to avoid cache conflicts when multiple
@ -650,7 +686,9 @@ impl RawRwLock {
.state
.compare_exchange_weak(
*state,
state.checked_add(ONE_READER).expect("RwLock reader count overflow"),
state
.checked_add(ONE_READER)
.expect("RwLock reader count overflow"),
Ordering::Acquire,
Ordering::Relaxed,
)
@ -665,35 +703,33 @@ impl RawRwLock {
spinwait_shared.spin_no_yield();
*state = self.state.load(Ordering::Relaxed);
}
},
|state| state & WRITER_BIT != 0,
)
};
self.lock_common(timeout, TOKEN_SHARED, try_lock, WRITER_BIT)
}
#[cold]
fn unlock_shared_slow(&self) {
// At this point WRITER_PARKED_BIT is set and READER_MASK is empty. We
// just need to wake up a potentially sleeping pending writer.
unsafe {
// Using the 2nd key at addr + 1
let addr = self as *const _ as usize + 1;
let callback = |result: UnparkResult| {
let callback = |_result: UnparkResult| {
// Clear the WRITER_PARKED_BIT here since there can only be one
// parked writer thread.
debug_assert!(!result.have_more_threads);
self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed);
TOKEN_NORMAL
};
// SAFETY:
// * `addr` is an address we control.
// * `callback` does not panic or call into any function of `parking_lot`.
unsafe {
parking_lot_core::unpark_one(addr, callback);
}
}
#[cold]
fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
self.lock_common(
timeout,
TOKEN_UPGRADABLE,
|state| {
let try_lock = |state: &mut usize| {
let mut spinwait_shared = SpinWait::new();
loop {
if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
@ -721,8 +757,12 @@ impl RawRwLock {
spinwait_shared.spin_no_yield();
*state = self.state.load(Ordering::Relaxed);
}
},
|state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0,
};
self.lock_common(
timeout,
TOKEN_UPGRADABLE,
try_lock,
WRITER_BIT | UPGRADABLE_BIT,
)
}
@ -789,8 +829,11 @@ impl RawRwLock {
}
}
};
// SAFETY: `callback` does not panic or call into any function of `parking_lot`.
unsafe {
self.wake_parked_threads(0, callback);
}
}
#[cold]
fn try_upgrade_slow(&self) -> bool {
@ -826,8 +869,11 @@ impl RawRwLock {
}
TOKEN_NORMAL
};
// SAFETY: `callback` does not panic or call into any function of `parking_lot`.
unsafe {
self.wake_parked_threads(ONE_READER, callback);
}
}
#[cold]
fn downgrade_to_upgradable_slow(&self) {
@ -839,8 +885,11 @@ impl RawRwLock {
}
TOKEN_NORMAL
};
// SAFETY: `callback` does not panic or call into any function of `parking_lot`.
unsafe {
self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback);
}
}
#[cold]
fn bump_shared_slow(&self) {
@ -862,18 +911,24 @@ impl RawRwLock {
self.lock_upgradable();
}
// Common code for waking up parked threads after releasing WRITER_BIT or
// UPGRADABLE_BIT.
/// Common code for waking up parked threads after releasing WRITER_BIT or
/// UPGRADABLE_BIT.
///
/// # Safety
///
/// `callback` must uphold the requirements of the `callback` parameter to
/// `parking_lot_core::unpark_filter`. Meaning no panics or calls into any function in
/// `parking_lot`.
#[inline]
fn wake_parked_threads<C>(&self, new_state: usize, callback: C)
where
C: FnOnce(usize, UnparkResult) -> UnparkToken,
{
unsafe fn wake_parked_threads(
&self,
new_state: usize,
callback: impl FnOnce(usize, UnparkResult) -> UnparkToken,
) {
// We must wake up at least one upgrader or writer if there is one,
// otherwise they may end up parked indefinitely since unlock_shared
// does not call wake_parked_threads.
let new_state = Cell::new(new_state);
unsafe {
let addr = self as *const _ as usize;
let filter = |ParkToken(token)| {
let s = new_state.get();
@ -893,10 +948,12 @@ impl RawRwLock {
FilterOp::Unpark
}
};
parking_lot_core::unpark_filter(addr, filter, |result| {
callback(new_state.get(), result)
});
}
let callback = |result| callback(new_state.get(), result);
// SAFETY:
// * `addr` is an address we control.
// * `filter` does not panic or call into any function of `parking_lot`.
// * `callback` safety responsibility is on caller
parking_lot_core::unpark_filter(addr, filter, callback);
}
// Common code for waiting for readers to exit the lock after acquiring
@ -928,7 +985,6 @@ impl RawRwLock {
}
// Park our thread until we are woken up by an unlock
unsafe {
// Using the 2nd key at addr + 1
let addr = self as *const _ as usize + 1;
let validate = || {
@ -937,14 +993,21 @@ impl RawRwLock {
};
let before_sleep = || {};
let timed_out = |_, _| {};
match parking_lot_core::park(
// SAFETY:
// * `addr` is an address we control.
// * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
// * `before_sleep` does not call `park`, nor does it panic.
let park_result = unsafe {
parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
TOKEN_EXCLUSIVE,
timeout,
) {
)
};
match park_result {
// We still need to re-check the state if we are unparked
// since a previous writer timing-out could have allowed
// another reader to sneak in before we parked.
@ -970,29 +1033,27 @@ impl RawRwLock {
}
TOKEN_NORMAL
};
// SAFETY: `callback` does not panic or call any function of `parking_lot`.
unsafe {
self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback);
}
return false;
}
return false;
}
}
}
true
}
// Common code for acquiring a lock
/// Common code for acquiring a lock
#[inline]
fn lock_common<F, V>(
fn lock_common(
&self,
timeout: Option<Instant>,
token: ParkToken,
mut try_lock: F,
validate: V,
) -> bool
where
F: FnMut(&mut usize) -> bool,
V: Fn(usize) -> bool,
{
mut try_lock: impl FnMut(&mut usize) -> bool,
validate_flags: usize,
) -> bool {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
@ -1021,11 +1082,10 @@ impl RawRwLock {
}
// Park our thread until we are woken up by an unlock
unsafe {
let addr = self as *const _ as usize;
let validate = || {
let state = self.state.load(Ordering::Relaxed);
state & PARKED_BIT != 0 && validate(state)
state & PARKED_BIT != 0 && (state & validate_flags != 0)
};
let before_sleep = || {};
let timed_out = |_, was_last_thread| {
@ -1034,14 +1094,15 @@ impl RawRwLock {
self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
}
};
match parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
token,
timeout,
) {
// SAFETY:
// * `addr` is an address we control.
// * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
// * `before_sleep` does not call `park`, nor does it panic.
let park_result = unsafe {
parking_lot_core::park(addr, validate, before_sleep, timed_out, token, timeout)
};
match park_result {
// The thread that unparked us passed the lock on to us
// directly without unlocking it.
ParkResult::Unparked(TOKEN_HANDOFF) => return true,
@ -1055,7 +1116,6 @@ impl RawRwLock {
// Timeout expired
ParkResult::TimedOut => return false,
}
}
// Loop back and try locking again
spinwait.reset();

26
third_party/rust/parking_lot/src/remutex.rs поставляемый
Просмотреть файл

@ -17,8 +17,9 @@ unsafe impl GetThreadId for RawThreadId {
fn nonzero_thread_id(&self) -> NonZeroUsize {
// The address of a thread-local variable is guaranteed to be unique to the
// current thread, and is also guaranteed to be non-zero.
thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() });
// current thread, and is also guaranteed to be non-zero. The variable has to have a
// non-zero size to guarantee it has a unique address for each thread.
thread_local!(static KEY: u8 = 0);
KEY.with(|x| {
NonZeroUsize::new(x as *const _ as usize)
.expect("thread-local variable address is null")
@ -35,10 +36,21 @@ unsafe impl GetThreadId for RawThreadId {
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
/// Use a `RefCell` if you need this.
///
/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
/// See [`Mutex`](type.Mutex.html) for more details about the underlying mutex
/// primitive.
pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, RawThreadId, T>;
/// Creates a new reentrant mutex in an unlocked state ready for use.
///
/// This allows creating a reentrant mutex in a constant context on stable Rust.
pub const fn const_reentrant_mutex<T>(val: T) -> ReentrantMutex<T> {
    // `const_new` requires the raw-mutex and thread-id initializers to be passed
    // explicitly; the fully-qualified `<_ as Trait>::INIT` form disambiguates the
    // associated constants so the expression stays valid in a `const` context.
    ReentrantMutex::const_new(
        <RawMutex as lock_api::RawMutex>::INIT,
        <RawThreadId as lock_api::GetThreadId>::INIT,
        val,
    )
}
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
@ -68,18 +80,18 @@ mod tests {
#[test]
fn smoke() {
let m = ReentrantMutex::new(());
let m = ReentrantMutex::new(2);
{
let a = m.lock();
{
let b = m.lock();
{
let c = m.lock();
assert_eq!(*c, ());
assert_eq!(*c, 2);
}
assert_eq!(*b, ());
assert_eq!(*b, 2);
}
assert_eq!(*a, ());
assert_eq!(*a, 2);
}
}

53
third_party/rust/parking_lot/src/rwlock.rs поставляемый
Просмотреть файл

@ -88,6 +88,13 @@ use lock_api;
/// ```
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// This allows creating a `RwLock<T>` in a constant context on stable Rust.
pub const fn const_rwlock<T>(val: T) -> RwLock<T> {
    // `<RawRwLock as lock_api::RawRwLock>::INIT` is the raw lock's unlocked
    // initial state; `const_new` wraps `val` with it at compile time, so this
    // is usable as the initializer of a `static` or `const` item.
    RwLock::const_new(<RawRwLock as lock_api::RawRwLock>::INIT, val)
}
/// RAII structure used to release the shared read access of a lock when
/// dropped.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
@ -322,7 +329,7 @@ mod tests {
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move || -> () {
let _ = thread::spawn(move || {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
@ -359,7 +366,10 @@ mod tests {
let read_guard = lock.read();
let read_result = lock.try_read();
assert!(read_result.is_some(), "try_read should succeed while read_guard is in scope");
assert!(
read_result.is_some(),
"try_read should succeed while read_guard is in scope"
);
drop(read_guard);
}
@ -378,7 +388,10 @@ mod tests {
let write_guard = lock.write();
let read_result = lock.try_read();
assert!(read_result.is_none(), "try_read should fail while write_guard is in scope");
assert!(
read_result.is_none(),
"try_read should fail while write_guard is in scope"
);
drop(write_guard);
}
@ -391,7 +404,10 @@ mod tests {
let read_guard = lock.read();
let write_result = lock.try_write();
assert!(write_result.is_none(), "try_write should fail while read_guard is in scope");
assert!(
write_result.is_none(),
"try_write should fail while read_guard is in scope"
);
drop(read_guard);
}
@ -410,7 +426,10 @@ mod tests {
let write_guard = lock.write();
let write_result = lock.try_write();
assert!(write_result.is_none(), "try_write should fail while write_guard is in scope");
assert!(
write_result.is_none(),
"try_write should fail while write_guard is in scope"
);
drop(write_guard);
}
@ -567,4 +586,28 @@ mod tests {
assert_eq!(*(mutex.read()), *(deserialized.read()));
assert_eq!(contents, *(deserialized.read()));
}
#[test]
fn test_issue_203() {
    // Regression test: a thread-local value whose destructor takes a lock
    // (`Bar`'s `Drop` acquires the write lock) must not crash during thread
    // teardown. Named after parking_lot issue #203 — NOTE(review): confirm
    // the exact upstream failure mode against that issue.
    struct Bar(RwLock<()>);

    impl Drop for Bar {
        fn drop(&mut self) {
            // Acquiring a lock inside a TLS destructor is the scenario under test;
            // this runs as the thread-local destructor when the thread exits.
            let _n = self.0.write();
        }
    }

    thread_local! {
        static B: Bar = Bar(RwLock::new(()));
    }

    thread::spawn(|| {
        // Touch the thread-local so its destructor runs when this thread exits.
        B.with(|_| ());
        // Also exercise an ordinary lock on the same thread before teardown.
        let a = RwLock::new(());
        let _a = a.read();
    })
    .join()
    .unwrap();
}
}

7
third_party/rust/parking_lot/src/util.rs поставляемый
Просмотреть файл

@ -34,10 +34,5 @@ unsafe fn unreachable() -> ! {
#[inline]
pub fn to_deadline(timeout: Duration) -> Option<Instant> {
#[cfg(has_checked_instant)]
let deadline = Instant::now().checked_add(timeout);
#[cfg(not(has_checked_instant))]
let deadline = Some(Instant::now() + timeout);
deadline
Instant::now().checked_add(timeout)
}

26
third_party/rust/parking_lot/tests/issue_203.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,26 @@
use parking_lot::RwLock;
use std::thread;
// Regression test for parking_lot issue #203: taking a lock inside a
// thread-local destructor during thread teardown must not crash.
struct Bar(RwLock<()>);

impl Drop for Bar {
    fn drop(&mut self) {
        // Acquiring the write lock in `Drop` is the scenario under test; this
        // runs as part of the thread-local destructor when a thread exits.
        let _n = self.0.write();
    }
}

thread_local! {
    // Thread-local whose destructor (`Bar`'s `Drop`) takes a lock on teardown.
    static B: Bar = Bar(RwLock::new(()));
}

#[test]
fn main() {
    thread::spawn(|| {
        // Touch the thread-local so its destructor runs when this thread exits.
        B.with(|_| ());
        // Also use an ordinary lock on the same thread before teardown.
        let a = RwLock::new(());
        let _a = a.read();
    })
    .join()
    .unwrap();
}

Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"b63bbe68314522e15a5bbe3ae70bd92278f96301e3b7bca99bf11375c7914be6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","build.rs":"d6aa24b67fdcacf238778c5efaf1f622ec7f7a7ec27fa051f415a1e2d31f3532","src/lib.rs":"5f93085983b6fe90306e2a8b19102a5e5dc495c6628e5bea0806ad6143fdf6a2","src/parking_lot.rs":"fcd9a449ecd98544b3e728c5c0e19eec8963a5131a529f4a89aed96bf2844e5e","src/spinwait.rs":"d568d8a81f9144ec4c4a139dc934d7d04ee1656a4a221eb548742fe7aba09ab1","src/thread_parker/cloudabi.rs":"fe21f7b70a80b5fa0fa3209e56a090bf8b0b7dba26f2199d37477208f3f7e47d","src/thread_parker/generic.rs":"2f501c6e46fcff434ba9e13ae8859e66de3327f601ed92989b310124e4129ff4","src/thread_parker/linux.rs":"853fd22f51215d1f553ad6461ad3c92c4ec9c294e607e69ed5f53b2e8c7a11d7","src/thread_parker/mod.rs":"e23da913e184c12e2f566beabdcbb141df0610dabf3ea83e6b5cefede51c4bcf","src/thread_parker/redox.rs":"081c76af1e24be12da45d8093e261c48d558342ac2ac64dc3f7dd95eaaa1bf11","src/thread_parker/sgx.rs":"3fd71a7066db58189f302d2344e4e425320f82c298ca482ca4318bae44ae37fd","src/thread_parker/unix.rs":"da20f3151add154947054c8c7cab22c93231ade2e5dfe43c78eba7dbfc1aea5d","src/thread_parker/wasm.rs":"b4c9f9e9c1fd636b235a0e8e0227c954b1e7432d8394b58af77b348cdfa2141e","src/thread_parker/wasm_atomic.rs":"a1ab05981a833e72d8d353350ab2b95e6f833cd7224591e595ccdb3692968c23","src/thread_parker/windows/keyed_event.rs":"e0c2ed647e0550bffa003160405b5f4ddd40500134c2eb15c3eb598792c30e84","src/thread_parker/windows/mod.rs":"7702ff9b72ac647ec998a9b205ace961a28839fcd94631fb750ca459e4804260","src/thread_parker/windows/waitaddress.rs":"06d994633006e237dc940f377432ea00cf1609e56096d69d46f7bb3b80eeb857","src/util.rs":"285e6133150645525f2ca1ece41f6d35bad4e7c5e08b42b20c99d2a97e04a974","src/word_lock.rs":"e5af5bdae754f4799d1e0e0bbdcf48b82213ca5bfc785104aa27f3d6ea728dd4"},"package":"b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409c
bbbdcfc8d86c6f3bc62b"}
{"files":{"Cargo.toml":"79dd446832ea5ac3330902a4de04bae062dea978229c5a0bc6117a794ba0c71b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"058dddad16d91c8d0160fa2a78bb5f7c2f801f2fd9770fc387c5843395bf0379","src/parking_lot.rs":"540104584ea70aa10425b786e4d49c8a3e3b56496b78a7ba1a356d03d97204e2","src/spinwait.rs":"d568d8a81f9144ec4c4a139dc934d7d04ee1656a4a221eb548742fe7aba09ab1","src/thread_parker/cloudabi.rs":"0668b50898c20e7267ac6cc977e7ad376a18958e2d07faeca8199794d873d2eb","src/thread_parker/generic.rs":"2f501c6e46fcff434ba9e13ae8859e66de3327f601ed92989b310124e4129ff4","src/thread_parker/linux.rs":"853fd22f51215d1f553ad6461ad3c92c4ec9c294e607e69ed5f53b2e8c7a11d7","src/thread_parker/mod.rs":"5bc2100d2f575608b5b76e626ca92ce3ba4830176ecc773c5594cda6ca0905e9","src/thread_parker/redox.rs":"081c76af1e24be12da45d8093e261c48d558342ac2ac64dc3f7dd95eaaa1bf11","src/thread_parker/sgx.rs":"3fd71a7066db58189f302d2344e4e425320f82c298ca482ca4318bae44ae37fd","src/thread_parker/unix.rs":"77e1f049207b7e89b22ef05e5134c7538b31fff99aa9660784136f96fec1845a","src/thread_parker/wasm.rs":"b4c9f9e9c1fd636b235a0e8e0227c954b1e7432d8394b58af77b348cdfa2141e","src/thread_parker/wasm_atomic.rs":"a1ab05981a833e72d8d353350ab2b95e6f833cd7224591e595ccdb3692968c23","src/thread_parker/windows/keyed_event.rs":"34fc4693e7afd69a5c426ae7face83b8363f114a44dece44197cd03861cfdded","src/thread_parker/windows/mod.rs":"7702ff9b72ac647ec998a9b205ace961a28839fcd94631fb750ca459e4804260","src/thread_parker/windows/waitaddress.rs":"06d994633006e237dc940f377432ea00cf1609e56096d69d46f7bb3b80eeb857","src/util.rs":"285e6133150645525f2ca1ece41f6d35bad4e7c5e08b42b20c99d2a97e04a974","src/word_lock.rs":"2c030aedb340ae8ca564365206452c298fe29986d005d6a40e808c9760f91c95"},"package":"0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb"}

Просмотреть файл

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "parking_lot_core"
version = "0.6.2"
version = "0.7.1"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "An advanced API for creating custom synchronization primitives."
keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
@ -28,17 +28,15 @@ optional = true
version = "0.1.5"
[dependencies.petgraph]
version = "0.4.5"
version = "0.5"
optional = true
[dependencies.smallvec]
version = "0.6"
version = "1.0"
[dependencies.thread-id]
version = "3.2.0"
optional = true
[build-dependencies.rustc_version]
version = "0.2"
[features]
deadlock_detection = ["petgraph", "thread-id", "backtrace"]

7
third_party/rust/parking_lot_core/build.rs поставляемый
Просмотреть файл

@ -1,7 +0,0 @@
use rustc_version::{version, Version};
fn main() {
if version().unwrap() >= Version::parse("1.34.0").unwrap() {
println!("cargo:rustc-cfg=has_sized_atomics");
}
}

Просмотреть файл

@ -49,11 +49,11 @@
target_arch = "wasm32",
target_feature = "atomics"
),
feature(checked_duration_since, stdsimd)
feature(stdsimd)
)]
#![cfg_attr(
all(feature = "nightly", target_os = "cloudabi",),
feature(thread_local, checked_duration_since)
feature(thread_local)
)]
mod parking_lot;

Просмотреть файл

@ -17,6 +17,13 @@ use smallvec::SmallVec;
use std::time::{Duration, Instant};
static NUM_THREADS: AtomicUsize = AtomicUsize::new(0);
/// Holds the pointer to the currently active `HashTable`.
///
/// # Safety
///
/// Except for the initial value of null, it must always point to a valid `HashTable` instance.
/// Any `HashTable` this global static has ever pointed to must never be freed.
static HASHTABLE: AtomicPtr<HashTable> = AtomicPtr::new(ptr::null_mut());
// Even with 3x more buckets than threads, the memory overhead per thread is
@ -72,7 +79,7 @@ impl Bucket {
#[inline]
pub fn new(timeout: Instant, seed: u32) -> Self {
Self {
mutex: WordLock::INIT,
mutex: WordLock::new(),
queue_head: Cell::new(ptr::null()),
queue_tail: Cell::new(ptr::null()),
fair_timeout: UnsafeCell::new(FairTimeout::new(timeout, seed)),
@ -146,9 +153,7 @@ impl ThreadData {
// Keep track of the total number of live ThreadData objects and resize
// the hash table accordingly.
let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1;
unsafe {
grow_hashtable(num_threads);
}
ThreadData {
parker: ThreadParker::new(),
@ -184,116 +189,92 @@ impl Drop for ThreadData {
}
}
// Get a pointer to the latest hash table, creating one if it doesn't exist yet.
/// Returns a reference to the latest hash table, creating one if it doesn't exist yet.
/// The reference is valid forever. However, the `HashTable` it references might become stale
/// at any point. Meaning it still exists, but it is not the instance in active use.
#[inline]
fn get_hashtable() -> *mut HashTable {
fn get_hashtable() -> &'static HashTable {
let table = HASHTABLE.load(Ordering::Acquire);
// If there is no table, create one
if table.is_null() {
create_hashtable()
} else {
table
// SAFETY: when not null, `HASHTABLE` always points to a `HashTable` that is never freed.
unsafe { &*table }
}
}
// Get a pointer to the latest hash table, creating one if it doesn't exist yet.
/// Returns a reference to the latest hash table, creating one if it doesn't exist yet.
/// The reference is valid forever. However, the `HashTable` it references might become stale
/// at any point. Meaning it still exists, but it is not the instance in active use.
#[cold]
fn create_hashtable() -> *mut HashTable {
fn create_hashtable() -> &'static HashTable {
let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
// If this fails then it means some other thread created the hash
// table first.
match HASHTABLE.compare_exchange(
// If this fails then it means some other thread created the hash table first.
let table = match HASHTABLE.compare_exchange(
ptr::null_mut(),
new_table,
Ordering::Release,
Ordering::Relaxed,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => new_table,
Err(old_table) => {
// Free the table we created
// SAFETY: `new_table` is created from `Box::into_raw` above and only freed here.
unsafe {
Box::from_raw(new_table);
}
old_table
}
}
};
// SAFETY: The `HashTable` behind `table` is never freed. It is either the table pointer we
// created here, or it is one loaded from `HASHTABLE`.
unsafe { &*table }
}
// Grow the hash table so that it is big enough for the given number of threads.
// This isn't performance-critical since it is only done when a ThreadData is
// created, which only happens once per thread.
unsafe fn grow_hashtable(num_threads: usize) {
// If there is no table, create one
if HASHTABLE.load(Ordering::Relaxed).is_null() {
let new_table = Box::into_raw(HashTable::new(num_threads, ptr::null()));
// If this fails then it means some other thread created the hash
// table first.
if HASHTABLE
.compare_exchange(
ptr::null_mut(),
new_table,
Ordering::Release,
Ordering::Relaxed,
)
.is_ok()
{
return;
}
// Free the table we created
Box::from_raw(new_table);
}
let mut old_table;
loop {
old_table = HASHTABLE.load(Ordering::Acquire);
fn grow_hashtable(num_threads: usize) {
// Lock all buckets in the existing table and get a reference to it
let old_table = loop {
let table = get_hashtable();
// Check if we need to resize the existing table
if (*old_table).entries.len() >= LOAD_FACTOR * num_threads {
if table.entries.len() >= LOAD_FACTOR * num_threads {
return;
}
// Lock all buckets in the old table
for b in &(*old_table).entries[..] {
b.mutex.lock();
for bucket in &table.entries[..] {
bucket.mutex.lock();
}
// Now check if our table is still the latest one. Another thread could
// have grown the hash table between us reading HASHTABLE and locking
// the buckets.
if HASHTABLE.load(Ordering::Relaxed) == old_table {
break;
if HASHTABLE.load(Ordering::Relaxed) == table as *const _ as *mut _ {
break table;
}
// Unlock buckets and try again
for b in &(*old_table).entries[..] {
b.mutex.unlock();
}
for bucket in &table.entries[..] {
// SAFETY: We hold the lock here, as required
unsafe { bucket.mutex.unlock() };
}
};
// Create the new table
let new_table = HashTable::new(num_threads, old_table);
let mut new_table = HashTable::new(num_threads, old_table);
// Move the entries from the old table to the new one
for b in &(*old_table).entries[..] {
let mut current = b.queue_head.get();
while !current.is_null() {
let next = (*current).next_in_queue.get();
let hash = hash((*current).key.load(Ordering::Relaxed), new_table.hash_bits);
if new_table.entries[hash].queue_tail.get().is_null() {
new_table.entries[hash].queue_head.set(current);
} else {
(*new_table.entries[hash].queue_tail.get())
.next_in_queue
.set(current);
}
new_table.entries[hash].queue_tail.set(current);
(*current).next_in_queue.set(ptr::null());
current = next;
}
for bucket in &old_table.entries[..] {
// SAFETY: The park, unpark* and check_wait_graph_fast functions create only correct linked
// lists. All `ThreadData` instances in these lists will remain valid as long as they are
// present in the lists, meaning as long as their threads are parked.
unsafe { rehash_bucket_into(bucket, &mut new_table) };
}
// Publish the new table. No races are possible at this point because
@ -302,8 +283,36 @@ unsafe fn grow_hashtable(num_threads: usize) {
HASHTABLE.store(Box::into_raw(new_table), Ordering::Release);
// Unlock all buckets in the old table
for b in &(*old_table).entries[..] {
b.mutex.unlock();
for bucket in &old_table.entries[..] {
// SAFETY: We hold the lock here, as required
unsafe { bucket.mutex.unlock() };
}
}
/// Iterate through all `ThreadData` objects in the bucket and insert them into the given table
/// in the bucket their key correspond to for this table.
///
/// # Safety
///
/// The given `bucket` must have a correctly constructed linked list under `queue_head`, containing
/// `ThreadData` instances that must stay valid at least as long as the given `table` is in use.
///
/// The given `table` must only contain buckets with correctly constructed linked lists.
unsafe fn rehash_bucket_into(bucket: &'static Bucket, table: &mut HashTable) {
    // Walk the source bucket's intrusive singly-linked queue of parked threads.
    let mut current: *const ThreadData = bucket.queue_head.get();
    while !current.is_null() {
        // Read the successor first, because `next_in_queue` is overwritten below.
        let next = (*current).next_in_queue.get();
        // Re-hash the thread's park key for the new table's bucket count.
        let hash = hash((*current).key.load(Ordering::Relaxed), table.hash_bits);
        if table.entries[hash].queue_tail.get().is_null() {
            // Destination queue is empty: this node becomes its head.
            table.entries[hash].queue_head.set(current);
        } else {
            // Otherwise append after the current tail, preserving queue order.
            (*table.entries[hash].queue_tail.get())
                .next_in_queue
                .set(current);
        }
        table.entries[hash].queue_tail.set(current);
        // Terminate the node's link; it is now the tail of its new queue.
        (*current).next_in_queue.set(ptr::null());
        current = next;
    }
}
@ -319,41 +328,42 @@ fn hash(key: usize, bits: u32) -> usize {
key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits)
}
// Lock the bucket for the given key
/// Locks the bucket for the given key and returns a reference to it.
/// The returned bucket must be unlocked again in order to not cause deadlocks.
#[inline]
unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
let mut bucket;
fn lock_bucket(key: usize) -> &'static Bucket {
loop {
let hashtable = get_hashtable();
let hash = hash(key, (*hashtable).hash_bits);
bucket = &(*hashtable).entries[hash];
let hash = hash(key, hashtable.hash_bits);
let bucket = &hashtable.entries[hash];
// Lock the bucket
bucket.mutex.lock();
// If no other thread has rehashed the table before we grabbed the lock
// then we are good to go! The lock we grabbed prevents any rehashes.
if HASHTABLE.load(Ordering::Relaxed) == hashtable {
if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ {
return bucket;
}
// Unlock the bucket and try again
bucket.mutex.unlock();
// SAFETY: We hold the lock here, as required
unsafe { bucket.mutex.unlock() };
}
}
// Lock the bucket for the given key, but check that the key hasn't been changed
// in the meantime due to a requeue.
/// Locks the bucket for the given key and returns a reference to it. But checks that the key
/// hasn't been changed in the meantime due to a requeue.
/// The returned bucket must be unlocked again in order to not cause deadlocks.
#[inline]
unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
let mut bucket;
fn lock_bucket_checked(key: &AtomicUsize) -> (usize, &'static Bucket) {
loop {
let hashtable = get_hashtable();
let current_key = key.load(Ordering::Relaxed);
let hash = hash(current_key, (*hashtable).hash_bits);
bucket = &(*hashtable).entries[hash];
let hash = hash(current_key, hashtable.hash_bits);
let bucket = &hashtable.entries[hash];
// Lock the bucket
bucket.mutex.lock();
@ -361,59 +371,69 @@ unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
// Check that both the hash table and key are correct while the bucket
// is locked. Note that the key can't change once we locked the proper
// bucket for it, so we just keep trying until we have the correct key.
if HASHTABLE.load(Ordering::Relaxed) == hashtable
if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _
&& key.load(Ordering::Relaxed) == current_key
{
return (current_key, bucket);
}
// Unlock the bucket and try again
bucket.mutex.unlock();
// SAFETY: We hold the lock here, as required
unsafe { bucket.mutex.unlock() };
}
}
// Lock the two buckets for the given pair of keys
/// Locks the two buckets for the given pair of keys and returns references to them.
/// The returned buckets must be unlocked again in order to not cause deadlocks.
///
/// If both keys hash to the same value, both returned references will be to the same bucket. Be
/// careful to only unlock it once in this case, always use `unlock_bucket_pair`.
#[inline]
unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Bucket) {
let mut bucket1;
fn lock_bucket_pair(key1: usize, key2: usize) -> (&'static Bucket, &'static Bucket) {
loop {
let hashtable = get_hashtable();
// Get the lowest bucket first
let hash1 = hash(key1, (*hashtable).hash_bits);
let hash2 = hash(key2, (*hashtable).hash_bits);
if hash1 <= hash2 {
bucket1 = &(*hashtable).entries[hash1];
let hash1 = hash(key1, hashtable.hash_bits);
let hash2 = hash(key2, hashtable.hash_bits);
// Get the bucket at the lowest hash/index first
let bucket1 = if hash1 <= hash2 {
&hashtable.entries[hash1]
} else {
bucket1 = &(*hashtable).entries[hash2];
}
&hashtable.entries[hash2]
};
// Lock the first bucket
bucket1.mutex.lock();
// If no other thread has rehashed the table before we grabbed the lock
// then we are good to go! The lock we grabbed prevents any rehashes.
if HASHTABLE.load(Ordering::Relaxed) == hashtable {
if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ {
// Now lock the second bucket and return the two buckets
if hash1 == hash2 {
return (bucket1, bucket1);
} else if hash1 < hash2 {
let bucket2 = &(*hashtable).entries[hash2];
let bucket2 = &hashtable.entries[hash2];
bucket2.mutex.lock();
return (bucket1, bucket2);
} else {
let bucket2 = &(*hashtable).entries[hash1];
let bucket2 = &hashtable.entries[hash1];
bucket2.mutex.lock();
return (bucket2, bucket1);
}
}
// Unlock the bucket and try again
bucket1.mutex.unlock();
// SAFETY: We hold the lock here, as required
unsafe { bucket1.mutex.unlock() };
}
}
// Unlock a pair of buckets
/// Unlock a pair of buckets
///
/// # Safety
///
/// Both buckets must be locked
#[inline]
unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) {
bucket1.mutex.unlock();
@ -559,6 +579,7 @@ pub unsafe fn park(
// If the validation function fails, just return
if !validate() {
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
return ParkResult::Invalid;
}
@ -575,6 +596,7 @@ pub unsafe fn park(
bucket.queue_head.set(thread_data);
}
bucket.queue_tail.set(thread_data);
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
// Invoke the pre-sleep callback
@ -605,6 +627,7 @@ pub unsafe fn park(
// Now we need to check again if we were unparked or timed out. Unlike the
// last check this is precise because we hold the bucket lock.
if !thread_data.parker.timed_out() {
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
return ParkResult::Unparked(thread_data.unpark_token.get());
}
@ -652,6 +675,7 @@ pub unsafe fn park(
debug_assert!(!current.is_null());
// Unlock the bucket, we are done
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
ParkResult::TimedOut
})
@ -723,6 +747,7 @@ pub unsafe fn unpark_one(
// the queue locked while we perform a system call. Finally we wake
// up the parked thread.
let handle = (*current).parker.unpark_lock();
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
handle.unpark();
@ -736,6 +761,7 @@ pub unsafe fn unpark_one(
// No threads with a matching key were found in the bucket
callback(result);
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
result
}
@ -786,6 +812,7 @@ pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize {
}
// Unlock the bucket
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
// Now that we are outside the lock, wake up all the threads that we removed
@ -839,6 +866,7 @@ pub unsafe fn unpark_requeue(
let mut result = UnparkResult::default();
let op = validate();
if op == RequeueOp::Abort {
// SAFETY: Both buckets are locked, as required.
unlock_bucket_pair(bucket_from, bucket_to);
return result;
}
@ -919,9 +947,11 @@ pub unsafe fn unpark_requeue(
if let Some(wakeup_thread) = wakeup_thread {
(*wakeup_thread).unpark_token.set(token);
let handle = (*wakeup_thread).parker.unpark_lock();
// SAFETY: Both buckets are locked, as required.
unlock_bucket_pair(bucket_from, bucket_to);
handle.unpark();
} else {
// SAFETY: Both buckets are locked, as required.
unlock_bucket_pair(bucket_from, bucket_to);
}
@ -1018,6 +1048,7 @@ pub unsafe fn unpark_filter(
t.1 = Some((*t.0).parker.unpark_lock());
}
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
// Now that we are outside the lock, wake up all the threads that we removed
@ -1041,7 +1072,10 @@ pub mod deadlock {
/// Acquire a resource identified by key in the deadlock detector
/// Noop if deadlock_detection feature isn't enabled.
/// Note: Call after the resource is acquired
///
/// # Safety
///
/// Call after the resource is acquired
#[inline]
pub unsafe fn acquire_resource(_key: usize) {
#[cfg(feature = "deadlock_detection")]
@ -1050,9 +1084,14 @@ pub mod deadlock {
/// Release a resource identified by key in the deadlock detector.
/// Noop if deadlock_detection feature isn't enabled.
/// Note: Call before the resource is released
///
/// # Panics
///
/// Panics if the resource was already released or wasn't acquired in this thread.
///
/// # Safety
///
/// Call before the resource is released
#[inline]
pub unsafe fn release_resource(_key: usize) {
#[cfg(feature = "deadlock_detection")]
@ -1159,10 +1198,14 @@ mod deadlock_impl {
pub unsafe fn release_resource(key: usize) {
with_thread_data(|thread_data| {
let resources = &mut (*thread_data.deadlock_data.resources.get());
match resources.iter().rposition(|x| *x == key) {
Some(p) => resources.swap_remove(p),
None => panic!("key {} not found in thread resources", key),
};
// There is only one situation where we can fail to find the
// resource: we are currently running TLS destructors and our
// ThreadData has already been freed. There isn't much we can do
// about it at this point, so just ignore it.
if let Some(p) = resources.iter().rposition(|x| *x == key) {
resources.swap_remove(p);
}
});
}
@ -1202,6 +1245,7 @@ mod deadlock_impl {
}
current = (*current).next_in_queue.get();
}
// SAFETY: We hold the lock here, as required
b.mutex.unlock();
}
@ -1220,25 +1264,26 @@ mod deadlock_impl {
// Returns all detected thread wait cycles.
// Note that once a cycle is reported it's never reported again.
unsafe fn check_wait_graph_slow() -> Vec<Vec<DeadlockedThread>> {
static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::INIT;
static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::new();
DEADLOCK_DETECTION_LOCK.lock();
let mut table = get_hashtable();
loop {
// Lock all buckets in the old table
for b in &(*table).entries[..] {
for b in &table.entries[..] {
b.mutex.lock();
}
// Now check if our table is still the latest one. Another thread could
// have grown the hash table between us getting and locking the hash table.
let new_table = get_hashtable();
if new_table == table {
if new_table as *const _ == table as *const _ {
break;
}
// Unlock buckets and try again
for b in &(*table).entries[..] {
for b in &table.entries[..] {
// SAFETY: We hold the lock here, as required
b.mutex.unlock();
}
@ -1249,7 +1294,7 @@ mod deadlock_impl {
let mut graph =
DiGraphMap::<WaitGraphNode, ()>::with_capacity(thread_count * 2, thread_count * 2);
for b in &(*table).entries[..] {
for b in &table.entries[..] {
let mut current = b.queue_head.get();
while !current.is_null() {
if !(*current).parked_with_timeout.get()
@ -1270,7 +1315,8 @@ mod deadlock_impl {
}
}
for b in &(*table).entries[..] {
for b in &table.entries[..] {
// SAFETY: We hold the lock here, as required
b.mutex.unlock();
}
@ -1286,6 +1332,7 @@ mod deadlock_impl {
(*td).deadlock_data.deadlocked.set(true);
*(*td).deadlock_data.backtrace_sender.get() = Some(sender.clone());
let handle = (*td).parker.unpark_lock();
// SAFETY: We hold the lock here, as required
bucket.mutex.unlock();
// unpark the deadlocked thread!
// on unpark it'll notice the deadlocked flag and report back
@ -1346,3 +1393,276 @@ mod deadlock_impl {
cycles.iter().cloned().collect()
}
}
#[cfg(test)]
mod tests {
use super::{ThreadData, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
use std::{
ptr,
sync::{
atomic::{AtomicIsize, AtomicPtr, AtomicUsize, Ordering},
Arc,
},
thread,
time::Duration,
};
/// Calls a closure for every `ThreadData` currently parked on a given key
fn for_each(key: usize, mut f: impl FnMut(&ThreadData)) {
let bucket = super::lock_bucket(key);
let mut current: *const ThreadData = bucket.queue_head.get();
while !current.is_null() {
let current_ref = unsafe { &*current };
if current_ref.key.load(Ordering::Relaxed) == key {
f(current_ref);
}
current = current_ref.next_in_queue.get();
}
// SAFETY: We hold the lock here, as required
unsafe { bucket.mutex.unlock() };
}
macro_rules! test {
( $( $name:ident(
repeats: $repeats:expr,
latches: $latches:expr,
delay: $delay:expr,
threads: $threads:expr,
single_unparks: $single_unparks:expr);
)* ) => {
$(#[test]
fn $name() {
let delay = Duration::from_micros($delay);
for _ in 0..$repeats {
run_parking_test($latches, delay, $threads, $single_unparks);
}
})*
};
}
test! {
unpark_all_one_fast(
repeats: 10000, latches: 1, delay: 0, threads: 1, single_unparks: 0
);
unpark_all_hundred_fast(
repeats: 100, latches: 1, delay: 0, threads: 100, single_unparks: 0
);
unpark_one_one_fast(
repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 1
);
unpark_one_hundred_fast(
repeats: 20, latches: 1, delay: 0, threads: 100, single_unparks: 100
);
unpark_one_fifty_then_fifty_all_fast(
repeats: 50, latches: 1, delay: 0, threads: 100, single_unparks: 50
);
unpark_all_one(
repeats: 100, latches: 1, delay: 10000, threads: 1, single_unparks: 0
);
unpark_all_hundred(
repeats: 100, latches: 1, delay: 10000, threads: 100, single_unparks: 0
);
unpark_one_one(
repeats: 10, latches: 1, delay: 10000, threads: 1, single_unparks: 1
);
unpark_one_fifty(
repeats: 1, latches: 1, delay: 10000, threads: 50, single_unparks: 50
);
unpark_one_fifty_then_fifty_all(
repeats: 2, latches: 1, delay: 10000, threads: 100, single_unparks: 50
);
hundred_unpark_all_one_fast(
repeats: 100, latches: 100, delay: 0, threads: 1, single_unparks: 0
);
hundred_unpark_all_one(
repeats: 1, latches: 100, delay: 10000, threads: 1, single_unparks: 0
);
}
fn run_parking_test(
num_latches: usize,
delay: Duration,
num_threads: usize,
num_single_unparks: usize,
) {
let mut tests = Vec::with_capacity(num_latches);
for _ in 0..num_latches {
let test = Arc::new(SingleLatchTest::new(num_threads));
let mut threads = Vec::with_capacity(num_threads);
for _ in 0..num_threads {
let test = test.clone();
threads.push(thread::spawn(move || test.run()));
}
tests.push((test, threads));
}
for unpark_index in 0..num_single_unparks {
thread::sleep(delay);
for (test, _) in &tests {
test.unpark_one(unpark_index);
}
}
for (test, threads) in tests {
test.finish(num_single_unparks);
for thread in threads {
thread.join().expect("Test thread panic");
}
}
}
struct SingleLatchTest {
semaphore: AtomicIsize,
num_awake: AtomicUsize,
/// Holds the pointer to the last *unprocessed* woken up thread.
last_awoken: AtomicPtr<ThreadData>,
/// Total number of threads participating in this test.
num_threads: usize,
}
impl SingleLatchTest {
pub fn new(num_threads: usize) -> Self {
Self {
// This implements a fair (FIFO) semaphore, and it starts out unavailable.
semaphore: AtomicIsize::new(0),
num_awake: AtomicUsize::new(0),
last_awoken: AtomicPtr::new(ptr::null_mut()),
num_threads,
}
}
pub fn run(&self) {
// Get one slot from the semaphore
self.down();
// Report back to the test verification code that this thread woke up
let this_thread_ptr = super::with_thread_data(|t| t as *const _ as *mut _);
self.last_awoken.store(this_thread_ptr, Ordering::SeqCst);
self.num_awake.fetch_add(1, Ordering::SeqCst);
}
pub fn unpark_one(&self, single_unpark_index: usize) {
// last_awoken should be null at all times except between self.up() and at the bottom
// of this method where it's reset to null again
assert!(self.last_awoken.load(Ordering::SeqCst).is_null());
let mut queue: Vec<*mut ThreadData> = Vec::with_capacity(self.num_threads);
for_each(self.semaphore_addr(), |thread_data| {
queue.push(thread_data as *const _ as *mut _);
});
assert!(queue.len() <= self.num_threads - single_unpark_index);
let num_awake_before_up = self.num_awake.load(Ordering::SeqCst);
self.up();
// Wait for a parked thread to wake up and update num_awake + last_awoken.
while self.num_awake.load(Ordering::SeqCst) != num_awake_before_up + 1 {
thread::yield_now();
}
// At this point the other thread should have set last_awoken inside the run() method
let last_awoken = self.last_awoken.load(Ordering::SeqCst);
assert!(!last_awoken.is_null());
if !queue.is_empty() && queue[0] != last_awoken {
panic!(
"Woke up wrong thread:\n\tqueue: {:?}\n\tlast awoken: {:?}",
queue, last_awoken
);
}
self.last_awoken.store(ptr::null_mut(), Ordering::SeqCst);
}
pub fn finish(&self, num_single_unparks: usize) {
// The amount of threads not unparked via unpark_one
let mut num_threads_left = self.num_threads.checked_sub(num_single_unparks).unwrap();
// Wake remaining threads up with unpark_all. Has to be in a loop, because there might
// still be threads that has not yet parked.
while num_threads_left > 0 {
let mut num_waiting_on_address = 0;
for_each(self.semaphore_addr(), |_thread_data| {
num_waiting_on_address += 1;
});
assert!(num_waiting_on_address <= num_threads_left);
let num_awake_before_unpark = self.num_awake.load(Ordering::SeqCst);
let num_unparked =
unsafe { super::unpark_all(self.semaphore_addr(), DEFAULT_UNPARK_TOKEN) };
assert!(num_unparked >= num_waiting_on_address);
assert!(num_unparked <= num_threads_left);
// Wait for all unparked threads to wake up and update num_awake + last_awoken.
while self.num_awake.load(Ordering::SeqCst)
!= num_awake_before_unpark + num_unparked
{
thread::yield_now()
}
num_threads_left = num_threads_left.checked_sub(num_unparked).unwrap();
}
// By now, all threads should have been woken up
assert_eq!(self.num_awake.load(Ordering::SeqCst), self.num_threads);
// Make sure no thread is parked on our semaphore address
let mut num_waiting_on_address = 0;
for_each(self.semaphore_addr(), |_thread_data| {
num_waiting_on_address += 1;
});
assert_eq!(num_waiting_on_address, 0);
}
pub fn down(&self) {
let old_semaphore_value = self.semaphore.fetch_sub(1, Ordering::SeqCst);
if old_semaphore_value > 0 {
// We acquired the semaphore. Done.
return;
}
// We need to wait.
let validate = || true;
let before_sleep = || {};
let timed_out = |_, _| {};
unsafe {
super::park(
self.semaphore_addr(),
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
None,
);
}
}
pub fn up(&self) {
let old_semaphore_value = self.semaphore.fetch_add(1, Ordering::SeqCst);
// Check if anyone was waiting on the semaphore. If they were, then pass ownership to them.
if old_semaphore_value < 0 {
// We need to continue until we have actually unparked someone. It might be that
// the thread we want to pass ownership to has decremented the semaphore counter,
// but not yet parked.
loop {
match unsafe {
super::unpark_one(self.semaphore_addr(), |_| DEFAULT_UNPARK_TOKEN)
.unparked_threads
} {
1 => break,
0 => (),
i => panic!("Should not wake up {} threads", i),
}
}
}
}
fn semaphore_addr(&self) -> usize {
&self.semaphore as *const _ as usize
}
}
}

Просмотреть файл

@ -8,7 +8,7 @@
use cloudabi as abi;
use core::{
cell::Cell,
mem,
mem::{self, MaybeUninit},
sync::atomic::{AtomicU32, Ordering},
};
use std::{convert::TryFrom, thread, time::Instant};
@ -70,11 +70,11 @@ impl Lock {
},
..mem::zeroed()
};
let mut event: abi::event = mem::uninitialized();
let mut nevents: usize = mem::uninitialized();
let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
let mut event = MaybeUninit::<abi::event>::uninit();
let mut nevents: usize = 0;
let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, &mut nevents);
debug_assert_eq!(ret, abi::errno::SUCCESS);
debug_assert_eq!(event.error, abi::errno::SUCCESS);
debug_assert_eq!(event.assume_init().error, abi::errno::SUCCESS);
LockGuard { lock: &self.lock }
})
@ -146,12 +146,12 @@ impl Condvar {
},
..mem::zeroed()
};
let mut event: abi::event = mem::uninitialized();
let mut nevents: usize = mem::uninitialized();
let mut event = MaybeUninit::<abi::event>::uninit();
let mut nevents: usize = 0;
let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, &mut nevents);
debug_assert_eq!(ret, abi::errno::SUCCESS);
debug_assert_eq!(event.error, abi::errno::SUCCESS);
debug_assert_eq!(event.assume_init().error, abi::errno::SUCCESS);
}
}
@ -184,11 +184,17 @@ impl Condvar {
..mem::zeroed()
},
];
let mut events: [abi::event; 2] = mem::uninitialized();
let mut nevents: usize = mem::uninitialized();
let mut events = MaybeUninit::<[abi::event; 2]>::uninit();
let mut nevents: usize = 0;
let ret = abi::poll(subscriptions.as_ptr(), events.as_mut_ptr(), 2, &mut nevents);
let ret = abi::poll(
subscriptions.as_ptr(),
events.as_mut_ptr() as *mut _,
2,
&mut nevents,
);
debug_assert_eq!(ret, abi::errno::SUCCESS);
let events = events.assume_init();
for i in 0..nevents {
debug_assert_eq!(events[i].error, abi::errno::SUCCESS);
if events[i].type_ == abi::eventtype::CONDVAR {

Просмотреть файл

@ -47,12 +47,11 @@ pub trait UnparkHandleT {
///
/// This method is unsafe for the same reason as the unsafe methods in
/// `ThreadParkerT`.
#[inline]
unsafe fn unpark(self);
}
cfg_if! {
if #[cfg(all(has_sized_atomics, any(target_os = "linux", target_os = "android")))] {
if #[cfg(any(target_os = "linux", target_os = "android"))] {
#[path = "linux.rs"]
mod imp;
} else if #[cfg(unix)] {
@ -61,7 +60,7 @@ cfg_if! {
} else if #[cfg(windows)] {
#[path = "windows/mod.rs"]
mod imp;
} else if #[cfg(all(has_sized_atomics, target_os = "redox"))] {
} else if #[cfg(target_os = "redox")] {
#[path = "redox.rs"]
mod imp;
} else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] {

Просмотреть файл

@ -9,7 +9,7 @@
use core::ptr;
use core::{
cell::{Cell, UnsafeCell},
mem,
mem::MaybeUninit,
};
use libc;
use std::{
@ -137,14 +137,14 @@ impl ThreadParker {
#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))]
#[inline]
unsafe fn init(&self) {
let mut attr: libc::pthread_condattr_t = mem::uninitialized();
let r = libc::pthread_condattr_init(&mut attr);
let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
let r = libc::pthread_condattr_init(attr.as_mut_ptr());
debug_assert_eq!(r, 0);
let r = libc::pthread_condattr_setclock(&mut attr, libc::CLOCK_MONOTONIC);
let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
debug_assert_eq!(r, 0);
let r = libc::pthread_cond_init(self.condvar.get(), &attr);
let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr());
debug_assert_eq!(r, 0);
let r = libc::pthread_condattr_destroy(&mut attr);
let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
debug_assert_eq!(r, 0);
}
}
@ -196,9 +196,11 @@ impl super::UnparkHandleT for UnparkHandle {
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[inline]
fn timespec_now() -> libc::timespec {
let mut now: libc::timeval = unsafe { mem::uninitialized() };
let r = unsafe { libc::gettimeofday(&mut now, ptr::null_mut()) };
let mut now = MaybeUninit::<libc::timeval>::uninit();
let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) };
debug_assert_eq!(r, 0);
// SAFETY: We know `libc::gettimeofday` has initialized the value.
let now = unsafe { now.assume_init() };
libc::timespec {
tv_sec: now.tv_sec,
tv_nsec: now.tv_usec as tv_nsec_t * 1000,
@ -207,7 +209,7 @@ fn timespec_now() -> libc::timespec {
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
#[inline]
fn timespec_now() -> libc::timespec {
let mut now: libc::timespec = unsafe { mem::uninitialized() };
let mut now = MaybeUninit::<libc::timespec>::uninit();
let clock = if cfg!(target_os = "android") {
// Android doesn't support pthread_condattr_setclock, so we need to
// specify the timeout in CLOCK_REALTIME.
@ -215,9 +217,10 @@ fn timespec_now() -> libc::timespec {
} else {
libc::CLOCK_MONOTONIC
};
let r = unsafe { libc::clock_gettime(clock, &mut now) };
let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) };
debug_assert_eq!(r, 0);
now
// SAFETY: We know `libc::clock_gettime` has initialized the value.
unsafe { now.assume_init() }
}
// Converts a relative timeout into an absolute timeout in the clock used by

Просмотреть файл

@ -5,7 +5,10 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{mem, ptr};
use core::{
mem::{self, MaybeUninit},
ptr,
};
use std::{
sync::atomic::{AtomicUsize, Ordering},
time::Instant,
@ -88,9 +91,9 @@ impl KeyedEvent {
ObjectAttributes: PVOID,
Flags: ULONG,
) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
let mut handle = mem::uninitialized();
let mut handle = MaybeUninit::uninit();
let status = NtCreateKeyedEvent(
&mut handle,
handle.as_mut_ptr(),
GENERIC_READ | GENERIC_WRITE,
ptr::null_mut(),
0,
@ -100,7 +103,7 @@ impl KeyedEvent {
}
Some(KeyedEvent {
handle,
handle: handle.assume_init(),
NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
})

Просмотреть файл

@ -78,9 +78,12 @@ pub struct WordLock {
}
impl WordLock {
pub const INIT: WordLock = WordLock {
/// Returns a new, unlocked, WordLock.
pub const fn new() -> Self {
WordLock {
state: AtomicUsize::new(0),
};
}
}
#[inline]
pub fn lock(&self) {